author    Linus Torvalds <torvalds@linux-foundation.org>  2020-10-14 15:15:35 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-10-14 15:15:35 -0700
commit    55e0500eb5c0440a3d43074edbd8db3e95851b66 (patch)
tree      874b9da7a764df298441242ce79b9fd89c2910df /drivers
parent    4815519ed0af833884ce9c288183bf1ae3cb9caa (diff)
parent    69f4ec1edb136d2d2511d1ef96f94ef0aeecefdf (diff)
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "The usual driver updates (ufs, qla2xxx, tcmu, ibmvfc, lpfc, smartpqi,
  hisi_sas, qedi, qedf, mpt3sas) and minor bug fixes.

  There are only three core changes: adding sense codes, cleaning up
  noretry and adding an option for limitless retries"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (226 commits)
  scsi: hisi_sas: Recover PHY state according to the status before reset
  scsi: hisi_sas: Filter out new PHY up events during suspend
  scsi: hisi_sas: Add device link between SCSI devices and hisi_hba
  scsi: hisi_sas: Add check for methods _PS0 and _PR0
  scsi: hisi_sas: Add controller runtime PM support for v3 hw
  scsi: hisi_sas: Switch to new framework to support suspend and resume
  scsi: hisi_sas: Use hisi_hba->cq_nvecs for calling synchronize_irq()
  scsi: qedf: Remove redundant assignment to variable 'rc'
  scsi: lpfc: Remove unneeded variable 'status' in lpfc_fcp_cpu_map_store()
  scsi: snic: Convert to use DEFINE_SEQ_ATTRIBUTE macro
  scsi: qla4xxx: Delete unneeded variable 'status' in qla4xxx_process_ddb_changed
  scsi: sun_esp: Use module_platform_driver to simplify the code
  scsi: sun3x_esp: Use module_platform_driver to simplify the code
  scsi: sni_53c710: Use module_platform_driver to simplify the code
  scsi: qlogicpti: Use module_platform_driver to simplify the code
  scsi: mac_esp: Use module_platform_driver to simplify the code
  scsi: jazz_esp: Use module_platform_driver to simplify the code
  scsi: mvumi: Fix error return in mvumi_io_attach()
  scsi: lpfc: Drop nodelist reference on error in lpfc_gen_req()
  scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/message/fusion/mptctl.c | 5
-rw-r--r--  drivers/message/fusion/mptfc.c | 6
-rw-r--r--  drivers/message/fusion/mptscsih.c | 3
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c | 8
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c | 10
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 11
-rw-r--r--  drivers/scsi/aacraid/commctrl.c | 20
-rw-r--r--  drivers/scsi/aacraid/commsup.c | 9
-rw-r--r--  drivers/scsi/aacraid/linit.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 7
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 3
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx.h | 8
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 19
-rw-r--r--  drivers/scsi/arm/eesox.c | 9
-rw-r--r--  drivers/scsi/arm/oak.c | 2
-rw-r--r--  drivers/scsi/arm/powertec.c | 9
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 10
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 6
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 2
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 6
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 8
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.h | 16
-rw-r--r--  drivers/scsi/dc395x.c | 16
-rw-r--r--  drivers/scsi/dpt_i2o.c | 3
-rw-r--r--  drivers/scsi/esas2r/esas2r_ioctl.c | 28
-rw-r--r--  drivers/scsi/fdomain_isa.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 6
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 9
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 5
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 3
-rw-r--r--  drivers/scsi/gdth.c | 2
-rw-r--r--  drivers/scsi/hisi_sas/Kconfig | 1
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 37
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 123
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 24
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 4
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 251
-rw-r--r--  drivers/scsi/hpsa.c | 17
-rw-r--r--  drivers/scsi/hpsa.h | 1
-rw-r--r--  drivers/scsi/hpsa_cmd.h | 1
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 229
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 160
-rw-r--r--  drivers/scsi/isci/host.c | 2
-rw-r--r--  drivers/scsi/isci/init.c | 2
-rw-r--r--  drivers/scsi/isci/phy.c | 2
-rw-r--r--  drivers/scsi/jazz_esp.c | 14
-rw-r--r--  drivers/scsi/libfc/fc_disc.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_ata.c | 8
-rw-r--r--  drivers/scsi/libsas/sas_discover.c | 8
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 3
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_debugfs.c | 5
-rw-r--r--  drivers/scsi/mac_esp.c | 14
-rw-r--r--  drivers/scsi/megaraid.c | 192
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 67
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 21
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 2
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 16
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 366
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 4
-rw-r--r--  drivers/scsi/mvumi.c | 1
-rw-r--r--  drivers/scsi/myrb.c | 6
-rw-r--r--  drivers/scsi/myrs.c | 8
-rw-r--r--  drivers/scsi/nsp32.c | 4
-rw-r--r--  drivers/scsi/pmcraid.c | 3
-rw-r--r--  drivers/scsi/qedf/qedf.h | 9
-rw-r--r--  drivers/scsi/qedf/qedf_els.c | 34
-rw-r--r--  drivers/scsi/qedf/qedf_io.c | 12
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 151
-rw-r--r--  drivers/scsi/qedi/qedi.h | 6
-rw-r--r--  drivers/scsi/qedi/qedi_fw.c | 30
-rw-r--r--  drivers/scsi/qedi/qedi_iscsi.c | 7
-rw-r--r--  drivers/scsi/qedi/qedi_main.c | 131
-rw-r--r--  drivers/scsi/qla1280.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 97
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 66
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 231
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 7
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 99
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 98
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 57
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 77
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 71
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 36
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 147
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 21
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c | 53
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 6
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 3
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 14
-rw-r--r--  drivers/scsi/scsi_debug.c | 83
-rw-r--r--  drivers/scsi/scsi_error.c | 37
-rw-r--r--  drivers/scsi/scsi_lib.c | 77
-rw-r--r--  drivers/scsi/scsi_priv.h | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 1
-rw-r--r--  drivers/scsi/sd.c | 101
-rw-r--r--  drivers/scsi/sd.h | 1
-rw-r--r--  drivers/scsi/sense_codes.h | 54
-rw-r--r--  drivers/scsi/smartpqi/Kconfig | 4
-rw-r--r--  drivers/scsi/smartpqi/smartpqi.h | 7
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_init.c | 476
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sas_transport.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.c | 2
-rw-r--r--  drivers/scsi/smartpqi/smartpqi_sis.h | 2
-rw-r--r--  drivers/scsi/sni_53c710.c | 14
-rw-r--r--  drivers/scsi/snic/snic_debugfs.c | 16
-rw-r--r--  drivers/scsi/snic/snic_scsi.c | 8
-rw-r--r--  drivers/scsi/sun3x_esp.c | 14
-rw-r--r--  drivers/scsi/sun_esp.c | 14
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_fw.c | 6
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 6
-rw-r--r--  drivers/scsi/ufs/Kconfig | 1
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 13
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 266
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.h | 29
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 262
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 11
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 10
-rw-r--r--  drivers/scsi/ufs/ufshcd-crypto.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 127
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 846
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 22
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 1
-rw-r--r--  drivers/scsi/ufs/unipro.h | 3
-rw-r--r--  drivers/scsi/virtio_scsi.c | 7
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 2
-rw-r--r--  drivers/target/target_core_user.c | 344
139 files changed, 4090 insertions, 2166 deletions
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 1074b882c57c..24aebad60366 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -2593,7 +2593,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Get the data transfer speeds
*/
data_sz = ioc->spi_data.sdp0length * 4;
- pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+ pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
if (pg0_alloc) {
hdr.PageVersion = ioc->spi_data.sdp0version;
hdr.PageLength = data_sz;
@@ -2657,8 +2657,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
/* Issue the second config page request */
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
- pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
- ioc->pcidev, data_sz, &page_dma);
+ pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
if (pg3_alloc) {
cfg.physAddr = page_dma;
cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
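
The mptctl.c hunks above (and the mptfc.c ones that follow) do more than reflow: they drop the explicit casts on pci_alloc_consistent(), whose void * return converts implicitly to any object pointer in C. A minimal sketch of the preferred cast-free form, using a hypothetical struct:

    #include <linux/slab.h>
    #include <linux/types.h>

    struct page0 { u32 hdr; };

    static struct page0 *alloc_page0(void)
    {
            /* void * from the allocator converts implicitly; no cast */
            return kmalloc(sizeof(struct page0), GFP_KERNEL);
    }
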
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index 4314a3352b96..f92b0433f599 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -763,7 +763,7 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
data_sz = hdr.PageLength * 4;
rc = -ENOMEM;
- ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+ ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
if (ppage0_alloc) {
try_again:
@@ -904,7 +904,7 @@ start_over:
if (data_sz < sizeof(FCPortPage1_t))
data_sz = sizeof(FCPortPage1_t);
- page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
+ page1_alloc = pci_alloc_consistent(ioc->pcidev,
data_sz,
&page1_dma);
if (!page1_alloc)
@@ -922,8 +922,6 @@ start_over:
}
}
- memset(page1_alloc,0,data_sz);
-
cfg.physAddr = page1_dma;
cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8543f0324d5a..a5ef9faf71c7 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -1516,7 +1516,6 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
int ii;
int retval;
MPT_ADAPTER *ioc = hd->ioc;
- unsigned long timeleft;
u8 issue_hard_reset;
u32 ioc_raw_state;
unsigned long time_count;
@@ -1614,7 +1613,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
}
}
- timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+ wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
timeout*HZ);
if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
retval = FAILED;
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 59e662df5774..78d52a4c55f5 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -1607,7 +1607,6 @@ check_target:
static int zfcp_erp_thread(void *data)
{
struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
- struct list_head *next;
struct zfcp_erp_action *act;
unsigned long flags;
@@ -1620,12 +1619,11 @@ static int zfcp_erp_thread(void *data)
break;
write_lock_irqsave(&adapter->erp_lock, flags);
- next = adapter->erp_ready_head.next;
+ act = list_first_entry_or_null(&adapter->erp_ready_head,
+ struct zfcp_erp_action, list);
write_unlock_irqrestore(&adapter->erp_lock, flags);
- if (next != &adapter->erp_ready_head) {
- act = list_entry(next, struct zfcp_erp_action, list);
-
+ if (act) {
/* there is more to come after dismission, no notify */
if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
zfcp_erp_wakeup(adapter);
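
The zfcp_erp.c change swaps an open-coded "next == head" sentinel test for list_first_entry_or_null(), which yields the first entry or NULL on an empty list. A minimal sketch of the idiom, with hypothetical types:

    #include <linux/list.h>

    struct item {
            struct list_head list;
            int payload;
    };

    static struct item *first_item(struct list_head *head)
    {
            /* NULL when empty: no comparison against the list head needed */
            return list_first_entry_or_null(head, struct item, list);
    }
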
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 140186fe1d1e..6cb963a06777 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -426,9 +426,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
* or it has been dismissed due to a queue shutdown, this function
* is called to process the completion status and trigger further
* events related to the FSF request.
+ * Caller must ensure that the request has been removed from
+ * adapter->req_list, to protect against concurrent modification
+ * by zfcp_erp_strategy_check_fsfreq().
*/
static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
{
+ struct zfcp_erp_action *erp_action;
+
if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
zfcp_fsf_status_read_handler(req);
return;
@@ -439,8 +444,9 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
zfcp_fsf_fsfstatus_eval(req);
req->handler(req);
- if (req->erp_action)
- zfcp_erp_notify(req->erp_action, 0);
+ erp_action = req->erp_action;
+ if (erp_action)
+ zfcp_erp_notify(erp_action, 0);
if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
zfcp_fsf_req_free(req);
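
The zfcp_fsf.c hunk reads req->erp_action into a local exactly once before testing and using it; with two separate reads, the field could be cleared by zfcp_erp_strategy_check_fsfreq() between the check and the zfcp_erp_notify() call. A hedged sketch of the single-read pattern, with hypothetical types:

    struct erp_action;

    struct req {
            struct erp_action *erp_action;
            void (*handler)(struct req *);
    };

    void notify(struct erp_action *act);

    static void complete_req(struct req *r)
    {
            struct erp_action *act;

            r->handler(r);
            act = r->erp_action;    /* single read: may be cleared concurrently */
            if (act)
                    notify(act);
    }
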
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index fd6ae5c38086..31233f6a0274 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -242,7 +242,7 @@ int aac_commit = -1;
int startup_timeout = 180;
int aif_timeout = 120;
int aac_sync_mode; /* Only Sync. transfer - disabled */
-int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
+static int aac_convert_sgl = 1; /* convert non-conformable s/g list - enabled */
module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
@@ -290,7 +290,7 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
" blocks (FIB) allocated. Valid values are 512 and down. Default is"
" to use suggestion from Firmware.");
-int acbsize = -1;
+static int acbsize = -1;
module_param(acbsize, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
@@ -321,7 +321,7 @@ int aac_reset_devices;
module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
-int aac_wwn = 1;
+static int aac_wwn = 1;
module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
"\t0 - Disable\n"
@@ -2229,10 +2229,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
}
if (dev->dac_support) {
- if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) {
if (!dev->in_reset)
dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
- } else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+ } else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) {
dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
dev->dac_support = 0;
} else {
@@ -3253,7 +3253,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case START_STOP:
return aac_start_stop(scsicmd);
- fallthrough;
default:
/*
* Unhandled commands
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 59e82a832042..e3e157a74988 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -670,8 +670,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, sg_count[i],
- data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
+ data_dir);
hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
hbacmd->sge[i].addr_lo = cpu_to_le32(
(u32)(addr & 0xffffffff));
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -844,7 +844,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ usg->sg[i].count,
+ data_dir);
psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
byte_count += usg->sg[i].count;
@@ -883,8 +885,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
goto cleanup;
}
}
- addr = pci_map_single(dev->pdev, p,
- sg_count[i], data_dir);
+ addr = dma_map_single(&dev->pdev->dev, p,
+ sg_count[i], data_dir);
psg->sg[i].addr = cpu_to_le32(addr);
byte_count += sg_count[i];
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 383e74fea6ed..b99ca1b0c553 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1551,6 +1551,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
aac_fib_map_free(aac);
dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
aac->comm_phys);
+ aac_adapter_ioremap(aac, 0);
aac->comm_addr = NULL;
aac->comm_phys = 0;
kfree(aac->queues);
@@ -1561,15 +1562,15 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
dmamask = DMA_BIT_MASK(32);
quirks = aac_get_driver_ident(index)->quirks;
if (quirks & AAC_QUIRK_31BIT)
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else if (!(quirks & AAC_QUIRK_SRC))
- retval = pci_set_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_mask(&aac->pdev->dev, dmamask);
else
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
if (quirks & AAC_QUIRK_31BIT && !retval) {
dmamask = DMA_BIT_MASK(31);
- retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+ retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
}
if (retval)
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index a3aee146537b..8f3772480582 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1659,7 +1659,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
goto out;
if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
- error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (error) {
dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
goto out_disable_pdev;
@@ -1678,7 +1678,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
mask_bits = 32;
}
- error = pci_set_consistent_dma_mask(pdev, dmamask);
+ error = dma_set_coherent_mask(&pdev->dev, dmamask);
if (error) {
dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
, mask_bits);
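
The aacraid hunks are part of a tree-wide migration from the legacy pci_set_dma_mask()/pci_set_consistent_dma_mask() wrappers to the generic DMA API on struct device. A hedged sketch of the usual probe-time pattern (not the aacraid quirk logic itself):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int setup_dma_masks(struct pci_dev *pdev)
    {
            /* try 64-bit streaming + coherent masks, fall back to 32-bit */
            if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
                    return 0;
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
    }
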
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index 1c617c0d5899..98b02e7d38bb 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -9402,10 +9402,9 @@ ahd_loadseq(struct ahd_softc *ahd)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahd->critical_sections == NULL)
panic("ahd_loadseq: Could not malloc");
- memcpy(ahd->critical_sections, cs_table, cs_count);
}
ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 7c321303969e..f32398939f74 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -952,8 +952,8 @@ int
ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
int flags, bus_dmamap_t *mapp)
{
- *vaddr = pci_alloc_consistent(ahd->dev_softc,
- dmat->maxsize, mapp);
+ *vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
+ GFP_ATOMIC);
if (*vaddr == NULL)
return (ENOMEM);
return(0);
@@ -963,8 +963,7 @@ void
ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
void* vaddr, bus_dmamap_t map)
{
- pci_free_consistent(ahd->dev_softc, dmat->maxsize,
- vaddr, map);
+ dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
}
int
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index 2231c4afa531..725bb7f58054 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -6879,10 +6879,9 @@ ahc_loadseq(struct ahc_softc *ahc)
if (cs_count != 0) {
cs_count *= sizeof(struct cs);
- ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+ ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
if (ahc->critical_sections == NULL)
panic("ahc_loadseq: Could not malloc");
- memcpy(ahc->critical_sections, cs_table, cs_count);
}
ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
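
Both the aic79xx and aic7xxx hunks replace a kmalloc()+memcpy() pair with kmemdup(), which allocates and copies in one call and keeps the two lengths from drifting apart. A minimal sketch:

    #include <linux/slab.h>
    #include <linux/string.h>

    static void *dup_cs_table(const void *src, size_t len)
    {
            /* one call instead of kmalloc() followed by memcpy() */
            return kmemdup(src, len, GFP_ATOMIC);
    }
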
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index e7ccb8b80fc1..7bba961d1ae0 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -730,7 +730,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_abort returns 0x%x\n", error);
return (error);
}
@@ -744,7 +744,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
int error;
error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
- if (error != 0)
+ if (error != SUCCESS)
printk("aic7xxx_dev_reset returns 0x%x\n", error);
return (error);
}
diff --git a/drivers/scsi/aic94xx/aic94xx.h b/drivers/scsi/aic94xx/aic94xx.h
index c23bbb609126..98978bc199ff 100644
--- a/drivers/scsi/aic94xx/aic94xx.h
+++ b/drivers/scsi/aic94xx/aic94xx.h
@@ -42,14 +42,6 @@
extern struct kmem_cache *asd_dma_token_cache;
extern struct kmem_cache *asd_ascb_cache;
-static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
-{
- int i;
- for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
- snprintf(p, 3, "%02X", sas_addr[i]);
- *p = '\0';
-}
-
struct asd_ha_struct;
struct asd_ascb;
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 29294f0ef8a9..9dcd912267e6 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -166,14 +166,15 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
- dma_dir = DMA_MODE_WRITE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
+ dma_dir = DMA_MODE_WRITE;
alatch_dir = ALATCH_DMA_OUT;
- else
- map_dir = DMA_FROM_DEVICE,
- dma_dir = DMA_MODE_READ,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
+ dma_dir = DMA_MODE_READ;
alatch_dir = ALATCH_DMA_IN;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
@@ -326,10 +327,12 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
cumanascsi_2_terminator_ctl(host, 0);
else
ret = -EINVAL;
- } else
+ } else {
ret = -EINVAL;
- } else
+ }
+ } else {
ret = -EINVAL;
+ }
return ret;
}
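
The cumana_2.c conversion above (repeated for eesox.c and powertec.c below) is subtler than it looks: the old code chained the assignments with the comma operator, so all of them formed one statement under the unbraced if/else. Swapping the commas for semicolons without also adding braces would silently move the trailing assignments out of the conditional. A sketch of the corrected shape, with placeholder values standing in for the DMA constants:

    static void pick_dirs(int out, int *map_dir, int *dma_dir, int *alatch)
    {
            if (out) {
                    *map_dir = 1;   /* e.g. DMA_TO_DEVICE   */
                    *dma_dir = 2;   /* e.g. DMA_MODE_WRITE  */
                    *alatch  = 3;   /* e.g. ALATCH_DMA_OUT  */
            } else {
                    *map_dir = 4;   /* e.g. DMA_FROM_DEVICE */
                    *dma_dir = 5;   /* e.g. DMA_MODE_READ   */
                    *alatch  = 6;   /* e.g. ALATCH_DMA_IN   */
            }
    }
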
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 591ae2a6dd74..5eb2415dda9d 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -165,12 +165,13 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index 7c9d361e91a9..78f33d57c3e8 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -120,7 +120,7 @@ static struct scsi_host_template oakscsi_template = {
static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
- int ret = -ENOMEM;
+ int ret;
ret = ecard_request_resources(ec);
if (ret)
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index d99ef014528e..9cc73da4e876 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -138,12 +138,13 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
- if (direction == DMA_OUT)
- map_dir = DMA_TO_DEVICE,
+ if (direction == DMA_OUT) {
+ map_dir = DMA_TO_DEVICE;
dma_dir = DMA_MODE_WRITE;
- else
- map_dir = DMA_FROM_DEVICE,
+ } else {
+ map_dir = DMA_FROM_DEVICE;
dma_dir = DMA_MODE_READ;
+ }
dma_map_sg(dev, info->sg, bufs, map_dir);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 5c3513a4b450..202ba925c494 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error;
}
+ mem->dma = paddr;
mem->va = eq_vaddress;
ret = be_fill_queue(eq, phba->params.num_eq_entries,
sizeof(struct be_eq_entry), eq_vaddress);
@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
goto create_eq_error;
}
- mem->dma = paddr;
ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
BEISCSI_EQ_DELAY_DEF);
if (ret) {
@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error;
}
+ mem->dma = paddr;
ret = be_fill_queue(cq, phba->params.num_cq_entries,
sizeof(struct sol_cqe), cq_vaddress);
if (ret) {
@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
goto create_cq_error;
}
- mem->dma = paddr;
ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
false, 0);
if (ret) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 5cdeeb3539fd..6890bbe04a8c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -50,7 +50,7 @@ struct workqueue_struct *bnx2fc_wq;
* Here the io threads are per cpu but the l2 thread is just one
*/
struct fcoe_percpu_s bnx2fc_global;
-DEFINE_SPINLOCK(bnx2fc_global_lock);
+static DEFINE_SPINLOCK(bnx2fc_global_lock);
static struct cnic_ulp_ops bnx2fc_cnic_cb;
static struct libfc_function_template bnx2fc_libfc_fcn_templ;
@@ -108,22 +108,22 @@ MODULE_PARM_DESC(debug_logging,
"\t\t0x10 - fcoe L2 fame related logs.\n"
"\t\t0xff - LOG all messages.");
-uint bnx2fc_devloss_tmo;
+static uint bnx2fc_devloss_tmo;
module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
"attached via bnx2fc.");
-uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+static uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
"0xffff.");
-uint bnx2fc_queue_depth;
+static uint bnx2fc_queue_depth;
module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
"attached via bnx2fc.");
-uint bnx2fc_log_fka;
+static uint bnx2fc_log_fka;
module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
"initiating a FIP keep alive when debug logging is enabled.");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 1aba5897ccb0..1a0dc18d6915 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -864,7 +864,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
if (!abts_io_req) {
- printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
rc = FAILED;
goto abts_err;
}
@@ -957,7 +957,7 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
if (!seq_clnp_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -ENOMEM;
kfree(cb_arg);
goto cleanup_err;
@@ -1015,7 +1015,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
if (!cleanup_io_req) {
- printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+ printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
rc = -1;
goto cleanup_err;
}
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index 6018cdd17702..2b3f0c10478e 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -474,8 +474,6 @@ static int __init bnx2i_mod_init(void)
if (sq_size && !is_power_of_2(sq_size))
sq_size = roundup_pow_of_two(sq_size);
- mutex_init(&bnx2i_dev_lock);
-
bnx2i_scsi_xport_template =
iscsi_register_transport(&bnx2i_iscsi_transport);
if (!bnx2i_scsi_xport_template) {
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
index 7fa20609d5e7..e43c5413ce29 100644
--- a/drivers/scsi/csiostor/csio_hw.c
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
- ret = EINVAL;
+ ret = -EINVAL;
goto bye;
}
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
index 00cf33573136..55e74da2f3cb 100644
--- a/drivers/scsi/csiostor/csio_scsi.c
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -933,14 +933,14 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
* abort for that I/O by the FW crossed each other.
* The FW returned FW_EINVAL. The original I/O would have
* returned with FW_SUCCESS or any other SCSI error.
- * 3. The FW couldnt sent the abort out on the wire, as there
+ * 3. The FW couldn't sent the abort out on the wire, as there
* was an I-T nexus loss (link down, remote device logged
* out etc). FW sent back an appropriate IT nexus loss status
* for the abort.
* 4. FW sent an abort, but abort timed out (remote device
* didnt respond). FW replied back with
* FW_SCSI_ABORT_TIMEDOUT.
- * 5. FW couldnt genuinely abort the request for some reason,
+ * 5. FW couldn't genuinely abort the request for some reason,
* and sent us an error.
*
* The first 3 scenarios are treated as succesful abort
@@ -1859,7 +1859,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
spin_unlock_irqrestore(&hw->lock, flags);
if (retval != 0) {
- csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
ioreq, retval);
CSIO_INC_STATS(scsim, n_busy_error);
goto err_put_req;
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 0e8621a6956d..f078b3c4e083 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -77,9 +77,9 @@ int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
{
struct cxgbi_ports_map *pmap = &cdev->pmap;
- pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
- sizeof(struct cxgbi_sock *),
- GFP_KERNEL);
+ pmap->port_csk = kvzalloc(array_size(max_conn,
+ sizeof(struct cxgbi_sock *)),
+ GFP_KERNEL | __GFP_NOWARN);
if (!pmap->port_csk) {
pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
return -ENOMEM;
@@ -124,7 +124,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
if (cdev->cdev2ppm)
cxgbi_ppm_release(cdev->cdev2ppm(cdev));
if (cdev->pmap.max_connect)
- cxgbi_free_big_mem(cdev->pmap.port_csk);
+ kvfree(cdev->pmap.port_csk);
kfree(cdev);
}
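
libcxgbi's private cxgbi_alloc_big_mem() helper (removed from the header below) duplicated what kvzalloc() already provides: try kmalloc first, fall back to vmalloc for large sizes, release either with kvfree(). array_size() additionally saturates on multiplication overflow. A minimal sketch:

    #include <linux/mm.h>
    #include <linux/overflow.h>

    static void **alloc_port_map(unsigned int max_conn)
    {
            /* overflow-safe size; kmalloc with vmalloc fallback */
            return kvzalloc(array_size(max_conn, sizeof(void *)),
                            GFP_KERNEL | __GFP_NOWARN);
    }

    /* pair with kvfree(), whichever allocator backed the request */
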
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index fc7255fefcd3..3687b5c0cf90 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -575,22 +575,6 @@ struct cxgbi_iso_info {
u32 buffer_offset;
};
-static inline void *cxgbi_alloc_big_mem(unsigned int size,
- gfp_t gfp)
-{
- void *p = kzalloc(size, gfp | __GFP_NOWARN);
-
- if (!p)
- p = vzalloc(size);
-
- return p;
-}
-
-static inline void cxgbi_free_big_mem(void *addr)
-{
- kvfree(addr);
-}
-
static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
{
if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 37c6cc374079..0c251a3b99b7 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -902,7 +902,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
nseg = scsi_dma_map(cmd);
BUG_ON(nseg < 0);
- if (dir == PCI_DMA_NONE || !nseg) {
+ if (dir == DMA_NONE || !nseg) {
dprintkdbg(DBG_0,
"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
@@ -3135,7 +3135,7 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
struct scsi_cmnd *cmd = srb->cmd;
enum dma_data_direction dir = cmd->sc_data_direction;
- if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
+ if (scsi_sg_count(cmd) && dir != DMA_NONE) {
/* unmap DC395x SG list */
dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3333,7 +3333,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (!ckc_only && (cmd->result & RES_DID) == 0
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
- && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
+ && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;
/*if( srb->cmd->cmnd[0] == INQUIRY && */
@@ -4504,14 +4504,8 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
/*seq_printf(m, "\n"); */
seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
- seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
- acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
- acb->dcb_map[6], acb->dcb_map[7]);
- seq_printf(m, " %02x %02x %02x %02x %02x %02x %02x %02x\n",
- acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
- acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
- acb->dcb_map[14], acb->dcb_map[15]);
+ seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
+ seq_printf(m, " %8ph\n", &acb->dcb_map[8]);
seq_puts(m,
"Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index f654ad8a3d69..4251212acbbe 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -408,9 +408,6 @@ static void adpt_inquiry(adpt_hba* pHba)
static int adpt_slave_configure(struct scsi_device * device)
{
struct Scsi_Host *host = device->host;
- adpt_hba* pHba;
-
- pHba = (adpt_hba *) host->hostdata[0];
if (host->can_queue && device->tagged_supported) {
scsi_change_queue_depth(device,
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c
index cc620f10eabc..08f4e43c7d9e 100644
--- a/drivers/scsi/esas2r/esas2r_ioctl.c
+++ b/drivers/scsi/esas2r/esas2r_ioctl.c
@@ -1548,11 +1548,10 @@ static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
a->firmware.orig_len = length;
- a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)length,
- (dma_addr_t *)&a->firmware.
- phys,
- GFP_KERNEL);
+ a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)length,
+ (dma_addr_t *)&a->firmware.phys,
+ GFP_KERNEL);
if (!a->firmware.data) {
esas2r_debug("buffer alloc failed!");
@@ -1895,11 +1894,11 @@ int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
if (!a->vda_buffer) {
dma_addr_t dma_addr;
- a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
- (size_t)
- VDA_MAX_BUFFER_SIZE,
- &dma_addr,
- GFP_KERNEL);
+ a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)
+ VDA_MAX_BUFFER_SIZE,
+ &dma_addr,
+ GFP_KERNEL);
a->ppvda_buffer = dma_addr;
}
@@ -2064,11 +2063,10 @@ int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
re_allocate_buffer:
a->fs_api_buffer_size = length;
- a->fs_api_buffer = (u8 *)dma_alloc_coherent(
- &a->pcid->dev,
- (size_t)a->fs_api_buffer_size,
- (dma_addr_t *)&a->ppfs_api_buffer,
- GFP_KERNEL);
+ a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
+ (size_t)a->fs_api_buffer_size,
+ (dma_addr_t *)&a->ppfs_api_buffer,
+ GFP_KERNEL);
}
}
diff --git a/drivers/scsi/fdomain_isa.c b/drivers/scsi/fdomain_isa.c
index f2da4fa382e8..e0cdcd2003d0 100644
--- a/drivers/scsi/fdomain_isa.c
+++ b/drivers/scsi/fdomain_isa.c
@@ -111,12 +111,11 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
base = readb(p + sig->base_offset) +
(readb(p + sig->base_offset + 1) << 8);
iounmap(p);
- if (base)
+ if (base) {
dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
bios_base, base);
- else
+ } else { /* no I/O base in BIOS area */
dev_info(dev, "BIOS at 0x%lx\n", bios_base);
- if (!base) { /* no I/O base in BIOS area */
/* save BIOS signature for later use in port probing */
saved_sig = sig;
return 0;
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
index 13f7d88d6e57..6c049360f136 100644
--- a/drivers/scsi/fnic/fnic_debugfs.c
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -120,11 +120,11 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
len = 0;
trace_type = (u8 *)filp->private_data;
if (*trace_type == fc_trc_flag->fnic_trace)
- len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_trace)
- len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+ len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled);
else if (*trace_type == fc_trc_flag->fc_clear)
- len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+ len = sprintf(buf, "%d\n", fnic_fc_trace_cleared);
else
pr_err("fnic: Cannot read to any debugfs file\n");
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 673887e383cc..e3384afb7cbd 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -309,12 +309,10 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
struct fc_frame_header *fh = NULL;
struct fip_desc *desc;
struct fip_encaps *els;
- enum fip_desc_type els_dtype = 0;
u16 op;
u8 els_op;
u8 sub;
- size_t els_len = 0;
size_t rlen;
size_t dlen = 0;
@@ -346,10 +344,8 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
if (dlen < sizeof(*els) + sizeof(*fh) + 1)
return 0;
- els_len = dlen - sizeof(*els);
els = (struct fip_encaps *)desc;
fh = (struct fc_frame_header *)(els + 1);
- els_dtype = desc->fip_dtype;
if (!fh)
return 0;
@@ -376,7 +372,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
struct sk_buff *skb;
char *eth_fr;
- int fr_len;
struct fip_vlan *vlan;
u64 vlan_tov;
@@ -391,7 +386,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
if (!skb)
return;
- fr_len = sizeof(*vlan);
eth_fr = (char *)skb->data;
vlan = (struct fip_vlan *)eth_fr;
@@ -837,7 +831,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
struct sk_buff *skb;
struct fc_frame *fp;
struct fnic_stats *fnic_stats = &fnic->fnic_stats;
- unsigned int eth_hdrs_stripped;
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe = 0, fcoe_sof, fcoe_eof;
u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
@@ -867,7 +860,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&ingress_port, &packet_error,
&fcoe_enc_error, &fcs_ok, &vlan_stripped,
&vlan);
- eth_hdrs_stripped = 1;
skb_trim(skb, fcp_bytes_written);
fr_sof(fp) = sof;
fr_eof(fp) = eof;
@@ -884,7 +876,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
&tcp_udp_csum_ok, &udp, &tcp,
&ipv4_csum_ok, &ipv6, &ipv4,
&ipv4_fragment, &fcs_ok);
- eth_hdrs_stripped = 0;
skb_trim(skb, bytes_written);
if (!fcs_ok) {
atomic64_inc(&fnic_stats->misc_stats.frame_errors);
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 7910b573bacb..5f8a7ef8f6a8 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -443,7 +443,7 @@ static void fnic_notify_timer_start(struct fnic *fnic)
default:
/* Using intr for notification for INTx/MSI-X */
break;
- };
+ }
}
static int fnic_dev_wait(struct vnic_dev *vdev,
@@ -552,8 +552,7 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
{
- u16 old_vlan;
- old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+ vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 03b1805b106c..d1f7b84bbfe8 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1402,7 +1402,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
}
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- goto cleanup_scsi_cmd;
+ continue;
}
CMD_SP(sc) = NULL;
@@ -1417,7 +1417,6 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
-cleanup_scsi_cmd:
sc->result = DID_TRANSPORT_DISRUPTED << 16;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 7f150d52b4a6..dc0e17729acf 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -3007,7 +3007,6 @@ static char *async_cache_tab[] = {
static int gdth_async_event(gdth_ha_str *ha)
{
gdth_cmd_str *cmdp;
- int cmd_index;
cmdp= ha->pccb;
TRACE2(("gdth_async_event() ha %d serv %d\n",
@@ -3019,7 +3018,6 @@ static int gdth_async_event(gdth_ha_str *ha)
gdth_delay(0);
cmdp->Service = SCREENSERVICE;
cmdp->RequestBuffer = SCREEN_CMND;
- cmd_index = gdth_get_cmd_index(ha);
gdth_set_sema0(ha);
cmdp->OpCode = GDT_READ;
cmdp->BoardNode = LOCALBOARD;
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index 13ed9073fc72..b8148b1733f8 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -15,5 +15,6 @@ config SCSI_HISI_SAS_PCI
tristate "HiSilicon SAS on PCI bus"
depends on SCSI_HISI_SAS
depends on PCI
+ depends on ACPI
help
This driver supports HiSilicon's SAS HBA based on PCI device
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index e6acbf940712..a25cfc11c96d 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -21,6 +21,7 @@
#include <linux/of_address.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/timer.h>
@@ -34,6 +35,7 @@
#define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
#define HISI_SAS_RESET_BIT 0
#define HISI_SAS_REJECT_CMD_BIT 1
+#define HISI_SAS_PM_BIT 2
#define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
#define HISI_SAS_RESERVED_IPTT 96
#define HISI_SAS_UNRESERVED_IPTT \
@@ -275,6 +277,39 @@ enum hisi_sas_debugfs_cache_type {
HISI_SAS_IOST_CACHE,
};
+enum hisi_sas_debugfs_bist_ffe_cfg {
+ FFE_SAS_1_5_GBPS,
+ FFE_SAS_3_0_GBPS,
+ FFE_SAS_6_0_GBPS,
+ FFE_SAS_12_0_GBPS,
+ FFE_RESV,
+ FFE_SATA_1_5_GBPS,
+ FFE_SATA_3_0_GBPS,
+ FFE_SATA_6_0_GBPS,
+ FFE_CFG_MAX
+};
+
+enum hisi_sas_debugfs_bist_fixed_code {
+ FIXED_CODE,
+ FIXED_CODE_1,
+ FIXED_CODE_MAX
+};
+
+enum {
+ HISI_SAS_BIST_CODE_MODE_PRBS7,
+ HISI_SAS_BIST_CODE_MODE_PRBS23,
+ HISI_SAS_BIST_CODE_MODE_PRBS31,
+ HISI_SAS_BIST_CODE_MODE_JTPAT,
+ HISI_SAS_BIST_CODE_MODE_CJTPAT,
+ HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
+ HISI_SAS_BIST_CODE_MODE_TRAIN,
+ HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+ HISI_SAS_BIST_CODE_MODE_HFTP,
+ HISI_SAS_BIST_CODE_MODE_MFTP,
+ HISI_SAS_BIST_CODE_MODE_LFTP,
+ HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
struct hisi_sas_hw {
int (*hw_init)(struct hisi_hba *hisi_hba);
void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -441,6 +476,8 @@ struct hisi_hba {
int debugfs_bist_mode;
u32 debugfs_bist_cnt;
int debugfs_bist_enable;
+ u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX];
+ u32 debugfs_bist_fixed_code[FIXED_CODE_MAX];
/* debugfs memories */
/* Put Global AXI and RAS Register into register array */
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index a994c7b8d26f..128583dfccf2 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -229,17 +229,18 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
task->lldd_task = NULL;
if (!sas_protocol_ata(task->task_proto)) {
- struct sas_ssp_task *ssp_task = &task->ssp_task;
- struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-
if (slot->n_elem)
dma_unmap_sg(dev, task->scatter,
task->num_scatter,
task->data_dir);
- if (slot->n_elem_dif)
+ if (slot->n_elem_dif) {
+ struct sas_ssp_task *ssp_task = &task->ssp_task;
+ struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
scsi_prot_sg_count(scsi_cmnd),
task->data_dir);
+ }
}
}
@@ -334,7 +335,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
}
if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
- dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+ dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
*n_elem);
rc = -EINVAL;
goto err_out_dma_unmap;
@@ -620,6 +621,12 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
if (!phy->phy_attached)
return;
+ if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
+ !sas_phy->suspended) {
+ dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
+ return;
+ }
+
sas_ha = &hisi_hba->sha;
sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
@@ -1431,7 +1438,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
} else {
hisi_sas_phy_down(hisi_hba, phy_no, 0);
}
-
}
}
@@ -1547,7 +1553,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
{
struct Scsi_Host *shost = hisi_hba->shost;
- u32 state;
/* Init and wait for PHYs to come up and all libsas event finished. */
hisi_hba->hw->phys_init(hisi_hba);
@@ -1562,8 +1567,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
scsi_unblock_requests(shost);
clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
- state = hisi_hba->hw->get_phys_state(hisi_hba);
- hisi_sas_rescan_topology(hisi_hba, state);
+ hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
}
EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
@@ -3335,21 +3339,6 @@ enum {
HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
};
-enum {
- HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
- HISI_SAS_BIST_CODE_MODE_PRBS23,
- HISI_SAS_BIST_CODE_MODE_PRBS31,
- HISI_SAS_BIST_CODE_MODE_JTPAT,
- HISI_SAS_BIST_CODE_MODE_CJTPAT,
- HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
- HISI_SAS_BIST_CODE_MODE_TRAIN,
- HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
- HISI_SAS_BIST_CODE_MODE_HFTP,
- HISI_SAS_BIST_CODE_MODE_MFTP,
- HISI_SAS_BIST_CODE_MODE_LFTP,
- HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
-};
-
static const struct {
int value;
char *name;
@@ -3705,6 +3694,58 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
.owner = THIS_MODULE,
};
+static const struct {
+ char *name;
+} hisi_sas_debugfs_ffe_name[FFE_CFG_MAX] = {
+ { "SAS_1_5_GBPS" },
+ { "SAS_3_0_GBPS" },
+ { "SAS_6_0_GBPS" },
+ { "SAS_12_0_GBPS" },
+ { "FFE_RESV" },
+ { "SATA_1_5_GBPS" },
+ { "SATA_3_0_GBPS" },
+ { "SATA_6_0_GBPS" },
+};
+
+static ssize_t hisi_sas_debugfs_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct seq_file *m = filp->private_data;
+ u32 *val = m->private;
+ int res;
+
+ res = kstrtouint_from_user(buf, count, 0, val);
+ if (res)
+ return res;
+
+ return count;
+}
+
+static int hisi_sas_debugfs_show(struct seq_file *s, void *p)
+{
+ u32 *val = s->private;
+
+ seq_printf(s, "0x%x\n", *val);
+
+ return 0;
+}
+
+static int hisi_sas_debugfs_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, hisi_sas_debugfs_show,
+ inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_ops = {
+ .open = hisi_sas_debugfs_open,
+ .read = seq_read,
+ .write = hisi_sas_debugfs_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .owner = THIS_MODULE,
+};
+
static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
const char __user *buf,
size_t count, loff_t *ppos)
@@ -3902,6 +3943,9 @@ static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
{
+ struct dentry *ports_dentry;
+ int phy_no;
+
hisi_hba->debugfs_bist_dentry =
debugfs_create_dir("bist", hisi_hba->debugfs_dir);
debugfs_create_file("link_rate", 0600,
@@ -3912,6 +3956,16 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
hisi_hba->debugfs_bist_dentry, hisi_hba,
&hisi_sas_debugfs_bist_code_mode_ops);
+ debugfs_create_file("fixed_code", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[0],
+ &hisi_sas_debugfs_ops);
+
+ debugfs_create_file("fixed_code_1", 0600,
+ hisi_hba->debugfs_bist_dentry,
+ &hisi_hba->debugfs_bist_fixed_code[1],
+ &hisi_sas_debugfs_ops);
+
debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
@@ -3925,6 +3979,27 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
+ ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct dentry *port_dentry;
+ struct dentry *ffe_dentry;
+ char name[256];
+ int i;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name, ports_dentry);
+ ffe_dentry = debugfs_create_dir("ffe", port_dentry);
+ for (i = 0; i < FFE_CFG_MAX; i++) {
+ if (i == FFE_RESV)
+ continue;
+ debugfs_create_file(hisi_sas_debugfs_ffe_name[i].name,
+ 0600, ffe_dentry,
+ &hisi_hba->debugfs_bist_ffe[phy_no][i],
+ &hisi_sas_debugfs_ops);
+ }
+ }
+
hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 7922a9bb1b28..45e866cb9164 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -752,7 +752,7 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v1_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -1166,7 +1166,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
{
- dev_err(dev, "slot err: SATA/STP not supported");
+ dev_err(dev, "slot err: SATA/STP not supported\n");
}
break;
default:
@@ -1218,35 +1218,35 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO);
if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq force phy err",
+ dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq phy id err",
+ dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq abort flag err",
+ dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err",
+ dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err",
+ dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK)
- dev_err(dev, "slot complete: [%d:%d] has dq order frame len err",
+ dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n",
slot->cmplt_queue, slot->cmplt_queue_slot);
ts->stat = SAS_OPEN_REJECT;
@@ -1294,7 +1294,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
case SAS_PROTOCOL_SATA:
case SAS_PROTOCOL_STP:
case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
- dev_err(dev, "slot complete: SATA/STP not supported");
+ dev_err(dev, "slot complete: SATA/STP not supported\n");
break;
default:
@@ -1417,7 +1417,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) {
- dev_err(dev, "bcast: irq_value = %x not set enable bit",
+ dev_err(dev, "bcast: irq_value = %x not set enable bit\n",
irq_value);
res = IRQ_NONE;
goto end;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 043f47ba3600..b57177b52fac 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -1202,7 +1202,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
@@ -1382,7 +1382,7 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
rc = reset_hw_v2_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index b6f75a1764df..7133ca859b5e 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -191,8 +191,10 @@
#define PHY_CFG_PHY_RST_OFF 3
#define PHY_CFG_PHY_RST_MSK (0x1 << PHY_CFG_PHY_RST_OFF)
#define PROG_PHY_LINK_RATE (PORT_BASE + 0x8)
-#define CFG_PROG_PHY_LINK_RATE_OFF 8
-#define CFG_PROG_PHY_LINK_RATE_MSK (0xf << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_PHY_LINK_RATE_OFF 0
+#define CFG_PROG_PHY_LINK_RATE_MSK (0xff << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_OOB_PHY_LINK_RATE_OFF 8
+#define CFG_PROG_OOB_PHY_LINK_RATE_MSK (0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
#define PHY_CTRL (PORT_BASE + 0x14)
#define PHY_CTRL_RESET_OFF 0
#define PHY_CTRL_RESET_MSK (0x1 << PHY_CTRL_RESET_OFF)
@@ -295,6 +297,7 @@
#define DMA_RX_STATUS_BUSY_MSK (0x1 << DMA_RX_STATUS_BUSY_OFF)
#define COARSETUNE_TIME (PORT_BASE + 0x304)
+#define TXDEEMPH_G1 (PORT_BASE + 0x350)
#define ERR_CNT_DWS_LOST (PORT_BASE + 0x380)
#define ERR_CNT_RESET_PROB (PORT_BASE + 0x384)
#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
@@ -565,7 +568,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
{
- int i;
+ int i, j;
/* Global registers init */
hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
@@ -593,25 +596,24 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
for (i = 0; i < hisi_hba->queue_count; i++)
- hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+ hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
for (i = 0; i < hisi_hba->n_phy; i++) {
+ enum sas_linkrate max;
struct hisi_sas_phy *phy = &hisi_hba->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
- SAS_LINK_RATE_1_5_GBPS)) {
- prog_phy_link_rate = 0x855;
- } else {
- enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
-
- prog_phy_link_rate =
- hisi_sas_get_prog_phy_linkrate_mask(max) |
- 0x800;
- }
+ SAS_LINK_RATE_1_5_GBPS))
+ max = SAS_LINK_RATE_12_0_GBPS;
+ else
+ max = sas_phy->phy->maximum_linkrate;
+ prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
@@ -636,6 +638,13 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
/* used for 12G negotiate */
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
+
+ /* get default FFE configuration for BIST */
+ for (j = 0; j < FFE_CFG_MAX; j++) {
+ u32 val = hisi_sas_phy_read32(hisi_hba, i,
+ TXDEEMPH_G1 + (j * 0x4));
+ hisi_hba->debugfs_bist_ffe[i][j] = val;
+ }
}
for (i = 0; i < hisi_hba->queue_count; i++) {
@@ -894,13 +903,14 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
{
struct device *dev = hisi_hba->dev;
+ struct acpi_device *acpi_dev;
union acpi_object *obj;
guid_t guid;
int rc;
rc = reset_hw_v3_hw(hisi_hba);
if (rc) {
- dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+ dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
return rc;
}
@@ -924,6 +934,9 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
else
ACPI_FREE(obj);
+ acpi_dev = ACPI_COMPANION(dev);
+ if (!acpi_device_power_manageable(acpi_dev))
+ dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
return 0;
}
@@ -1341,7 +1354,6 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
-
}
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
@@ -1447,7 +1459,6 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
/* dw7 */
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
hdr->transfer_tags = cpu_to_le32(slot->idx);
-
}
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
@@ -2469,8 +2480,10 @@ static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
struct sas_phy_linkrates *r)
{
enum sas_linkrate max = r->maximum_linkrate;
- u32 prog_phy_link_rate = 0x800;
+ u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
+ PROG_PHY_LINK_RATE);
+ prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
prog_phy_link_rate);
@@ -2484,10 +2497,11 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
synchronize_irq(pci_irq_vector(pdev, 1));
synchronize_irq(pci_irq_vector(pdev, 2));
synchronize_irq(pci_irq_vector(pdev, 11));
- for (i = 0; i < hisi_hba->queue_count; i++) {
+ for (i = 0; i < hisi_hba->queue_count; i++)
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
+
+ for (i = 0; i < hisi_hba->cq_nvecs; i++)
synchronize_irq(pci_irq_vector(pdev, i + 16));
- }
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
@@ -2712,6 +2726,33 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
}
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
+static int slave_configure_v3_hw(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
+ struct domain_device *ddev = sdev_to_domain_dev(sdev);
+ struct hisi_hba *hisi_hba = shost_priv(shost);
+ struct device *dev = hisi_hba->dev;
+ int ret = sas_slave_configure(sdev);
+
+ if (ret)
+ return ret;
+ if (!dev_is_sata(ddev))
+ sas_change_queue_depth(sdev, 64);
+
+ if (sdev->type == TYPE_ENCLOSURE)
+ return 0;
+
+ if (!device_link_add(&sdev->sdev_gendev, dev,
+ DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
+ if (pm_runtime_enabled(dev)) {
+ dev_info(dev, "add device link failed, disable runtime PM for the host\n");
+ pm_runtime_disable(dev);
+ }
+ }
+
+ return 0;
+}
+
static struct device_attribute *host_attrs_v3_hw[] = {
&dev_attr_phy_event_threshold,
&dev_attr_intr_conv_v3_hw,
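[Editor's note: slave_configure_v3_hw() above ties each SCSI device to the HBA with a device link so runtime PM resumes the controller before its disks; if the link cannot be created it disables runtime PM rather than risk suspending under active children. A minimal sketch of the consumer/supplier pattern; consumer and supplier are placeholders for the sdev and HBA struct device:

	/* DL_FLAG_PM_RUNTIME: supplier is runtime-resumed before consumer.
	 * DL_FLAG_RPM_ACTIVE: link starts with the supplier counted active.
	 */
	struct device_link *link;

	link = device_link_add(consumer, supplier,
			       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);
	if (!link)
		dev_warn(supplier, "failed to link consumer %s\n",
			 dev_name(consumer));
]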
@@ -2936,42 +2977,48 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
+ int i;
/* disable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 0);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 0);
+
+ /* update FFE */
+ for (i = 0; i < FFE_CFG_MAX; i++)
+ hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
+ hisi_hba->debugfs_bist_ffe[phy_no][i]);
/* disable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
}
static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
{
u32 reg_val;
- int phy_id = hisi_hba->debugfs_bist_phy_no;
+ int phy_no = hisi_hba->debugfs_bist_phy_no;
/* disable loopback */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);
/* enable ALOS */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
- hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
/* restore the linkrate */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
/* init OOB link rate as 1.5 Gbits */
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
/* enable PHY */
- hisi_sas_phy_enable(hisi_hba, phy_id, 1);
+ hisi_sas_phy_enable(hisi_hba, phy_no, 1);
}
#define SAS_PHY_BIST_CODE_INIT 0x1
@@ -2980,60 +3027,75 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
{
u32 reg_val, mode_tmp;
u32 linkrate = hisi_hba->debugfs_bist_linkrate;
- u32 phy_id = hisi_hba->debugfs_bist_phy_no;
+ u32 phy_no = hisi_hba->debugfs_bist_phy_no;
+ u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
u32 code_mode = hisi_hba->debugfs_bist_code_mode;
u32 path_mode = hisi_hba->debugfs_bist_mode;
+ u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
struct device *dev = hisi_hba->dev;
- dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
- linkrate, phy_id, code_mode, path_mode);
+ dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
+ phy_no, linkrate, code_mode, path_mode,
+ ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
+ ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
+ ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
+ ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
+ fix_code[FIXED_CODE_1]);
mode_tmp = path_mode ? 2 : 1;
if (enable) {
/* some preparations before bist test */
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
		/* set linkrate of bist test */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
PROG_PHY_LINK_RATE);
- reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
- reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- PROG_PHY_LINK_RATE, reg_val);
+ reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
+ reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
+ hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
+ reg_val);
		/* set code mode of bist test */
- reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
SAS_PHY_BIST_CTRL);
- reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
- CFG_LOOP_TEST_MODE_MSK |
- CFG_RX_BIST_EN_MSK |
- CFG_TX_BIST_EN_MSK |
- CFG_BIST_TEST_MSK);
+ reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
+ CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
+ CFG_BIST_TEST_MSK);
reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
(mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
CFG_BIST_TEST_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* set the bist init value */
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE,
- SAS_PHY_BIST_CODE_INIT);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CODE1,
- SAS_PHY_BIST_CODE1_INIT);
+ if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
+ reg_val = hisi_hba->debugfs_bist_fixed_code[0];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE, reg_val);
+
+ reg_val = hisi_hba->debugfs_bist_fixed_code[1];
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1, reg_val);
+ } else {
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE,
+ SAS_PHY_BIST_CODE_INIT);
+ hisi_sas_phy_write32(hisi_hba, phy_no,
+ SAS_PHY_BIST_CODE1,
+ SAS_PHY_BIST_CODE1_INIT);
+ }
mdelay(100);
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
- hisi_sas_phy_write32(hisi_hba, phy_id,
- SAS_PHY_BIST_CTRL, reg_val);
+ hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
+ reg_val);
/* clear error bit */
mdelay(100);
- hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
+ hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
} else {
/* disable bist test and recover it */
hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
- phy_id, SAS_BIST_ERR_CNT);
+ phy_no, SAS_BIST_ERR_CNT);
hisi_sas_bist_test_restore_v3_hw(hisi_hba);
}
@@ -3056,7 +3118,7 @@ static struct scsi_host_template sht_v3_hw = {
.queuecommand = sas_queuecommand,
.dma_need_drain = ata_scsi_dma_need_drain,
.target_alloc = sas_target_alloc,
- .slave_configure = hisi_sas_slave_configure,
+ .slave_configure = slave_configure_v3_hw,
.scan_finished = hisi_sas_scan_finished,
.scan_start = hisi_sas_scan_start,
.map_queues = hisi_sas_map_queues,
@@ -3266,6 +3328,17 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
scsi_scan_host(shost);
+ /*
+ * When ATA disks are connected to the SAS controller, an ata_port
+ * is additionally created for each of them, which affects the
+ * child_count of hisi_hba->dev. Even after all the disks have been
+ * suspended, the ata_port still exists, so the child_count of
+ * hisi_hba->dev is not 0. Use pm_suspend_ignore_children() to
+ * ignore this effect on hisi_hba->dev.
+ */
+ pm_suspend_ignore_children(dev, true);
+ pm_runtime_put_noidle(&pdev->dev);
+
return 0;
err_out_register_ha:
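[Editor's note: the probe hunk above drops the runtime-PM usage count that the PCI core holds across probe, allowing the controller to autosuspend once idle, while pm_suspend_ignore_children() stops the SATA-created ata_port children from pinning it. A sketch of the pairing, assuming the usual PCI-core behavior of taking a runtime-PM reference before probe; example_probe is hypothetical, and remove re-takes the count as the hisi_sas_v3_remove hunk below shows:

	static int example_probe(struct pci_dev *pdev)
	{
		/* ... normal controller setup ... */
		pm_suspend_ignore_children(&pdev->dev, true);
		pm_runtime_put_noidle(&pdev->dev);	/* balance core's get */
		return 0;
	}
]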
@@ -3305,6 +3378,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = sha->core.shost;
+ pm_runtime_get_noresume(dev);
if (timer_pending(&hisi_hba->timer))
del_timer(&hisi_hba->timer);
@@ -3359,8 +3433,9 @@ enum {
hip08,
};
-static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
+static int _suspend_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct device *dev = hisi_hba->dev;
@@ -3391,7 +3466,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
hisi_sas_init_mem(hisi_hba);
- device_state = pci_choose_state(pdev, state);
+ device_state = pci_choose_state(pdev, PMSG_SUSPEND);
dev_warn(dev, "entering operating state [D%d]\n",
device_state);
pci_save_state(pdev);
@@ -3404,8 +3479,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
return 0;
}
-static int hisi_sas_v3_resume(struct pci_dev *pdev)
+static int _resume_v3_hw(struct device *device)
{
+ struct pci_dev *pdev = to_pci_dev(device);
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct hisi_hba *hisi_hba = sha->lldd_ha;
struct Scsi_Host *shost = hisi_hba->shost;
@@ -3442,6 +3518,34 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
return 0;
}
+static int suspend_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc;
+
+ set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ rc = _suspend_v3_hw(device);
+ if (rc)
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
+
+static int resume_v3_hw(struct device *device)
+{
+ struct pci_dev *pdev = to_pci_dev(device);
+ struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+ struct hisi_hba *hisi_hba = sha->lldd_ha;
+ int rc = _resume_v3_hw(device);
+
+ clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
+
+ return rc;
+}
+
static const struct pci_device_id sas_v3_pci_table[] = {
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
{}
@@ -3453,14 +3557,29 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
.reset_done = hisi_sas_reset_done_v3_hw,
};
+static int runtime_suspend_v3_hw(struct device *dev)
+{
+ return suspend_v3_hw(dev);
+}
+
+static int runtime_resume_v3_hw(struct device *dev)
+{
+ return resume_v3_hw(dev);
+}
+
+static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
+ SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
+ runtime_resume_v3_hw, NULL)
+};
+
static struct pci_driver sas_v3_pci_driver = {
.name = DRV_NAME,
.id_table = sas_v3_pci_table,
.probe = hisi_sas_v3_probe,
.remove = hisi_sas_v3_remove,
- .suspend = hisi_sas_v3_suspend,
- .resume = hisi_sas_v3_resume,
.err_handler = &hisi_sas_err_handler,
+ .driver.pm = &hisi_sas_v3_pm_ops,
};
module_pci_driver(sas_v3_pci_driver);
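[Editor's note: this file section completes the move from the legacy pci_driver .suspend/.resume callbacks to struct dev_pm_ops, which lets one set of handlers serve both system sleep and runtime PM. Reduced to a skeleton; the example_* names are placeholders:

	static const struct dev_pm_ops example_pm_ops = {
		/* wires suspend/resume (and freeze/thaw/poweroff/restore) */
		SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
		/* adds runtime_suspend/runtime_resume/runtime_idle */
		SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
				   runtime_resume_v3_hw, NULL)
	};

	static struct pci_driver example_driver = {
		.name		= "example",
		.driver.pm	= &example_pm_ops,	/* not .suspend/.resume */
	};
]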
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 48d5da59262b..83ce4f11a589 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
@@ -9329,10 +9330,10 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
{
if (h->ioaccel_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
- h->ioaccel_cmd_pool,
- h->ioaccel_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+ h->ioaccel_cmd_pool,
+ h->ioaccel_cmd_pool_dhandle);
h->ioaccel_cmd_pool = NULL;
h->ioaccel_cmd_pool_dhandle = 0;
}
@@ -9382,10 +9383,10 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
hpsa_free_ioaccel2_sg_chain_blocks(h);
if (h->ioaccel2_cmd_pool) {
- pci_free_consistent(h->pdev,
- h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
- h->ioaccel2_cmd_pool,
- h->ioaccel2_cmd_pool_dhandle);
+ dma_free_coherent(&h->pdev->dev,
+ h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+ h->ioaccel2_cmd_pool,
+ h->ioaccel2_cmd_pool_dhandle);
h->ioaccel2_cmd_pool = NULL;
h->ioaccel2_cmd_pool_dhandle = 0;
}
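[Editor's note: the hpsa hunks swap the deprecated pci_*_consistent() compat wrappers for the generic DMA API. The wrappers were defined directly on top of it, so the change is mechanical; one behavioral nuance is that pci_alloc_consistent() implied GFP_ATOMIC, whereas converted call sites can pass GFP_KERNEL when sleeping is allowed:

	/* before: implicit GFP_ATOMIC */
	buf = pci_alloc_consistent(pdev, size, &dma);
	pci_free_consistent(pdev, size, buf, dma);

	/* after: explicit device and gfp flags */
	buf = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, size, buf, dma);
]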
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index 6b87d9815b35..99b0750850b2 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 7825cbfea4dc..46df2e3ff89b 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -1,5 +1,6 @@
/*
* Disk Array driver for HP Smart Array SAS controllers
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright 2016 Microsemi Corporation
* Copyright 2014-2015 PMC-Sierra, Inc.
* Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index ea7c8930592d..070cf516b98f 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -134,6 +134,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
static void ibmvfc_npiv_logout(struct ibmvfc_host *);
static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
@@ -431,7 +432,20 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
}
break;
case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
- if (action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+ if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
+ action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+ tgt->action = action;
+ rc = 0;
+ }
+ break;
+ case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
+ if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
tgt->action = action;
rc = 0;
}
@@ -441,16 +455,18 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
tgt->action = action;
rc = 0;
}
+ break;
case IBMVFC_TGT_ACTION_DELETED_RPORT:
break;
default:
- if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
- tgt->add_rport = 0;
tgt->action = action;
rc = 0;
break;
}
+ if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+ tgt->add_rport = 0;
+
return rc;
}
@@ -548,7 +564,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
**/
static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
{
- if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+ if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
+ vhost->state == IBMVFC_ACTIVE) {
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
scsi_block_requests(vhost->host);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
@@ -2574,7 +2591,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *dev_rport;
struct scsi_device *sdev;
- unsigned long rc;
+ struct ibmvfc_target *tgt;
+ unsigned long rc, flags;
+ unsigned int found;
ENTER;
shost_for_each_device(sdev, shost) {
@@ -2588,6 +2607,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
if (rc == FAILED)
ibmvfc_issue_fc_host_lip(shost);
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ found = 0;
+ list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->scsi_id == rport->port_id) {
+ found++;
+ break;
+ }
+ }
+
+ if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * If we get here, that means we previously attempted to send
+ * an implicit logout to the target but it failed, most likely
+ * due to I/O being pending, so we need to send it again
+ */
+ ibmvfc_del_tgt(tgt);
+ ibmvfc_reinit_host(vhost);
+ }
+
+ spin_unlock_irqrestore(shost->host_lock, flags);
LEAVE;
}
@@ -3623,7 +3663,18 @@ static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)
vhost->discovery_threads--;
ibmvfc_free_event(evt);
- ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+ /*
+ * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
+ * driver, in which case we need to free up all the targets. If we are
+ * not unloading, we will still go through a hard reset to get out of
+ * offline state, so there is no need to track the old targets in that
+ * case.
+ */
+ if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ else
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);
tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
kref_put(&tgt->kref, ibmvfc_release_tgt);
@@ -3662,6 +3713,94 @@ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
}
/**
+ * ibmvfc_tgt_move_login_done - Completion handler for Move Login
+ * @evt: ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_target *tgt = evt->tgt;
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
+ u32 status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+ switch (status) {
+ case IBMVFC_MAD_SUCCESS:
+ tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+ tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+ tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+ tgt->ids.port_id = tgt->scsi_id;
+ memcpy(&tgt->service_parms, &rsp->service_parms,
+ sizeof(tgt->service_parms));
+ memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+ sizeof(tgt->service_parms_change));
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+ break;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ break;
+ case IBMVFC_MAD_CRQ_ERROR:
+ ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+ break;
+ case IBMVFC_MAD_FAILED:
+ default:
+ level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+
+ tgt_log(tgt, level,
+ "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+ tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+ status);
+ break;
+ }
+
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ ibmvfc_free_event(evt);
+ wake_up(&vhost->work_wait_q);
+}
+
+/**
+ * ibmvfc_tgt_move_login - Initiate a move login for specified target
+ * @tgt: ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+{
+ struct ibmvfc_host *vhost = tgt->vhost;
+ struct ibmvfc_move_login *move;
+ struct ibmvfc_event *evt;
+
+ if (vhost->discovery_threads >= disc_threads)
+ return;
+
+ kref_get(&tgt->kref);
+ evt = ibmvfc_get_event(vhost);
+ vhost->discovery_threads++;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+ evt->tgt = tgt;
+ move = &evt->iu.move_login;
+ memset(move, 0, sizeof(*move));
+ move->common.version = cpu_to_be32(1);
+ move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
+ move->common.length = cpu_to_be16(sizeof(*move));
+
+ move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
+ move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+ move->wwpn = cpu_to_be64(tgt->wwpn);
+ move->node_name = cpu_to_be64(tgt->ids.node_name);
+
+ if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+ vhost->discovery_threads--;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+ kref_put(&tgt->kref, ibmvfc_release_tgt);
+ } else
+ tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+}
+
+/**
* ibmvfc_adisc_needs_plogi - Does device need PLOGI?
* @mad: ibmvfc passthru mad struct
* @tgt: ibmvfc target struct
@@ -3979,31 +4118,77 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
+ struct ibmvfc_discover_targets_entry *target)
{
+ struct ibmvfc_target *stgt = NULL;
+ struct ibmvfc_target *wtgt = NULL;
struct ibmvfc_target *tgt;
unsigned long flags;
+ u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
+ u64 wwpn = be64_to_cpu(target->wwpn);
+ /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
+ if (tgt->wwpn == wwpn) {
+ wtgt = tgt;
+ break;
+ }
+ }
+
+ list_for_each_entry(tgt, &vhost->targets, queue) {
if (tgt->scsi_id == scsi_id) {
- if (tgt->need_login)
- ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ stgt = tgt;
+ break;
+ }
+ }
+
+ if (wtgt && !stgt) {
+ /*
+ * A WWPN target has moved and we still are tracking the old
+ * SCSI ID. The only way we should be able to get here is if
+ * we attempted to send an implicit logout for the old SCSI ID
+ * and it failed for some reason, such as there being I/O
+ * pending to the target. In this case, we will have already
+ * deleted the rport from the FC transport so we do a move
+ * login, which works even with I/O pending, as it will cancel
+ * any active commands.
+ */
+ if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+ /*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+ wtgt->old_scsi_id = wtgt->scsi_id;
+ wtgt->scsi_id = scsi_id;
+ wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
goto unlock_out;
+ } else {
+ tgt_err(wtgt, "Unexpected target state: %d, %p\n",
+ wtgt->action, wtgt->rport);
}
+ } else if (stgt) {
+ if (tgt->need_login)
+ ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+ goto unlock_out;
}
spin_unlock_irqrestore(vhost->host->host_lock, flags);
tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
memset(tgt, 0, sizeof(*tgt));
tgt->scsi_id = scsi_id;
+ tgt->wwpn = wwpn;
tgt->vhost = vhost;
tgt->need_login = 1;
- tgt->cancel_key = vhost->task_set++;
timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
kref_init(&tgt->kref);
ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ tgt->cancel_key = vhost->task_set++;
list_add_tail(&tgt->queue, &vhost->targets);
unlock_out:
@@ -4023,9 +4208,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
int i, rc;
for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
- rc = ibmvfc_alloc_target(vhost,
- be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
- IBMVFC_DISC_TGT_SCSI_ID_MASK);
+ rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);
return rc;
}
@@ -4085,6 +4268,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+ mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
if (!ibmvfc_send_event(evt, vhost, default_timeout))
@@ -4420,6 +4604,13 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ tgt->rport = NULL;
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ fc_remote_port_delete(rport);
+ return;
} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return;
@@ -4543,6 +4734,15 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
del_timer_sync(&tgt->timer);
kref_put(&tgt->kref, ibmvfc_release_tgt);
return;
+ } else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+ tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
+ rport = tgt->rport;
+ tgt->rport = NULL;
+ ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ if (rport)
+ fc_remote_port_delete(rport);
+ return;
}
}
@@ -4775,7 +4975,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_sg_pool;
}
- vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+ vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
&vhost->disc_buf_dma, GFP_KERNEL);
@@ -4928,6 +5128,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
if (IS_ERR(vhost->work_thread)) {
dev_err(dev, "Couldn't create kernel thread: %ld\n",
PTR_ERR(vhost->work_thread));
+ rc = PTR_ERR(vhost->work_thread);
goto free_host_mem;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 907889f1fa9d..34debccfb142 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -120,10 +120,14 @@ enum ibmvfc_mad_types {
IBMVFC_PORT_LOGIN = 0x0004,
IBMVFC_PROCESS_LOGIN = 0x0008,
IBMVFC_QUERY_TARGET = 0x0010,
+ IBMVFC_MOVE_LOGIN = 0x0020,
IBMVFC_IMPLICIT_LOGOUT = 0x0040,
IBMVFC_PASSTHRU = 0x0200,
IBMVFC_TMF_MAD = 0x0100,
IBMVFC_NPIV_LOGOUT = 0x0800,
+ IBMVFC_CHANNEL_ENQUIRY = 0x1000,
+ IBMVFC_CHANNEL_SETUP = 0x2000,
+ IBMVFC_CONNECTION_INFO = 0x4000,
};
struct ibmvfc_mad_common {
@@ -133,16 +137,16 @@ struct ibmvfc_mad_common {
__be16 status;
__be16 length;
__be64 tag;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_login_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_npiv_logout_mad {
struct ibmvfc_mad_common common;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
#define IBMVFC_MAX_NAME 256
@@ -162,13 +166,15 @@ struct ibmvfc_npiv_login {
__be32 max_cmds;
__be64 capabilities;
#define IBMVFC_CAN_MIGRATE 0x01
+#define IBMVFC_CAN_USE_CHANNELS 0x02
+#define IBMVFC_CAN_HANDLE_FPIN 0x04
__be64 node_name;
struct srp_direct_buf async;
u8 partition_name[IBMVFC_MAX_NAME];
u8 device_name[IBMVFC_MAX_NAME];
u8 drc_name[IBMVFC_MAX_NAME];
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_common_svc_parms {
__be16 fcph_version;
@@ -177,7 +183,7 @@ struct ibmvfc_common_svc_parms {
__be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
__be32 ratov;
__be32 edtov;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_service_parms {
struct ibmvfc_common_svc_parms common;
@@ -192,7 +198,8 @@ struct ibmvfc_service_parms {
__be32 ext_len;
__be32 reserved[30];
__be32 clk_sync_qos[2];
-}__attribute__((packed, aligned (4)));
+ __be32 reserved2;
+} __packed __aligned(4);
struct ibmvfc_npiv_login_resp {
__be32 version;
@@ -204,6 +211,7 @@ struct ibmvfc_npiv_login_resp {
__be64 capabilities;
#define IBMVFC_CAN_FLUSH_ON_HALT 0x08
#define IBMVFC_CAN_SUPPRESS_ABTS 0x10
+#define IBMVFC_CAN_SUPPORT_CHANNELS 0x20
__be32 max_cmds;
__be32 scsi_id_sz;
__be64 max_dma_len;
@@ -217,29 +225,32 @@ struct ibmvfc_npiv_login_resp {
u8 drc_name[IBMVFC_MAX_NAME];
struct ibmvfc_service_parms service_parms;
__be64 reserved2;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
union ibmvfc_npiv_login_data {
struct ibmvfc_npiv_login login;
struct ibmvfc_npiv_login_resp resp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
-struct ibmvfc_discover_targets_buf {
- __be32 scsi_id[1];
+struct ibmvfc_discover_targets_entry {
+ __be32 scsi_id;
+ __be32 pad;
+ __be64 wwpn;
#define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
-};
+} __packed __aligned(8);
struct ibmvfc_discover_targets {
struct ibmvfc_mad_common common;
struct srp_direct_buf buffer;
__be32 flags;
+#define IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST 0x02
__be16 status;
__be16 error;
__be32 bufflen;
__be32 num_avail;
__be32 num_written;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fc_reason {
IBMVFC_INVALID_ELS_CMD_CODE = 0x01,
@@ -283,7 +294,27 @@ struct ibmvfc_port_login {
struct ibmvfc_service_parms service_parms;
struct ibmvfc_service_parms service_parms_change;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_move_login {
+ struct ibmvfc_mad_common common;
+ __be64 old_scsi_id;
+ __be64 new_scsi_id;
+ __be64 wwpn;
+ __be64 node_name;
+ __be32 flags;
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_OLD_FAILED 0x01
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_NEW_FAILED 0x02
+#define IBMVFC_MOVE_LOGIN_PORT_LOGIN_FAILED 0x04
+ __be32 reserved;
+ struct ibmvfc_service_parms service_parms;
+ struct ibmvfc_service_parms service_parms_change;
+ __be32 reserved2;
+ __be16 service_class;
+ __be16 vios_flags;
+#define IBMVFC_MOVE_LOGIN_VF_NOT_SENT_ADAPTER 0x01
+ __be64 reserved3;
+} __packed __aligned(8);
struct ibmvfc_prli_svc_parms {
u8 type;
@@ -303,7 +334,7 @@ struct ibmvfc_prli_svc_parms {
#define IBMVFC_PRLI_TARGET_FUNC 0x00000010
#define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
#define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_process_login {
struct ibmvfc_mad_common common;
@@ -314,7 +345,7 @@ struct ibmvfc_process_login {
__be16 error; /* also fc_reason */
__be32 reserved2;
__be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_query_tgt {
struct ibmvfc_mad_common common;
@@ -325,13 +356,13 @@ struct ibmvfc_query_tgt {
__be16 fc_explain;
__be16 fc_type;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_implicit_logout {
struct ibmvfc_mad_common common;
__be64 old_scsi_id;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_tmf {
struct ibmvfc_mad_common common;
@@ -348,7 +379,7 @@ struct ibmvfc_tmf {
__be32 my_cancel_key;
__be32 pad;
__be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_fcp_rsp_info_codes {
RSP_NO_FAILURE = 0x00,
@@ -361,7 +392,7 @@ struct ibmvfc_fcp_rsp_info {
u8 reserved[3];
u8 rsp_code;
u8 reserved2[4];
-}__attribute__((packed, aligned (2)));
+} __packed __aligned(2);
enum ibmvfc_fcp_rsp_flags {
FCP_BIDI_RSP = 0x80,
@@ -377,7 +408,7 @@ enum ibmvfc_fcp_rsp_flags {
union ibmvfc_fcp_rsp_data {
struct ibmvfc_fcp_rsp_info info;
u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_fcp_rsp {
__be64 reserved;
@@ -388,7 +419,7 @@ struct ibmvfc_fcp_rsp {
__be32 fcp_sense_len;
__be32 fcp_rsp_len;
union ibmvfc_fcp_rsp_data data;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_cmd_flags {
IBMVFC_SCATTERLIST = 0x0001,
@@ -422,7 +453,7 @@ struct ibmvfc_fcp_cmd_iu {
#define IBMVFC_WRDATA 0x01
u8 cdb[IBMVFC_MAX_CDB_LEN];
__be32 xfer_len;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);
struct ibmvfc_cmd {
__be64 task_tag;
@@ -446,7 +477,7 @@ struct ibmvfc_cmd {
__be64 reserved3[2];
struct ibmvfc_fcp_cmd_iu iu;
struct ibmvfc_fcp_rsp rsp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_passthru_fc_iu {
__be32 payload[7];
@@ -473,18 +504,64 @@ struct ibmvfc_passthru_iu {
__be64 scsi_id;
__be64 tag;
__be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_passthru_mad {
struct ibmvfc_mad_common common;
struct srp_direct_buf cmd_ioba;
struct ibmvfc_passthru_iu iu;
struct ibmvfc_passthru_fc_iu fc_iu;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_channel_enquiry {
+ struct ibmvfc_mad_common common;
+ __be32 flags;
+#define IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT 0x01
+#define IBMVFC_SUPPORT_VARIABLE_SUBQ_MSG 0x02
+#define IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT 0x04
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+} __packed __aligned(8);
+
+struct ibmvfc_channel_setup_mad {
+ struct ibmvfc_mad_common common;
+ struct srp_direct_buf buffer;
+} __packed __aligned(8);
+
+#define IBMVFC_MAX_CHANNELS 502
+
+struct ibmvfc_channel_setup {
+ __be32 flags;
+#define IBMVFC_CANCEL_CHANNELS 0x01
+#define IBMVFC_USE_BUFFER 0x02
+#define IBMVFC_CHANNELS_CANCELED 0x04
+ __be32 reserved;
+ __be32 num_scsi_subq_channels;
+ __be32 num_nvmeof_subq_channels;
+ __be32 num_scsi_vas_channels;
+ __be32 num_nvmeof_vas_channels;
+ struct srp_direct_buf buffer;
+ __be64 reserved2[5];
+ __be64 channel_handles[IBMVFC_MAX_CHANNELS];
+} __packed __aligned(8);
+
+struct ibmvfc_connection_info {
+ struct ibmvfc_mad_common common;
+ __be64 information_bits;
+#define IBMVFC_NO_FC_IO_CHANNEL 0x01
+#define IBMVFC_NO_PHYP_VAS 0x02
+#define IBMVFC_NO_PHYP_SUBQ 0x04
+#define IBMVFC_PHYP_DEPRECATED_SUBQ 0x08
+#define IBMVFC_PHYP_PRESERVED_SUBQ 0x10
+#define IBMVFC_PHYP_FULL_SUBQ 0x20
+ __be64 reserved[16];
+} __packed __aligned(8);
struct ibmvfc_trace_start_entry {
u32 xfer_len;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_end_entry {
u16 status;
@@ -493,7 +570,7 @@ struct ibmvfc_trace_end_entry {
u8 rsp_code;
u8 scsi_status;
u8 reserved;
-}__attribute__((packed));
+} __packed;
struct ibmvfc_trace_entry {
struct ibmvfc_event *evt;
@@ -510,7 +587,7 @@ struct ibmvfc_trace_entry {
struct ibmvfc_trace_start_entry start;
struct ibmvfc_trace_end_entry end;
} u;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
enum ibmvfc_crq_formats {
IBMVFC_CMD_FORMAT = 0x01,
@@ -532,6 +609,7 @@ enum ibmvfc_async_event {
IBMVFC_AE_HALT = 0x0400,
IBMVFC_AE_RESUME = 0x0800,
IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+ IBMVFC_AE_FPIN = 0x2000,
};
struct ibmvfc_async_desc {
@@ -545,7 +623,7 @@ struct ibmvfc_crq {
volatile u8 format;
u8 reserved[6];
volatile __be64 ioba;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_crq_queue {
struct ibmvfc_crq *msgs;
@@ -560,17 +638,25 @@ enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_DEAD = 0x08,
};
+enum ibmvfc_ae_fpin_status {
+ IBMVFC_AE_FPIN_LINK_CONGESTED = 0x1,
+ IBMVFC_AE_FPIN_PORT_CONGESTED = 0x2,
+ IBMVFC_AE_FPIN_PORT_CLEARED = 0x3,
+ IBMVFC_AE_FPIN_PORT_DEGRADED = 0x4,
+};
+
struct ibmvfc_async_crq {
volatile u8 valid;
u8 link_state;
- u8 pad[2];
+ u8 fpin_status;
+ u8 pad;
__be32 pad2;
volatile __be64 event;
volatile __be64 scsi_id;
volatile __be64 wwpn;
volatile __be64 node_name;
__be64 reserved;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
struct ibmvfc_async_crq_queue {
struct ibmvfc_async_crq *msgs;
@@ -585,12 +671,16 @@ union ibmvfc_iu {
struct ibmvfc_discover_targets discover_targets;
struct ibmvfc_port_login plogi;
struct ibmvfc_process_login prli;
+ struct ibmvfc_move_login move_login;
struct ibmvfc_query_tgt query_tgt;
struct ibmvfc_implicit_logout implicit_logout;
struct ibmvfc_tmf tmf;
struct ibmvfc_cmd cmd;
struct ibmvfc_passthru_mad passthru;
-}__attribute__((packed, aligned (8)));
+ struct ibmvfc_channel_enquiry channel_enquiry;
+ struct ibmvfc_channel_setup_mad channel_setup;
+ struct ibmvfc_connection_info connection_info;
+} __packed __aligned(8);
enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_NONE = 0,
@@ -600,12 +690,16 @@ enum ibmvfc_target_action {
IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
IBMVFC_TGT_ACTION_DEL_RPORT,
IBMVFC_TGT_ACTION_DELETED_RPORT,
+ IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT,
+ IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
};
struct ibmvfc_target {
struct list_head queue;
struct ibmvfc_host *vhost;
u64 scsi_id;
+ u64 wwpn;
+ u64 old_scsi_id;
struct fc_rport *rport;
int target_id;
enum ibmvfc_target_action action;
@@ -701,7 +795,7 @@ struct ibmvfc_host {
dma_addr_t login_buf_dma;
int disc_buf_sz;
int log_level;
- struct ibmvfc_discover_targets_buf *disc_buf;
+ struct ibmvfc_discover_targets_entry *disc_buf;
struct mutex passthru_mutex;
int task_set;
int init_retries;
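[Editor's note: ibmvfc.h converts the spelled-out attribute syntax to the kernel's __packed/__aligned() shorthands (provided via <linux/compiler_attributes.h>). The two forms are equivalent; a sketch with hypothetical structs:

	/* identical layout either way: no padding, 8-byte alignment */
	struct example_old {
		__be16 a;
		__be64 b;
	} __attribute__((packed, aligned (8)));

	struct example_new {
		__be16 a;
		__be64 b;
	} __packed __aligned(8);
]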
diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c
index 7b5deae68d33..7ebfa3c8cdc7 100644
--- a/drivers/scsi/isci/host.c
+++ b/drivers/scsi/isci/host.c
@@ -2671,7 +2671,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
struct isci_request *ireq)
{
enum sci_status status;
- u16 index;
switch (ihost->sm.current_state_id) {
case SCIC_STOPPING:
@@ -2682,7 +2681,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
if (status != SCI_SUCCESS)
return status;
- index = ISCI_TAG_TCI(ireq->io_tag);
clear_bit(IREQ_ACTIVE, &ireq->flags);
return SCI_SUCCESS;
default:
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index 085e285f427d..93bc9019667f 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -142,7 +142,7 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c
static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);
-struct device_attribute *isci_host_attrs[] = {
+static struct device_attribute *isci_host_attrs[] = {
&dev_attr_isci_id,
NULL
};
diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c
index 4cacb800b530..7041e2e3ab48 100644
--- a/drivers/scsi/isci/phy.c
+++ b/drivers/scsi/isci/phy.c
@@ -669,7 +669,7 @@ static const char *phy_event_name(u32 event_code)
phy_state_name(state), phy_event_name(code), code)
-void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
{
u32 val;
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 7f683e42c798..f0ed6863cc70 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -201,21 +201,9 @@ static struct platform_driver esp_jazz_driver = {
.name = "jazz_esp",
},
};
-
-static int __init jazz_esp_init(void)
-{
- return platform_driver_register(&esp_jazz_driver);
-}
-
-static void __exit jazz_esp_exit(void)
-{
- platform_driver_unregister(&esp_jazz_driver);
-}
+module_platform_driver(esp_jazz_driver);
MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(jazz_esp_init);
-module_exit(jazz_esp_exit);
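[Editor's note: module_platform_driver() generates exactly the registration boilerplate this hunk deletes. Its expansion, via the module_driver() helper, is roughly:

	/* module_platform_driver(esp_jazz_driver) expands to about this: */
	static int __init esp_jazz_driver_init(void)
	{
		return platform_driver_register(&esp_jazz_driver);
	}
	module_init(esp_jazz_driver_init);

	static void __exit esp_jazz_driver_exit(void)
	{
		platform_driver_unregister(&esp_jazz_driver);
	}
	module_exit(esp_jazz_driver_exit);
]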
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index e67abb184a8a..942fc60f7c21 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -301,8 +301,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
struct fc_lport *lport = fc_disc_lport(disc);
unsigned long delay = 0;
- FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
- PTR_ERR(fp), disc->retry_count,
+ FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
+ PTR_ERR_OR_ZERO(fp), disc->retry_count,
FC_DISC_RETRY_LIMIT);
if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
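[Editor's note: in libfc, fp is either a real frame or an errno value encoded with ERR_PTR(), so calling PTR_ERR() on a valid frame logged a meaningless "error". PTR_ERR_OR_ZERO() folds the valid-pointer case to 0; its behavior, approximately as in <linux/err.h>:

	static inline long PTR_ERR_OR_ZERO(const void *ptr)
	{
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);	/* encoded -errno */
		return 0;			/* valid pointer  */
	}
]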
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index a4887985aad6..024e5a550759 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -726,19 +726,13 @@ void sas_resume_sata(struct asd_sas_port *port)
*/
int sas_discover_sata(struct domain_device *dev)
{
- int res;
-
if (dev->dev_type == SAS_SATA_PM)
return -ENODEV;
dev->sata_dev.class = sas_get_ata_command_set(dev);
sas_fill_in_rphy(dev, dev->rphy);
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
static void async_sas_ata_eh(void *data, async_cookie_t cookie)
diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c
index d0f9e90e3279..161c9b387da7 100644
--- a/drivers/scsi/libsas/sas_discover.c
+++ b/drivers/scsi/libsas/sas_discover.c
@@ -278,13 +278,7 @@ static void sas_resume_devices(struct work_struct *work)
*/
int sas_discover_end_dev(struct domain_device *dev)
{
- int res;
-
- res = sas_notify_lldd_dev_found(dev);
- if (res)
- return res;
-
- return 0;
+ return sas_notify_lldd_dev_found(dev);
}
/* ---------- Device registration and unregistration ---------- */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ece6c250ebaf..e94eac194676 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -5338,8 +5338,7 @@ static ssize_t
lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
- int status = -EINVAL;
- return status;
+ return -EINVAL;
}
/*
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index d0141a23a833..a8bf4d0d58f0 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -387,6 +387,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
if (rc == IOCB_ERROR) {
+ geniocb->context_un.ndlp = NULL;
+ lpfc_nlp_put(ndlp);
lpfc_sli_release_iocbq(phba, geniocb);
return 1;
}
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index ae0a8252128c..c9a327b13e5c 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -1696,7 +1696,6 @@ static int
lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
{
struct lpfc_hba *phba = vport->phba;
- struct lpfc_sli4_hdw_queue *qp;
struct lpfc_hdwq_stat *c_stat;
int i, j, len;
uint32_t tot_xmt;
@@ -1726,8 +1725,6 @@ lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
goto buffer_done;
for (i = 0; i < phba->cfg_hdw_queue; i++) {
- qp = &phba->sli4_hba.hdwq[i];
-
tot_rcv = 0;
tot_xmt = 0;
tot_cmpl = 0;
@@ -5944,7 +5941,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
phba, &lpfc_debugfs_op_lockstat);
if (!phba->debug_lockstat) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
- "4610 Cant create debugfs lockstat\n");
+ "4610 Can't create debugfs lockstat\n");
goto debug_failed;
}
#endif
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 1c78bc10c790..6d23ab5aee56 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -439,22 +439,10 @@ static struct platform_driver esp_mac_driver = {
.name = DRV_MODULE_NAME,
},
};
-
-static int __init mac_esp_init(void)
-{
- return platform_driver_register(&esp_mac_driver);
-}
-
-static void __exit mac_esp_exit(void)
-{
- platform_driver_unregister(&esp_mac_driver);
-}
+module_platform_driver(esp_mac_driver);
MODULE_DESCRIPTION("Mac ESP SCSI driver");
MODULE_AUTHOR("Finn Thain");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_ALIAS("platform:" DRV_MODULE_NAME);
-
-module_init(mac_esp_init);
-module_exit(mac_esp_exit);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ac406049e7c8..80f546976c7e 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -133,8 +133,10 @@ mega_setup_mailbox(adapter_t *adapter)
{
unsigned long align;
- adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
- sizeof(mbox64_t), &adapter->una_mbox64_dma);
+ adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mbox64_t),
+ &adapter->una_mbox64_dma,
+ GFP_KERNEL);
if( !adapter->una_mbox64 ) return -1;
@@ -222,8 +224,9 @@ mega_query_adapter(adapter_t *adapter)
mraid_inquiry *inq;
dma_addr_t dma_handle;
- ext_inq = pci_alloc_consistent(adapter->dev,
- sizeof(mraid_ext_inquiry), &dma_handle);
+ ext_inq = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry),
+ &dma_handle, GFP_KERNEL);
if( ext_inq == NULL ) return -1;
@@ -243,8 +246,9 @@ mega_query_adapter(adapter_t *adapter)
mega_8_to_40ld(inq, inquiry3,
(mega_product_info *)&adapter->product_info);
- pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
- ext_inq, dma_handle);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mraid_ext_inquiry), ext_inq,
+ dma_handle);
} else { /*adapter supports 40ld */
adapter->flag |= BOARD_40LD;
@@ -253,9 +257,10 @@ mega_query_adapter(adapter_t *adapter)
* get product_info, which is static information and will be
* unchanged
*/
- prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
- &adapter->product_info,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
+ (void *)&adapter->product_info,
+ sizeof(mega_product_info),
+ DMA_FROM_DEVICE);
mbox->m_out.xferaddr = prod_info_dma_handle;
@@ -267,8 +272,8 @@ mega_query_adapter(adapter_t *adapter)
"Product_info cmd failed with error: %d\n",
retval);
- pci_unmap_single(adapter->dev, prod_info_dma_handle,
- sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+ dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
+ sizeof(mega_product_info), DMA_FROM_DEVICE);
}
@@ -645,7 +650,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#else
@@ -709,7 +714,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
}
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
pthru->numsgelements = mega_build_sglist(adapter, scb,
&pthru->dataxferaddr, &pthru->dataxferlen);
@@ -839,10 +844,10 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
* If it is a read command
*/
if( (*cmd->cmnd & 0x0F) == 0x08 ) {
- scb->dma_direction = PCI_DMA_FROMDEVICE;
+ scb->dma_direction = DMA_FROM_DEVICE;
}
else {
- scb->dma_direction = PCI_DMA_TODEVICE;
+ scb->dma_direction = DMA_TO_DEVICE;
}
/* Calculate Scatter-Gather info */
@@ -877,7 +882,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
scb->raw_mbox[3] = ldrv_num;
- scb->dma_direction = PCI_DMA_NONE;
+ scb->dma_direction = DMA_NONE;
return scb;
#endif
@@ -971,7 +976,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
/* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
switch (cmd->cmnd[0]) {
@@ -1035,7 +1040,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
/* Not sure about the direction */
- scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+ scb->dma_direction = DMA_BIDIRECTIONAL;
switch(cmd->cmnd[0]) {
case INQUIRY:
@@ -1813,25 +1818,25 @@ mega_free_sgl(adapter_t *adapter)
scb = &adapter->scb_list[i];
if( scb->sgl64 ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- scb->sgl64,
- scb->sgl_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ scb->sgl64, scb->sgl_dma_addr);
scb->sgl64 = NULL;
}
if( scb->pthru ) {
- pci_free_consistent(adapter->dev, sizeof(mega_passthru),
- scb->pthru, scb->pthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru), scb->pthru,
+ scb->pthru_dma_addr);
scb->pthru = NULL;
}
if( scb->epthru ) {
- pci_free_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- scb->epthru, scb->epthru_dma_addr);
+ dma_free_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ scb->epthru, scb->epthru_dma_addr);
scb->epthru = NULL;
}
@@ -2004,7 +2009,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
- if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
+ if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
kfree(*pdev);
return -1;
}
@@ -2028,14 +2033,16 @@ free_local_pdev(struct pci_dev *pdev)
static inline void *
mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
{
- return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
+ return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
+ dma_handle, GFP_KERNEL);
}
static inline void
mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
{
- pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
+ dma_handle);
}
@@ -2349,7 +2356,8 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
- scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
+ scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
+ GFP_KERNEL);
if( scsi_inq == NULL ) {
seq_puts(m, "memory not available for scsi inq.\n");
goto free_inquiry;
@@ -2422,7 +2430,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
}
free_pci:
- pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+ dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2542,8 +2550,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
raid_inq.logdrv_info.num_ldrv;
}
- disk_array = pci_alloc_consistent(pdev, array_sz,
- &disk_array_dma_handle);
+ disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
+ &disk_array_dma_handle, GFP_KERNEL);
if( disk_array == NULL ) {
seq_puts(m, "memory not available.\n");
@@ -2662,8 +2670,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
}
free_pci:
- pci_free_consistent(pdev, array_sz, disk_array,
- disk_array_dma_handle);
+ dma_free_coherent(&pdev->dev, array_sz, disk_array,
+ disk_array_dma_handle);
free_inquiry:
mega_free_inquiry(inquiry, dma_handle, pdev);
free_pdev:
@@ -2881,9 +2889,9 @@ mega_init_scb(adapter_t *adapter)
scb->idx = i;
- scb->sgl64 = pci_alloc_consistent(adapter->dev,
- sizeof(mega_sgl64) * adapter->sglen,
- &scb->sgl_dma_addr);
+ scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_sgl64) * adapter->sglen,
+ &scb->sgl_dma_addr, GFP_KERNEL);
scb->sgl = (mega_sglist *)scb->sgl64;
@@ -2893,9 +2901,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->pthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_passthru),
- &scb->pthru_dma_addr);
+ scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_passthru),
+ &scb->pthru_dma_addr, GFP_KERNEL);
if( !scb->pthru ) {
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
@@ -2903,9 +2911,9 @@ mega_init_scb(adapter_t *adapter)
return -1;
}
- scb->epthru = pci_alloc_consistent(adapter->dev,
- sizeof(mega_ext_passthru),
- &scb->epthru_dma_addr);
+ scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
+ sizeof(mega_ext_passthru),
+ &scb->epthru_dma_addr, GFP_KERNEL);
if( !scb->epthru ) {
dev_warn(&adapter->dev->dev,
@@ -3145,9 +3153,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
/* Passthru commands */
- pthru = pci_alloc_consistent(pdev,
- sizeof(mega_passthru),
- &pthru_dma_hndl);
+ pthru = dma_alloc_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ &pthru_dma_hndl, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -3165,9 +3173,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
if( copy_from_user(pthru, upthru,
sizeof(mega_passthru)) ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru), pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3178,15 +3186,16 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
* Is there a data transfer
*/
if( pthru->dataxferlen ) {
- data = pci_alloc_consistent(pdev,
- pthru->dataxferlen,
- &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ pthru->dataxferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
- pci_free_consistent(pdev,
- sizeof(mega_passthru),
- pthru,
- pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ sizeof(mega_passthru),
+ pthru,
+ pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3251,13 +3260,13 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
freemem_and_return:
if( pthru->dataxferlen ) {
- pci_free_consistent(pdev,
- pthru->dataxferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ pthru->dataxferlen, data,
+ data_dma_hndl);
}
- pci_free_consistent(pdev, sizeof(mega_passthru),
- pthru, pthru_dma_hndl);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
+ pthru, pthru_dma_hndl);
free_local_pdev(pdev);
@@ -3270,8 +3279,10 @@ freemem_and_return:
* Is there a data transfer
*/
if( uioc.xferlen ) {
- data = pci_alloc_consistent(pdev,
- uioc.xferlen, &data_dma_hndl);
+ data = dma_alloc_coherent(&pdev->dev,
+ uioc.xferlen,
+ &data_dma_hndl,
+ GFP_KERNEL);
if( data == NULL ) {
free_local_pdev(pdev);
@@ -3291,9 +3302,9 @@ freemem_and_return:
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
uioc.xferlen) ) {
- pci_free_consistent(pdev,
- uioc.xferlen,
- data, data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
free_local_pdev(pdev);
@@ -3314,9 +3325,9 @@ freemem_and_return:
if( rval ) {
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev,
+ uioc.xferlen, data,
+ data_dma_hndl);
}
free_local_pdev(pdev);
@@ -3336,9 +3347,8 @@ freemem_and_return:
}
if( uioc.xferlen ) {
- pci_free_consistent(pdev,
- uioc.xferlen, data,
- data_dma_hndl);
+ dma_free_coherent(&pdev->dev, uioc.xferlen,
+ data, data_dma_hndl);
}
free_local_pdev(pdev);
@@ -4004,8 +4014,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
*/
if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
- pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
- &pthru_dma_handle);
+ pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
+ &pthru_dma_handle, GFP_KERNEL);
if( pthru == NULL ) {
free_local_pdev(pdev);
@@ -4041,8 +4051,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
rval = mega_internal_command(adapter, &mc, pthru);
- pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
- pthru_dma_handle);
+ dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
+ pthru_dma_handle);
free_local_pdev(pdev);
@@ -4267,8 +4277,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/*
* Allocate buffer to issue internal commands.
*/
- adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
- MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
+ adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
+ MEGA_BUFFER_SIZE,
+ &adapter->buf_dma_handle,
+ GFP_KERNEL);
if (!adapter->mega_buffer) {
dev_warn(&pdev->dev, "out of RAM\n");
goto out_host_put;
@@ -4427,10 +4439,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Set the Mode of addressing to 64 bit if we can */
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
adapter->has_64bit_addr = 1;
} else {
- pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
adapter->has_64bit_addr = 0;
}
@@ -4469,15 +4481,15 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
out_free_mbox:
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
out_free_irq:
free_irq(adapter->host->irq, adapter);
out_free_scb_list:
kfree(adapter->scb_list);
out_free_cmd_buffer:
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
out_host_put:
scsi_host_put(host);
out_iounmap:
@@ -4551,11 +4563,11 @@ megaraid_remove_one(struct pci_dev *pdev)
sprintf(buf, "hba%d", adapter->host->host_no);
remove_proc_subtree(buf, mega_proc_dir_entry);
- pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
- adapter->mega_buffer, adapter->buf_dma_handle);
+ dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
+ adapter->mega_buffer, adapter->buf_dma_handle);
kfree(adapter->scb_list);
- pci_free_consistent(adapter->dev, sizeof(mbox64_t),
- adapter->una_mbox64, adapter->una_mbox64_dma);
+ dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
+ adapter->una_mbox64, adapter->una_mbox64_dma);
scsi_host_put(host);
pci_disable_device(pdev);
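The megaraid conversions above all follow one mechanical pattern. A minimal sketch of the mapping (assuming these call sites may sleep, which holds for the probe and ioctl paths touched here; the legacy wrapper hard-coded GFP_ATOMIC):

	/* old compat API, since removed from the kernel */
	buf = pci_alloc_consistent(pdev, len, &dma_handle);
	pci_free_consistent(pdev, len, buf, dma_handle);

	/* direct replacement: explicit struct device and gfp flags */
	buf = dma_alloc_coherent(&pdev->dev, len, &dma_handle, GFP_KERNEL);
	dma_free_coherent(&pdev->dev, len, buf, dma_handle);

Switching the implied GFP_ATOMIC to GFP_KERNEL is the only behavioral change, and it makes these allocations more robust under memory pressure.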
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 020270ce790b..41cd66fc7d81 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -78,7 +78,7 @@ unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
module_param(resetwaittime, int, 0444);
MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");
-int smp_affinity_enable = 1;
+static int smp_affinity_enable = 1;
module_param(smp_affinity_enable, int, 0444);
MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 8062bd99add8..93230cd1982f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -129,8 +129,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
static int
_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
-static void
_base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);
/**
@@ -680,7 +678,7 @@ _base_fault_reset_work(struct work_struct *work)
ioc->shost_recovery = 1;
spin_unlock_irqrestore(
&ioc->ioc_reset_in_progress_lock, flags);
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
_base_clear_outstanding_commands(ioc);
}
@@ -1466,13 +1464,13 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
}
/**
- * _base_mask_interrupts - disable interrupts
+ * mpt3sas_base_mask_interrupts - disable interrupts
* @ioc: per adapter object
*
* Disabling ResetIRQ, Reply and Doorbell Interrupts
*/
-static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1484,13 +1482,13 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
}
/**
- * _base_unmask_interrupts - enable interrupts
+ * mpt3sas_base_unmask_interrupts - enable interrupts
* @ioc: per adapter object
*
* Enabling only Reply Interrupts
*/
-static void
-_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
{
u32 him_register;
@@ -1628,7 +1626,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
* So that FW can find enough entries to post the Reply
* Descriptors in the reply descriptor post queue.
*/
- if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
+ if (completed_cmds >= ioc->thresh_hold) {
if (ioc->combined_reply_queue) {
writel(reply_q->reply_post_host_index |
((msix_index & 7) <<
@@ -1787,12 +1785,14 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
/**
* mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
* @ioc: per adapter object
+ * @poll: poll the reply descriptor pools in case the interrupt for a
+ *	timed-out SCSI command got delayed
 * Context: non-ISR context
*
* Called when a Task Management request has completed.
*/
void
-mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
{
struct adapter_reply_queue *reply_q;
@@ -1809,19 +1809,25 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
/* TMs are on msix_index == 0 */
if (reply_q->msix_index == 0)
continue;
+ synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
if (reply_q->irq_poll_scheduled) {
/* Calling irq_poll_disable will wait for any pending
* callbacks to have completed.
*/
irq_poll_disable(&reply_q->irqpoll);
irq_poll_enable(&reply_q->irqpoll);
- reply_q->irq_poll_scheduled = false;
- reply_q->irq_line_enable = true;
- enable_irq(reply_q->os_irq);
- continue;
+ /* check how the scheduled poll has ended,
+ * clean up only if necessary
+ */
+ if (reply_q->irq_poll_scheduled) {
+ reply_q->irq_poll_scheduled = false;
+ reply_q->irq_line_enable = true;
+ enable_irq(reply_q->os_irq);
+ }
}
- synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
}
+ if (poll)
+ _base_process_reply_queue(reply_q);
}
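A short usage sketch of the new poll argument, mirroring the TM post-processing path added later in this series:

	/* reap a reply that may have raced with a timed-out command's TM */
	mpt3sas_base_mask_interrupts(ioc);
	mpt3sas_base_sync_reply_irqs(ioc, 1);	/* 1 = also poll the reply queues */
	mpt3sas_base_unmask_interrupts(ioc);

Callers that only need the historical flush semantics pass 0, as the existing call site in mpt3sas_scsih_issue_tm() now does.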
/**
@@ -3372,7 +3378,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
goto out_fail;
}
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
r = _base_get_ioc_facts(ioc);
if (r) {
@@ -5257,7 +5263,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
_base_release_memory_pools(ioc);
goto retry_allocation;
}
- memset(ioc->request, 0, sz);
if (retry_sz)
ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5619,6 +5624,23 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
}
/**
+ * _base_dump_reg_set - print a hexdump of the system register set
+ * @ioc: per adapter object
+ *
+ * Return: Nothing.
+ */
+static inline void
+_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned int i, sz = 256;
+ u32 __iomem *reg = (u32 __iomem *)ioc->chip;
+
+ ioc_info(ioc, "System Register set:\n");
+ for (i = 0; i < (sz / sizeof(u32)); i++)
+ pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
+/**
* _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
* a write to the doorbell)
* @ioc: per adapter object
@@ -6797,6 +6819,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (count++ > 20) {
ioc_info(ioc,
"Stop writing magic sequence after 20 retries\n");
+ _base_dump_reg_set(ioc);
goto out;
}
@@ -6825,6 +6848,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (host_diagnostic == 0xFFFFFFFF) {
ioc_info(ioc,
"Invalid host diagnostic register value\n");
+ _base_dump_reg_set(ioc);
goto out;
}
if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
@@ -6859,6 +6883,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
if (ioc_state) {
ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
__func__, ioc_state);
+ _base_dump_reg_set(ioc);
goto out;
}
@@ -7101,7 +7126,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
skip_init_reply_post_host_index:
- _base_unmask_interrupts(ioc);
+ mpt3sas_base_unmask_interrupts(ioc);
if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
r = _base_display_fwpkg_version(ioc);
@@ -7150,7 +7175,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
/* synchronizing freeing resource with pci_access_mutex lock */
mutex_lock(&ioc->pci_access_mutex);
if (ioc->chip_phys && ioc->chip) {
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
ioc->shost_recovery = 1;
_base_make_ioc_ready(ioc, SOFT_RESET);
ioc->shost_recovery = 0;
@@ -7716,7 +7741,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
}
_base_pre_reset_handler(ioc);
mpt3sas_wait_for_commands_to_complete(ioc);
- _base_mask_interrupts(ioc);
+ mpt3sas_base_mask_interrupts(ioc);
r = _base_make_ioc_ready(ioc, type);
if (r)
goto out;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 4ff876c31272..bc8beb10f3fc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -76,8 +76,8 @@
#define MPT3SAS_DRIVER_NAME "mpt3sas"
#define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION "34.100.00.00"
-#define MPT3SAS_MAJOR_VERSION 34
+#define MPT3SAS_DRIVER_VERSION "35.100.00.00"
+#define MPT3SAS_MAJOR_VERSION 35
#define MPT3SAS_MINOR_VERSION 100
#define MPT3SAS_BUILD_VERSION 0
#define MPT3SAS_RELEASE_VERSION 00
@@ -1036,6 +1036,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
* @firmware_event_thread: ""
* @fw_event_lock:
* @fw_event_list: list of fw events
+ * @current_event: the firmware event currently being processed
+ * @fw_events_cleanup: set to one while cleaning up the fw events
* @aen_event_read_flag: event log was read
* @broadcast_aen_busy: broadcast aen waiting to be serviced
* @shost_recovery: host reset in progress
@@ -1217,6 +1219,8 @@ struct MPT3SAS_ADAPTER {
struct workqueue_struct *firmware_event_thread;
spinlock_t fw_event_lock;
struct list_head fw_event_list;
+ struct fw_event_work *current_event;
+ u8 fw_events_cleanup;
/* misc flags */
int aen_event_read_flag;
@@ -1524,7 +1528,9 @@ __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
u16 smid);
void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll);
+void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
u16 handle);
@@ -1604,11 +1610,12 @@ void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
struct MPT3SAS_ADAPTER *ioc);
void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);
-int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method);
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method);
void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 11026e0ef3d0..4a0ddc7c95e4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -371,7 +371,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
}
r = 0;
- memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));
ioc->config_cmds.status = MPT3_CMD_PENDING;
config_request = mpt3sas_base_get_msg_frame(ioc, smid);
ioc->config_cmds.smid = smid;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index 7c119b904834..0f2b681449e6 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -1109,13 +1109,15 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
pcie_device->device_info))))
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, pcie_device->reset_timeout,
MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
else
mpt3sas_scsih_issue_locked_tm(ioc,
le16_to_cpu(mpi_request->FunctionDependent1),
- 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 0, 0, 0,
+ MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
} else
mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
@@ -3384,12 +3386,10 @@ host_trace_buffer_enable_store(struct device *cdev,
&&
(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
- pci_free_consistent(ioc->pdev,
- ioc->diag_buffer_sz[
- MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
- ioc->diag_buffer_dma[
- MPI2_DIAG_BUF_TYPE_TRACE]);
+ dma_free_coherent(&ioc->pdev->dev,
+ ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+ ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
NULL;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 2e2756d8a49b..5f845d7094fc 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -1513,6 +1513,66 @@ _scsih_is_nvme_pciescsi_device(u32 device_info)
}
/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+ for (smid = 1;
+ smid <= ioc->shost->can_queue; smid++) {
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel)
+ return 1;
+ }
+ return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ int smid;
+ struct scsi_cmnd *scmd;
+
+ for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+
+ scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ if (scmd->device->id == id &&
+ scmd->device->channel == channel &&
+ scmd->device->lun == lun)
+ return 1;
+ }
+ return 0;
+}
+
+/**
* mpt3sas_scsih_scsi_lookup_get - returns scmd entry
* @ioc: per adapter object
* @smid: system request message index
@@ -2701,9 +2761,101 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
}
/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Check whether the TM has aborted the timed-out SCSI command; if it
+ * has, return SUCCESS, otherwise return FAILED.
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task)
+{
+
+ if (smid_task <= ioc->shost->can_queue) {
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (!(_scsih_scsi_lookup_find_by_target(ioc,
+ id, channel)))
+ return SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+ lun, channel)))
+ return SUCCESS;
+ break;
+ default:
+ return SUCCESS;
+ }
+ } else if (smid_task == ioc->scsih_cmds.smid) {
+ if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ } else if (smid_task == ioc->ctl_cmds.smid) {
+ if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+ (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+ return SUCCESS;
+ }
+
+ return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency it
+ * is possible that the interrupt for the aborted IO has not been
+ * received yet. So before returning a failure status, poll the reply
+ * descriptor pools for the reply of the timed-out SCSI command; return
+ * FAILED if that reply is still missing, otherwise return SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+ int rc;
+
+ rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+ if (rc == SUCCESS)
+ return rc;
+
+ ioc_info(ioc,
+ "Poll ReplyDescriptor queues for completion of"
+ " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+ smid_task, type, handle);
+
+ /*
+ * Due to interrupt latency issues, the driver may receive the
+ * interrupt for the TM first and only later the one for the
+ * aborted SCSI IO. So poll all the ReplyDescriptor pools before
+ * returning FAILED status to the SML.
+ */
+ mpt3sas_base_mask_interrupts(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 1);
+ mpt3sas_base_unmask_interrupts(ioc);
+
+ return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
+
+/**
* mpt3sas_scsih_issue_tm - main routine for sending tm requests
* @ioc: per adapter struct
* @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
* @lun: lun number
* @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
* @smid_task: smid assigned to the task
@@ -2720,11 +2872,13 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
* Return: SUCCESS or FAILED.
*/
int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
- u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
+ u8 timeout, u8 tr_method)
{
Mpi2SCSITaskManagementRequest_t *mpi_request;
Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi25SCSIIORequest_t *request;
u16 smid = 0;
u32 ioc_state;
int rc;
@@ -2780,7 +2934,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
mpi_request->DevHandle = cpu_to_le16(handle);
mpi_request->TaskType = type;
- mpi_request->MsgFlags = tr_method;
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ mpi_request->MsgFlags = tr_method;
mpi_request->TaskMID = cpu_to_le16(smid_task);
int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2800,7 +2956,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
}
/* sync IRQs in case those were busy during flush. */
- mpt3sas_base_sync_reply_irqs(ioc);
+ mpt3sas_base_sync_reply_irqs(ioc, 0);
if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
@@ -2817,7 +2973,44 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
sizeof(Mpi2SCSITaskManagementRequest_t)/4);
}
}
- rc = SUCCESS;
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ /*
+ * If the DevHandle field in smid_task's entry of the request
+ * pool does not match the device handle this task abort TM was
+ * issued for, the TM has successfully aborted the timed-out
+ * command, because the entry is memset to zero once the command
+ * is returned to the SML. If the command was not aborted, the
+ * entry is left uncleared, still carries the same DevHandle,
+ * and the driver returns the TM status as FAILED.
+ */
+ request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+ if (le16_to_cpu(request->DevHandle) != handle)
+ break;
+
+ ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
+ "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+ handle, timeout, tr_method, smid_task, msix_task);
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+ type, smid_task);
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
out:
mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2826,14 +3019,14 @@ out:
}
int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
- u64 lun, u8 type, u16 smid_task, u16 msix_task,
- u8 timeout, u8 tr_method)
+ uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+ u16 msix_task, u8 timeout, u8 tr_method)
{
int ret;
mutex_lock(&ioc->tm_cmds.mutex);
- ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
- msix_task, timeout, tr_method);
+ ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+ smid_task, msix_task, timeout, tr_method);
mutex_unlock(&ioc->tm_cmds.mutex);
return ret;
@@ -2980,7 +3173,8 @@ scsih_abort(struct scsi_cmnd *scmd)
if (pcie_device && (!ioc->tm_custom_handling) &&
(!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
timeout = ioc->nvme_abort_timeout;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
st->smid, st->msix_io, timeout, 0);
/* Command must be cleared after abort */
@@ -3056,7 +3250,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
@@ -3134,7 +3329,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
} else
tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
- r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+ r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0,
MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
tr_timeout, tr_method);
/* Check for busy commands after reset */
@@ -3323,11 +3519,13 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
{
struct fw_event_work *fw_event;
- if (list_empty(&ioc->fw_event_list) ||
+ if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
!ioc->firmware_event_thread || in_interrupt())
return;
- while ((fw_event = dequeue_next_fw_event(ioc))) {
+ ioc->fw_events_cleanup = 1;
+ while ((fw_event = dequeue_next_fw_event(ioc)) ||
+ (fw_event = ioc->current_event)) {
/*
* Wait on the fw_event to complete. If this returns 1, then
* the event was never executed, and we need a put for the
@@ -3341,6 +3539,7 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
fw_event_work_put(fw_event);
}
+ ioc->fw_events_cleanup = 0;
}
/**
@@ -7527,7 +7726,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
goto out;
spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
- r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
st->msix_io, 30, 0);
if (r == FAILED) {
@@ -7568,9 +7767,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
if (ioc->shost_recovery)
goto out_no_lock;
- r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
- MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
- st->msix_io, 30, 0);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+ st->smid, st->msix_io, 30, 0);
if (r == FAILED || st->cb_idx != 0xFF) {
sdev_printk(KERN_WARNING, sdev,
"mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -9421,11 +9620,13 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
static void
_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
{
+ ioc->current_event = fw_event;
_scsih_fw_event_del_from_list(ioc, fw_event);
/* the queue is being flushed so ignore this event */
if (ioc->remove_host || ioc->pci_error_recovery) {
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
return;
}
@@ -9439,10 +9640,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
while (scsi_host_in_recovery(ioc->shost) ||
ioc->shost_recovery) {
/*
- * If we're unloading, bail. Otherwise, this can become
- * an infinite loop.
+ * If we're unloading or cancelling the work, bail.
+ * Otherwise, this can become an infinite loop.
*/
- if (ioc->remove_host)
+ if (ioc->remove_host || ioc->fw_events_cleanup)
goto out;
ssleep(1);
}
@@ -9503,11 +9704,13 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
break;
case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
_scsih_pcie_topology_change_event(ioc, fw_event);
+ ioc->current_event = NULL;
return;
break;
}
out:
fw_event_work_put(fw_event);
+ ioc->current_event = NULL;
}
/**
@@ -9889,6 +10092,34 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
}
/**
+ * _scsih_get_shost_and_ioc - fetch the shost and ioc for a pdev,
+ *	verifying that neither is NULL
+ * @pdev: PCI device struct
+ * @shost: address of scsi host pointer
+ * @ioc: address of HBA adapter pointer
+ *
+ * Return zero if *shost and *ioc are not NULL; otherwise return an
+ * error number.
+ */
+static int
+_scsih_get_shost_and_ioc(struct pci_dev *pdev,
+ struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
+{
+ *shost = pci_get_drvdata(pdev);
+ if (*shost == NULL) {
+ dev_err(&pdev->dev, "pdev's driver data is null\n");
+ return -ENXIO;
+ }
+
+ *ioc = shost_priv(*shost);
+ if (*ioc == NULL) {
+ dev_err(&pdev->dev, "shost's private data is null\n");
+ return -ENXIO;
+ }
+
+ return 0;
+}
+
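All of the PCI callbacks below are reworked to bail out through this helper instead of dereferencing pci_get_drvdata() blindly; the representative pattern, sketched after scsih_remove():

	struct Scsi_Host *shost;
	struct MPT3SAS_ADAPTER *ioc;

	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
		return;	/* drvdata already gone, e.g. after a failed probe */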
+/**
* scsih_remove - detach and remove add host
* @pdev: PCI device struct
*
@@ -9896,8 +10127,8 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
*/
static void scsih_remove(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct _sas_port *mpt3sas_port, *next_port;
struct _raid_device *raid_device, *next;
struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -9906,6 +10137,9 @@ static void scsih_remove(struct pci_dev *pdev)
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
+
ioc->remove_host = 1;
if (!pci_device_is_present(pdev))
@@ -9985,12 +10219,15 @@ static void scsih_remove(struct pci_dev *pdev)
static void
scsih_shutdown(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
struct workqueue_struct *wq;
unsigned long flags;
Mpi2ConfigReply_t mpi_reply;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
+
ioc->remove_host = 1;
if (!pci_device_is_present(pdev))
@@ -10560,6 +10797,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
return MPI26_VERSION;
}
return 0;
@@ -10649,6 +10890,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
case MPI26_ATLAS_PCIe_SWITCH_DEVID:
ioc->is_gen35_ioc = 1;
break;
+ case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
+ case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+ case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+ dev_err(&pdev->dev,
+ "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
+ pdev->device, pdev->subsystem_vendor,
+ pdev->subsystem_device);
+ return 1;
case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
dev_info(&pdev->dev,
@@ -10840,9 +11095,14 @@ out_add_shost_fail:
static int
scsih_suspend(struct pci_dev *pdev, pm_message_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state;
+ int rc;
+
+ rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (rc)
+ return rc;
mpt3sas_base_stop_watchdog(ioc);
flush_scheduled_work();
@@ -10867,11 +11127,15 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
static int
scsih_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
pci_power_t device_state = pdev->current_state;
int r;
+ r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+ if (r)
+ return r;
+
ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
pdev, pci_name(pdev), device_state);
@@ -10902,8 +11166,11 @@ scsih_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
@@ -10938,10 +11205,13 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
static pci_ers_result_t
scsih_pci_slot_reset(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
int rc;
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
+
ioc_info(ioc, "PCI error: slot reset callback!!\n");
ioc->pci_error_recovery = 0;
@@ -10974,8 +11244,11 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
static void
scsih_pci_resume(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return;
ioc_info(ioc, "PCI error: resume callback!!\n");
@@ -10990,8 +11263,11 @@ scsih_pci_resume(struct pci_dev *pdev)
static pci_ers_result_t
scsih_pci_mmio_enabled(struct pci_dev *pdev)
{
- struct Scsi_Host *shost = pci_get_drvdata(pdev);
- struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+ return PCI_ERS_RESULT_DISCONNECT;
ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
@@ -11139,6 +11415,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
+ PCI_ANY_ID, PCI_ANY_ID },
+
/* Atlas PCIe Switch Management Port */
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
PCI_ANY_ID, PCI_ANY_ID },
@@ -11151,6 +11435,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
PCI_ANY_ID, PCI_ANY_ID },
+ /*
+ * Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
+ */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
+ PCI_ANY_ID, PCI_ANY_ID },
+
{0} /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
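Claiming the known-bad IDs may look backwards, but without a pci_device_id entry _scsih_probe() would never run and the security state would go unreported; with an entry, probe logs Invalid or Tampered via dev_err() and returns nonzero, so the device is left unbound.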
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index 978f5283c883..6aa2697c4a15 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -246,19 +246,16 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->tx_dma, GFP_KERNEL);
if (!mvi->tx)
goto err_out;
- memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
&mvi->rx_fis_dma, GFP_KERNEL);
if (!mvi->rx_fis)
goto err_out;
- memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
mvi->rx = dma_alloc_coherent(mvi->dev,
sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
&mvi->rx_dma, GFP_KERNEL);
if (!mvi->rx)
goto err_out;
- memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
mvi->rx[0] = cpu_to_le32(0xfff);
mvi->rx_cons = 0xfff;
@@ -267,7 +264,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
&mvi->slot_dma, GFP_KERNEL);
if (!mvi->slot)
goto err_out;
- memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
TRASH_BUCKET_SIZE,
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index 8906aceda4c4..0354898d7cac 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2425,6 +2425,7 @@ static int mvumi_io_attach(struct mvumi_hba *mhba)
if (IS_ERR(mhba->dm_thread)) {
dev_err(&mhba->pdev->dev,
"failed to create device scan thread\n");
+ ret = PTR_ERR(mhba->dm_thread);
mutex_unlock(&mhba->sas_discovery_mutex);
goto fail_create_thread;
}
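The fix restores the usual error-propagation idiom for kthread creation; a sketch of the pattern (the callback and thread name here are illustrative, not the driver's actual ones):

	task = kthread_create(scan_fn, mhba, "dm_thread");
	if (IS_ERR(task))
		return PTR_ERR(task);	/* kthread_create() returns ERR_PTR(), never NULL */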
diff --git a/drivers/scsi/myrb.c b/drivers/scsi/myrb.c
index b2869c5dd7fb..cbf1e8b091b9 100644
--- a/drivers/scsi/myrb.c
+++ b/drivers/scsi/myrb.c
@@ -2226,7 +2226,7 @@ static struct device_attribute *myrb_shost_attrs[] = {
NULL,
};
-struct scsi_host_template myrb_template = {
+static struct scsi_host_template myrb_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrb",
@@ -2315,7 +2315,7 @@ static void myrb_get_state(struct device *dev)
raid_set_state(myrb_raid_template, dev, state);
}
-struct raid_function_template myrb_raid_functions = {
+static struct raid_function_template myrb_raid_functions = {
.cookie = &myrb_template,
.is_raid = myrb_is_raid,
.get_resync = myrb_get_resync,
@@ -2489,7 +2489,7 @@ static void myrb_monitor(struct work_struct *work)
*
* Return: true for fatal errors and false otherwise.
*/
-bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
unsigned char parm0, unsigned char parm1)
{
struct pci_dev *pdev = cb->pdev;
diff --git a/drivers/scsi/myrs.c b/drivers/scsi/myrs.c
index 103803e779f2..7a3ade765ce3 100644
--- a/drivers/scsi/myrs.c
+++ b/drivers/scsi/myrs.c
@@ -1529,7 +1529,7 @@ static struct device_attribute *myrs_shost_attrs[] = {
/*
* SCSI midlayer interface
*/
-int myrs_host_reset(struct scsi_cmnd *scmd)
+static int myrs_host_reset(struct scsi_cmnd *scmd)
{
struct Scsi_Host *shost = scmd->device->host;
struct myrs_hba *cs = shost_priv(shost);
@@ -1919,7 +1919,7 @@ static void myrs_slave_destroy(struct scsi_device *sdev)
kfree(sdev->hostdata);
}
-struct scsi_host_template myrs_template = {
+static struct scsi_host_template myrs_template = {
.module = THIS_MODULE,
.name = "DAC960",
.proc_name = "myrs",
@@ -2033,7 +2033,7 @@ myrs_get_state(struct device *dev)
raid_set_state(myrs_raid_template, dev, state);
}
-struct raid_function_template myrs_raid_functions = {
+static struct raid_function_template myrs_raid_functions = {
.cookie = &myrs_template,
.is_raid = myrs_is_raid,
.get_resync = myrs_get_resync,
@@ -2043,7 +2043,7 @@ struct raid_function_template myrs_raid_functions = {
/*
* PCI interface functions
*/
-void myrs_flush_cache(struct myrs_hba *cs)
+static void myrs_flush_cache(struct myrs_hba *cs)
{
myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
}
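Marking these previously global myrb/myrs symbols static is a pure namespace cleanup, most likely prompted by -Wmissing-prototypes or sparse warnings; there is no behavioral change.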
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index b6e04d14292d..da814c2da16d 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -1247,7 +1247,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
* ---> AutoSCSI with MSGOUTreg is processed.
*/
data->msgout_len = 0;
- };
+ }
nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
}
@@ -1839,7 +1839,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
nsp32_read1(base, SCSI_BUS_MONITOR));
- };
+ }
data->msgout_len = 0;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index aa9ae2ae8579..cbe5fab793eb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -2860,10 +2860,8 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
{
struct pmcraid_cmd *cancel_cmd;
struct pmcraid_instance *pinstance;
- struct pmcraid_resource_entry *res;
pinstance = (struct pmcraid_instance *)cmd->drv_inst;
- res = cmd->scsi_cmd->device->hostdata;
cancel_cmd = pmcraid_get_free_cmd(pinstance);
@@ -4716,7 +4714,6 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
return -ENOMEM;
}
- memset(pinstance->hrrq_start[i], 0, buffer_size);
pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
pinstance->hrrq_end[i] =
pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index e163be8af965..0e2cbb164eeb 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -389,6 +389,7 @@ struct qedf_ctx {
mempool_t *io_mempool;
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
struct delayed_work grcdump_work;
struct delayed_work stag_work;
@@ -541,9 +542,17 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
extern void qedf_wq_grcdump(struct work_struct *work);
void qedf_stag_change_work(struct work_struct *work);
void qedf_ctx_soft_reset(struct fc_lport *lport);
+extern void qedf_board_disable_work(struct work_struct *work);
+extern void qedf_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
#define FCOE_WORD_TO_BYTE 4
#define QEDF_MAX_TASK_NUM 0xFFFF
+#define QL45xxx 0x165C
+#define QL41xxx 0x8080
+#define MAX_CT_PAYLOAD 2048
+#define DISCOVERED_PORTS 4
+#define NUMBER_OF_PORTS 1
struct fip_vlan {
struct ethhdr eth;
diff --git a/drivers/scsi/qedf/qedf_els.c b/drivers/scsi/qedf/qedf_els.c
index 542ba9454257..625e58ccb8c8 100644
--- a/drivers/scsi/qedf/qedf_els.c
+++ b/drivers/scsi/qedf/qedf_els.c
@@ -124,7 +124,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
task = qedf_get_task_mem(&qedf->tasks, xid);
qedf_init_mp_task(els_req, task, sqe);
- /* Put timer on original I/O request */
+ /* Put the timer on the ELS request */
if (timer_msec)
qedf_cmd_timer_set(qedf, els_req, timer_msec);
@@ -143,10 +143,33 @@ void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
struct qedf_ioreq *els_req)
{
struct fcoe_cqe_midpath_info *mp_info;
+ struct qedf_rport *fcport;
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
" cmd_type = %d.\n", els_req->xid, els_req->cmd_type);
+ if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
+ || (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "ELS completion xid=0x%x after flush event=0x%x",
+ els_req->xid, els_req->event);
+ return;
+ }
+
+ fcport = els_req->fcport;
+
+ /* When flush is active,
+ * let the cmds be completed from the cleanup context
+ */
+ if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+ test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+ QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+ "Dropping ELS completion xid=0x%x as fcport is flushing",
+ els_req->xid);
+ return;
+ }
+
clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
/* Kill the ELS timer */
@@ -185,10 +208,6 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
goto out_free;
}
- if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
- rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
- cancel_delayed_work_sync(&orig_io_req->timeout_work);
-
refcount = kref_read(&orig_io_req->refcount);
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
" orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
@@ -883,6 +902,11 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
opcode = fc_frame_payload_op(fp);
if (opcode == ELS_LS_RJT) {
rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+ if (!rjt) {
+ QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
+ goto out_free_frame;
+ }
+
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
"Received LS_RJT for REC: er_reason=0x%x, "
"er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index acd9774a9387..4869ef813dc4 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -85,13 +85,13 @@ static void qedf_cmd_timeout(struct work_struct *work)
*/
QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
io_req->xid);
+ qedf_initiate_cleanup(io_req, true);
io_req->event = QEDF_IOREQ_EV_ELS_TMO;
/* Call callback function to complete command */
if (io_req->cb_func && io_req->cb_arg) {
io_req->cb_func(io_req->cb_arg);
io_req->cb_arg = NULL;
}
- qedf_initiate_cleanup(io_req, true);
kref_put(&io_req->refcount, qedf_release_cmd);
break;
case QEDF_SEQ_CLEANUP:
@@ -1562,6 +1562,8 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
*/
els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;
+ clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);
+
/* Cancel the timer */
cancel_delayed_work_sync(&els_req->timeout_work);
@@ -1704,8 +1706,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
io_req, io_req->xid);
continue;
}
+ qedf_initiate_cleanup(io_req, false);
flush_cnt++;
qedf_flush_els_req(qedf, io_req);
+
/*
* Release the kref and go back to the top of the
* loop.
@@ -2159,7 +2163,6 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
/* Sanity check qedf_rport before dereferencing any pointers */
if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
QEDF_ERR(NULL, "tgt not offloaded\n");
- rc = 1;
return SUCCESS;
}
@@ -2169,6 +2172,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
return SUCCESS;
}
+ if (io_req->cmd_type == QEDF_ELS) {
+ goto process_els;
+ }
+
if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "
@@ -2178,6 +2185,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
}
set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);
+process_els:
/* Ensure room on SQ */
if (!atomic_read(&fcport->free_sqes)) {
QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 5ca424df355c..46d185cb9ea8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -41,7 +41,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
"remote ports (default 60)");
uint qedf_debug = QEDF_LOG_INFO;
-module_param_named(debug, qedf_debug, uint, S_IRUGO);
+module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
" mask");
@@ -105,6 +105,12 @@ module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
"during probe (0-3: 0 more verbose).");
+static bool qedf_enable_recovery = true;
+module_param_named(enable_recovery, qedf_enable_recovery,
+ bool, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
+ "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");
+
struct workqueue_struct *qedf_io_wq;
static struct fcoe_percpu_s qedf_global;
@@ -690,6 +696,7 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
.dcbx_aen = qedf_dcbx_handler,
.get_generic_tlv_data = qedf_get_generic_tlv_data,
.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
+ .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
}
};
@@ -726,7 +733,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
rdata = fcport->rdata;
if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
- rc = 1;
+ rc = SUCCESS;
goto out;
}
@@ -1333,7 +1340,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);
conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
- conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
+ conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;
@@ -1558,6 +1565,17 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
if (port_id == FC_FID_DIR_SERV)
break;
+ if (rdata->spp_type != FC_TYPE_FCP) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "No action since spp type isn't FCP\n");
+ break;
+ }
+ if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
+ QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
+ "Not FCP target so no action\n");
+ break;
+ }
+
if (!rport) {
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
"port_id=%x - rport notcreated Yet!!\n", port_id);
@@ -1634,11 +1652,13 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
struct fc_lport *lport = qedf->lport;
- struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
- u64 dsn;
+ u8 buf[8];
+ int pos;
+ uint32_t i;
/*
- * fdmi_enabled needs to be set for libfc to execute FDMI registration.
+ * fdmi_enabled needs to be set for libfc
+ * to execute FDMI registration
*/
lport->fdmi_enabled = 1;
@@ -1648,32 +1668,53 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
*/
/* Get the PCI-e Device Serial Number Capability */
- dsn = pci_get_dsn(qedf->pdev);
- if (dsn)
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "%016llX", dsn);
- else
- snprintf(fc_host->serial_number,
- sizeof(fc_host->serial_number), "Unknown");
+ pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ pos += 4;
+ for (i = 0; i < 8; i++)
+ pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);
+
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE,
+ "%02X%02X%02X%02X%02X%02X%02X%02X",
+ buf[7], buf[6], buf[5], buf[4],
+ buf[3], buf[2], buf[1], buf[0]);
+ } else
+ snprintf(fc_host_serial_number(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "Unknown");
+
+ snprintf(fc_host_manufacturer(lport->host),
+ FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");
+
+ if (qedf->pdev->device == QL45xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");
+
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL45xxx FCoE Adapter");
+ }
- snprintf(fc_host->manufacturer,
- sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
+ if (qedf->pdev->device == QL41xxx) {
+ snprintf(fc_host_model(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");
- snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
+ snprintf(fc_host_model_description(lport->host),
+ FC_SYMBOLIC_NAME_SIZE, "%s",
+ "Marvell FastLinQ QL41xxx FCoE Adapter");
+ }
- snprintf(fc_host->model_description, sizeof(fc_host->model_description),
- "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
- "(FCoE)");
+ snprintf(fc_host_hardware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);
- snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
- "Rev %d", qedf->pdev->revision);
+ snprintf(fc_host_driver_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);
- snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
- "%s", QEDF_VERSION);
+ snprintf(fc_host_firmware_version(lport->host),
+ FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
+ FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
+ FW_ENGINEERING_VERSION);
- snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
- "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
- FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
}
static int qedf_lport_setup(struct qedf_ctx *qedf)
@@ -1720,8 +1761,13 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;
/* Set symbolic node name */
- snprintf(fc_host_symbolic_name(lport->host), 256,
- "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
+ if (qedf->pdev->device == QL45xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);
+
+ if (qedf->pdev->device == QL41xxx)
+ snprintf(fc_host_symbolic_name(lport->host), 256,
+ "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);
qedf_setup_fdmi(qedf);
@@ -3221,11 +3267,16 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
void *task_start, *task_end;
struct qed_slowpath_params slowpath_params;
struct qed_probe_params qed_params;
+ u16 retry_cnt = 10;
/*
* When doing error recovery we didn't reap the lport so don't try
* to reallocate it.
*/
+retry_probe:
+ if (mode == QEDF_MODE_RECOVERY)
+ msleep(2000);
+
if (mode != QEDF_MODE_RECOVERY) {
lport = libfc_host_alloc(&qedf_host_template,
sizeof(struct qedf_ctx));
@@ -3312,6 +3363,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
if (!qedf->cdev) {
+ if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
+ QEDF_ERR(&qedf->dbg_ctx,
+ "Retry %d initialize hardware\n", retry_cnt);
+ retry_cnt--;
+ goto retry_probe;
+ }
QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
rc = -ENODEV;
goto err1;
@@ -3760,6 +3817,44 @@ void qedf_wq_grcdump(struct work_struct *work)
qedf_capture_grc_dump(qedf);
}
+void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
+{
+ struct qedf_ctx *qedf = dev;
+
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Hardware error handler scheduled, event=%d.\n",
+ err_type);
+
+ if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Already in recovery, not scheduling board disable work.\n");
+ return;
+ }
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedf->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+ break;
+ case QED_HW_ERR_RAMROD_FAIL:
+ /* Prevent HW attentions from being reasserted */
+ qed_ops->common->attn_clr_enable(qedf->cdev, true);
+
+ if (qedf_enable_recovery)
+ qed_ops->common->recovery_process(qedf->cdev);
+
+ break;
+ default:
+ break;
+ }
+}
+
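qed invokes this handler through the .schedule_hw_err_handler callback wired into qedf_cb_ops above. Only a fan failure schedules the new board-disable work immediately; the attention-type errors just latch attn_clr_enable so hardware attentions are not reasserted, and a ramrod failure additionally kicks the full recovery process when the enable_recovery module parameter allows it.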
/*
* Protocol TLV handler
*/
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index 9498279ae80d..c342defc3f52 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -274,6 +274,10 @@ struct qedi_ctx {
spinlock_t ll2_lock; /* Light L2 lock */
spinlock_t hba_lock; /* per port lock */
struct task_struct *ll2_recv_thread;
+ unsigned long qedi_err_flags;
+#define QEDI_ERR_ATTN_CLR_EN 0
+#define QEDI_ERR_IS_RECOVERABLE 2
+#define QEDI_ERR_OVERRIDE_EN 31
unsigned long flags;
#define UIO_DEV_OPENED 1
#define QEDI_IOTHREAD_WAKE 2
@@ -305,6 +309,7 @@ struct qedi_ctx {
u32 max_sqes;
u8 num_queues;
u32 max_active_conns;
+ s32 msix_count;
struct iscsi_cid_queue cid_que;
struct qedi_endpoint **ep_tbl;
@@ -334,6 +339,7 @@ struct qedi_ctx {
struct workqueue_struct *dpc_wq;
struct delayed_work recovery_work;
+ struct delayed_work board_disable_work;
spinlock_t task_idx_lock; /* To protect gbl context */
s32 last_tidx_alloc;
diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c
index 6ed74583b1b9..440ddd2309f1 100644
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
"Freeing tid=0x%x for cid=0x%x\n",
cmd->task_id, qedi_conn->iscsi_conn_id);
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
cmd->task_id, qedi_conn->iscsi_conn_id,
&cmd->io_cmd);
}
+ spin_unlock(&qedi_conn->list_lock);
cmd->state = RESPONSE_RECEIVED;
qedi_clear_task_idx(qedi, cmd->task_id);
@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,
tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;
+ spin_lock(&qedi_conn->list_lock);
if (likely(qedi_cmd->io_cmd_in_list)) {
qedi_cmd->io_cmd_in_list = false;
list_del_init(&qedi_cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||
@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;
+ spin_lock(&qedi_conn->list_lock);
if (likely(cmd->io_cmd_in_list)) {
cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
memset(task_ctx, '\0', sizeof(*task_ctx));
@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
qedi_clear_task_idx(qedi_conn->qedi, rtid);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&dbg_cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(dbg_cmd->io_cmd_in_list)) {
+ dbg_cmd->io_cmd_in_list = false;
+ list_del_init(&dbg_cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
qedi_cmd->state = CLEANUP_RECV;
wake_up_interruptible(&qedi_conn->wait_queue);
@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
qedi_conn->cmd_cleanup_req++;
qedi_iscsi_cleanup_task(ctask, true);
+ cmd->io_cmd_in_list = false;
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
QEDI_WARN(&qedi->dbg_ctx,
@@ -1255,7 +1267,8 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
((qedi_conn->cmd_cleanup_req ==
qedi_conn->cmd_cleanup_cmpl) ||
- qedi_conn->ep),
+ test_bit(QEDI_IN_RECOVERY,
+ &qedi->flags)),
5 * HZ);
if (rval) {
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,
@@ -1280,7 +1293,9 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
/* Enable IOs for all other sessions except current.*/
if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
(qedi_conn->cmd_cleanup_req ==
- qedi_conn->cmd_cleanup_cmpl),
+ qedi_conn->cmd_cleanup_cmpl) ||
+ test_bit(QEDI_IN_RECOVERY,
+ &qedi->flags),
5 * HZ)) {
iscsi_host_for_each_session(qedi->shost,
qedi_mark_device_available);
@@ -1446,8 +1461,11 @@ ldel_exit:
spin_unlock_bh(&qedi_conn->tmf_work_lock);
spin_lock(&qedi_conn->list_lock);
- list_del_init(&cmd->io_cmd);
- qedi_conn->active_cmd_count--;
+ if (likely(cmd->io_cmd_in_list)) {
+ cmd->io_cmd_in_list = false;
+ list_del_init(&cmd->io_cmd);
+ qedi_conn->active_cmd_count--;
+ }
spin_unlock(&qedi_conn->list_lock);
clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);
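Each of the qedi_fw.c hunks above applies the same fix: take qedi_conn->list_lock around the check-and-unlink of a command, and let the io_cmd_in_list flag make the removal idempotent so a command cannot be unlinked (and active_cmd_count decremented) twice by racing completion paths. A hedged sketch of the pattern as a standalone helper; the field names mirror the driver's, but the helper itself is hypothetical:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_io_cmd {
	struct list_head io_cmd;
	bool io_cmd_in_list;	/* mirrors qedi's per-command flag */
};

/* Unlink a command exactly once, however many completion paths race. */
static void demo_io_cmd_del_once(struct demo_io_cmd *cmd, spinlock_t *lock,
				 int *active_cmd_count)
{
	spin_lock(lock);
	if (cmd->io_cmd_in_list) {
		cmd->io_cmd_in_list = false;
		list_del_init(&cmd->io_cmd);
		(*active_cmd_count)--;
	}
	spin_unlock(lock);
}

Without the flag, a second caller would corrupt the accounting: list_del_init() is safe to repeat, but the counter decrement is not.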
diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
index c14ac7882afa..08c05403cd72 100644
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
struct qedi_cmd *cmd, *cmd_tmp;
+ spin_lock(&qedi_conn->list_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
io_cmd) {
list_del_init(&cmd->io_cmd);
qedi_conn->active_cmd_count--;
}
+ spin_unlock(&qedi_conn->list_lock);
}
static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
@@ -1069,6 +1071,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
qedi_ep->state = EP_STATE_DISCONN_START;
+
+ if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
+ test_bit(QEDI_IN_RECOVERY, &qedi->flags))
+ goto ep_release_conn;
+
ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
if (ret) {
QEDI_WARN(&qedi->dbg_ctx,
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 6f038ae5efca..61fab01d2d52 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -50,6 +50,10 @@ module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
"parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");
+static uint qedi_flags_override;
+module_param(qedi_flags_override, uint, 0644);
+MODULE_PARM_DESC(qedi_flags_override, "Enable/disable the action taken on MFW error flag bits.");
+
const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;
@@ -63,6 +67,8 @@ static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static void qedi_recovery_handler(struct work_struct *work);
+static void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type);
static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
@@ -789,8 +795,7 @@ static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
spin_lock_bh(&qedi->ll2_lock);
list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
list_del(&work->list);
- if (work->skb)
- kfree_skb(work->skb);
+ kfree_skb(work->skb);
kfree(work);
}
spin_unlock_bh(&qedi->ll2_lock);
@@ -1113,6 +1118,42 @@ exit_get_data:
return;
}
+void qedi_schedule_hw_err_handler(void *dev,
+ enum qed_hw_err_type err_type)
+{
+ struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
+ unsigned long override_flags = qedi_flags_override;
+
+ if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))
+ qedi->qedi_err_flags = qedi_flags_override;
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "HW error handler scheduled, err=%d err_flags=0x%x\n",
+ err_type, qedi->qedi_err_flags);
+
+ switch (err_type) {
+ case QED_HW_ERR_FAN_FAIL:
+ schedule_delayed_work(&qedi->board_disable_work, 0);
+ break;
+ case QED_HW_ERR_MFW_RESP_FAIL:
+ case QED_HW_ERR_HW_ATTN:
+ case QED_HW_ERR_DMAE_FAIL:
+ case QED_HW_ERR_RAMROD_FAIL:
+ case QED_HW_ERR_FW_ASSERT:
+ /* Prevent HW attentions from being reasserted */
+ if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags))
+ qedi_ops->common->attn_clr_enable(qedi->cdev, true);
+
+ if (err_type == QED_HW_ERR_RAMROD_FAIL &&
+ test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags))
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ break;
+ default:
+ break;
+ }
+}
+
static void qedi_schedule_recovery_handler(void *dev)
{
struct qedi_ctx *qedi = dev;
@@ -1127,6 +1168,15 @@ static void qedi_schedule_recovery_handler(void *dev)
schedule_delayed_work(&qedi->recovery_work, 0);
}
+static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
+{
+ struct iscsi_session *session = cls_session->dd_data;
+ struct iscsi_conn *conn = session->leadconn;
+ struct qedi_conn *qedi_conn = conn->dd_data;
+
+ qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
+}
+
static void qedi_link_update(void *dev, struct qed_link_output *link)
{
struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -1138,6 +1188,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
"Link Down event.\n");
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
+ iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
}
}
@@ -1145,6 +1196,7 @@ static struct qed_iscsi_cb_ops qedi_cb_ops = {
{
.link_update = qedi_link_update,
.schedule_recovery_handler = qedi_schedule_recovery_handler,
+ .schedule_hw_err_handler = qedi_schedule_hw_err_handler,
.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
.get_generic_tlv_data = qedi_get_generic_tlv_data,
}
@@ -1357,7 +1409,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
u16 idx;
cpu = cpumask_first(cpu_online_mask);
- for (i = 0; i < qedi->int_info.msix_cnt; i++) {
+ for (i = 0; i < qedi->msix_count; i++) {
idx = i * qedi->dev_info.common.num_hwfns +
qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);
@@ -1387,7 +1439,12 @@ static int qedi_setup_int(struct qedi_ctx *qedi)
{
int rc = 0;
- rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
+ rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
+ if (rc < 0)
+ goto exit_setup_int;
+
+ qedi->msix_count = rc;
+
rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
if (rc)
goto exit_setup_int;
@@ -2336,10 +2393,30 @@ kset_free:
return -ENOMEM;
}
+static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+
+ QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n",
+ __func__, state);
+
+ if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Recovery already in progress.\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ qedi_ops->common->recovery_process(qedi->cdev);
+
+ return PCI_ERS_RESULT_CAN_RECOVER;
+}
+
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
int rval;
+ u16 retry = 10;
if (mode == QEDI_MODE_SHUTDOWN)
iscsi_host_for_each_session(qedi->shost,
@@ -2368,7 +2445,13 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
qedi_sync_free_irqs(qedi);
if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
- qedi_ops->stop(qedi->cdev);
+ while (retry--) {
+ rval = qedi_ops->stop(qedi->cdev);
+ if (rval < 0)
+ msleep(1000);
+ else
+ break;
+ }
qedi_ops->ll2->stop(qedi->cdev);
}
@@ -2405,6 +2488,21 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
}
}
+static void qedi_board_disable_work(struct work_struct *work)
+{
+ struct qedi_ctx *qedi =
+ container_of(work, struct qedi_ctx,
+ board_disable_work.work);
+
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Fan failure, Unloading firmware context.\n");
+
+ if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
+ return;
+
+ __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
+}
+
static void qedi_shutdown(struct pci_dev *pdev)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
@@ -2427,6 +2525,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
struct qed_probe_params qed_params;
void *task_start, *task_end;
int rc;
+ u16 retry = 10;
if (mode != QEDI_MODE_RECOVERY) {
qedi = qedi_host_alloc(pdev);
@@ -2438,6 +2537,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qedi = pci_get_drvdata(pdev);
}
+retry_probe:
+ if (mode == QEDI_MODE_RECOVERY)
+ msleep(2000);
+
memset(&qed_params, 0, sizeof(qed_params));
qed_params.protocol = QED_PROTOCOL_ISCSI;
qed_params.dp_module = qedi_qed_debug;
@@ -2445,11 +2548,20 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
qed_params.is_vf = is_vf;
qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
if (!qedi->cdev) {
+ if (mode == QEDI_MODE_RECOVERY && retry) {
+ QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+ "Retry %d initialize hardware\n", retry);
+ retry--;
+ goto retry_probe;
+ }
+
rc = -ENODEV;
QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
goto free_host;
}
+ set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);
+ set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);
atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);
@@ -2533,7 +2645,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
qedi->mac);
- sprintf(host_buf, "host_%d", qedi->shost->host_no);
+ snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no);
qedi_ops->common->set_name(qedi->cdev, host_buf);
qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);
@@ -2658,6 +2770,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
}
INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
+ INIT_DELAYED_WORK(&qedi->board_disable_work,
+ qedi_board_disable_work);
/* F/w needs 1st task context memory entry for performance */
set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);
@@ -2744,12 +2858,17 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);
static enum cpuhp_state qedi_cpuhp_state;
+static struct pci_error_handlers qedi_err_handler = {
+ .error_detected = qedi_io_error_detected,
+};
+
static struct pci_driver qedi_pci_driver = {
.name = QEDI_MODULE_NAME,
.id_table = qedi_pci_tbl,
.probe = qedi_probe,
.remove = qedi_remove,
.shutdown = qedi_shutdown,
+ .err_handler = &qedi_err_handler,
};
static int __init qedi_init(void)
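Both __qedi_remove() and the recovery path of __qedi_probe() above adopt the same bounded-retry shape: attempt the operation, sleep on failure, and give up after a fixed number of tries rather than looping forever against dead hardware. A generic sketch of that shape (the helper, its retry count, and delay are illustrative, not values mandated by the driver):

#include <linux/delay.h>
#include <linux/errno.h>

/* Try 'op' up to 'tries' times, sleeping between failed attempts.
 * Returns the last result; a negative value means every try failed. */
static int demo_retry_op(int (*op)(void *ctx), void *ctx,
			 unsigned int tries, unsigned int delay_ms)
{
	int rc = -ETIMEDOUT;

	while (tries--) {
		rc = op(ctx);
		if (rc >= 0)
			break;
		msleep(delay_ms);
	}
	return rc;
}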
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 441a45349349..545936cb3980 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1241,7 +1241,7 @@ qla1280_done(struct scsi_qla_host *ha)
{
struct srb *sp;
struct list_head *done_q;
- int bus, target, lun;
+ int bus, target;
struct scsi_cmnd *cmd;
ENTER("qla1280_done");
@@ -1256,7 +1256,6 @@ qla1280_done(struct scsi_qla_host *ha)
cmd = sp->cmd;
bus = SCSI_BUS_32(cmd);
target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
switch ((CMD_RESULT(cmd) >> 16)) {
case DID_RESET:
@@ -2185,13 +2184,12 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
nv->cntr_flags_1.disable_loading_risc_code;
if (IS_ISP1040(ha)) {
- uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
+ uint16_t hwrev, cfg1, cdma_conf;
hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;
cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
- ddma_conf = RD_REG_WORD(&reg->ddma_cfg);
/* Busted fifo, says mjacob. */
if (hwrev != ISP_CFG0_1040A)
@@ -2427,7 +2425,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
int cnt;
uint16_t *optr, *iptr;
uint16_t __iomem *mptr;
- uint16_t data;
DECLARE_COMPLETION_ONSTACK(wait);
ENTER("qla1280_mailbox_command");
@@ -2462,7 +2459,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
spin_unlock_irq(ha->host->host_lock);
WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
- data = qla1280_debounce_register(&reg->istatus);
+ qla1280_debounce_register(&reg->istatus);
wait_for_completion(&wait);
del_timer_sync(&ha->mailbox_timer);
@@ -3604,7 +3601,6 @@ static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
struct list_head *done_q)
{
- unsigned int bus, target, lun;
int sense_sz;
struct srb *sp;
struct scsi_cmnd *cmd;
@@ -3630,11 +3626,6 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
cmd = sp->cmd;
- /* Generate LU queue on cntrl, target, LUN */
- bus = SCSI_BUS_32(cmd);
- target = SCSI_TCN_32(cmd);
- lun = SCSI_LUN_32(cmd);
-
if (comp_status || scsi_status) {
dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
"0x%x, handle = 0x%x\n", comp_status,
@@ -3673,7 +3664,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
dprintk(2, "qla1280_status_entry: Check "
"condition Sense data, b %i, t %i, "
- "l %i\n", bus, target, lun);
+ "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
+ SCSI_LUN_32(cmd));
if (sense_sz)
qla1280_dump_buffer(2,
(char *)cmd->sense_buffer,
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5d93ccc73153..284b1cc91c80 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -157,6 +157,14 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
vha->host_no);
}
break;
+ case 10:
+ if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ ql_log(ql_log_info, vha, 0x70e9,
+ "Issuing MPI firmware dump on host#%ld.\n",
+ vha->host_no);
+ ha->isp_ops->mpi_fw_dump(vha, 0);
+ }
+ break;
}
return count;
}
@@ -744,8 +752,6 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
qla83xx_idc_unlock(vha, 0);
break;
- } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- qla27xx_reset_mpi(vha);
} else {
/* Make sure FC side is not in reset */
WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=
@@ -2726,6 +2732,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
struct link_statistics *stats;
dma_addr_t stats_dma;
struct fc_host_statistics *p = &vha->fc_host_stat;
+ struct qla_qpair *qpair;
+ int i;
+ u64 ib = 0, ob = 0, ir = 0, or = 0;
memset(p, -1, sizeof(*p));
@@ -2762,6 +2771,27 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
if (rval != QLA_SUCCESS)
goto done_free;
+ /* --- */
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ ir += qpair->counters.input_requests;
+ or += qpair->counters.output_requests;
+ ib += qpair->counters.input_bytes;
+ ob += qpair->counters.output_bytes;
+ }
+ ir += ha->base_qpair->counters.input_requests;
+ or += ha->base_qpair->counters.output_requests;
+ ib += ha->base_qpair->counters.input_bytes;
+ ob += ha->base_qpair->counters.output_bytes;
+
+ ir += vha->qla_stats.input_requests;
+ or += vha->qla_stats.output_requests;
+ ib += vha->qla_stats.input_bytes;
+ ob += vha->qla_stats.output_bytes;
+ /* --- */
+
p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);
@@ -2781,15 +2811,16 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
} else {
- p->rx_words = vha->qla_stats.input_bytes;
- p->tx_words = vha->qla_stats.output_bytes;
+ p->rx_words = ib >> 2;
+ p->tx_words = ob >> 2;
}
}
+
p->fcp_control_requests = vha->qla_stats.control_requests;
- p->fcp_input_requests = vha->qla_stats.input_requests;
- p->fcp_output_requests = vha->qla_stats.output_requests;
- p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
- p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
+ p->fcp_input_requests = ir;
+ p->fcp_output_requests = or;
+ p->fcp_input_megabytes = ib >> 20;
+ p->fcp_output_megabytes = ob >> 20;
p->seconds_since_last_reset =
get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
do_div(p->seconds_since_last_reset, HZ);
@@ -2809,9 +2840,18 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
struct link_statistics *stats;
dma_addr_t stats_dma;
+ int i;
+ struct qla_qpair *qpair;
memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
+ for (i = 0; i < vha->hw->max_qpairs; i++) {
+ qpair = vha->hw->queue_pair_map[i];
+ if (!qpair)
+ continue;
+ memset(&qpair->counters, 0, sizeof(qpair->counters));
+ }
+ memset(&ha->base_qpair->counters, 0, sizeof(ha->base_qpair->counters));
vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();
@@ -3214,46 +3254,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
- if (IS_CNA_CAPABLE(ha))
- speeds = FC_PORTSPEED_10GBIT;
- else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
- if (ha->max_supported_speed == 2) {
- if (ha->min_supported_speed <= 6)
- speeds |= FC_PORTSPEED_64GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1) {
- if (ha->min_supported_speed <= 5)
- speeds |= FC_PORTSPEED_32GBIT;
- }
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 4)
- speeds |= FC_PORTSPEED_16GBIT;
- }
- if (ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 3)
- speeds |= FC_PORTSPEED_8GBIT;
- }
- if (ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 2)
- speeds |= FC_PORTSPEED_4GBIT;
- }
- } else if (IS_QLA2031(ha))
- speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
- FC_PORTSPEED_4GBIT;
- else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
- speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
- FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else if (IS_QLA24XX_TYPE(ha))
- speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
- FC_PORTSPEED_1GBIT;
- else if (IS_QLA23XX(ha))
- speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
- else
- speeds = FC_PORTSPEED_1GBIT;
+ speeds = qla25xx_fdmi_port_speed_capability(ha);
fc_host_supported_speeds(vha->host) = speeds;
}
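The statistics hunks above stop reporting only the single per-host counters and instead sum the per-queue-pair counters (plus the base qpair and the legacy per-host values) before converting units: a byte count becomes 4-byte FC words via ib >> 2 and whole megabytes via ib >> 20. A small self-contained illustration of the aggregation and the two shifts; the struct is a pared-down stand-in for qla_counters:

#include <stdint.h>
#include <stdio.h>

struct demo_counters { uint64_t input_bytes; };

int main(void)
{
	struct demo_counters qp[3] = {
		{ .input_bytes = 1ULL << 30 },		/* 1 GiB on qpair 0   */
		{ .input_bytes = 512ULL << 20 },	/* 512 MiB on qpair 1 */
		{ .input_bytes = 0 },			/* idle qpair         */
	};
	uint64_t ib = 0;
	int i;

	for (i = 0; i < 3; i++)
		ib += qp[i].input_bytes;

	/* 4-byte FC words (>> 2) and whole megabytes (>> 20), as above. */
	printf("rx_words=%llu rx_megabytes=%llu\n",
	       (unsigned long long)(ib >> 2),
	       (unsigned long long)(ib >> 20));
	return 0;
}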
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 1be811a5d69d..898b8d474868 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -16,7 +16,7 @@
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |
- * | | | 0x212a-0x2134 |
+ * | | | 0x212c-0x2134 |
* | Queue Command and IO tracing | 0x3074 | 0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x303d-0x3041 |
@@ -2449,7 +2449,7 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
const struct pci_dev *pdev = vha->hw->pdev;
/* <module-name> [<dev-name>]-<msg-id>:<host>: */
- snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
+ snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
dev_name(&(pdev->dev)), msg_id, vha->host_no);
} else {
/* <module-name> [<dev-name>]-<msg-id>: : */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a165120d2976..b0b4228050e9 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -624,6 +624,12 @@ enum {
TYPE_TGT_TMCMD, /* task management */
};
+struct iocb_resource {
+ u8 res_type;
+ u8 pad;
+ u16 iocb_cnt;
+};
+
typedef struct srb {
/*
* Do not move cmd_type field, it needs to
@@ -631,6 +637,7 @@ typedef struct srb {
*/
uint8_t cmd_type;
uint8_t pad[3];
+ struct iocb_resource iores;
struct kref cmd_kref; /* need to migrate ref_count over to this */
void *priv;
wait_queue_head_t nvme_ls_waitq;
@@ -2443,12 +2450,6 @@ typedef struct fc_port {
struct list_head list;
struct scsi_qla_host *vha;
- uint8_t node_name[WWN_SIZE];
- uint8_t port_name[WWN_SIZE];
- port_id_t d_id;
- uint16_t loop_id;
- uint16_t old_loop_id;
-
unsigned int conf_compl_supported:1;
unsigned int deleted:2;
unsigned int free_pending:1;
@@ -2465,15 +2466,24 @@ typedef struct fc_port {
unsigned int n2n_flag:1;
unsigned int explicit_logout:1;
unsigned int prli_pend_timer:1;
+ uint8_t nvme_flag;
+
+ uint8_t node_name[WWN_SIZE];
+ uint8_t port_name[WWN_SIZE];
+ port_id_t d_id;
+ uint16_t loop_id;
+ uint16_t old_loop_id;
struct completion nvme_del_done;
uint32_t nvme_prli_service_param;
+#define NVME_PRLI_SP_PI_CTRL BIT_9
+#define NVME_PRLI_SP_SLER BIT_8
#define NVME_PRLI_SP_CONF BIT_7
#define NVME_PRLI_SP_INITIATOR BIT_5
#define NVME_PRLI_SP_TARGET BIT_4
#define NVME_PRLI_SP_DISCOVERY BIT_3
#define NVME_PRLI_SP_FIRST_BURST BIT_0
- uint8_t nvme_flag;
+
uint32_t nvme_first_burst_size;
#define NVME_FLAG_REGISTERED 4
#define NVME_FLAG_DELETING 2
@@ -2544,6 +2554,8 @@ typedef struct fc_port {
u8 last_login_state;
u16 n2n_link_reset_cnt;
u16 n2n_chip_reset;
+
+ struct dentry *dfs_rport_dir;
} fc_port_t;
enum {
@@ -3508,6 +3520,14 @@ struct qla_tgt_counters {
uint64_t num_term_xchg_sent;
};
+struct qla_counters {
+ uint64_t input_bytes;
+ uint64_t input_requests;
+ uint64_t output_bytes;
+ uint64_t output_requests;
+
+};
+
struct qla_qpair;
/* Response queue data structure */
@@ -3566,6 +3586,15 @@ struct req_que {
uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};
+struct qla_fw_resources {
+ u16 iocbs_total;
+ u16 iocbs_limit;
+ u16 iocbs_qp_limit;
+ u16 iocbs_used;
+};
+
+#define QLA_IOCB_PCT_LIMIT 95
+
/*Queue pair data structure */
struct qla_qpair {
spinlock_t qp_lock;
@@ -3592,6 +3621,7 @@ struct qla_qpair {
uint32_t enable_class_2:1;
uint32_t enable_explicit_conf:1;
uint32_t use_shadow_reg:1;
+ uint32_t rcv_intr:1;
uint16_t id; /* qp number used with FW */
uint16_t vp_idx; /* vport ID */
@@ -3607,13 +3637,17 @@ struct qla_qpair {
struct qla_msix_entry *msix; /* point to &ha->msix_entries[x] */
struct qla_hw_data *hw;
struct work_struct q_work;
+ struct qla_counters counters;
+
struct list_head qp_list_elem; /* vha->qp_list */
struct list_head hints_list;
- uint16_t cpuid;
+
uint16_t retry_term_cnt;
__le32 retry_term_exchg_addr;
uint64_t retry_term_jiff;
struct qla_tgt_counters tgt_counters;
+ uint16_t cpuid;
+ struct qla_fw_resources fwres ____cacheline_aligned;
};
/* Place holder for FW buffer parameters */
@@ -3881,6 +3915,7 @@ struct qla_hw_data {
/* Enabled in Driver */
uint32_t scm_enabled:1;
uint32_t max_req_queue_warned:1;
+ uint32_t plogi_template_valid:1;
} flags;
uint16_t max_exchg;
@@ -4127,6 +4162,10 @@ struct qla_hw_data {
#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))
+#define IS_ZIO_THRESHOLD_CAPABLE(ha) \
+ ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\
+ (ha->zio_mode == QLA_ZIO_MODE_6))
+
/* HBA serial number */
uint8_t serial0;
uint8_t serial1;
@@ -4214,7 +4253,7 @@ struct qla_hw_data {
/* Extended Logins */
void *exlogin_buf;
dma_addr_t exlogin_buf_dma;
- int exlogin_size;
+ uint32_t exlogin_size;
#define ENABLE_EXCHANGE_OFFLD BIT_2
@@ -4225,7 +4264,8 @@ struct qla_hw_data {
int exchoffld_count;
/* n2n */
- struct els_plogi_payload plogi_els_payld;
+ struct fc_els_flogi plogi_els_payld;
+#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)
void *swl;
@@ -4273,6 +4313,7 @@ struct qla_hw_data {
#define FW_ATTR_EXT0_SCM_BROCADE 0x00001000
/* Cisco fabric attached */
#define FW_ATTR_EXT0_SCM_CISCO 0x00002000
+#define FW_ATTR_EXT0_NVME2 BIT_13
uint16_t fw_attributes_ext[2];
uint32_t fw_memory_size;
uint32_t fw_transfer_size;
@@ -4622,6 +4663,7 @@ typedef struct scsi_qla_host {
uint32_t qpairs_rsp_created:1;
uint32_t nvme_enabled:1;
uint32_t nvme_first_burst:1;
+ uint32_t nvme2_enabled:1;
} flags;
atomic_t loop_state;
@@ -4780,6 +4822,8 @@ typedef struct scsi_qla_host {
uint16_t ql2xexchoffld;
uint16_t ql2xiniexchg;
+ struct dentry *dfs_rport_root;
+
struct purex_list {
struct list_head head;
spinlock_t lock;
@@ -5103,6 +5147,8 @@ struct sff_8247_a0 {
ha->current_topology == ISP_CFG_N || \
!ha->current_topology)
+#define QLA_N2N_WAIT_TIME 5 /* 2 * ra_tov(n2n) + 1 */
+
#define NVME_TYPE(fcport) \
(fcport->fc4_type & FS_FC4TYPE_NVME) \
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index e62b2115235e..6f5f18fc974a 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -12,6 +12,140 @@
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;
+#define QLA_DFS_RPORT_DEVLOSS_TMO 1
+
+static int
+qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+ *val = fp->nvme_remote_port->dev_loss_tmo;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
+{
+ switch (attr_id) {
+ case QLA_DFS_RPORT_DEVLOSS_TMO:
+ /* Only supported for FC-NVMe devices that are registered. */
+ if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
+ return -EIO;
+#if (IS_ENABLED(CONFIG_NVME_FC))
+ return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
+ val);
+#else /* CONFIG_NVME_FC */
+ return -EINVAL;
+#endif /* CONFIG_NVME_FC */
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr) \
+static int qla_dfs_rport_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_get(fp, _attr_id, val); \
+} \
+static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
+{ \
+ struct fc_port *fp = data; \
+ return qla_dfs_rport_set(fp, _attr_id, val); \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops, \
+ qla_dfs_rport_##_attr##_get, \
+ qla_dfs_rport_##_attr##_set, "%llu\n")
+
+/*
+ * Wrapper for getting fc_port fields.
+ *
+ * _attr : Attribute name.
+ * _get_val : Accessor macro to retrieve the value.
+ */
+#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val) \
+static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
+{ \
+ struct fc_port *fp = data; \
+ *val = _get_val; \
+ return 0; \
+} \
+DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops, \
+ qla_dfs_rport_field_##_attr##_get, \
+ NULL, "%llu\n")
+
+#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)
+
+#define DEFINE_QLA_DFS_RPORT_FIELD(_attr) \
+ DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)
+
+DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);
+
+DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
+DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
+DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
+DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
+DEFINE_QLA_DFS_RPORT_FIELD(flags);
+DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
+DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
+DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
+DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));
+
+void
+qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ char wwn[32];
+
+#define QLA_CREATE_RPORT_FIELD_ATTR(_attr) \
+ debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir, \
+ fp, &qla_dfs_rport_field_##_attr##_fops)
+
+ if (!vha->dfs_rport_root || fp->dfs_rport_dir)
+ return;
+
+ sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
+ fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
+ if (!fp->dfs_rport_dir)
+ return;
+ if (NVME_TARGET(vha->hw, fp))
+ debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
+ fp, &qla_dfs_rport_dev_loss_tmo_fops);
+
+ QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
+ QLA_CREATE_RPORT_FIELD_ATTR(flags);
+ QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
+ QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
+ QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(port_id);
+ QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
+}
+
+void
+qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
+{
+ if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
+ return;
+ debugfs_remove_recursive(fp->dfs_rport_dir);
+ fp->dfs_rport_dir = NULL;
+}
+
static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{
@@ -57,51 +191,51 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
scsi_qla_host_t *vha = s->private;
struct qla_hw_data *ha = vha->hw;
- struct gid_list_info *gid_list, *gid;
+ struct gid_list_info *gid_list;
dma_addr_t gid_list_dma;
fc_port_t fc_port;
+ char *id_iter;
int rc, i;
uint16_t entries, loop_id;
- struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
seq_printf(s, "%s\n", vha->host_str);
- if (tgt) {
- gid_list = dma_alloc_coherent(&ha->pdev->dev,
- qla2x00_gid_list_size(ha),
- &gid_list_dma, GFP_KERNEL);
- if (!gid_list) {
- ql_dbg(ql_dbg_user, vha, 0x7018,
- "DMA allocation failed for %u\n",
- qla2x00_gid_list_size(ha));
- return 0;
- }
+ gid_list = dma_alloc_coherent(&ha->pdev->dev,
+ qla2x00_gid_list_size(ha),
+ &gid_list_dma, GFP_KERNEL);
+ if (!gid_list) {
+ ql_dbg(ql_dbg_user, vha, 0x7018,
+ "DMA allocation failed for %u\n",
+ qla2x00_gid_list_size(ha));
+ return 0;
+ }
- rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
- &entries);
- if (rc != QLA_SUCCESS)
- goto out_free_id_list;
+ rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
+ &entries);
+ if (rc != QLA_SUCCESS)
+ goto out_free_id_list;
- gid = gid_list;
+ id_iter = (char *)gid_list;
- seq_puts(s, "Port Name Port ID Loop ID\n");
+ seq_puts(s, "Port Name Port ID Loop ID\n");
- for (i = 0; i < entries; i++) {
- loop_id = le16_to_cpu(gid->loop_id);
- memset(&fc_port, 0, sizeof(fc_port_t));
+ for (i = 0; i < entries; i++) {
+ struct gid_list_info *gid =
+ (struct gid_list_info *)id_iter;
+ loop_id = le16_to_cpu(gid->loop_id);
+ memset(&fc_port, 0, sizeof(fc_port_t));
- fc_port.loop_id = loop_id;
+ fc_port.loop_id = loop_id;
- rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
- seq_printf(s, "%8phC %02x%02x%02x %d\n",
- fc_port.port_name, fc_port.d_id.b.domain,
- fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
- fc_port.loop_id);
- gid = (void *)gid + ha->gid_list_info_size;
- }
-out_free_id_list:
- dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
- gid_list, gid_list_dma);
+ rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
+ seq_printf(s, "%8phC %02x%02x%02x %d\n",
+ fc_port.port_name, fc_port.d_id.b.domain,
+ fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
+ fc_port.loop_id);
+ id_iter += ha->gid_list_info_size;
}
+out_free_id_list:
+ dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
+ gid_list, gid_list_dma);
return 0;
}
@@ -127,6 +261,8 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
struct scsi_qla_host *vha = s->private;
uint16_t mb[MAX_IOCB_MB_REG];
int rc;
+ struct qla_hw_data *ha = vha->hw;
+ u16 iocbs_used, i;
rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
if (rc != QLA_SUCCESS) {
@@ -151,6 +287,18 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
mb[23]);
}
+ if (ql2xenforce_iocb_limit) {
+ /* lock is not required; it's an estimate. */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
+
+ seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
+ iocbs_used, ha->base_qpair->fwres.iocbs_limit);
+ }
+
return 0;
}
@@ -473,9 +621,21 @@ create_nodes:
ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);
- if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
+ if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
ha->tgt.dfs_naqp = debugfs_create_file("naqp",
0400, ha->dfs_dir, vha, &dfs_naqp_ops);
+ if (!ha->tgt.dfs_naqp) {
+ ql_log(ql_log_warn, vha, 0xd011,
+ "Unable to create debugFS naqp node.\n");
+ goto out;
+ }
+ }
+ vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
+ if (!vha->dfs_rport_root) {
+ ql_log(ql_log_warn, vha, 0xd012,
+ "Unable to create debugFS rports node.\n");
+ goto out;
+ }
out:
return 0;
}
@@ -515,6 +675,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
ha->dfs_fce = NULL;
}
+ if (vha->dfs_rport_root) {
+ debugfs_remove_recursive(vha->dfs_rport_root);
+ vha->dfs_rport_root = NULL;
+ }
+
if (ha->dfs_dir) {
debugfs_remove(ha->dfs_dir);
ha->dfs_dir = NULL;
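The per-rport nodes above lean on DEFINE_DEBUGFS_ATTRIBUTE(), which generates the file_operations for a simple get/set pair around a u64 value. A minimal example of the same pattern outside qla2xxx; all names here are illustrative:

#include <linux/debugfs.h>

static u64 demo_value;

static int demo_get(void *data, u64 *val)
{
	*val = *(u64 *)data;
	return 0;
}

static int demo_set(void *data, u64 val)
{
	*(u64 *)data = val;
	return 0;
}
/* Generates 'demo_fops' with the simple-attribute read/write plumbing. */
DEFINE_DEBUGFS_ATTRIBUTE(demo_fops, demo_get, demo_set, "%llu\n");

static void demo_create_node(struct dentry *parent)
{
	/* 0600: root read/write, matching the dev_loss_tmo node above. */
	debugfs_create_file("demo", 0600, parent, &demo_value, &demo_fops);
}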
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index bba1b77fba7e..f0052d75849c 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -619,7 +619,7 @@ struct sts_entry_24xx {
#define SF_NVME_ERSP BIT_6
#define SF_FCP_RSP_DMA BIT_0
- __le16 retry_delay;
+ __le16 status_qualifier;
__le16 scsi_status; /* SCSI status. */
#define SS_CONFIRMATION_REQ BIT_12
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 0ced18f3104e..26dc055d93b3 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -129,6 +129,8 @@ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
void qla_rscn_replay(fc_port_t *fcport);
void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
+void qla_init_iocb_limit(scsi_qla_host_t *);
+
/*
* Global Data in qla_os.c source file.
@@ -175,6 +177,7 @@ extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
+extern int ql2xenforce_iocb_limit;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -704,6 +707,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
void qla_scan_work_fn(struct work_struct *);
+uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *);
+uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *);
/*
* Global Function Prototypes in qla_attr.c source file.
@@ -935,9 +940,10 @@ void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
extern void qla24xx_process_purex_list(struct purex_list *);
+extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
-void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index b569fd6e96d6..4d3e8b668c6a 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1502,7 +1502,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
return &p->p.req;
}
-static uint
+uint
qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
{
uint speeds = 0;
@@ -1546,7 +1546,7 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
}
return speeds;
}
- if (IS_QLA25XX(ha))
+ if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
if (IS_QLA24XX_TYPE(ha))
@@ -1556,7 +1556,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
return FDMI_PORT_SPEED_1GB;
}
-static uint
+
+uint
qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
{
switch (ha->link_data_rate) {
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 0bd04a62af83..fc8a55da04a0 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
qla2x00_rel_sp(sp);
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res)
+{
+ WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
+}
+
+void qla2xxx_rel_free_warning(srb_t *sp)
+{
+ WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
+}
+
/* Asynchronous Login/Logout Routines -------------------------------------- */
unsigned long
@@ -3288,6 +3298,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
j, fwdt->dump_size);
dump_size += fwdt->dump_size;
}
+ /* Add space for spare MPI fw dump. */
+ dump_size += ha->fwdt[1].dump_size;
} else {
req_q_size = req->length * sizeof(request_t);
rsp_q_size = rsp->length * sizeof(response_t);
@@ -3622,6 +3634,31 @@ out:
return ha->flags.lr_detected;
}
+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+ u16 i, num_qps;
+ u32 limit;
+ struct qla_hw_data *ha = vha->hw;
+
+ num_qps = ha->num_qpairs + 1;
+ limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+ ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+ ha->base_qpair->fwres.iocbs_limit = limit;
+ ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+ ha->base_qpair->fwres.iocbs_used = 0;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i]) {
+ ha->queue_pair_map[i]->fwres.iocbs_total =
+ ha->orig_fw_iocb_count;
+ ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+ ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+ limit / num_qps;
+ ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+ }
+ }
+}
+
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @vha: HA context
@@ -3690,9 +3727,7 @@ execute_fw_with_lr:
goto execute_fw_with_lr;
}
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
- IS_QLA28XX(ha)) &&
- (ha->zio_mode == QLA_ZIO_MODE_6))
+ if (IS_ZIO_THRESHOLD_CAPABLE(ha))
qla27xx_set_zio_threshold(vha,
ha->last_zio_threshold);
@@ -3723,6 +3758,7 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha);
+ qla_init_iocb_limit(vha);
/*
* Allocate the array of outstanding commands
@@ -4957,6 +4993,29 @@ qla2x00_free_fcport(fc_port_t *fcport)
kfree(fcport);
}
+static void qla_get_login_template(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ int rval;
+ u32 *bp, sz;
+ __be32 *q;
+
+ memset(ha->init_cb, 0, ha->init_cb_size);
+ sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+ rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+ ha->init_cb, sz);
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_init, vha, 0x00d1,
+ "PLOGI ELS param read fail.\n");
+ return;
+ }
+ q = (__be32 *)&ha->plogi_els_payld.fl_csp;
+
+ bp = (uint32_t *)ha->init_cb;
+ cpu_to_be32_array(q, bp, sz / 4);
+ ha->flags.plogi_template_valid = 1;
+}
+
/*
* qla2x00_configure_loop
* Updates Fibre Channel Device Database with what is actually on loop.
@@ -5000,6 +5059,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
clear_bit(RSCN_UPDATE, &vha->dpc_flags);
qla2x00_get_data_rate(vha);
+ qla_get_login_template(vha);
/* Determine what we need to do */
if ((ha->current_topology == ISP_CFG_FL ||
@@ -5084,32 +5144,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
{
- struct qla_hw_data *ha = vha->hw;
unsigned long flags;
fc_port_t *fcport;
- int rval;
-
- if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
- /* borrowing */
- u32 *bp, sz;
-
- memset(ha->init_cb, 0, ha->init_cb_size);
- sz = min_t(int, sizeof(struct els_plogi_payload),
- ha->init_cb_size);
- rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
- ha->init_cb, sz);
- if (rval == QLA_SUCCESS) {
- __be32 *q = &ha->plogi_els_payld.data[0];
- bp = (uint32_t *)ha->init_cb;
- cpu_to_be32_array(q, bp, sz / 4);
- memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
- } else {
- ql_dbg(ql_dbg_init, vha, 0x00d1,
- "PLOGI ELS param read fail.\n");
- goto skip_login;
- }
- }
+ if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
+ set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (fcport->n2n_flag) {
@@ -5118,7 +5157,6 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
}
}
-skip_login:
spin_lock_irqsave(&vha->work_lock, flags);
vha->scan.scan_retry++;
spin_unlock_irqrestore(&vha->work_lock, flags);
@@ -5486,6 +5524,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
qla2x00_iidma_fcport(vha, fcport);
+ qla2x00_dfs_create_rport(vha, fcport);
+
if (NVME_TARGET(vha->hw, fcport)) {
qla_nvme_register_remote(vha, fcport);
qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);
@@ -7109,10 +7149,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
unsigned long flags = 0;
struct qla_hw_data *ha = vha->hw;
struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
- int rval = QLA_SUCCESS;
if (IS_P3P_TYPE(ha))
- return rval;
+ return QLA_SUCCESS;
vha->flags.online = 0;
ha->isp_ops->disable_intrs(ha);
@@ -7127,7 +7166,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
if (IS_NOPOLLING_TYPE(ha))
ha->isp_ops->enable_intrs(ha);
- return rval;
+ return QLA_SUCCESS;
}
/* On sparc systems, obtain port and node WWN from firmware
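qla_init_iocb_limit() above caps driver IOCB usage at QLA_IOCB_PCT_LIMIT (95%) of the firmware total and divides that cap among the queue pairs plus the base qpair. A small standalone illustration of the arithmetic under assumed counts (2048 firmware IOCBs, three qpairs); the numbers are examples, not hardware values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t total = 2048;			/* assumed orig_fw_iocb_count */
	uint16_t num_qps = 3 + 1;		/* assumed num_qpairs + base  */
	uint32_t limit = total * 95 / 100;	/* 95% cap => 1945            */

	printf("iocbs_limit=%u iocbs_qp_limit=%u\n",
	       limit, limit / num_qps);		/* prints 1945 and 486 */
	return 0;
}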
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index 861dc522723c..c7aae438f984 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
return sp;
}
+void qla2xxx_rel_done_warning(srb_t *sp, int res);
+void qla2xxx_rel_free_warning(srb_t *sp);
+
static inline void
qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
{
sp->qpair = NULL;
+ sp->done = qla2xxx_rel_done_warning;
+ sp->free = qla2xxx_rel_free_warning;
mempool_free(sp, qpair->srb_mempool);
QLA_QPAIR_MARK_NOT_BUSY(qpair);
}
@@ -266,11 +271,41 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
}
static inline void
-qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
{
- if (retry_delay)
- fcport->retry_delay_timestamp = jiffies +
- (retry_delay * HZ / 10);
+ u8 scope;
+ u16 qual;
+#define SQ_SCOPE_MASK 0xc000 /* SAM-6 rev5 5.3.2 */
+#define SQ_SCOPE_SHIFT 14
+#define SQ_QUAL_MASK 0x3fff
+
+#define SQ_MAX_WAIT_SEC 60 /* Max I/O hold off time in seconds. */
+#define SQ_MAX_WAIT_TIME (SQ_MAX_WAIT_SEC * 10) /* in 100ms. */
+
+ if (!sts_qual) /* Common case. */
+ return;
+
+ scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
+ /* Handle only scope 1 or 2, which is for I-T nexus. */
+ if (scope != 1 && scope != 2)
+ return;
+
+ /* Skip processing if the retry delay timer is already in effect. */
+ if (fcport->retry_delay_timestamp &&
+ time_before(jiffies, fcport->retry_delay_timestamp))
+ return;
+
+ qual = sts_qual & SQ_QUAL_MASK;
+ if (qual < 1 || qual > 0x3fef)
+ return;
+ qual = min(qual, (u16)SQ_MAX_WAIT_TIME);
+
+ /* qual is expressed in 100ms increments. */
+ fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);
+
+ ql_log(ql_log_warn, fcport->vha, 0x5101,
+ "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
+ fcport->port_name, sts_qual, qual * 100);
}
static inline bool
@@ -343,3 +378,58 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)
return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
}
+
+enum {
+ RESOURCE_NONE,
+ RESOURCE_INI,
+};
+
+static inline int
+qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ u16 iocbs_used, i;
+ struct qla_hw_data *ha = qp->vha->hw;
+
+ if (!ql2xenforce_iocb_limit) {
+ iores->res_type = RESOURCE_NONE;
+ return 0;
+ }
+
+ if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ /* No need to acquire the qpair lock; it's just a rough calculation. */
+ iocbs_used = ha->base_qpair->fwres.iocbs_used;
+ for (i = 0; i < ha->max_qpairs; i++) {
+ if (ha->queue_pair_map[i])
+ iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+ }
+
+ if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+ qp->fwres.iocbs_used += iores->iocb_cnt;
+ return 0;
+ } else {
+ iores->res_type = RESOURCE_NONE;
+ return -ENOSPC;
+ }
+ }
+}
+
+static inline void
+qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+ switch (iores->res_type) {
+ case RESOURCE_NONE:
+ break;
+ default:
+ if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+ qp->fwres.iocbs_used -= iores->iocb_cnt;
+ } else {
+ /* should not happen */
+ qp->fwres.iocbs_used = 0;
+ }
+ break;
+ }
+ iores->res_type = RESOURCE_NONE;
+}
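qla2x00_set_retry_delay_timestamp() above now decodes the full SAM-6 status qualifier: the scope lives in bits 15:14 and the retry delay in bits 13:0, expressed in 100 ms increments and clamped to 60 s. A quick worked decode under an assumed qualifier value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t sts_qual = 0x4032;	/* assumed example value          */
	uint8_t scope = (sts_qual & 0xc000) >> 14;	/* 1: I-T nexus   */
	uint16_t qual = sts_qual & 0x3fff;	/* 0x32 = 50 increments   */

	/* 50 x 100 ms: hold off I/O to this nexus for 5000 ms. */
	printf("scope=%u delay_ms=%u\n", scope, qual * 100);
	return 0;
}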
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 0954fa41911c..93c2460b2e53 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -594,6 +594,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
uint32_t dsd_list_len;
struct dsd_dma *dsd_ptr;
struct ct6_dsd *ctx;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -612,12 +613,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
cur_seg = scsi_sglist(cmd);
@@ -704,6 +705,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
struct scsi_cmnd *cmd;
struct scatterlist *sg;
int i;
+ struct qla_qpair *qpair = sp->qpair;
cmd = GET_CMD_SP(sp);
@@ -721,12 +723,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
/* Set transfer direction */
if (cmd->sc_data_direction == DMA_TO_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
- vha->qla_stats.output_bytes += scsi_bufflen(cmd);
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += scsi_bufflen(cmd);
+ qpair->counters.output_requests++;
} else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
- vha->qla_stats.input_bytes += scsi_bufflen(cmd);
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += scsi_bufflen(cmd);
+ qpair->counters.input_requests++;
}
/* One DSD is available in the Command Type 3 IOCB */
@@ -1635,6 +1637,12 @@ qla24xx_start_scsi(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -1707,6 +1715,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -1820,6 +1829,12 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -1894,6 +1909,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return QLA_FUNCTION_FAILED;
}
@@ -1955,6 +1971,12 @@ qla2xxx_start_scsi_mq(srb_t *sp)
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = req_cnt;
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -2027,6 +2049,7 @@ queuing_error:
if (tot_dsds)
scsi_dma_unmap(cmd);
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
@@ -2155,6 +2178,12 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
/* Total Data and protection sg segment(s) */
tot_prot_dsds = nseg;
tot_dsds += nseg;
+
+ sp->iores.res_type = RESOURCE_INI;
+ sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+ if (qla_get_iocbs(sp->qpair, &sp->iores))
+ goto queuing_error;
+
if (req->cnt < (req_cnt + 2)) {
cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
rd_reg_dword_relaxed(req->req_q_out);
@@ -2232,6 +2261,7 @@ queuing_error:
}
/* Cleanup will be performed by the caller (queuecommand) */
+ qla_put_iocbs(sp->qpair, &sp->iores);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return QLA_FUNCTION_FAILED;
}
@@ -2348,6 +2378,14 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
if (sp->vha->flags.nvme_first_burst)
logio->io_parameter[0] =
cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+ if (sp->vha->flags.nvme2_enabled) {
+ /* Set service parameter BIT_8 for SLER support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_SLER);
+ /* Set service parameter BIT_9 for PI control support */
+ logio->io_parameter[0] |=
+ cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+ }
}
logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -2975,8 +3013,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
memset(ptr, 0, sizeof(struct els_plogi_payload));
memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
- &ha->plogi_els_payld.data,
- sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+ &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);
elsio->u.els_plogi.els_cmd = els_opcode;
elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 25e0a1684763..b35cef2f8c71 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -767,7 +767,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
ql_log(ql_log_warn, vha, 0x02f0,
"MPI Heartbeat stop. MPI reset is%s needed. "
"MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
- mb[0] & BIT_8 ? "" : " not",
+ mb[1] & BIT_8 ? "" : " not",
mb[0], mb[1], mb[2], mb[3]);
if ((mb[1] & BIT_8) == 0)
@@ -1716,35 +1716,35 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
{
struct qla_hw_data *ha = vha->hw;
sts_entry_t *pkt = iocb;
- srb_t *sp = NULL;
+ srb_t *sp;
uint16_t index;
index = LSW(pkt->handle);
if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
- "Invalid command index (%x) type %8ph.\n",
- index, iocb);
+ "%s: Invalid command index (%x) type %8ph.\n",
+ func, index, iocb);
if (IS_P3P_TYPE(ha))
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
else
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
- goto done;
+ return NULL;
}
sp = req->outstanding_cmds[index];
if (!sp) {
ql_log(ql_log_warn, vha, 0x5032,
- "Invalid completion handle (%x) -- timed-out.\n", index);
- return sp;
+ "%s: Invalid completion handle (%x) -- timed-out.\n",
+ func, index);
+ return NULL;
}
if (sp->handle != index) {
ql_log(ql_log_warn, vha, 0x5033,
- "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+ "%s: SRB handle (%x) mismatch %x.\n", func,
+ sp->handle, index);
return NULL;
}
req->outstanding_cmds[index] = NULL;
-
-done:
return sp;
}
@@ -2855,7 +2855,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
int logit = 1;
int res = 0;
uint16_t state_flags = 0;
- uint16_t retry_delay = 0;
+ uint16_t sts_qual = 0;
if (IS_FWI2_CAPABLE(ha)) {
comp_status = le16_to_cpu(sts24->comp_status);
@@ -2901,6 +2901,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
}
return;
}
+ qla_put_iocbs(sp->qpair, &sp->iores);
if (sp->cmd_type != TYPE_SRB) {
req->outstanding_cmds[handle] = NULL;
@@ -2953,8 +2954,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
sense_len = par_sense_len = rsp_info_len = resid_len =
fw_resid_len = 0;
if (IS_FWI2_CAPABLE(ha)) {
- u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
-
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le32_to_cpu(sts24->sense_len);
if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
@@ -2968,13 +2967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
host_to_fcp_swap(sts24->data, sizeof(sts24->data));
ox_id = le16_to_cpu(sts24->ox_id);
par_sense_len = sizeof(sts24->data);
- /* Valid values of the retry delay timer are 0x1-0xffef */
- if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
- retry_delay = sts24_retry_delay & 0x3fff;
- ql_dbg(ql_dbg_io, sp->vha, 0x3033,
- "%s: scope=%#x retry_delay=%#x\n", __func__,
- sts24_retry_delay >> 14, retry_delay);
- }
+ sts_qual = le16_to_cpu(sts24->status_qualifier);
} else {
if (scsi_status & SS_SENSE_LEN_VALID)
sense_len = le16_to_cpu(sts->req_sense_length);
@@ -3012,9 +3005,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* Check retry_delay_timer value if we receive a busy or
* queue full.
*/
- if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
- lscsi_status == SAM_STAT_BUSY)
- qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+ if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
+ lscsi_status == SAM_STAT_BUSY))
+ qla2x00_set_retry_delay_timestamp(fcport, sts_qual);
/*
* Based on Host and scsi status generate status code for Linux
@@ -3321,6 +3314,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
default:
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
+ qla_put_iocbs(sp->qpair, &sp->iores);
sp->done(sp, res);
return 0;
}
@@ -3406,6 +3400,32 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
sp->done(sp, comp_status);
}
+static void qla24xx_process_mbx_iocb_response(struct scsi_qla_host *vha,
+ struct rsp_que *rsp, struct sts_entry_24xx *pkt)
+{
+ struct qla_hw_data *ha = vha->hw;
+ srb_t *sp;
+ static const char func[] = "MBX-IOCB2";
+
+ sp = qla2x00_get_sp_from_handle(vha, func, rsp->req, pkt);
+ if (!sp)
+ return;
+
+ if (sp->type == SRB_SCSI_CMD ||
+ sp->type == SRB_NVME_CMD ||
+ sp->type == SRB_TM_CMD) {
+ ql_log(ql_log_warn, vha, 0x509d,
+ "Inconsistent event entry type %d\n", sp->type);
+ if (IS_P3P_TYPE(ha))
+ set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+ else
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ return;
+ }
+
+ qla24xx_mbx_iocb_entry(vha, rsp->req, (struct mbx_24xx_entry *)pkt);
+}
+
/**
* qla24xx_process_response_queue() - Process response queue entries.
* @vha: SCSI driver HA context
@@ -3422,8 +3442,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!ha->flags.fw_started)
return;
- if (rsp->qpair->cpuid != smp_processor_id())
+ if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+ rsp->qpair->rcv_intr = 1;
qla_cpu_update(rsp->qpair, smp_processor_id());
+ }
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -3513,8 +3535,7 @@ process_err:
(struct abort_entry_24xx *)pkt);
break;
case MBX_IOCB_TYPE:
- qla24xx_mbx_iocb_entry(vha, rsp->req,
- (struct mbx_24xx_entry *)pkt);
+ qla24xx_process_mbx_iocb_response(vha, rsp, pkt);
break;
case VP_CTRL_IOCB_TYPE:
qla_ctrlvp_completed(vha, rsp->req,
@@ -3873,7 +3894,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
}
ha = qpair->hw;
- queue_work(ha->wq, &qpair->q_work);
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
@@ -3899,7 +3920,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- queue_work(ha->wq, &qpair->q_work);
+ queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);
return IRQ_HANDLED;
}
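The two MSI-X handlers above switch from queue_work() to queue_work_on(smp_processor_id(), ...), pinning the response-queue bottom half to the CPU that took the interrupt instead of letting the workqueue pick any CPU, which keeps the response ring hot in that CPU's cache. A minimal sketch of the idiom; the handler and work item are hypothetical:

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

/* Assumed allocated elsewhere, e.g. via alloc_workqueue(). */
static struct workqueue_struct *demo_wq;
static struct work_struct demo_rsp_work;

static irqreturn_t demo_msix_handler(int irq, void *dev_id)
{
	/* Run the deferred half on the interrupted CPU for locality. */
	queue_work_on(smp_processor_id(), demo_wq, &demo_rsp_work);
	return IRQ_HANDLED;
}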
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 226f1428d3e5..bf858179d23c 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -845,7 +845,7 @@ qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
* Context:
* Kernel context.
*/
-#define CONFIG_XLOGINS_MEM 0x3
+#define CONFIG_XLOGINS_MEM 0x9
int
qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
{
@@ -872,8 +872,9 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mcp->flags = 0;
rval = qla2x00_mailbox_command(vha, mcp);
if (rval != QLA_SUCCESS) {
- /*EMPTY*/
- ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+ ql_dbg(ql_dbg_mbx, vha, 0x111b,
+ "EXlogin Failed=%x. MB0=%x MB11=%x\n",
+ rval, mcp->mb[0], mcp->mb[11]);
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
@@ -1092,6 +1093,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: FC-NVMe is Enabled (0x%x)\n",
__func__, ha->fw_attributes_h);
}
+
+	/* BIT_13 of the extended FW attributes indicates NVMe2 support */
+ if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
+ ql_log(ql_log_info, vha, 0xd302,
+ "Firmware supports NVMe2 0x%x\n",
+ ha->fw_attributes_ext[0]);
+ vha->flags.nvme2_enabled = 1;
+ }
}
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
@@ -1121,12 +1130,18 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
if (ha->flags.scm_supported_a &&
(ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
ha->flags.scm_supported_f = 1;
- memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
ha->sf_init_cb->flags |= BIT_13;
}
ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
(ha->flags.scm_supported_f) ? "Supported" :
"Not Supported");
+
+ if (vha->flags.nvme2_enabled) {
+ /* set BIT_15 of special feature control block for SLER */
+ ha->sf_init_cb->flags |= BIT_15;
+		/* set BIT_14 of special feature control block for PI CTRL */
+ ha->sf_init_cb->flags |= BIT_14;
+ }
}
failed:
@@ -1822,7 +1837,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
}
- if (ha->flags.scm_supported_f) {
+ if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
mcp->mb[1] |= BIT_1;
mcp->mb[16] = MSW(ha->sf_init_cb_dma);
mcp->mb[17] = LSW(ha->sf_init_cb_dma);
@@ -3979,7 +3994,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (fcport) {
fcport->plogi_nack_done_deadline = jiffies + HZ;
- fcport->dm_login_expire = jiffies + 2*HZ;
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->scan_state = QLA_FCPORT_FOUND;
fcport->n2n_flag = 1;
fcport->keep_nport_handle = 1;
@@ -4925,8 +4941,6 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
return QLA_MEMORY_ALLOC_FAILED;
}
- memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE);
-
/* List of Purex ELS */
cmd_opcode[0] = ELS_FPIN;
cmd_opcode[1] = ELS_RDP;
@@ -4958,51 +4972,12 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
"Done %s.\n", __func__);
}
- dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+ dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
els_cmd_map, els_cmd_map_dma);
return rval;
}
-int
-qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
- dma_addr_t bbc_dma)
-{
- mbx_cmd_t mc;
- mbx_cmd_t *mcp = &mc;
- int rval;
-
- if (!IS_FWI2_CAPABLE(vha->hw))
- return QLA_FUNCTION_FAILED;
-
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
- "Entered %s.\n", __func__);
-
- mcp->mb[0] = MBC_GET_RNID_PARAMS;
- mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
- mcp->mb[2] = MSW(LSD(bbc_dma));
- mcp->mb[3] = LSW(LSD(bbc_dma));
- mcp->mb[6] = MSW(MSD(bbc_dma));
- mcp->mb[7] = LSW(MSD(bbc_dma));
- mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
- mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
- mcp->in_mb = MBX_1|MBX_0;
- mcp->buf_size = sizeof(*bbc);
- mcp->flags = MBX_DMA_IN;
- mcp->tov = MBX_TOV_SECONDS;
- rval = qla2x00_mailbox_command(vha, mcp);
-
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x118f,
- "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
- } else {
- ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
- "Done %s.\n", __func__);
- }
-
- return rval;
-}
-
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 15efe2f04b86..08cfe043ac66 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -808,11 +808,9 @@ static void qla_do_work(struct work_struct *work)
{
unsigned long flags;
struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
- struct scsi_qla_host *vha;
- struct qla_hw_data *ha = qpair->hw;
+ struct scsi_qla_host *vha = qpair->vha;
spin_lock_irqsave(&qpair->qp_lock, flags);
- vha = pci_get_drvdata(ha->pdev);
qla24xx_process_response_queue(vha, qpair->rsp);
spin_unlock_irqrestore(&qpair->qp_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index 90bbc61f361b..5cc1bbb1ed74 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -42,7 +42,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
req.port_name = wwn_to_u64(fcport->port_name);
req.node_name = wwn_to_u64(fcport->node_name);
req.port_role = 0;
- req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
+ req.dev_loss_tmo = 0;
if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
req.port_role = FC_PORT_ROLE_NVME_INITIATOR;
@@ -69,6 +69,14 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
return ret;
}
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
+ ql_log(ql_log_info, vha, 0x212a,
+ "PortID:%06x Supports SLER\n", req.port_id);
+
+ if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
+ ql_log(ql_log_info, vha, 0x212b,
+ "PortID:%06x Supports PI control\n", req.port_id);
+
rport = fcport->nvme_remote_port->private;
rport->fcport = fcport;
@@ -368,6 +376,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
struct srb_iocb *nvme = &sp->u.iocb_cmd;
struct scatterlist *sgl, *sg;
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+ struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
uint32_t rval = QLA_SUCCESS;
/* Setup qpair pointers */
@@ -399,8 +408,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
}
if (unlikely(!fd->sqid)) {
- struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
-
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
atomic_inc(&ha->nvme_active_aen_cnt);
@@ -428,8 +435,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
	/* No data transfer - how do we check that the buffer length is 0? */
if (fd->io_dir == NVMEFC_FCP_READ) {
cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
- vha->qla_stats.input_bytes += fd->payload_length;
- vha->qla_stats.input_requests++;
+ qpair->counters.input_bytes += fd->payload_length;
+ qpair->counters.input_requests++;
} else if (fd->io_dir == NVMEFC_FCP_WRITE) {
cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
if ((vha->flags.nvme_first_burst) &&
@@ -441,11 +448,16 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
cmd_pkt->control_flags |=
cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
}
- vha->qla_stats.output_bytes += fd->payload_length;
- vha->qla_stats.output_requests++;
+ qpair->counters.output_bytes += fd->payload_length;
+ qpair->counters.output_requests++;
} else if (fd->io_dir == 0) {
cmd_pkt->control_flags = 0;
}
+ /* Set BIT_13 of control flags for Async event */
+ if (vha->flags.nvme2_enabled &&
+ cmd->sqe.common.opcode == nvme_admin_async_event) {
+ cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
+ }
/* Set NPORT-ID */
cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);
@@ -548,6 +560,14 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
return rval;
vha = fcport->vha;
+
+ if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+ return rval;
+
+ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+ (qpair && !qpair->fw_started) || fcport->deleted)
+ return -EBUSY;
+
/*
* If we know the dev is going away while the transport is still sending
* IO's return busy back to stall the IO Q. This happens when the
@@ -683,7 +703,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
struct nvme_fc_port_template *tmpl;
struct qla_hw_data *ha;
struct nvme_fc_port_info pinfo;
- int ret = EINVAL;
+ int ret = -EINVAL;
if (!IS_ENABLED(CONFIG_NVME_FC))
return ret;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.h b/drivers/scsi/qla2xxx/qla_nvme.h
index fbb844226630..5d5f115a77c3 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.h
+++ b/drivers/scsi/qla2xxx/qla_nvme.h
@@ -14,9 +14,6 @@
#include "qla_def.h"
#include "qla_dsd.h"
-/* default dev loss time (seconds) before transport tears down ctrl */
-#define NVME_FC_DEV_LOSS_TMO 30
-
#define NVME_ATIO_CMD_OFF 32
#define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
#define Q2T_NVME_NUM_TAGS 2048
@@ -57,6 +54,7 @@ struct cmd_nvme {
uint64_t rsvd;
__le16 control_flags; /* Control Flags */
+#define CF_ADMIN_ASYNC_EVENT BIT_13
#define CF_NVME_FIRST_BURST_ENABLE BIT_11
#define CF_DIF_SEG_DESCR_ENABLE BIT_3
#define CF_DATA_SEG_DESCR_ENABLE BIT_2
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8da00ba54aec..2bb015b58609 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -40,6 +40,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
"Set this to take full dump on MPI hang.");
+int ql2xenforce_iocb_limit = 1;
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+	"Enforce IOCB throttling to avoid FW congestion. (default: 1)");
+
/*
* CT6 CTX allocation cache
*/
@@ -1885,7 +1890,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Any upper-dword bits set? */
if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
- !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+ !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
/* Ok, a 64bit DMA mask is applicable. */
ha->flags.enable_64bit_addressing = 1;
ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
@@ -1895,7 +1900,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
}
dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
- pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+ dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
}
static void
@@ -3316,6 +3321,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
for (i = 0; i < ha->max_qpairs; i++)
qla2xxx_create_qpair(base_vha, 5, 0, startit);
}
+ qla_init_iocb_limit(base_vha);
if (ha->flags.running_gold_fw)
goto skip_dpc;
@@ -4225,6 +4231,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
&ha->sf_init_cb_dma);
if (!ha->sf_init_cb)
goto fail_sf_init_cb;
+ memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
"sf_init_cb=%p.\n", ha->sf_init_cb);
}
@@ -4379,11 +4386,12 @@ int
qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
{
int rval;
- uint16_t size, max_cnt, temp;
+ uint16_t size, max_cnt;
+ uint32_t temp;
struct qla_hw_data *ha = vha->hw;
	/* Return if we don't need to allocate any extended logins */
- if (!ql2xexlogins)
+ if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
return QLA_SUCCESS;
if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))
@@ -4872,7 +4880,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
}
INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);
- sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+ sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
ql_dbg(ql_dbg_init, vha, 0x0041,
"Allocated the host=%p hw=%p vha=%p dev_name=%s",
vha->host, vha->hw, vha,
@@ -5001,7 +5009,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)
switch (code) {
case QLA_UEVENT_CODE_FW_DUMP:
- snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+ snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
vha->host_no);
break;
default:
@@ -5089,6 +5097,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->fc4_type = e->u.new_sess.fc4_type;
if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+ fcport->dm_login_expire = jiffies +
+ QLA_N2N_WAIT_TIME * HZ;
fcport->fc4_type = FS_FC4TYPE_FCP;
fcport->n2n_flag = 1;
if (vha->flags.nvme_enabled)
@@ -5810,98 +5820,6 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
return true;
}
-static uint
-qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha)
-{
- if (IS_CNA_CAPABLE(ha))
- return RDP_PORT_SPEED_10GB;
-
- if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
- unsigned int speeds = 0;
-
- if (ha->max_supported_speed == 2) {
- if (ha->min_supported_speed <= 6)
- speeds |= RDP_PORT_SPEED_64GB;
- }
-
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1) {
- if (ha->min_supported_speed <= 5)
- speeds |= RDP_PORT_SPEED_32GB;
- }
-
- if (ha->max_supported_speed == 2 ||
- ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 4)
- speeds |= RDP_PORT_SPEED_16GB;
- }
-
- if (ha->max_supported_speed == 1 ||
- ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 3)
- speeds |= RDP_PORT_SPEED_8GB;
- }
-
- if (ha->max_supported_speed == 0) {
- if (ha->min_supported_speed <= 2)
- speeds |= RDP_PORT_SPEED_4GB;
- }
-
- return speeds;
- }
-
- if (IS_QLA2031(ha))
- return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB|
- RDP_PORT_SPEED_4GB;
-
- if (IS_QLA25XX(ha))
- return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB|
- RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
- if (IS_QLA24XX_TYPE(ha))
- return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB|
- RDP_PORT_SPEED_1GB;
-
- if (IS_QLA23XX(ha))
- return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
- return RDP_PORT_SPEED_1GB;
-}
-
-static uint
-qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha)
-{
- switch (ha->link_data_rate) {
- case PORT_SPEED_1GB:
- return RDP_PORT_SPEED_1GB;
-
- case PORT_SPEED_2GB:
- return RDP_PORT_SPEED_2GB;
-
- case PORT_SPEED_4GB:
- return RDP_PORT_SPEED_4GB;
-
- case PORT_SPEED_8GB:
- return RDP_PORT_SPEED_8GB;
-
- case PORT_SPEED_10GB:
- return RDP_PORT_SPEED_10GB;
-
- case PORT_SPEED_16GB:
- return RDP_PORT_SPEED_16GB;
-
- case PORT_SPEED_32GB:
- return RDP_PORT_SPEED_32GB;
-
- case PORT_SPEED_64GB:
- return RDP_PORT_SPEED_64GB;
-
- default:
- return RDP_PORT_SPEED_UNKNOWN;
- }
-}
-
/*
 * Function Name: qla24xx_process_purex_rdp
*
@@ -5921,12 +5839,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
dma_addr_t rsp_els_dma;
dma_addr_t rsp_payload_dma;
dma_addr_t stat_dma;
- dma_addr_t bbc_dma;
dma_addr_t sfp_dma;
struct els_entry_24xx *rsp_els = NULL;
struct rdp_rsp_payload *rsp_payload = NULL;
struct link_statistics *stat = NULL;
- struct buffer_credit_24xx *bbc = NULL;
uint8_t *sfp = NULL;
uint16_t sfp_flags = 0;
uint rsp_payload_length = sizeof(*rsp_payload);
@@ -5970,9 +5886,6 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
&stat_dma, GFP_KERNEL);
- bbc = dma_alloc_coherent(&ha->pdev->dev, sizeof(*bbc),
- &bbc_dma, GFP_KERNEL);
-
/* Prepare Response IOCB */
rsp_els->entry_type = ELS_IOCB_TYPE;
rsp_els->entry_count = 1;
@@ -6068,9 +5981,9 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
rsp_payload->port_speed_desc.desc_len =
cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
- qla25xx_rdp_port_speed_capability(ha));
+ qla25xx_fdmi_port_speed_capability(ha));
rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
- qla25xx_rdp_port_speed_currently(ha));
+ qla25xx_fdmi_port_speed_currently(ha));
/* Link Error Status Descriptor */
rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);
@@ -6126,13 +6039,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);
- if (bbc) {
- memset(bbc, 0, sizeof(*bbc));
- rval = qla24xx_get_buffer_credits(vha, bbc, bbc_dma);
- if (!rval) {
- rsp_payload->buffer_credit_desc.fcport_b2b =
- cpu_to_be32(LSW(bbc->parameter[0]));
- }
+ if (ha->flags.plogi_template_valid) {
+ uint32_t tmp =
+ be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
+ rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
}
if (rsp_payload_length < sizeof(*rsp_payload))
@@ -6310,9 +6220,6 @@ send:
}
dealloc:
- if (bbc)
- dma_free_coherent(&ha->pdev->dev, sizeof(*bbc),
- bbc, bbc_dma);
if (stat)
dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
stat, stat_dma);
@@ -7289,8 +7196,10 @@ qla2x00_timer(struct timer_list *t)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
+ index = atomic_read(&ha->nvme_active_aen_cnt);
if (!vha->vp_idx &&
- (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+ (index != ha->nvme_last_rptd_aen) &&
+ (index >= DEFAULT_ZIO_THRESHOLD) &&
ha->zio_mode == QLA_ZIO_MODE_6 &&
!ha->flags.host_shutting_down) {
ql_log(ql_log_info, vha, 0x3002,
@@ -7302,9 +7211,8 @@ qla2x00_timer(struct timer_list *t)
}
if (!vha->vp_idx &&
- (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
- (ha->zio_mode == QLA_ZIO_MODE_6) &&
- (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+ atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
+ IS_ZIO_THRESHOLD_CAPABLE(ha)) {
ql_log(ql_log_info, vha, 0x3002,
"Sched: Set ZIO exchange threshold to %d.\n",
ha->last_zio_threshold);
@@ -8044,7 +7952,6 @@ module_exit(qla2x00_module_exit);
MODULE_AUTHOR("QLogic Corporation");
MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
MODULE_LICENSE("GPL");
-MODULE_VERSION(QLA2XXX_VERSION);
MODULE_FIRMWARE(FW_FILE_ISP21XX);
MODULE_FIRMWARE(FW_FILE_ISP22XX);
MODULE_FIRMWARE(FW_FILE_ISP2300);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 2d445bdb2129..a27a625839e6 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1111,6 +1111,8 @@ void qlt_free_session_done(struct work_struct *work)
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
sess->free_pending = 0;
+ qla2x00_dfs_remove_rport(vha, sess);
+
ql_dbg(ql_dbg_disc, vha, 0xf001,
"Unregistration of sess %p %8phC finished fcp_cnt %d\n",
sess, sess->port_name, vha->fcport_count);
@@ -1229,14 +1231,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
case DSC_DELETE_PEND:
return;
case DSC_DELETED:
- if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
- wake_up_all(&tgt->waitQ);
- if (sess->vha->fcport_count == 0)
- wake_up_all(&sess->vha->fcport_waitQ);
-
if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
- !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+ !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
+ if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
+ wake_up_all(&tgt->waitQ);
+
+ if (sess->vha->fcport_count == 0)
+ wake_up_all(&sess->vha->fcport_waitQ);
return;
+ }
break;
case DSC_UPD_FCPORT:
/*
@@ -2025,7 +2028,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
struct qla_tgt_mgmt_cmd *mcmd =
container_of(work, struct qla_tgt_mgmt_cmd, work);
struct qla_hw_data *ha = mcmd->vha->hw;
- int rc = EIO;
+ int rc;
uint32_t tag;
unsigned long flags;
@@ -3781,7 +3784,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
"multiple abort. %p transport_state %x, t_state %x, "
"se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
- return EIO;
+ return -EIO;
}
cmd->aborted = 1;
cmd->trc_flags |= TRC_ABORT;
@@ -5668,7 +5671,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
/* found existing exchange */
qpair->retry_term_cnt++;
if (qpair->retry_term_cnt >= 5) {
- rc = EIO;
+ rc = -EIO;
qpair->retry_term_cnt = 0;
ql_log(ql_log_warn, vha, 0xffff,
"Unable to send ABTS Respond. Dumping firmware.\n");
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 8dc82cfd38b2..0af3e7fa31f0 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -12,33 +12,6 @@
#define IOBASE(vha) IOBAR(ISPREG(vha))
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
-/* hardware_lock assumed held. */
-static void
-qla27xx_write_remote_reg(struct scsi_qla_host *vha,
- u32 addr, u32 data)
-{
- struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
-
- ql_dbg(ql_dbg_misc, vha, 0xd300,
- "%s: addr/data = %xh/%xh\n", __func__, addr, data);
-
- wrt_reg_dword(&reg->iobase_addr, 0x40);
- wrt_reg_dword(&reg->iobase_c4, data);
- wrt_reg_dword(&reg->iobase_window, addr);
-}
-
-void
-qla27xx_reset_mpi(scsi_qla_host_t *vha)
-{
- ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd301,
- "Entered %s.\n", __func__);
-
- qla27xx_write_remote_reg(vha, 0x104050, 0x40004);
- qla27xx_write_remote_reg(vha, 0x10405c, 0x4);
-
- vha->hw->stat.num_mpi_reset++;
-}
-
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
@@ -906,8 +879,8 @@ qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
uint8_t v[] = { 0, 0, 0, 0, 0, 0 };
WARN_ON_ONCE(sscanf(qla2x00_version_str,
- "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
- v+0, v+1, v+2, v+3, v+4, v+5) != 6);
+ "%hhu.%hhu.%hhu.%hhu",
+ v + 0, v + 1, v + 2, v + 3) != 4);
tmp->driver_info[0] = cpu_to_le32(
v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0]);
@@ -1028,7 +1001,6 @@ void
qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
ulong flags = 0;
- bool need_mpi_reset = true;
#ifndef __CHECKER__
if (!hardware_locked)
@@ -1036,14 +1008,20 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
#endif
if (!vha->hw->mpi_fw_dump) {
ql_log(ql_log_warn, vha, 0x02f3, "-> mpi_fwdump no buffer\n");
- } else if (vha->hw->mpi_fw_dumped) {
- ql_log(ql_log_warn, vha, 0x02f4,
- "-> MPI firmware already dumped (%p) -- ignoring request\n",
- vha->hw->mpi_fw_dump);
} else {
struct fwdt *fwdt = &vha->hw->fwdt[1];
ulong len;
void *buf = vha->hw->mpi_fw_dump;
+ bool walk_template_only = false;
+
+ if (vha->hw->mpi_fw_dumped) {
+ /* Use the spare area for any further dumps. */
+ buf += fwdt->dump_size;
+ walk_template_only = true;
+ ql_log(ql_log_warn, vha, 0x02f4,
+			    "-> MPI firmware already dumped -- saving dump to temporary buffer %p.\n",
+ buf);
+ }
ql_log(ql_log_warn, vha, 0x02f5, "-> fwdt1 running...\n");
if (!fwdt->template) {
@@ -1058,9 +1036,10 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
ql_log(ql_log_warn, vha, 0x02f7,
"-> fwdt1 fwdump residual=%+ld\n",
fwdt->dump_size - len);
- } else {
- need_mpi_reset = false;
}
+ vha->hw->stat.num_mpi_reset++;
+ if (walk_template_only)
+ goto bailout;
vha->hw->mpi_fw_dump_len = len;
vha->hw->mpi_fw_dumped = 1;
@@ -1072,8 +1051,6 @@ qla27xx_mpi_fwdump(scsi_qla_host_t *vha, int hardware_locked)
}
bailout:
- if (need_mpi_reset)
- qla27xx_reset_mpi(vha);
#ifndef __CHECKER__
if (!hardware_locked)
spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8ccd9ba1ddef..120e511d2ed5 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,9 +7,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.01.00.25-k"
+#define QLA2XXX_VERSION "10.02.00.103-k"
#define QLA_DRIVER_MAJOR_VER 10
-#define QLA_DRIVER_MINOR_VER 1
+#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 0
+#define QLA_DRIVER_BETA_VER 103
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 44bfe162654a..61017acd3458 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -850,7 +850,7 @@ static ssize_t tcm_qla2xxx_tpg_attrib_##name##_show( \
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
struct tcm_qla2xxx_tpg, se_tpg); \
\
- return sprintf(page, "%u\n", tpg->tpg_attrib.name); \
+ return sprintf(page, "%d\n", tpg->tpg_attrib.name); \
} \
\
static ssize_t tcm_qla2xxx_tpg_attrib_##name##_store( \
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4a7ef971a387..6444f4bb61b3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1170,7 +1170,6 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
uint32_t state, uint32_t conn_err)
{
struct ddb_entry *ddb_entry;
- int status = QLA_ERROR;
/* check for out of range index */
if (fw_ddb_index >= MAX_DDB_ENTRIES)
@@ -1192,7 +1191,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state);
exit_ddb_event:
- return status;
+ return QLA_ERROR;
}
/**
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 676778cbc550..4775baac43c2 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1254,7 +1254,7 @@ static int qla4xxx_get_host_stats(struct Scsi_Host *shost, char *buf, int len)
le64_to_cpu(ql_iscsi_stats->iscsi_sequence_error);
exit_host_stats:
if (ql_iscsi_stats)
- dma_free_coherent(&ha->pdev->dev, host_stats_size,
+ dma_free_coherent(&ha->pdev->dev, stats_size,
ql_iscsi_stats, iscsi_stats_dma);
ql4_printk(KERN_INFO, ha, "%s: Get host stats done\n",
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 48ff7d88af86..d84e218d32cb 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -1468,22 +1468,10 @@ static struct platform_driver qpti_sbus_driver = {
.probe = qpti_sbus_probe,
.remove = qpti_sbus_remove,
};
-
-static int __init qpti_init(void)
-{
- return platform_driver_register(&qpti_sbus_driver);
-}
-
-static void __exit qpti_exit(void)
-{
- platform_driver_unregister(&qpti_sbus_driver);
-}
+module_platform_driver(qpti_sbus_driver);
MODULE_DESCRIPTION("QlogicISP SBUS driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION("2.1");
MODULE_FIRMWARE("qlogic/isp1000.bin");
-
-module_init(qpti_init);
-module_exit(qpti_exit);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index a87e40aec11f..24c0f7ec0351 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -209,10 +209,6 @@ static const char *sdebug_version_date = "20200710";
#define OPT_MEDIUM_ERR_ADDR 0x1234 /* that's sector 4660 in decimal */
#define OPT_MEDIUM_ERR_NUM 10 /* number of consecutive medium errs */
-/* If REPORT LUNS has luns >= 256 it can choose "flat space" (value 1)
- * or "peripheral device" addressing (value 0) */
-#define SAM2_LUN_ADDRESS_METHOD 0
-
/* SDEBUG_CANQUEUE is the maximum number of commands that can be queued
* (for response) per submit queue at one time. Can be reduced by max_queue
* option. Command responses are not queued when jdelay=0 and ndelay=0. The
@@ -791,6 +787,13 @@ static bool sdebug_wp;
static enum blk_zoned_model sdeb_zbc_model = BLK_ZONED_NONE;
static char *sdeb_zbc_model_s;
+enum sam_lun_addr_method {SAM_LUN_AM_PERIPHERAL = 0x0,
+ SAM_LUN_AM_FLAT = 0x1,
+ SAM_LUN_AM_LOGICAL_UNIT = 0x2,
+ SAM_LUN_AM_EXTENDED = 0x3};
+static enum sam_lun_addr_method sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+static int sdebug_lun_am_i = (int)SAM_LUN_AM_PERIPHERAL;
+
static unsigned int sdebug_store_sectors;
static sector_t sdebug_capacity; /* in sectors */
@@ -4179,6 +4182,8 @@ static int resp_report_luns(struct scsi_cmnd *scp,
if ((k * RL_BUCKET_ELEMS) + j > lun_cnt)
break;
int_to_scsilun(lun++, lun_p);
+ if (lun > 1 && sdebug_lun_am == SAM_LUN_AM_FLAT)
+ lun_p->scsi_lun[0] |= 0x40;
}
if (j < RL_BUCKET_ELEMS)
break;
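Peripheral addressing can only express LUNs 0-255; flat space extends that to 16383 by folding the LUN into the 14 bits below a 01b address-method code, which is what the |= 0x40 above selects. A small illustrative encoder for the single-level case (the kernel's int_to_scsilun() handles the general layout):

#include <stdint.h>
#include <string.h>

/* Encode a LUN <= 0x3fff in SAM flat-space format: bits 7:6 of byte 0
 * carry the address method (01b = flat) and the remaining 14 bits of
 * the first two bytes carry the LUN; bytes 2..7 stay zero. */
static void lun_to_flat(uint16_t lun, uint8_t buf[8])
{
	memset(buf, 0, 8);
	buf[0] = 0x40 | ((lun >> 8) & 0x3f);
	buf[1] = lun & 0xff;
}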
@@ -4939,6 +4944,7 @@ static struct sdebug_dev_info *find_build_dev_info(struct scsi_device *sdev)
pr_err("Host info NULL\n");
return NULL;
}
+
list_for_each_entry(devip, &sdbg_host->dev_info_list, dev_list) {
if ((devip->used) && (devip->channel == sdev->channel) &&
(devip->target == sdev->id) &&
@@ -5252,7 +5258,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
{
struct msdos_partition *pp;
- int starts[SDEBUG_MAX_PARTS + 2];
+ int starts[SDEBUG_MAX_PARTS + 2], max_part_secs;
int sectors_per_part, num_sectors, k;
int heads_by_sects, start_sec, end_sec;
@@ -5263,14 +5269,18 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
sdebug_num_parts = SDEBUG_MAX_PARTS;
pr_warn("reducing partitions to %d\n", SDEBUG_MAX_PARTS);
}
- num_sectors = (int)sdebug_store_sectors;
+ num_sectors = (int)get_sdebug_capacity();
sectors_per_part = (num_sectors - sdebug_sectors_per)
/ sdebug_num_parts;
heads_by_sects = sdebug_heads * sdebug_sectors_per;
starts[0] = sdebug_sectors_per;
- for (k = 1; k < sdebug_num_parts; ++k)
+ max_part_secs = sectors_per_part;
+ for (k = 1; k < sdebug_num_parts; ++k) {
starts[k] = ((k * sectors_per_part) / heads_by_sects)
* heads_by_sects;
+ if (starts[k] - starts[k - 1] < max_part_secs)
+ max_part_secs = starts[k] - starts[k - 1];
+ }
starts[sdebug_num_parts] = num_sectors;
starts[sdebug_num_parts + 1] = 0;
@@ -5279,7 +5289,7 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
pp = (struct msdos_partition *)(ramp + 0x1be);
for (k = 0; starts[k + 1]; ++k, ++pp) {
start_sec = starts[k];
- end_sec = starts[k + 1] - 1;
+ end_sec = starts[k] + max_part_secs - 1;
pp->boot_ind = 0;
pp->cyl = start_sec / heads_by_sects;
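Because each start is rounded down to a cylinder boundary (heads_by_sects), the gaps between neighbouring partitions come out uneven; sizing every partition to the smallest gap keeps each one inside its slot. A self-contained sketch of the same arithmetic with made-up geometry:

#include <stdio.h>

int main(void)
{
	/* Made-up geometry: 16384 sectors, 4 partitions, 16 heads x 32 spt */
	int num_sectors = 16384, parts = 4, sectors_per = 32;
	int heads_by_sects = 16 * sectors_per;
	int per_part = (num_sectors - sectors_per) / parts;
	int starts[5], max_secs = per_part, k;

	starts[0] = sectors_per;
	for (k = 1; k < parts; k++) {
		starts[k] = k * per_part / heads_by_sects * heads_by_sects;
		if (starts[k] - starts[k - 1] < max_secs)
			max_secs = starts[k] - starts[k - 1];
	}
	for (k = 0; k < parts; k++)
		printf("part %d: start %d, end %d\n",
		       k, starts[k], starts[k] + max_secs - 1);
	return 0;
}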
@@ -5579,6 +5589,7 @@ module_param_named(lbpu, sdebug_lbpu, int, S_IRUGO);
module_param_named(lbpws, sdebug_lbpws, int, S_IRUGO);
module_param_named(lbpws10, sdebug_lbpws10, int, S_IRUGO);
module_param_named(lowest_aligned, sdebug_lowest_aligned, int, S_IRUGO);
+module_param_named(lun_format, sdebug_lun_am_i, int, S_IRUGO | S_IWUSR);
module_param_named(max_luns, sdebug_max_luns, int, S_IRUGO | S_IWUSR);
module_param_named(max_queue, sdebug_max_queue, int, S_IRUGO | S_IWUSR);
module_param_named(medium_error_count, sdebug_medium_error_count, int,
@@ -5652,6 +5663,7 @@ MODULE_PARM_DESC(lbpws, "enable LBP, support WRITE SAME(16) with UNMAP bit (def=
MODULE_PARM_DESC(lbpws10, "enable LBP, support WRITE SAME(10) with UNMAP bit (def=0)");
MODULE_PARM_DESC(lowest_aligned, "lowest aligned lba (def=0)");
MODULE_PARM_DESC(max_luns, "number of LUNs per target to simulate (def=1)");
+MODULE_PARM_DESC(lun_format, "LUN format: 0 -> peripheral (def); 1 -> flat address method");
MODULE_PARM_DESC(max_queue, "max number of queued commands (1 to max(def))");
MODULE_PARM_DESC(medium_error_count, "count of sectors to return follow on MEDIUM error");
MODULE_PARM_DESC(medium_error_start, "starting sector number to return MEDIUM error");
@@ -6099,6 +6111,43 @@ every_nth_done:
}
static DRIVER_ATTR_RW(every_nth);
+static ssize_t lun_format_show(struct device_driver *ddp, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "%d\n", (int)sdebug_lun_am);
+}
+static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
+ size_t count)
+{
+ int n;
+ bool changed;
+
+ if (kstrtoint(buf, 0, &n))
+ return -EINVAL;
+ if (n >= 0) {
+ if (n > (int)SAM_LUN_AM_FLAT) {
+ pr_warn("only LUN address methods 0 and 1 are supported\n");
+ return -EINVAL;
+ }
+ changed = ((int)sdebug_lun_am != n);
+ sdebug_lun_am = n;
+ if (changed && sdebug_scsi_level >= 5) { /* >= SPC-3 */
+ struct sdebug_host_info *sdhp;
+ struct sdebug_dev_info *dp;
+
+ spin_lock(&sdebug_host_list_lock);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ }
+ }
+ spin_unlock(&sdebug_host_list_lock);
+ }
+ return count;
+ }
+ return -EINVAL;
+}
+static DRIVER_ATTR_RW(lun_format);
+
static ssize_t max_luns_show(struct device_driver *ddp, char *buf)
{
return scnprintf(buf, PAGE_SIZE, "%d\n", sdebug_max_luns);
@@ -6537,6 +6586,7 @@ static struct attribute *sdebug_drv_attrs[] = {
&driver_attr_dev_size_mb.attr,
&driver_attr_num_parts.attr,
&driver_attr_every_nth.attr,
+ &driver_attr_lun_format.attr,
&driver_attr_max_luns.attr,
&driver_attr_max_queue.attr,
&driver_attr_no_uld.attr,
@@ -6629,9 +6679,19 @@ static int __init scsi_debug_init(void)
pr_err("invalid physblk_exp %u\n", sdebug_physblk_exp);
return -EINVAL;
}
+
+ sdebug_lun_am = sdebug_lun_am_i;
+ if (sdebug_lun_am > SAM_LUN_AM_FLAT) {
+		pr_warn("Invalid LUN format %d, using default\n", (int)sdebug_lun_am);
+ sdebug_lun_am = SAM_LUN_AM_PERIPHERAL;
+ }
+
if (sdebug_max_luns > 256) {
- pr_warn("max_luns can be no more than 256, use default\n");
- sdebug_max_luns = DEF_MAX_LUNS;
+ if (sdebug_max_luns > 16384) {
+			pr_warn("max_luns can be no more than 16384, using default\n");
+ sdebug_max_luns = DEF_MAX_LUNS;
+ }
+ sdebug_lun_am = SAM_LUN_AM_FLAT;
}
if (sdebug_lowest_aligned > 0x3fff) {
@@ -7153,6 +7213,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
int k, na;
int errsts = 0;
+ u64 lun_index = sdp->lun & 0x3FFF;
u32 flags;
u16 sa;
u8 opcode = cmd[0];
@@ -7186,7 +7247,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
- if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
+ if (unlikely(lun_index >= sdebug_max_luns && !has_wlun_rl))
goto err_out;
sdeb_i = opcode_ind_arr[opcode]; /* fully mapped */
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 7d3571a2bd89..f11f51e2465f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -116,6 +116,14 @@ static int scsi_host_eh_past_deadline(struct Scsi_Host *shost)
return 1;
}
+static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd)
+{
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return true;
+
+ return ++cmd->retries <= cmd->allowed;
+}
+
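The helper centralizes the retry check so the new -1 sentinel (SCSI_CMD_RETRIES_NO_LIMIT, added to scsi_priv.h below) short-circuits before the counter is bumped. A user-space sketch of the same predicate (names here are illustrative):

#include <stdbool.h>

#define CMD_RETRIES_NO_LIMIT (-1)	/* mirrors SCSI_CMD_RETRIES_NO_LIMIT */

static bool retry_allowed(int *retries, int allowed)
{
	if (allowed == CMD_RETRIES_NO_LIMIT)
		return true;		/* limitless: budget never exhausted */
	return ++*retries <= allowed;	/* otherwise count this attempt */
}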
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -151,7 +159,7 @@ scmd_eh_abort_handler(struct work_struct *work)
"eh timeout, not retrying "
"aborted command\n"));
} else if (!scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
+ scsi_cmd_retry_allowed(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
@@ -1264,11 +1272,18 @@ int scsi_eh_get_sense(struct list_head *work_q,
* upper level.
*/
if (rtn == SUCCESS)
- /* we don't want this command reissued, just
- * finished with the sense data, so set
- * retries to the max allowed to ensure it
- * won't get reissued */
- scmd->retries = scmd->allowed;
+ /*
+ * We don't want this command reissued, just finished
+ * with the sense data, so set retries to the max
+ * allowed to ensure it won't get reissued. If the user
+ * has requested infinite retries, we also want to
+ * finish this command, so force completion by setting
+ * retries and allowed to the same value.
+ */
+ if (scmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ scmd->retries = scmd->allowed = 1;
+ else
+ scmd->retries = scmd->allowed;
else if (rtn != NEEDS_RETRY)
continue;
@@ -1755,8 +1770,8 @@ check_type:
if (scmd->request->cmd_flags & REQ_FAILFAST_DEV ||
blk_rq_is_passthrough(scmd->request))
return 1;
- else
- return 0;
+
+ return 0;
}
/**
@@ -1944,8 +1959,7 @@ maybe_retry:
* the request was not marked fast fail. Note that above,
* even if the request is marked fast fail, we still requeue
* for queue congestion conditions (QUEUE_FULL or BUSY) */
- if ((++scmd->retries) <= scmd->allowed
- && !scsi_noretry_cmd(scmd)) {
+ if (scsi_cmd_retry_allowed(scmd) && !scsi_noretry_cmd(scmd)) {
return NEEDS_RETRY;
} else {
/*
@@ -2091,8 +2105,7 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !scsi_noretry_cmd(scmd) &&
- (++scmd->retries <= scmd->allowed)) {
+ !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index a89478a0c588..97ff31ed2a44 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -549,10 +549,27 @@ static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
static void scsi_run_queue_async(struct scsi_device *sdev)
{
if (scsi_target(sdev)->single_lun ||
- !list_empty(&sdev->host->starved_list))
+ !list_empty(&sdev->host->starved_list)) {
kblockd_schedule_work(&sdev->requeue_work);
- else
- blk_mq_run_hw_queues(sdev->request_queue, true);
+ } else {
+ /*
+ * smp_mb() present in sbitmap_queue_clear() or implied in
+ * .end_io is for ordering writing .device_busy in
+ * scsi_device_unbusy() and reading sdev->restarts.
+ */
+ int old = atomic_read(&sdev->restarts);
+
+ /*
+ * ->restarts has to be kept as non-zero if new budget
+ * contention occurs.
+ *
+ * No need to run queue when either another re-run
+ * queue wins in updating ->restarts or a new budget
+ * contention occurs.
+ */
+ if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
+ blk_mq_run_hw_queues(sdev->request_queue, true);
+ }
}
/* Returns false when no more bytes to process, true if there are more */
@@ -652,6 +669,23 @@ static void scsi_io_completion_reprep(struct scsi_cmnd *cmd,
scsi_mq_requeue_cmd(cmd);
}
+static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd)
+{
+ struct request *req = cmd->request;
+ unsigned long wait_for;
+
+ if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT)
+ return false;
+
+ wait_for = (cmd->allowed + 1) * req->timeout;
+ if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+ scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
+ wait_for/HZ);
+ return true;
+ }
+ return false;
+}
+
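The factored-out bound is (allowed + 1) attempts times the per-request timeout, measured from allocation; with allowed = 5 and a 30-second timeout, a command is given up on once 180 seconds have elapsed. A plain-C sketch of the check, using seconds instead of jiffies (names are illustrative):

#include <stdbool.h>
#include <time.h>

#define CMD_RETRIES_NO_LIMIT (-1)

/* Has the command exceeded its total runtime budget of
 * (allowed + 1) * timeout_s seconds since allocation? */
static bool runtime_exceeded(time_t alloc, int allowed, int timeout_s)
{
	if (allowed == CMD_RETRIES_NO_LIMIT)
		return false;		/* limitless retries: no deadline */
	return time(NULL) > alloc + (time_t)(allowed + 1) * timeout_s;
}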
/* Helper for scsi_io_completion() when special action required. */
static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
{
@@ -660,7 +694,6 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
int level = 0;
enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
ACTION_DELAYED_RETRY} action;
- unsigned long wait_for = (cmd->allowed + 1) * req->timeout;
struct scsi_sense_hdr sshdr;
bool sense_valid;
bool sense_current = true; /* false implies "deferred sense" */
@@ -765,8 +798,7 @@ static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result)
} else
action = ACTION_FAIL;
- if (action != ACTION_FAIL &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies))
+ if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd))
action = ACTION_FAIL;
switch (action) {
@@ -1439,7 +1471,6 @@ static bool scsi_mq_lld_busy(struct request_queue *q)
static void scsi_softirq_done(struct request *rq)
{
struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
- unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
int disposition;
INIT_LIST_HEAD(&cmd->eh_entry);
@@ -1449,13 +1480,8 @@ static void scsi_softirq_done(struct request *rq)
atomic_inc(&cmd->device->ioerr_cnt);
disposition = scsi_decide_disposition(cmd);
- if (disposition != SUCCESS &&
- time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
- scmd_printk(KERN_ERR, cmd,
- "timing out command, waited %lus\n",
- wait_for/HZ);
+ if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd))
disposition = SUCCESS;
- }
scsi_log_completion(cmd, disposition);
@@ -1612,7 +1638,30 @@ static bool scsi_mq_get_budget(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
- return scsi_dev_queue_ready(q, sdev);
+ if (scsi_dev_queue_ready(q, sdev))
+ return true;
+
+ atomic_inc(&sdev->restarts);
+
+ /*
+ * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy).
+ * .restarts must be incremented before .device_busy is read because the
+ * code in scsi_run_queue_async() depends on the order of these operations.
+ */
+ smp_mb__after_atomic();
+
+ /*
+ * If all in-flight requests originated from this LUN are completed
+ * before reading .device_busy, sdev->device_busy will be observed as
+ * zero, then blk_mq_delay_run_hw_queues() will dispatch this request
+ * soon. Otherwise, completion of one of these requests will observe
+ * the .restarts flag, and the request queue will be run for handling
+ * this request, see scsi_end_request().
+ */
+ if (unlikely(atomic_read(&sdev->device_busy) == 0 &&
+ !scsi_device_blocked(sdev)))
+ blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
+ return false;
}
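The two halves of this budget-contention handshake are easiest to read together: scsi_mq_get_budget() increments ->restarts and then re-reads ->device_busy, while scsi_run_queue_async() claims the restart with a cmpxchg so exactly one completer re-runs the queue. A minimal C11-atomics sketch of the pattern (stand-in names, not the kernel API; seq_cst operations supply the barrier that smp_mb__after_atomic() provides in the kernel):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int restarts;	/* stand-in for sdev->restarts */
static atomic_int busy;		/* stand-in for sdev->device_busy */

/* Budget side: record the contention, then re-check for idleness. */
static bool budget_failed_need_rerun(void)
{
	atomic_fetch_add(&restarts, 1);
	return atomic_load(&busy) == 0;	/* idle: caller re-runs the queue */
}

/* Completion side: only the thread whose cmpxchg clears the counter
 * re-runs the queue, so concurrent completions don't all do it. */
static bool completion_should_rerun(void)
{
	int old = atomic_load(&restarts);

	return old != 0 && atomic_compare_exchange_strong(&restarts, &old, 0);
}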
static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index d12ada035961..180636d54982 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -15,6 +15,7 @@ struct scsi_host_template;
struct Scsi_Host;
struct scsi_nl_hdr;
+#define SCSI_CMD_RETRIES_NO_LIMIT -1
/*
* Scsi Error Handler Flags
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 2732fa65119c..2ff7f06203da 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -253,6 +253,7 @@ static const struct {
{ FC_PORTSPEED_25GBIT, "25 Gbit" },
{ FC_PORTSPEED_64GBIT, "64 Gbit" },
{ FC_PORTSPEED_128GBIT, "128 Gbit" },
+ { FC_PORTSPEED_256GBIT, "256 Gbit" },
{ FC_PORTSPEED_NOT_NEGOTIATED, "Not Negotiated" },
};
fc_bitfield_name_search(port_speed, fc_port_speed_names)
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 93f4374dce38..83c4d95756a9 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -194,7 +194,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
}
if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT,
- SD_MAX_RETRIES, &data, NULL))
+ sdkp->max_retries, &data, NULL))
return -EINVAL;
len = min_t(size_t, sizeof(buffer), data.length - data.header_length -
data.block_descriptor_length);
@@ -212,7 +212,7 @@ cache_type_store(struct device *dev, struct device_attribute *attr,
data.device_specific = 0;
if (scsi_mode_select(sdp, 1, sp, 8, buffer_data, len, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr)) {
+ sdkp->max_retries, &data, &sshdr)) {
if (scsi_sense_valid(&sshdr))
sd_print_sense_hdr(sdkp, &sshdr);
return -EINVAL;
@@ -543,6 +543,39 @@ zoned_cap_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(zoned_cap);
+static ssize_t
+max_retries_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+ struct scsi_device *sdev = sdkp->device;
+ int retries, err;
+
+ err = kstrtoint(buf, 10, &retries);
+ if (err)
+ return err;
+
+	if (retries == SCSI_CMD_RETRIES_NO_LIMIT ||
+	    (retries >= 0 && retries <= SD_MAX_RETRIES)) {
+ sdkp->max_retries = retries;
+ return count;
+ }
+
+ sdev_printk(KERN_ERR, sdev, "max_retries must be between -1 and %d\n",
+ SD_MAX_RETRIES);
+ return -EINVAL;
+}
+
+static ssize_t
+max_retries_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+ return sprintf(buf, "%d\n", sdkp->max_retries);
+}
+
+static DEVICE_ATTR_RW(max_retries);
+
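Once registered, the attribute can be driven from user space like the other scsi_disk knobs; a small illustrative writer (the sysfs path is an assumption for one particular disk, substitute the right host:channel:target:lun):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed path for illustration only. */
	const char *attr = "/sys/class/scsi_disk/0:0:0:0/max_retries";
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror(attr);
		return 1;
	}
	/* -1 requests limitless retries (SCSI_CMD_RETRIES_NO_LIMIT). */
	if (write(fd, "-1", 2) != 2)
		perror("write");
	close(fd);
	return 0;
}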
static struct attribute *sd_disk_attrs[] = {
&dev_attr_cache_type.attr,
&dev_attr_FUA.attr,
@@ -557,6 +590,7 @@ static struct attribute *sd_disk_attrs[] = {
&dev_attr_max_write_same_blocks.attr,
&dev_attr_max_medium_access_timeouts.attr,
&dev_attr_zoned_cap.attr,
+ &dev_attr_max_retries.attr,
NULL,
};
ATTRIBUTE_GROUPS(sd_disk);
@@ -665,7 +699,8 @@ static void scsi_disk_put(struct scsi_disk *sdkp)
static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
size_t len, bool send)
{
- struct scsi_device *sdev = data;
+ struct scsi_disk *sdkp = data;
+ struct scsi_device *sdev = sdkp->device;
u8 cdb[12] = { 0, };
int ret;
@@ -676,7 +711,7 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
ret = scsi_execute_req(sdev, cdb,
send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
- buffer, len, NULL, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */
@@ -839,6 +874,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
unsigned int data_len = 24;
@@ -862,7 +898,7 @@ static blk_status_t sd_setup_unmap_cmnd(struct scsi_cmnd *cmd)
put_unaligned_be64(lba, &buf[8]);
put_unaligned_be32(nr_blocks, &buf[16]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = SD_TIMEOUT;
@@ -874,6 +910,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -893,7 +930,7 @@ static blk_status_t sd_setup_write_same16_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be64(lba, &cmd->cmnd[2]);
put_unaligned_be32(nr_blocks, &cmd->cmnd[10]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
@@ -905,6 +942,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
{
struct scsi_device *sdp = cmd->device;
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
u64 lba = sectors_to_logical(sdp, blk_rq_pos(rq));
u32 nr_blocks = sectors_to_logical(sdp, blk_rq_sectors(rq));
u32 data_len = sdp->sector_size;
@@ -924,7 +962,7 @@ static blk_status_t sd_setup_write_same10_cmnd(struct scsi_cmnd *cmd,
put_unaligned_be32(lba, &cmd->cmnd[2]);
put_unaligned_be16(nr_blocks, &cmd->cmnd[7]);
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->transfersize = data_len;
rq->timeout = unmap ? SD_TIMEOUT : SD_WRITE_SAME_TIMEOUT;
@@ -1056,7 +1094,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
}
cmd->transfersize = sdp->sector_size;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
/*
* For WRITE SAME the data transferred via the DATA OUT buffer is
@@ -1078,6 +1116,7 @@ static blk_status_t sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
{
struct request *rq = cmd->request;
+ struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
/* flush requests don't perform I/O, zero the S/G table */
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
@@ -1085,7 +1124,7 @@ static blk_status_t sd_setup_flush_cmnd(struct scsi_cmnd *cmd)
cmd->cmnd[0] = SYNCHRONIZE_CACHE;
cmd->cmd_len = 10;
cmd->transfersize = 0;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
rq->timeout = rq->q->rq_timeout * SD_FLUSH_TIMEOUT_MULTIPLIER;
return BLK_STS_OK;
@@ -1262,7 +1301,7 @@ static blk_status_t sd_setup_read_write_cmnd(struct scsi_cmnd *cmd)
*/
cmd->transfersize = sdp->sector_size;
cmd->underflow = nr_blocks << 9;
- cmd->allowed = SD_MAX_RETRIES;
+ cmd->allowed = sdkp->max_retries;
cmd->sdb.length = nr_blocks * sdp->sector_size;
SCSI_LOG_HLQUEUE(1,
@@ -1611,7 +1650,7 @@ static unsigned int sd_check_events(struct gendisk *disk, unsigned int clearing)
if (scsi_block_when_processing_errors(sdp)) {
struct scsi_sense_hdr sshdr = { 0, };
- retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, SD_MAX_RETRIES,
+ retval = scsi_test_unit_ready(sdp, SD_TIMEOUT, sdkp->max_retries,
&sshdr);
/* failed to execute TUR, assume media not present */
@@ -1668,7 +1707,7 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
* flush everything.
*/
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, sshdr,
- timeout, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ timeout, sdkp->max_retries, 0, RQF_PM, NULL);
if (res == 0)
break;
}
@@ -1765,7 +1804,8 @@ static char sd_pr_type(enum pr_type type)
static int sd_pr_command(struct block_device *bdev, u8 sa,
u64 key, u64 sa_key, u8 type, u8 flags)
{
- struct scsi_device *sdev = scsi_disk(bdev->bd_disk)->device;
+ struct scsi_disk *sdkp = scsi_disk(bdev->bd_disk);
+ struct scsi_device *sdev = sdkp->device;
struct scsi_sense_hdr sshdr;
int result;
u8 cmd[16] = { 0, };
@@ -1781,7 +1821,7 @@ static int sd_pr_command(struct block_device *bdev, u8 sa,
data[20] = flags;
result = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, &data, sizeof(data),
- &sshdr, SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ &sshdr, SD_TIMEOUT, sdkp->max_retries, NULL);
if (driver_byte(result) == DRIVER_SENSE &&
scsi_sense_valid(&sshdr)) {
@@ -2117,7 +2157,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
the_result = scsi_execute_req(sdkp->device, cmd,
DMA_NONE, NULL, 0,
&sshdr, SD_TIMEOUT,
- SD_MAX_RETRIES, NULL);
+ sdkp->max_retries, NULL);
/*
* If the drive has indicated to us that it
@@ -2173,7 +2213,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
cmd[4] |= 1 << 4;
scsi_execute_req(sdkp->device, cmd, DMA_NONE,
NULL, 0, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES,
+ SD_TIMEOUT, sdkp->max_retries,
NULL);
spintime_expire = jiffies + 100 * HZ;
spintime = 1;
@@ -2315,7 +2355,7 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, RC16_LEN, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2400,7 +2440,7 @@ static int read_capacity_10(struct scsi_disk *sdkp, struct scsi_device *sdp,
the_result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
buffer, 8, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, NULL);
+ SD_TIMEOUT, sdkp->max_retries, NULL);
if (media_not_present(sdkp, &sshdr))
return -ENODEV;
@@ -2585,12 +2625,12 @@ sd_print_capacity(struct scsi_disk *sdkp,
/* called with buffer of length 512 */
static inline int
-sd_do_mode_sense(struct scsi_device *sdp, int dbd, int modepage,
+sd_do_mode_sense(struct scsi_disk *sdkp, int dbd, int modepage,
unsigned char *buffer, int len, struct scsi_mode_data *data,
struct scsi_sense_hdr *sshdr)
{
- return scsi_mode_sense(sdp, dbd, modepage, buffer, len,
- SD_TIMEOUT, SD_MAX_RETRIES, data,
+ return scsi_mode_sense(sdkp->device, dbd, modepage, buffer, len,
+ SD_TIMEOUT, sdkp->max_retries, data,
sshdr);
}
@@ -2613,14 +2653,14 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
}
if (sdp->use_192_bytes_for_3f) {
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 192, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 192, &data, NULL);
} else {
/*
* First attempt: ask for all pages (0x3F), but only 4 bytes.
* We have to start carefully: some devices hang if we ask
* for more than is available.
*/
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 4, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 4, &data, NULL);
/*
	 * Second attempt: ask for page 0. When only page 0 is
@@ -2629,13 +2669,13 @@ sd_read_write_protect_flag(struct scsi_disk *sdkp, unsigned char *buffer)
* CDB.
*/
if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0, buffer, 4, &data, NULL);
+ res = sd_do_mode_sense(sdkp, 0, 0, buffer, 4, &data, NULL);
/*
	 * Third attempt: ask for 255 bytes, as we did earlier.
*/
if (!scsi_status_is_good(res))
- res = sd_do_mode_sense(sdp, 0, 0x3F, buffer, 255,
+ res = sd_do_mode_sense(sdkp, 0, 0x3F, buffer, 255,
&data, NULL);
}
@@ -2697,7 +2737,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
}
/* cautiously ask */
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, first_len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, first_len,
&data, &sshdr);
if (!scsi_status_is_good(res))
@@ -2729,7 +2769,7 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
/* Get the data */
if (len > first_len)
- res = sd_do_mode_sense(sdp, dbd, modepage, buffer, len,
+ res = sd_do_mode_sense(sdkp, dbd, modepage, buffer, len,
&data, &sshdr);
if (scsi_status_is_good(res)) {
@@ -2848,7 +2888,7 @@ static void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
return;
res = scsi_mode_sense(sdp, 1, 0x0a, buffer, 36, SD_TIMEOUT,
- SD_MAX_RETRIES, &data, &sshdr);
+ sdkp->max_retries, &data, &sshdr);
if (!scsi_status_is_good(res) || !data.header_length ||
data.length < 6) {
@@ -3371,6 +3411,7 @@ static int sd_probe(struct device *dev)
sdkp->driver = &sd_template;
sdkp->disk = gd;
sdkp->index = index;
+ sdkp->max_retries = SD_MAX_RETRIES;
atomic_set(&sdkp->openers, 0);
atomic_set(&sdkp->device->ioerr_cnt, 0);
@@ -3434,7 +3475,7 @@ static int sd_probe(struct device *dev)
sd_revalidate_disk(gd);
if (sdkp->security) {
- sdkp->opal_dev = init_opal_dev(sdp, &sd_sec_submit);
+ sdkp->opal_dev = init_opal_dev(sdkp, &sd_sec_submit);
if (sdkp->opal_dev)
sd_printk(KERN_NOTICE, sdkp, "supports TCG Opal\n");
}
@@ -3549,7 +3590,7 @@ static int sd_start_stop_device(struct scsi_disk *sdkp, int start)
return -ENODEV;
res = scsi_execute(sdp, cmd, DMA_NONE, NULL, 0, NULL, &sshdr,
- SD_TIMEOUT, SD_MAX_RETRIES, 0, RQF_PM, NULL);
+ SD_TIMEOUT, sdkp->max_retries, 0, RQF_PM, NULL);
if (res) {
sd_print_result(sdkp, "Start/Stop Unit failed", res);
if (driver_byte(res) == DRIVER_SENSE)
diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h
index a3aad608bc38..b59136c4125b 100644
--- a/drivers/scsi/sd.h
+++ b/drivers/scsi/sd.h
@@ -90,6 +90,7 @@ struct scsi_disk {
#endif
atomic_t openers;
sector_t capacity; /* size in logical blocks */
+ int max_retries;
u32 max_xfer_blocks;
u32 opt_xfer_blocks;
u32 max_ws_blocks;
diff --git a/drivers/scsi/sense_codes.h b/drivers/scsi/sense_codes.h
index 201a536688de..805d4c13d070 100644
--- a/drivers/scsi/sense_codes.h
+++ b/drivers/scsi/sense_codes.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* The canonical list of T10 Additional Sense Codes is available at:
- * http://www.t10.org/lists/asc-num.txt [most recent: 20141221]
+ * http://www.t10.org/lists/asc-num.txt [most recent: 20200817]
*/
SENSE_CODE(0x0000, "No additional sense information")
@@ -29,6 +29,7 @@ SENSE_CODE(0x001E, "Conflicting SA creation request")
SENSE_CODE(0x001F, "Logical unit transitioning to another power condition")
SENSE_CODE(0x0020, "Extended copy information available")
SENSE_CODE(0x0021, "Atomic command aborted due to ACA")
+SENSE_CODE(0x0022, "Deferred microcode is pending")
SENSE_CODE(0x0100, "No index/sector signal")
@@ -72,6 +73,9 @@ SENSE_CODE(0x041F, "Logical unit not ready, microcode download required")
SENSE_CODE(0x0420, "Logical unit not ready, logical unit reset required")
SENSE_CODE(0x0421, "Logical unit not ready, hard reset required")
SENSE_CODE(0x0422, "Logical unit not ready, power cycle required")
+SENSE_CODE(0x0423, "Logical unit not ready, affiliation required")
+SENSE_CODE(0x0424, "Depopulation in progress")
+SENSE_CODE(0x0425, "Depopulation restoration in progress")
SENSE_CODE(0x0500, "Logical unit does not respond to selection")
@@ -104,6 +108,17 @@ SENSE_CODE(0x0B06, "Warning - non-volatile cache now volatile")
SENSE_CODE(0x0B07, "Warning - degraded power to non-volatile cache")
SENSE_CODE(0x0B08, "Warning - power loss expected")
SENSE_CODE(0x0B09, "Warning - device statistics notification active")
+SENSE_CODE(0x0B0A, "Warning - high critical temperature limit exceeded")
+SENSE_CODE(0x0B0B, "Warning - low critical temperature limit exceeded")
+SENSE_CODE(0x0B0C, "Warning - high operating temperature limit exceeded")
+SENSE_CODE(0x0B0D, "Warning - low operating temperature limit exceeded")
+SENSE_CODE(0x0B0E, "Warning - high critical humidity limit exceeded")
+SENSE_CODE(0x0B0F, "Warning - low critical humidity limit exceeded")
+SENSE_CODE(0x0B10, "Warning - high operating humidity limit exceeded")
+SENSE_CODE(0x0B11, "Warning - low operating humidity limit exceeded")
+SENSE_CODE(0x0B12, "Warning - microcode security at risk")
+SENSE_CODE(0x0B13, "Warning - microcode digital signature validation failure")
+SENSE_CODE(0x0B14, "Warning - physical element status change")
SENSE_CODE(0x0C00, "Write error")
SENSE_CODE(0x0C01, "Write error - recovered with auto reallocation")
@@ -122,6 +137,8 @@ SENSE_CODE(0x0C0D, "Write error - not enough unsolicited data")
SENSE_CODE(0x0C0E, "Multiple write errors")
SENSE_CODE(0x0C0F, "Defects in error window")
SENSE_CODE(0x0C10, "Incomplete multiple atomic write operations")
+SENSE_CODE(0x0C11, "Write error - recovery scan needed")
+SENSE_CODE(0x0C12, "Write error - insufficient zone resources")
SENSE_CODE(0x0D00, "Error detected by third party temporary initiator")
SENSE_CODE(0x0D01, "Third party device failure")
@@ -242,6 +259,9 @@ SENSE_CODE(0x2009, "Access denied - invalid LU identifier")
SENSE_CODE(0x200A, "Access denied - invalid proxy token")
SENSE_CODE(0x200B, "Access denied - ACL LUN conflict")
SENSE_CODE(0x200C, "Illegal command when not in append-only mode")
+SENSE_CODE(0x200D, "Not an administrative logical unit")
+SENSE_CODE(0x200E, "Not a subsidiary logical unit")
+SENSE_CODE(0x200F, "Not a conglomerate logical unit")
SENSE_CODE(0x2100, "Logical block address out of range")
SENSE_CODE(0x2101, "Invalid element address")
@@ -251,6 +271,8 @@ SENSE_CODE(0x2104, "Unaligned write command")
SENSE_CODE(0x2105, "Write boundary violation")
SENSE_CODE(0x2106, "Attempt to read invalid data")
SENSE_CODE(0x2107, "Read boundary violation")
+SENSE_CODE(0x2108, "Misaligned write command")
+SENSE_CODE(0x2109, "Attempt to access gap zone")
SENSE_CODE(0x2200, "Illegal function (use 20 00, 24 00, or 26 00)")
@@ -275,6 +297,7 @@ SENSE_CODE(0x2405, "Security working key frozen")
SENSE_CODE(0x2406, "Nonce not unique")
SENSE_CODE(0x2407, "Nonce timestamp out of range")
SENSE_CODE(0x2408, "Invalid XCDB")
+SENSE_CODE(0x2409, "Invalid fast format")
SENSE_CODE(0x2500, "Logical unit not supported")
@@ -297,6 +320,10 @@ SENSE_CODE(0x260F, "Invalid data-out buffer integrity check value")
SENSE_CODE(0x2610, "Data decryption key fail limit reached")
SENSE_CODE(0x2611, "Incomplete key-associated data set")
SENSE_CODE(0x2612, "Vendor specific key reference not found")
+SENSE_CODE(0x2613, "Application tag mode page is invalid")
+SENSE_CODE(0x2614, "Tape stream mirroring prevented")
+SENSE_CODE(0x2615, "Copy source or copy destination not authorized")
+SENSE_CODE(0x2616, "Fast copy not possible")
SENSE_CODE(0x2700, "Write protected")
SENSE_CODE(0x2701, "Hardware write protected")
@@ -342,6 +369,7 @@ SENSE_CODE(0x2A12, "Data encryption parameters changed by vendor specific event"
SENSE_CODE(0x2A13, "Data encryption key instance counter has changed")
SENSE_CODE(0x2A14, "SA creation capabilities data has changed")
SENSE_CODE(0x2A15, "Medium removal prevention preempted")
+SENSE_CODE(0x2A16, "Zone reset write pointer recommended")
SENSE_CODE(0x2B00, "Copy cannot execute since host cannot disconnect")
@@ -360,6 +388,11 @@ SENSE_CODE(0x2C0B, "Not reserved")
SENSE_CODE(0x2C0C, "Orwrite generation does not match")
SENSE_CODE(0x2C0D, "Reset write pointer not allowed")
SENSE_CODE(0x2C0E, "Zone is offline")
+SENSE_CODE(0x2C0F, "Stream not open")
+SENSE_CODE(0x2C10, "Unwritten data in zone")
+SENSE_CODE(0x2C11, "Descriptor format sense data required")
+SENSE_CODE(0x2C12, "Zone is inactive")
+SENSE_CODE(0x2C13, "Well known logical unit access required")
SENSE_CODE(0x2D00, "Overwrite error on update in place")
@@ -395,6 +428,8 @@ SENSE_CODE(0x3100, "Medium format corrupted")
SENSE_CODE(0x3101, "Format command failed")
SENSE_CODE(0x3102, "Zoned formatting failed due to spare linking")
SENSE_CODE(0x3103, "Sanitize command failed")
+SENSE_CODE(0x3104, "Depopulation failed")
+SENSE_CODE(0x3105, "Depopulation restoration failed")
SENSE_CODE(0x3200, "No defect spare location available")
SENSE_CODE(0x3201, "Defect list update failure")
@@ -419,6 +454,7 @@ SENSE_CODE(0x3802, "Esn - power management class event")
SENSE_CODE(0x3804, "Esn - media class event")
SENSE_CODE(0x3806, "Esn - device busy class event")
SENSE_CODE(0x3807, "Thin Provisioning soft threshold reached")
+SENSE_CODE(0x3808, "Depopulation interrupted")
SENSE_CODE(0x3900, "Saving parameters not supported")
@@ -456,6 +492,7 @@ SENSE_CODE(0x3B19, "Element enabled")
SENSE_CODE(0x3B1A, "Data transfer device removed")
SENSE_CODE(0x3B1B, "Data transfer device inserted")
SENSE_CODE(0x3B1C, "Too many logical objects on partition to support operation")
+SENSE_CODE(0x3B20, "Element static information changed")
SENSE_CODE(0x3D00, "Invalid bits in identify message")
@@ -488,6 +525,11 @@ SENSE_CODE(0x3F13, "iSCSI IP address removed")
SENSE_CODE(0x3F14, "iSCSI IP address changed")
SENSE_CODE(0x3F15, "Inspect referrals sense descriptors")
SENSE_CODE(0x3F16, "Microcode has been changed without reset")
+SENSE_CODE(0x3F17, "Zone transition to full")
+SENSE_CODE(0x3F18, "Bind completed")
+SENSE_CODE(0x3F19, "Bind redirected")
+SENSE_CODE(0x3F1A, "Subsidiary binding changed")
+
/*
* SENSE_CODE(0x40NN, "Ram failure")
* SENSE_CODE(0x40NN, "Diagnostic failure on component nn")
@@ -589,6 +631,9 @@ SENSE_CODE(0x550B, "Insufficient power for operation")
SENSE_CODE(0x550C, "Insufficient resources to create rod")
SENSE_CODE(0x550D, "Insufficient resources to create rod token")
SENSE_CODE(0x550E, "Insufficient zone resources")
+SENSE_CODE(0x550F, "Insufficient zone resources to complete write")
+SENSE_CODE(0x5510, "Maximum number of streams open")
+SENSE_CODE(0x5511, "Insufficient resources to bind")
SENSE_CODE(0x5700, "Unable to recover table-of-contents")
@@ -692,6 +737,7 @@ SENSE_CODE(0x5D69, "Firmware impending failure throughput performance")
SENSE_CODE(0x5D6A, "Firmware impending failure seek time performance")
SENSE_CODE(0x5D6B, "Firmware impending failure spin-up retry count")
SENSE_CODE(0x5D6C, "Firmware impending failure drive calibration retry count")
+SENSE_CODE(0x5D73, "Media impending failure endurance limit met")
SENSE_CODE(0x5DFF, "Failure prediction threshold exceeded (false)")
SENSE_CODE(0x5E00, "Low power condition on")
@@ -744,6 +790,8 @@ SENSE_CODE(0x6708, "Assign failure occurred")
SENSE_CODE(0x6709, "Multiply assigned logical unit")
SENSE_CODE(0x670A, "Set target port groups command failed")
SENSE_CODE(0x670B, "ATA device feature not enabled")
+SENSE_CODE(0x670C, "Command rejected")
+SENSE_CODE(0x670D, "Explicit bind not allowed")
SENSE_CODE(0x6800, "Logical unit not configured")
SENSE_CODE(0x6801, "Subsidiary logical unit not configured")
@@ -772,6 +820,10 @@ SENSE_CODE(0x6F04, "Media region code is mismatched to logical unit region")
SENSE_CODE(0x6F05, "Drive region must be permanent/region reset count error")
SENSE_CODE(0x6F06, "Insufficient block count for binding nonce recording")
SENSE_CODE(0x6F07, "Conflict in binding nonce recording")
+SENSE_CODE(0x6F08, "Insufficient permission")
+SENSE_CODE(0x6F09, "Invalid drive-host pairing server")
+SENSE_CODE(0x6F0A, "Drive-host pairing suspended")
+
/*
* SENSE_CODE(0x70NN, "Decompression exception short algorithm id of nn")
*/
diff --git a/drivers/scsi/smartpqi/Kconfig b/drivers/scsi/smartpqi/Kconfig
index 8eec241f074b..cb9e4e968b60 100644
--- a/drivers/scsi/smartpqi/Kconfig
+++ b/drivers/scsi/smartpqi/Kconfig
@@ -1,11 +1,11 @@
#
# Kernel configuration file for the SMARTPQI
#
-# Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+# Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
# Copyright (c) 2017-2018 Microsemi Corporation
# Copyright (c) 2016 Microsemi Corporation
# Copyright (c) 2016 PMC-Sierra, Inc.
-# (mailto:esc.storagedev@microsemi.com)
+# (mailto:storagedev@microchip.com)
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
diff --git a/drivers/scsi/smartpqi/smartpqi.h b/drivers/scsi/smartpqi/smartpqi.h
index 1129fe7a27ed..3e54590e6e92 100644
--- a/drivers/scsi/smartpqi/smartpqi.h
+++ b/drivers/scsi/smartpqi/smartpqi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -359,7 +359,7 @@ struct pqi_event_response {
struct pqi_iu_header header;
u8 event_type;
u8 reserved2 : 7;
- u8 request_acknowlege : 1;
+ u8 request_acknowledge : 1;
__le16 event_id;
__le32 additional_event_id;
union {
@@ -927,6 +927,7 @@ struct pqi_scsi_dev {
u8 new_device : 1;
u8 keep_device : 1;
u8 volume_offline : 1;
+ u8 rescan : 1;
bool aio_enabled; /* only valid for physical disks */
bool in_reset;
bool in_remove;
@@ -962,6 +963,7 @@ struct pqi_scsi_dev {
struct list_head delete_list_entry;
atomic_t scsi_cmds_outstanding;
+ atomic_t raid_bypass_cnt;
};
/* VPD inquiry pages */
@@ -1255,6 +1257,7 @@ struct bmic_sense_subsystem_info {
#define SA_DEVICE_TYPE_SATA 0x1
#define SA_DEVICE_TYPE_SAS 0x2
#define SA_DEVICE_TYPE_EXPANDER_SMP 0x5
+#define SA_DEVICE_TYPE_SES 0x6
#define SA_DEVICE_TYPE_CONTROLLER 0x7
#define SA_DEVICE_TYPE_NVME 0x9
diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c
index ca1e6cf6a38e..9d0229656681 100644
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
@@ -33,11 +33,11 @@
#define BUILD_TIMESTAMP
#endif
-#define DRIVER_VERSION "1.2.10-025"
+#define DRIVER_VERSION "1.2.16-010"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 2
-#define DRIVER_RELEASE 10
-#define DRIVER_REVISION 25
+#define DRIVER_RELEASE 16
+#define DRIVER_REVISION 10
#define DRIVER_NAME "Microsemi PQI Driver (v" \
DRIVER_VERSION BUILD_TIMESTAMP ")"
@@ -542,8 +542,7 @@ static int pqi_build_raid_path_request(struct pqi_ctrl_info *ctrl_info,
put_unaligned_be16(cdb_length, &cdb[7]);
break;
default:
- dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n",
- cmd);
+ dev_err(&ctrl_info->pci_dev->dev, "unknown command 0x%c\n", cmd);
break;
}
@@ -1300,33 +1299,59 @@ no_buffer:
device->volume_offline = volume_offline;
}
-#define PQI_INQUIRY_PAGE0_RETRIES 3
+static int pqi_get_physical_device_info(struct pqi_ctrl_info *ctrl_info,
+ struct pqi_scsi_dev *device,
+ struct bmic_identify_physical_device *id_phys)
+{
+ int rc;
-static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
+ memset(id_phys, 0, sizeof(*id_phys));
+
+ rc = pqi_identify_physical_device(ctrl_info, device,
+ id_phys, sizeof(*id_phys));
+ if (rc) {
+ device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
+ return rc;
+ }
+
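+	/* id_phys->model[] packs the vendor ID (8 bytes) followed by the model (16 bytes). */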
+ scsi_sanitize_inquiry_string(&id_phys->model[0], 8);
+ scsi_sanitize_inquiry_string(&id_phys->model[8], 16);
+
+ memcpy(device->vendor, &id_phys->model[0], sizeof(device->vendor));
+ memcpy(device->model, &id_phys->model[8], sizeof(device->model));
+
+ device->box_index = id_phys->box_index;
+ device->phys_box_on_bus = id_phys->phys_box_on_bus;
+ device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
+ device->queue_depth =
+ get_unaligned_le16(&id_phys->current_queue_depth_limit);
+ device->active_path_index = id_phys->active_path_number;
+ device->path_map = id_phys->redundant_path_present_map;
+ memcpy(&device->box,
+ &id_phys->alternate_paths_phys_box_on_port,
+ sizeof(device->box));
+ memcpy(&device->phys_connector,
+ &id_phys->alternate_paths_phys_connector,
+ sizeof(device->phys_connector));
+ device->bay = id_phys->phys_bay_in_box;
+
+ return 0;
+}
+
+static int pqi_get_logical_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device)
{
int rc;
u8 *buffer;
- unsigned int retries;
-
- if (device->is_expander_smp_device)
- return 0;
buffer = kmalloc(64, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
/* Send an inquiry to the device to see what it is. */
- for (retries = 0;;) {
- rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0,
- buffer, 64);
- if (rc == 0)
- break;
- if (pqi_is_logical_device(device) ||
- rc != PQI_CMD_STATUS_ABORTED ||
- ++retries > PQI_INQUIRY_PAGE0_RETRIES)
- goto out;
- }
+ rc = pqi_scsi_inquiry(ctrl_info, device->scsi3addr, 0, buffer, 64);
+ if (rc)
+ goto out;
scsi_sanitize_inquiry_string(&buffer[8], 8);
scsi_sanitize_inquiry_string(&buffer[16], 16);
@@ -1335,7 +1360,7 @@ static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
memcpy(device->vendor, &buffer[8], sizeof(device->vendor));
memcpy(device->model, &buffer[16], sizeof(device->model));
- if (pqi_is_logical_device(device) && device->devtype == TYPE_DISK) {
+ if (device->devtype == TYPE_DISK) {
if (device->is_external_raid_device) {
device->raid_level = SA_RAID_UNKNOWN;
device->volume_status = CISS_LV_OK;
@@ -1353,36 +1378,21 @@ out:
return rc;
}
-static void pqi_get_physical_disk_info(struct pqi_ctrl_info *ctrl_info,
+static int pqi_get_device_info(struct pqi_ctrl_info *ctrl_info,
struct pqi_scsi_dev *device,
struct bmic_identify_physical_device *id_phys)
{
int rc;
- memset(id_phys, 0, sizeof(*id_phys));
+ if (device->is_expander_smp_device)
+ return 0;
- rc = pqi_identify_physical_device(ctrl_info, device,
- id_phys, sizeof(*id_phys));
- if (rc) {
- device->queue_depth = PQI_PHYSICAL_DISK_DEFAULT_MAX_QUEUE_DEPTH;
- return;
- }
+ if (pqi_is_logical_device(device))
+ rc = pqi_get_logical_device_info(ctrl_info, device);
+ else
+ rc = pqi_get_physical_device_info(ctrl_info, device, id_phys);
- device->box_index = id_phys->box_index;
- device->phys_box_on_bus = id_phys->phys_box_on_bus;
- device->phy_connected_dev_type = id_phys->phy_connected_dev_type[0];
- device->queue_depth =
- get_unaligned_le16(&id_phys->current_queue_depth_limit);
- device->device_type = id_phys->device_type;
- device->active_path_index = id_phys->active_path_number;
- device->path_map = id_phys->redundant_path_present_map;
- memcpy(&device->box,
- &id_phys->alternate_paths_phys_box_on_port,
- sizeof(device->box));
- memcpy(&device->phys_connector,
- &id_phys->alternate_paths_phys_connector,
- sizeof(device->phys_connector));
- device->bay = id_phys->phys_bay_in_box;
+ return rc;
}
static void pqi_show_volume_status(struct pqi_ctrl_info *ctrl_info,
@@ -1521,11 +1531,10 @@ static inline void pqi_remove_device(struct pqi_ctrl_info *ctrl_info,
pqi_device_remove_start(device);
- rc = pqi_device_wait_for_pending_io(ctrl_info, device,
- PQI_PENDING_IO_TIMEOUT_SECS);
+ rc = pqi_device_wait_for_pending_io(ctrl_info, device, PQI_PENDING_IO_TIMEOUT_SECS);
if (rc)
dev_err(&ctrl_info->pci_dev->dev,
- "scsi %d:%d:%d:%d removing device with %d outstanding commands\n",
+ "scsi %d:%d:%d:%d removing device with %d outstanding command(s)\n",
ctrl_info->scsi_host->host_no, device->bus,
device->target, device->lun,
atomic_read(&device->scsi_cmds_outstanding));
@@ -1543,10 +1552,8 @@ static struct pqi_scsi_dev *pqi_find_scsi_dev(struct pqi_ctrl_info *ctrl_info,
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
- if (device->bus == bus && device->target == target &&
- device->lun == lun)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
+ if (device->bus == bus && device->target == target && device->lun == lun)
return device;
return NULL;
@@ -1572,15 +1579,12 @@ enum pqi_find_result {
};
static enum pqi_find_result pqi_scsi_find_entry(struct pqi_ctrl_info *ctrl_info,
- struct pqi_scsi_dev *device_to_find,
- struct pqi_scsi_dev **matching_device)
+ struct pqi_scsi_dev *device_to_find, struct pqi_scsi_dev **matching_device)
{
struct pqi_scsi_dev *device;
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry) {
- if (pqi_scsi3addr_equal(device_to_find->scsi3addr,
- device->scsi3addr)) {
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry) {
+ if (pqi_scsi3addr_equal(device_to_find->scsi3addr, device->scsi3addr)) {
*matching_device = device;
if (pqi_device_equal(device_to_find, device)) {
if (device_to_find->volume_offline)
@@ -1677,6 +1681,11 @@ static void pqi_scsi_update_device(struct pqi_scsi_dev *existing_device,
existing_device->target_lun_valid = true;
}
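+	/*
+	 * A volume that has just completed expansion must be rescanned so
+	 * the SCSI layer picks up its new capacity.
+	 */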
+ if ((existing_device->volume_status == CISS_LV_QUEUED_FOR_EXPANSION ||
+ existing_device->volume_status == CISS_LV_UNDERGOING_EXPANSION) &&
+ new_device->volume_status == CISS_LV_OK)
+ existing_device->rescan = true;
+
/* By definition, the scsi3addr and wwid fields are already the same. */
existing_device->is_physical_device = new_device->is_physical_device;
@@ -1775,8 +1784,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
/* Assume that all devices in the existing list have gone away. */
- list_for_each_entry(device, &ctrl_info->scsi_device_list,
- scsi_device_list_entry)
+ list_for_each_entry(device, &ctrl_info->scsi_device_list, scsi_device_list_entry)
device->device_gone = true;
for (i = 0; i < num_new_devices; i++) {
@@ -1816,7 +1824,7 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
list_for_each_entry_safe(device, next, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
if (device->device_gone) {
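+			/*
+			 * list_del_init() (rather than list_del()) lets
+			 * pqi_slave_destroy() detect that the device is
+			 * already off the controller's list.
+			 */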
- list_del(&device->scsi_device_list_entry);
+ list_del_init(&device->scsi_device_list_entry);
list_add_tail(&device->delete_list_entry, &delete_list);
}
}
@@ -1841,18 +1849,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
pqi_ctrl_ofa_done(ctrl_info);
/* Remove all devices that have gone away. */
- list_for_each_entry_safe(device, next, &delete_list,
- delete_list_entry) {
+ list_for_each_entry_safe(device, next, &delete_list, delete_list_entry) {
if (device->volume_offline) {
pqi_dev_info(ctrl_info, "offline", device);
pqi_show_volume_status(ctrl_info, device);
- } else {
- pqi_dev_info(ctrl_info, "removed", device);
}
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
list_del(&device->delete_list_entry);
- pqi_free_device(device);
+ if (pqi_is_device_added(device)) {
+ pqi_remove_device(ctrl_info, device);
+ } else {
+ if (!device->volume_offline)
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+ }
}
/*
@@ -1861,20 +1870,27 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
*/
list_for_each_entry(device, &ctrl_info->scsi_device_list,
scsi_device_list_entry) {
- if (device->sdev && device->queue_depth !=
- device->advertised_queue_depth) {
- device->advertised_queue_depth = device->queue_depth;
- scsi_change_queue_depth(device->sdev,
- device->advertised_queue_depth);
+ if (device->sdev) {
+ if (device->queue_depth !=
+ device->advertised_queue_depth) {
+ device->advertised_queue_depth = device->queue_depth;
+ scsi_change_queue_depth(device->sdev,
+ device->advertised_queue_depth);
+ }
+ if (device->rescan) {
+ scsi_rescan_device(&device->sdev->sdev_gendev);
+ device->rescan = false;
+ }
}
}
/* Expose any new devices. */
list_for_each_entry_safe(device, next, &add_list, add_list_entry) {
if (!pqi_is_device_added(device)) {
- pqi_dev_info(ctrl_info, "added", device);
rc = pqi_add_device(ctrl_info, device);
- if (rc) {
+ if (rc == 0) {
+ pqi_dev_info(ctrl_info, "added", device);
+ } else {
dev_warn(&ctrl_info->pci_dev->dev,
"scsi %d:%d:%d:%d addition failed, device not added\n",
ctrl_info->scsi_host->host_no,
@@ -1886,36 +1902,19 @@ static void pqi_update_device_list(struct pqi_ctrl_info *ctrl_info,
}
}
-static bool pqi_is_supported_device(struct pqi_scsi_dev *device)
+static inline bool pqi_is_supported_device(struct pqi_scsi_dev *device)
{
- bool is_supported;
-
- if (device->is_expander_smp_device)
- return true;
-
- is_supported = false;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_TAPE:
- case TYPE_MEDIUM_CHANGER:
- case TYPE_ENCLOSURE:
- is_supported = true;
- break;
- case TYPE_RAID:
- /*
- * Only support the HBA controller itself as a RAID
- * controller. If it's a RAID controller other than
- * the HBA itself (an external RAID controller, for
- * example), we don't support it.
- */
- if (pqi_is_hba_lunid(device->scsi3addr))
- is_supported = true;
- break;
- }
+ /*
+ * Only support the HBA controller itself as a RAID
+ * controller. If it's a RAID controller other than
+ * the HBA itself (an external RAID controller, for
+ * example), we don't support it.
+ */
+ if (device->device_type == SA_DEVICE_TYPE_CONTROLLER &&
+ !pqi_is_hba_lunid(device->scsi3addr))
+ return false;
- return is_supported;
+ return true;
}
static inline bool pqi_skip_device(u8 *scsi3addr)
@@ -1934,16 +1933,10 @@ static inline void pqi_mask_device(u8 *scsi3addr)
static inline bool pqi_is_device_with_sas_address(struct pqi_scsi_dev *device)
{
- if (!device->is_physical_device)
- return false;
-
- if (device->is_expander_smp_device)
- return true;
-
- switch (device->devtype) {
- case TYPE_DISK:
- case TYPE_ZBC:
- case TYPE_ENCLOSURE:
+ switch (device->device_type) {
+ case SA_DEVICE_TYPE_SAS:
+ case SA_DEVICE_TYPE_EXPANDER_SMP:
+ case SA_DEVICE_TYPE_SES:
return true;
}
@@ -2085,16 +2078,19 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr));
device->is_physical_device = is_physical_device;
if (is_physical_device) {
- if (phys_lun_ext_entry->device_type ==
- SA_DEVICE_TYPE_EXPANDER_SMP)
+ device->device_type = phys_lun_ext_entry->device_type;
+ if (device->device_type == SA_DEVICE_TYPE_EXPANDER_SMP)
device->is_expander_smp_device = true;
} else {
device->is_external_raid_device =
pqi_is_external_raid_addr(scsi3addr);
}
+ if (!pqi_is_supported_device(device))
+ continue;
+
/* Gather information about the device. */
- rc = pqi_get_device_info(ctrl_info, device);
+ rc = pqi_get_device_info(ctrl_info, device, id_phys);
if (rc == -ENOMEM) {
dev_warn(&ctrl_info->pci_dev->dev, "%s\n",
out_of_memory_msg);
@@ -2115,9 +2111,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
continue;
}
- if (!pqi_is_supported_device(device))
- continue;
-
pqi_assign_bus_target_lun(device);
if (device->is_physical_device) {
@@ -2129,7 +2122,6 @@ static int pqi_update_scsi_devices(struct pqi_ctrl_info *ctrl_info)
device->aio_handle =
phys_lun_ext_entry->aio_handle;
}
- pqi_get_physical_disk_info(ctrl_info, device, id_phys);
} else {
memcpy(device->volume_id, log_lun_ext_entry->volume_id,
sizeof(device->volume_id));
@@ -2160,31 +2152,6 @@ out:
return rc;
}
-static void pqi_remove_all_scsi_devices(struct pqi_ctrl_info *ctrl_info)
-{
- unsigned long flags;
- struct pqi_scsi_dev *device;
-
- while (1) {
- spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
-
- device = list_first_entry_or_null(&ctrl_info->scsi_device_list,
- struct pqi_scsi_dev, scsi_device_list_entry);
- if (device)
- list_del(&device->scsi_device_list_entry);
-
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
-
- if (!device)
- break;
-
- if (pqi_is_device_added(device))
- pqi_remove_device(ctrl_info, device);
- pqi_free_device(device);
- }
-}
-
static int pqi_scan_scsi_devices(struct pqi_ctrl_info *ctrl_info)
{
int rc = 0;
@@ -2462,7 +2429,6 @@ static int pqi_raid_bypass_submit_scsi_cmd(struct pqi_ctrl_info *ctrl_info,
offload_to_mirror =
(offload_to_mirror >= layout_map_count - 1) ?
0 : offload_to_mirror + 1;
- WARN_ON(offload_to_mirror >= layout_map_count);
device->offload_to_mirror = offload_to_mirror;
/*
* Avoid direct use of device->offload_to_mirror within this
@@ -2915,10 +2881,14 @@ static int pqi_interpret_task_management_response(
return rc;
}
-static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
- struct pqi_queue_group *queue_group)
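+/*
+ * A malformed response means the controller's queues can no longer be
+ * trusted, so the only safe recovery is to take the controller offline.
+ */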
+static inline void pqi_invalid_response(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_responses;
+ pqi_take_ctrl_offline(ctrl_info);
+}
+
+static int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info, struct pqi_queue_group *queue_group)
+{
+ int num_responses;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_io_request *io_request;
@@ -2930,6 +2900,13 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
while (1) {
oq_pi = readl(queue_group->oq_pi);
+ if (oq_pi >= ctrl_info->num_elements_per_oq) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "I/O interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, ctrl_info->num_elements_per_oq - 1, oq_ci);
+ return -1;
+ }
if (oq_pi == oq_ci)
break;
@@ -2938,10 +2915,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
(oq_ci * PQI_OPERATIONAL_OQ_ELEMENT_LENGTH);
request_id = get_unaligned_le16(&response->request_id);
- WARN_ON(request_id >= ctrl_info->max_io_slots);
+ if (request_id >= ctrl_info->max_io_slots) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) out of range (0-%u): producer index: %u consumer index: %u\n",
+ request_id, ctrl_info->max_io_slots - 1, oq_pi, oq_ci);
+ return -1;
+ }
io_request = &ctrl_info->io_request_pool[request_id];
- WARN_ON(atomic_read(&io_request->refcount) == 0);
+ if (atomic_read(&io_request->refcount) == 0) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "request ID in response (%u) does not match an outstanding I/O request: producer index: %u consumer index: %u\n",
+ request_id, oq_pi, oq_ci);
+ return -1;
+ }
switch (response->header.iu_type) {
case PQI_RESPONSE_IU_RAID_PATH_IO_SUCCESS:
@@ -2971,24 +2960,22 @@ static unsigned int pqi_process_io_intr(struct pqi_ctrl_info *ctrl_info,
io_request->error_info = ctrl_info->error_buffer +
(get_unaligned_le16(&response->error_index) *
PQI_ERROR_BUFFER_ELEMENT_LENGTH);
- pqi_process_io_error(response->header.iu_type,
- io_request);
+ pqi_process_io_error(response->header.iu_type, io_request);
break;
default:
+ pqi_invalid_response(ctrl_info);
dev_err(&ctrl_info->pci_dev->dev,
- "unexpected IU type: 0x%x\n",
- response->header.iu_type);
- break;
+ "unexpected IU type: 0x%x: producer index: %u consumer index: %u\n",
+ response->header.iu_type, oq_pi, oq_ci);
+ return -1;
}
- io_request->io_complete_callback(io_request,
- io_request->context);
+ io_request->io_complete_callback(io_request, io_request->context);
/*
* Note that the I/O request structure CANNOT BE TOUCHED after
* returning from the I/O completion callback!
*/
-
oq_ci = (oq_ci + 1) % ctrl_info->num_elements_per_oq;
}
@@ -3300,9 +3287,9 @@ static void pqi_ofa_capture_event_payload(struct pqi_event *event,
}
}
-static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
+static int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
{
- unsigned int num_events;
+ int num_events;
pqi_index_t oq_pi;
pqi_index_t oq_ci;
struct pqi_event_queue *event_queue;
@@ -3316,26 +3303,31 @@ static unsigned int pqi_process_event_intr(struct pqi_ctrl_info *ctrl_info)
while (1) {
oq_pi = readl(event_queue->oq_pi);
+ if (oq_pi >= PQI_NUM_EVENT_QUEUE_ELEMENTS) {
+ pqi_invalid_response(ctrl_info);
+ dev_err(&ctrl_info->pci_dev->dev,
+ "event interrupt: producer index (%u) out of range (0-%u): consumer index: %u\n",
+ oq_pi, PQI_NUM_EVENT_QUEUE_ELEMENTS - 1, oq_ci);
+ return -1;
+ }
+
if (oq_pi == oq_ci)
break;
num_events++;
- response = event_queue->oq_element_array +
- (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
+ response = event_queue->oq_element_array + (oq_ci * PQI_EVENT_OQ_ELEMENT_LENGTH);
event_index =
pqi_event_type_to_event_index(response->event_type);
- if (event_index >= 0) {
- if (response->request_acknowlege) {
- event = &ctrl_info->events[event_index];
- event->pending = true;
- event->event_type = response->event_type;
- event->event_id = response->event_id;
- event->additional_event_id =
- response->additional_event_id;
+ if (event_index >= 0 && response->request_acknowledge) {
+ event = &ctrl_info->events[event_index];
+ event->pending = true;
+ event->event_type = response->event_type;
+ event->event_id = response->event_id;
+ event->additional_event_id = response->additional_event_id;
+ if (event->event_type == PQI_EVENT_TYPE_OFA)
pqi_ofa_capture_event_payload(event, response);
- }
}
oq_ci = (oq_ci + 1) % PQI_NUM_EVENT_QUEUE_ELEMENTS;
@@ -3450,7 +3442,8 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
{
struct pqi_ctrl_info *ctrl_info;
struct pqi_queue_group *queue_group;
- unsigned int num_responses_handled;
+ int num_io_responses_handled;
+ int num_events_handled;
queue_group = data;
ctrl_info = queue_group->ctrl_info;
@@ -3458,17 +3451,25 @@ static irqreturn_t pqi_irq_handler(int irq, void *data)
if (!pqi_is_valid_irq(ctrl_info))
return IRQ_NONE;
- num_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ num_io_responses_handled = pqi_process_io_intr(ctrl_info, queue_group);
+ if (num_io_responses_handled < 0)
+ goto out;
- if (irq == ctrl_info->event_irq)
- num_responses_handled += pqi_process_event_intr(ctrl_info);
+ if (irq == ctrl_info->event_irq) {
+ num_events_handled = pqi_process_event_intr(ctrl_info);
+ if (num_events_handled < 0)
+ goto out;
+ } else {
+ num_events_handled = 0;
+ }
- if (num_responses_handled)
+ if (num_io_responses_handled + num_events_handled > 0)
atomic_inc(&ctrl_info->num_interrupts);
pqi_start_io(ctrl_info, queue_group, RAID_PATH, NULL);
pqi_start_io(ctrl_info, queue_group, AIO_PATH, NULL);
+out:
return IRQ_HANDLED;
}
@@ -5375,19 +5376,18 @@ static int pqi_scsi_queue_command(struct Scsi_Host *shost,
!blk_rq_is_passthrough(scmd->request)) {
rc = pqi_raid_bypass_submit_scsi_cmd(ctrl_info, device,
scmd, queue_group);
- if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY)
+ if (rc == 0 || rc == SCSI_MLQUEUE_HOST_BUSY) {
raid_bypassed = true;
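+			/* Counted for the raid_bypass_cnt sysfs attribute. */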
+ atomic_inc(&device->raid_bypass_cnt);
+ }
}
if (!raid_bypassed)
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
} else {
if (device->aio_enabled)
- rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_aio_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
else
- rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd,
- queue_group);
+ rc = pqi_raid_submit_scsi_cmd(ctrl_info, device, scmd, queue_group);
}
out:
@@ -5830,8 +5830,42 @@ static int pqi_map_queues(struct Scsi_Host *shost)
ctrl_info->pci_dev, 0);
}
-static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
- void __user *arg)
+static int pqi_slave_configure(struct scsi_device *sdev)
+{
+ struct pqi_scsi_dev *device;
+
+ device = sdev->hostdata;
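+	/* Record the INQUIRY-derived device type for the driver's own use. */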
+ device->devtype = sdev->type;
+
+ return 0;
+}
+
+static void pqi_slave_destroy(struct scsi_device *sdev)
+{
+ unsigned long flags;
+ struct pqi_scsi_dev *device;
+ struct pqi_ctrl_info *ctrl_info;
+
+ ctrl_info = shost_to_hba(sdev->host);
+
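+	/*
+	 * Detach the device from the controller's list under the list lock,
+	 * then free it outside the lock.
+	 */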
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (device) {
+ sdev->hostdata = NULL;
+ if (!list_empty(&device->scsi_device_list_entry))
+ list_del(&device->scsi_device_list_entry);
+ }
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ if (device) {
+ pqi_dev_info(ctrl_info, "removed", device);
+ pqi_free_device(device);
+ }
+}
+
+static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info, void __user *arg)
{
struct pci_dev *pci_dev;
u32 subsystem_vendor;
@@ -5848,8 +5882,7 @@ static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
pciinfo.dev_fn = pci_dev->devfn;
subsystem_vendor = pci_dev->subsystem_vendor;
subsystem_device = pci_dev->subsystem_device;
- pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) |
- subsystem_vendor;
+ pciinfo.board_id = ((subsystem_device << 16) & 0xffff0000) | subsystem_vendor;
if (copy_to_user(arg, &pciinfo, sizeof(pciinfo)))
return -EFAULT;
@@ -6258,8 +6291,7 @@ static ssize_t pqi_unique_id_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6296,8 +6328,7 @@ static ssize_t pqi_lunid_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6332,8 +6363,7 @@ static ssize_t pqi_path_info_show(struct device *dev,
device = sdev->hostdata;
if (!device) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6409,9 +6439,8 @@ static ssize_t pqi_sas_address_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
- if (pqi_is_logical_device(device)) {
- spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock,
- flags);
+ if (!device || !pqi_is_device_with_sas_address(device)) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
return -ENODEV;
}
@@ -6436,6 +6465,11 @@ static ssize_t pqi_ssd_smart_path_enabled_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
buffer[0] = device->raid_bypass_enabled ? '1' : '0';
buffer[1] = '\n';
buffer[2] = '\0';
@@ -6460,6 +6494,10 @@ static ssize_t pqi_raid_level_show(struct device *dev,
spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
if (pqi_is_logical_device(device))
raid_level = pqi_raid_level_to_string(device->raid_level);
@@ -6471,13 +6509,40 @@ static ssize_t pqi_raid_level_show(struct device *dev,
return snprintf(buffer, PAGE_SIZE, "%s\n", raid_level);
}
+static ssize_t pqi_raid_bypass_cnt_show(struct device *dev,
+ struct device_attribute *attr, char *buffer)
+{
+ struct pqi_ctrl_info *ctrl_info;
+ struct scsi_device *sdev;
+ struct pqi_scsi_dev *device;
+ unsigned long flags;
+ int raid_bypass_cnt;
+
+ sdev = to_scsi_device(dev);
+ ctrl_info = shost_to_hba(sdev->host);
+
+ spin_lock_irqsave(&ctrl_info->scsi_device_list_lock, flags);
+
+ device = sdev->hostdata;
+ if (!device) {
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+ return -ENODEV;
+ }
+
+ raid_bypass_cnt = atomic_read(&device->raid_bypass_cnt);
+
+ spin_unlock_irqrestore(&ctrl_info->scsi_device_list_lock, flags);
+
+ return snprintf(buffer, PAGE_SIZE, "0x%x\n", raid_bypass_cnt);
+}
+
static DEVICE_ATTR(lunid, 0444, pqi_lunid_show, NULL);
static DEVICE_ATTR(unique_id, 0444, pqi_unique_id_show, NULL);
static DEVICE_ATTR(path_info, 0444, pqi_path_info_show, NULL);
static DEVICE_ATTR(sas_address, 0444, pqi_sas_address_show, NULL);
-static DEVICE_ATTR(ssd_smart_path_enabled, 0444,
- pqi_ssd_smart_path_enabled_show, NULL);
+static DEVICE_ATTR(ssd_smart_path_enabled, 0444, pqi_ssd_smart_path_enabled_show, NULL);
static DEVICE_ATTR(raid_level, 0444, pqi_raid_level_show, NULL);
+static DEVICE_ATTR(raid_bypass_cnt, 0444, pqi_raid_bypass_cnt_show, NULL);
static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_lunid,
@@ -6486,6 +6551,7 @@ static struct device_attribute *pqi_sdev_attrs[] = {
&dev_attr_sas_address,
&dev_attr_ssd_smart_path_enabled,
&dev_attr_raid_level,
+ &dev_attr_raid_bypass_cnt,
NULL
};
@@ -6500,6 +6566,8 @@ static struct scsi_host_template pqi_driver_template = {
.eh_device_reset_handler = pqi_eh_device_reset_handler,
.ioctl = pqi_ioctl,
.slave_alloc = pqi_slave_alloc,
+ .slave_configure = pqi_slave_configure,
+ .slave_destroy = pqi_slave_destroy,
.map_queues = pqi_map_queues,
.sdev_attrs = pqi_sdev_attrs,
.shost_attrs = pqi_shost_attrs,
@@ -7590,7 +7658,6 @@ static void pqi_remove_ctrl(struct pqi_ctrl_info *ctrl_info)
{
pqi_cancel_rescan_worker(ctrl_info);
pqi_cancel_update_time_worker(ctrl_info);
- pqi_remove_all_scsi_devices(ctrl_info);
pqi_unregister_scsi(ctrl_info);
if (ctrl_info->pqi_mode_enabled)
pqi_revert_to_sis_mode(ctrl_info);
@@ -8300,6 +8367,10 @@ static const struct pci_device_id pqi_pci_id_table[] = {
},
{
PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
+ PCI_VENDOR_ID_ADAPTEC2, 0x080a)
+ },
+ {
+ PCI_DEVICE_SUB(PCI_VENDOR_ID_ADAPTEC2, 0x028f,
PCI_VENDOR_ID_ADAPTEC2, 0x0900)
},
{
@@ -8501,8 +8572,7 @@ static int __init pqi_init(void)
pr_info(DRIVER_NAME "\n");
- pqi_sas_transport_template =
- sas_attach_transport(&pqi_sas_transport_functions);
+ pqi_sas_transport_template = sas_attach_transport(&pqi_sas_transport_functions);
if (!pqi_sas_transport_template)
return -ENODEV;
diff --git a/drivers/scsi/smartpqi/smartpqi_sas_transport.c b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
index b7289112455c..999870eb9ed8 100644
--- a/drivers/scsi/smartpqi/smartpqi_sas_transport.c
+++ b/drivers/scsi/smartpqi/smartpqi_sas_transport.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.c b/drivers/scsi/smartpqi/smartpqi_sis.c
index f0d6e88ba2c1..26ea6b9d4199 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.c
+++ b/drivers/scsi/smartpqi/smartpqi_sis.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/smartpqi/smartpqi_sis.h b/drivers/scsi/smartpqi/smartpqi_sis.h
index 86b0e484d921..878d34ca6532 100644
--- a/drivers/scsi/smartpqi/smartpqi_sis.h
+++ b/drivers/scsi/smartpqi/smartpqi_sis.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* driver for Microsemi PQI-based storage controllers
- * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ * Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
* Copyright (c) 2016-2018 Microsemi Corporation
* Copyright (c) 2016 PMC-Sierra, Inc.
*
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index 03d43f016397..9e2e196bc202 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -124,16 +124,4 @@ static struct platform_driver snirm710_driver = {
.name = "snirm_53c710",
},
};
-
-static int __init snirm710_init(void)
-{
- return platform_driver_register(&snirm710_driver);
-}
-
-static void __exit snirm710_exit(void)
-{
- platform_driver_unregister(&snirm710_driver);
-}
-
-module_init(snirm710_init);
-module_exit(snirm710_exit);
+module_platform_driver(snirm710_driver);
diff --git a/drivers/scsi/snic/snic_debugfs.c b/drivers/scsi/snic/snic_debugfs.c
index 2b349365592f..4471c4c8aafa 100644
--- a/drivers/scsi/snic/snic_debugfs.c
+++ b/drivers/scsi/snic/snic_debugfs.c
@@ -439,26 +439,14 @@ snic_trc_seq_show(struct seq_file *sfp, void *data)
return 0;
}
-static const struct seq_operations snic_trc_seq_ops = {
+static const struct seq_operations snic_trc_sops = {
.start = snic_trc_seq_start,
.next = snic_trc_seq_next,
.stop = snic_trc_seq_stop,
.show = snic_trc_seq_show,
};
-static int
-snic_trc_open(struct inode *inode, struct file *filp)
-{
- return seq_open(filp, &snic_trc_seq_ops);
-}
-
-static const struct file_operations snic_trc_fops = {
- .owner = THIS_MODULE,
- .open = snic_trc_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
+DEFINE_SEQ_ATTRIBUTE(snic_trc);
/*
* snic_trc_debugfs_init : creates trace/tracing_enable files for trace
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index b3650c989ed4..6dd0ff188bb4 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -1387,19 +1387,15 @@ snic_issue_tm_req(struct snic *snic,
}
ret = snic_queue_itmf_req(snic, tmreq, sc, tmf, req_id);
- if (ret)
- goto tmreq_err;
-
- ret = 0;
tmreq_err:
if (ret) {
SNIC_HOST_ERR(snic->shost,
- "issu_tmreq: Queing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p rqi %p req_id %d tag %x fails err = %d\n",
tmf, sc, rqi, req_id, tag, ret);
} else {
SNIC_SCSI_DBG(snic->shost,
- "issu_tmreq: Queuing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
+ "issu_tmreq: Queueing ITMF(%d) Req, sc %p, rqi %p, req_id %d tag %x - Success.\n",
tmf, sc, rqi, req_id, tag);
}
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index f37df79e37e1..7de82f2c9757 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -270,22 +270,10 @@ static struct platform_driver esp_sun3x_driver = {
.name = "sun3x_esp",
},
};
-
-static int __init sun3x_esp_init(void)
-{
- return platform_driver_register(&esp_sun3x_driver);
-}
-
-static void __exit sun3x_esp_exit(void)
-{
- platform_driver_unregister(&esp_sun3x_driver);
-}
+module_platform_driver(esp_sun3x_driver);
MODULE_DESCRIPTION("Sun3x ESP SCSI driver");
MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sun3x_esp_init);
-module_exit(sun3x_esp_exit);
MODULE_ALIAS("platform:sun3x_esp");
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 964130d2c8a6..5dc38d35745b 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -606,21 +606,9 @@ static struct platform_driver esp_sbus_driver = {
.probe = esp_sbus_probe,
.remove = esp_sbus_remove,
};
-
-static int __init sunesp_init(void)
-{
- return platform_driver_register(&esp_sbus_driver);
-}
-
-static void __exit sunesp_exit(void)
-{
- platform_driver_unregister(&esp_sbus_driver);
-}
+module_platform_driver(esp_sbus_driver);
MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
-
-module_init(sunesp_init);
-module_exit(sunesp_exit);
diff --git a/drivers/scsi/sym53c8xx_2/sym_fw.c b/drivers/scsi/sym53c8xx_2/sym_fw.c
index c6db61b61de3..c536d2a9a657 100644
--- a/drivers/scsi/sym53c8xx_2/sym_fw.c
+++ b/drivers/scsi/sym53c8xx_2/sym_fw.c
@@ -369,7 +369,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
sym_name(np), (int) (cur-start));
++cur;
continue;
- };
+ }
/*
* We use the bogus value 0xf00ff00f ;-)
@@ -477,7 +477,7 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
default:
relocs = 0;
break;
- };
+ }
/*
* Scriptify:) the opcode.
@@ -533,5 +533,5 @@ void sym_fw_bind_script(struct sym_hcb *np, u32 *start, int len)
*cur++ = cpu_to_scr(new);
}
- };
+ }
}
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index 28edb6e53ea2..d9a045f9858c 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -156,12 +156,8 @@ void sym_xpt_async_bus_reset(struct sym_hcb *np)
static int sym_xerr_cam_status(int cam_status, int x_status)
{
if (x_status) {
- if (x_status & XE_PARITY_ERR)
+ if (x_status & XE_PARITY_ERR)
cam_status = DID_PARITY;
- else if (x_status &(XE_EXTRA_DATA|XE_SODL_UNRUN|XE_SWIDE_OVRUN))
- cam_status = DID_ERROR;
- else if (x_status & XE_BAD_PHASE)
- cam_status = DID_ERROR;
else
cam_status = DID_ERROR;
}
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index f6394999b98c..dcdb4eb1f90b 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -165,7 +165,6 @@ config SCSI_UFS_BSG
config SCSI_UFS_EXYNOS
tristate "EXYNOS specific hooks to UFS controller platform driver"
depends on SCSI_UFSHCD_PLATFORM && (ARCH_EXYNOS || COMPILE_TEST)
- select PHY_SAMSUNG_UFS
help
This selects the EXYNOS specific additions to UFSHCD platform driver.
UFS host on EXYNOS includes HCI and UNIPRO layer, and associates with
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index 8f1b6f61a776..5e6b95dbb578 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -940,7 +940,6 @@ static int exynos_ufs_init(struct ufs_hba *hba)
struct device *dev = hba->dev;
struct platform_device *pdev = to_platform_device(dev);
struct exynos_ufs *ufs;
- struct resource *res;
int ret;
ufs = devm_kzalloc(dev, sizeof(*ufs), GFP_KERNEL);
@@ -948,24 +947,21 @@ static int exynos_ufs_init(struct ufs_hba *hba)
return -ENOMEM;
/* exynos-specific hci */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vs_hci");
- ufs->reg_hci = devm_ioremap_resource(dev, res);
+ ufs->reg_hci = devm_platform_ioremap_resource_byname(pdev, "vs_hci");
if (IS_ERR(ufs->reg_hci)) {
dev_err(dev, "cannot ioremap for hci vendor register\n");
return PTR_ERR(ufs->reg_hci);
}
/* unipro */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "unipro");
- ufs->reg_unipro = devm_ioremap_resource(dev, res);
+ ufs->reg_unipro = devm_platform_ioremap_resource_byname(pdev, "unipro");
if (IS_ERR(ufs->reg_unipro)) {
dev_err(dev, "cannot ioremap for unipro register\n");
return PTR_ERR(ufs->reg_unipro);
}
/* ufs protector */
- res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ufsp");
- ufs->reg_ufsp = devm_ioremap_resource(dev, res);
+ ufs->reg_ufsp = devm_platform_ioremap_resource_byname(pdev, "ufsp");
if (IS_ERR(ufs->reg_ufsp)) {
dev_err(dev, "cannot ioremap for ufs protector register\n");
return PTR_ERR(ufs->reg_ufsp);
@@ -1252,7 +1248,8 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR |
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
- UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR,
+ UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 1755dd6b04ae..8df73bc2f8cb 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -10,9 +10,11 @@
#include <linux/bitfield.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
+#include <linux/reset.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>
#include "ufshcd.h"
@@ -43,6 +45,28 @@ static struct ufs_dev_fix ufs_mtk_dev_fixups[] = {
END_FIX
};
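+/* MT8192 hosts additionally support boosting the inline crypto engine clock. */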
+static const struct ufs_mtk_host_cfg ufs_mtk_mt8192_cfg = {
+ .caps = UFS_MTK_CAP_BOOST_CRYPT_ENGINE,
+};
+
+static const struct of_device_id ufs_mtk_of_match[] = {
+ {
+ .compatible = "mediatek,mt8183-ufshci",
+ },
+ {
+ .compatible = "mediatek,mt8192-ufshci",
+ .data = &ufs_mtk_mt8192_cfg
+ },
+ {},
+};
+
+static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ return (host->caps & UFS_MTK_CAP_BOOST_CRYPT_ENGINE);
+}
+
static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
{
u32 tmp;
@@ -91,16 +115,57 @@ static void ufs_mtk_crypto_enable(struct ufs_hba *hba)
}
}
+static void ufs_mtk_host_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
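+	/* Assert all three resets, hold briefly, then release them in reverse order. */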
+ reset_control_assert(host->hci_reset);
+ reset_control_assert(host->crypto_reset);
+ reset_control_assert(host->unipro_reset);
+
+ usleep_range(100, 110);
+
+ reset_control_deassert(host->unipro_reset);
+ reset_control_deassert(host->crypto_reset);
+ reset_control_deassert(host->hci_reset);
+}
+
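+/* Reset controls are optional: a missing control is logged and left NULL. */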
+static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
+ struct reset_control **rc,
+ char *str)
+{
+ *rc = devm_reset_control_get(hba->dev, str);
+ if (IS_ERR(*rc)) {
+ dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
+ str, PTR_ERR(*rc));
+ *rc = NULL;
+ }
+}
+
+static void ufs_mtk_init_reset(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+ ufs_mtk_init_reset_control(hba, &host->hci_reset,
+ "hci_rst");
+ ufs_mtk_init_reset_control(hba, &host->unipro_reset,
+ "unipro_rst");
+ ufs_mtk_init_reset_control(hba, &host->crypto_reset,
+ "crypto_rst");
+}
+
static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
- if (host->unipro_lpm)
+ if (host->unipro_lpm) {
hba->vps->hba_enable_delay_us = 0;
- else
+ } else {
hba->vps->hba_enable_delay_us = 600;
+ ufs_mtk_host_reset(hba);
+ }
if (hba->caps & UFSHCD_CAP_CRYPTO)
ufs_mtk_crypto_enable(hba);
@@ -129,7 +194,10 @@ static int ufs_mtk_bind_mphy(struct ufs_hba *hba)
__func__, err);
} else if (IS_ERR(host->mphy)) {
err = PTR_ERR(host->mphy);
- dev_info(dev, "%s: PHY get failed %d\n", __func__, err);
+ if (err != -ENODEV) {
+ dev_info(dev, "%s: PHY get failed %d\n", __func__,
+ err);
+ }
}
if (err)
@@ -249,6 +317,144 @@ static void ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on)
host->mphy_powered_on = on;
}
+static int ufs_mtk_get_host_clk(struct device *dev, const char *name,
+ struct clk **clk_out)
+{
+ struct clk *clk;
+ int err = 0;
+
+ clk = devm_clk_get(dev, name);
+ if (IS_ERR(clk))
+ err = PTR_ERR(clk);
+ else
+ *clk_out = clk;
+
+ return err;
+}
+
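+/*
+ * Switch the crypto engine clock between its low-power and performance
+ * parents, raising the Vcore supply before selecting the performance
+ * parent and dropping it back to the regulator minimum afterwards.
+ */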
+static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct regulator *reg;
+ int volt, ret;
+
+ if (!ufs_mtk_is_boost_crypt_enabled(hba))
+ return;
+
+ cfg = host->crypt;
+ volt = cfg->vcore_volt;
+ reg = cfg->reg_vcore;
+
+ ret = clk_prepare_enable(cfg->clk_crypt_mux);
+ if (ret) {
+ dev_info(hba->dev, "clk_prepare_enable(): %d\n",
+ ret);
+ return;
+ }
+
+ if (boost) {
+ ret = regulator_set_voltage(reg, volt, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to %d\n", volt);
+ goto out;
+ }
+
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_perf);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_perf\n");
+ regulator_set_voltage(reg, 0, INT_MAX);
+ goto out;
+ }
+ } else {
+ ret = clk_set_parent(cfg->clk_crypt_mux,
+ cfg->clk_crypt_lp);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set clk_crypt_lp\n");
+ goto out;
+ }
+
+ ret = regulator_set_voltage(reg, 0, INT_MAX);
+ if (ret) {
+ dev_info(hba->dev,
+ "failed to set vcore to MIN\n");
+ }
+ }
+out:
+ clk_disable_unprepare(cfg->clk_crypt_mux);
+}
+
+static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name,
+ struct clk **clk)
+{
+ int ret;
+
+ ret = ufs_mtk_get_host_clk(hba->dev, name, clk);
+ if (ret) {
+ dev_info(hba->dev, "%s: failed to get %s: %d", __func__,
+ name, ret);
+ }
+
+ return ret;
+}
+
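+/*
+ * Resolve the boost-crypt capability: it stays enabled only if the
+ * dvfsrc-vcore regulator, the boost-crypt-vcore-min DT property and
+ * all three crypto engine clocks are available.
+ */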
+static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
+{
+ struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+ struct ufs_mtk_crypt_cfg *cfg;
+ struct device *dev = hba->dev;
+ struct regulator *reg;
+ u32 volt;
+
+ host->caps = host->cfg->caps;
+
+ if (!ufs_mtk_is_boost_crypt_enabled(hba))
+ return;
+
+ host->crypt = devm_kzalloc(dev, sizeof(*(host->crypt)),
+ GFP_KERNEL);
+ if (!host->crypt)
+ goto disable_caps;
+
+ reg = devm_regulator_get_optional(dev, "dvfsrc-vcore");
+ if (IS_ERR(reg)) {
+ dev_info(dev, "failed to get dvfsrc-vcore: %ld",
+ PTR_ERR(reg));
+ goto disable_caps;
+ }
+
+ if (of_property_read_u32(dev->of_node, "boost-crypt-vcore-min",
+ &volt)) {
+ dev_info(dev, "failed to get boost-crypt-vcore-min");
+ goto disable_caps;
+ }
+
+ cfg = host->crypt;
+ if (ufs_mtk_init_host_clk(hba, "crypt_mux",
+ &cfg->clk_crypt_mux))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_lp",
+ &cfg->clk_crypt_lp))
+ goto disable_caps;
+
+ if (ufs_mtk_init_host_clk(hba, "crypt_perf",
+ &cfg->clk_crypt_perf))
+ goto disable_caps;
+
+ cfg->reg_vcore = reg;
+ cfg->vcore_volt = volt;
+ dev_info(dev, "caps: boost-crypt");
+ return;
+
+disable_caps:
+ host->caps &= ~UFS_MTK_CAP_BOOST_CRYPT_ENGINE;
+}
+
/**
* ufs_mtk_setup_clocks - enables/disable clocks
* @hba: host controller instance
@@ -291,12 +497,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
}
if (clk_pwr_off) {
+ ufs_mtk_boost_crypt(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
ufs_mtk_mphy_power_on(hba, on);
}
} else if (on && status == POST_CHANGE) {
ufs_mtk_mphy_power_on(hba, on);
ufs_mtk_setup_ref_clk(hba, on);
+ ufs_mtk_boost_crypt(hba, on);
}
return ret;
@@ -314,8 +522,9 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
*/
static int ufs_mtk_init(struct ufs_hba *hba)
{
- struct ufs_mtk_host *host;
+ const struct of_device_id *id;
struct device *dev = hba->dev;
+ struct ufs_mtk_host *host;
int err = 0;
host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -328,10 +537,24 @@ static int ufs_mtk_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
+ /* Get host capability and platform data */
+ id = of_match_device(ufs_mtk_of_match, dev);
+ if (!id) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (id->data) {
+ host->cfg = (struct ufs_mtk_host_cfg *)id->data;
+ ufs_mtk_init_host_caps(hba);
+ }
+
err = ufs_mtk_bind_mphy(hba);
if (err)
goto out_variant_clear;
+ ufs_mtk_init_reset(hba);
+
/* Enable runtime autosuspend */
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
@@ -416,7 +639,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
return ret;
}
-static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
+static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, bool lpm)
{
int ret;
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -424,8 +647,14 @@ static int ufs_mtk_unipro_set_pm(struct ufs_hba *hba, u32 lpm)
ret = ufshcd_dme_set(hba,
UIC_ARG_MIB_SEL(VS_UNIPROPOWERDOWNCONTROL, 0),
lpm);
- if (!ret)
+ if (!ret || !lpm) {
+ /*
+		 * Forcibly set non-LPM mode if the UIC command failed, so
+		 * that the default hba_enable_delay_us value is used when
+		 * re-enabling the host.
+ */
host->unipro_lpm = lpm;
+ }
return ret;
}
@@ -435,7 +664,9 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
int ret;
u32 tmp;
- ufs_mtk_unipro_set_pm(hba, 0);
+ ret = ufs_mtk_unipro_set_pm(hba, false);
+ if (ret)
+ return ret;
/*
* Setting PA_Local_TX_LCC_Enable to 0 before link startup
@@ -543,7 +774,7 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
if (err)
return err;
- err = ufs_mtk_unipro_set_pm(hba, 0);
+ err = ufs_mtk_unipro_set_pm(hba, false);
if (err)
return err;
@@ -564,10 +795,10 @@ static int ufs_mtk_link_set_lpm(struct ufs_hba *hba)
{
int err;
- err = ufs_mtk_unipro_set_pm(hba, 1);
+ err = ufs_mtk_unipro_set_pm(hba, true);
if (err) {
/* Resume UniPro state for following error recovery */
- ufs_mtk_unipro_set_pm(hba, 0);
+ ufs_mtk_unipro_set_pm(hba, false);
return err;
}
@@ -669,22 +900,16 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
{
- struct ufs_dev_info *dev_info = &hba->dev_info;
- u16 mid = dev_info->wmanufacturerid;
-
ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
-
- if (mid == UFS_VENDOR_SAMSUNG)
- hba->dev_quirks &= ~UFS_DEVICE_QUIRK_HOST_PA_TACTIVATE;
}
-/**
+/*
* struct ufs_hba_mtk_vops - UFS MTK specific variant operations
*
* The variant operations configure the necessary controller and PHY
* handshake during initialization.
*/
-static struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
+static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.name = "mediatek.ufshci",
.init = ufs_mtk_init,
.setup_clocks = ufs_mtk_setup_clocks,
@@ -733,11 +958,6 @@ static int ufs_mtk_remove(struct platform_device *pdev)
return 0;
}
-static const struct of_device_id ufs_mtk_of_match[] = {
- { .compatible = "mediatek,mt8183-ufshci"},
- {},
-};
-
static const struct dev_pm_ops ufs_mtk_pm_ops = {
.suspend = ufshcd_pltfrm_suspend,
.resume = ufshcd_pltfrm_resume,
diff --git a/drivers/scsi/ufs/ufs-mediatek.h b/drivers/scsi/ufs/ufs-mediatek.h
index 8ed24d5fcff9..2b6a1312c9bc 100644
--- a/drivers/scsi/ufs/ufs-mediatek.h
+++ b/drivers/scsi/ufs/ufs-mediatek.h
@@ -33,8 +33,8 @@
/*
* Vendor specific pre-defined parameters
*/
-#define UFS_MTK_LIMIT_NUM_LANES_RX 1
-#define UFS_MTK_LIMIT_NUM_LANES_TX 1
+#define UFS_MTK_LIMIT_NUM_LANES_RX 2
+#define UFS_MTK_LIMIT_NUM_LANES_TX 2
#define UFS_MTK_LIMIT_HSGEAR_RX UFS_HS_G3
#define UFS_MTK_LIMIT_HSGEAR_TX UFS_HS_G3
#define UFS_MTK_LIMIT_PWMGEAR_RX UFS_PWM_G4
@@ -89,9 +89,34 @@ enum {
TX_CLK_GATE_EN = 3,
};
+/*
+ * Host capability
+ */
+enum ufs_mtk_host_caps {
+ UFS_MTK_CAP_BOOST_CRYPT_ENGINE = 1 << 0,
+};
+
+struct ufs_mtk_crypt_cfg {
+ struct regulator *reg_vcore;
+ struct clk *clk_crypt_perf;
+ struct clk *clk_crypt_mux;
+ struct clk *clk_crypt_lp;
+ int vcore_volt;
+};
+
+struct ufs_mtk_host_cfg {
+ enum ufs_mtk_host_caps caps;
+};
+
struct ufs_mtk_host {
struct ufs_hba *hba;
struct phy *mphy;
+ struct ufs_mtk_host_cfg *cfg;
+ struct ufs_mtk_crypt_cfg *crypt;
+ enum ufs_mtk_host_caps caps;
+ struct reset_control *hci_reset;
+ struct reset_control *unipro_reset;
+ struct reset_control *crypto_reset;
bool mphy_powered_on;
bool unipro_lpm;
bool ref_clk_enabled;
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d0d75527830e..f9d6ef356540 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -621,218 +621,6 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
return 0;
}
-#ifdef CONFIG_MSM_BUS_SCALING
-static int ufs_qcom_get_bus_vote(struct ufs_qcom_host *host,
- const char *speed_mode)
-{
- struct device *dev = host->hba->dev;
- struct device_node *np = dev->of_node;
- int err;
- const char *key = "qcom,bus-vector-names";
-
- if (!speed_mode) {
- err = -EINVAL;
- goto out;
- }
-
- if (host->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
- err = of_property_match_string(np, key, "MAX");
- else
- err = of_property_match_string(np, key, speed_mode);
-
-out:
- if (err < 0)
- dev_err(dev, "%s: Invalid %s mode %d\n",
- __func__, speed_mode, err);
- return err;
-}
-
-static void ufs_qcom_get_speed_mode(struct ufs_pa_layer_attr *p, char *result)
-{
- int gear = max_t(u32, p->gear_rx, p->gear_tx);
- int lanes = max_t(u32, p->lane_rx, p->lane_tx);
- int pwr;
-
- /* default to PWM Gear 1, Lane 1 if power mode is not initialized */
- if (!gear)
- gear = 1;
-
- if (!lanes)
- lanes = 1;
-
- if (!p->pwr_rx && !p->pwr_tx) {
- pwr = SLOWAUTO_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "MIN");
- } else if (p->pwr_rx == FAST_MODE || p->pwr_rx == FASTAUTO_MODE ||
- p->pwr_tx == FAST_MODE || p->pwr_tx == FASTAUTO_MODE) {
- pwr = FAST_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_R%s_G%d_L%d", "HS",
- p->hs_rate == PA_HS_MODE_B ? "B" : "A", gear, lanes);
- } else {
- pwr = SLOW_MODE;
- snprintf(result, BUS_VECTOR_NAME_LEN, "%s_G%d_L%d",
- "PWM", gear, lanes);
- }
-}
-
-static int __ufs_qcom_set_bus_vote(struct ufs_qcom_host *host, int vote)
-{
- int err = 0;
-
- if (vote != host->bus_vote.curr_vote) {
- err = msm_bus_scale_client_update_request(
- host->bus_vote.client_handle, vote);
- if (err) {
- dev_err(host->hba->dev,
- "%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
- __func__, host->bus_vote.client_handle,
- vote, err);
- goto out;
- }
-
- host->bus_vote.curr_vote = vote;
- }
-out:
- return err;
-}
-
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- int vote;
- int err = 0;
- char mode[BUS_VECTOR_NAME_LEN];
-
- ufs_qcom_get_speed_mode(&host->dev_req_params, mode);
-
- vote = ufs_qcom_get_bus_vote(host, mode);
- if (vote >= 0)
- err = __ufs_qcom_set_bus_vote(host, vote);
- else
- err = vote;
-
- if (err)
- dev_err(host->hba->dev, "%s: failed %d\n", __func__, err);
- else
- host->bus_vote.saved_vote = vote;
- return err;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_hba *hba, bool on)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- int vote, err;
-
- /*
- * In case ufs_qcom_init() is not yet done, simply ignore.
- * This ufs_qcom_set_bus_vote() shall be called from
- * ufs_qcom_init() after init is done.
- */
- if (!host)
- return 0;
-
- if (on) {
- vote = host->bus_vote.saved_vote;
- if (vote == host->bus_vote.min_bw_vote)
- ufs_qcom_update_bus_bw_vote(host);
- } else {
- vote = host->bus_vote.min_bw_vote;
- }
-
- err = __ufs_qcom_set_bus_vote(host, vote);
- if (err)
- dev_err(hba->dev, "%s: set bus vote failed %d\n",
- __func__, err);
-
- return err;
-}
-
-static ssize_t
-show_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
- return snprintf(buf, PAGE_SIZE, "%u\n",
- host->bus_vote.is_max_bw_needed);
-}
-
-static ssize_t
-store_ufs_to_mem_max_bus_bw(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct ufs_hba *hba = dev_get_drvdata(dev);
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- uint32_t value;
-
- if (!kstrtou32(buf, 0, &value)) {
- host->bus_vote.is_max_bw_needed = !!value;
- ufs_qcom_update_bus_bw_vote(host);
- }
-
- return count;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- int err;
- struct msm_bus_scale_pdata *bus_pdata;
- struct device *dev = host->hba->dev;
- struct platform_device *pdev = to_platform_device(dev);
- struct device_node *np = dev->of_node;
-
- bus_pdata = msm_bus_cl_get_pdata(pdev);
- if (!bus_pdata) {
- dev_err(dev, "%s: failed to get bus vectors\n", __func__);
- err = -ENODATA;
- goto out;
- }
-
- err = of_property_count_strings(np, "qcom,bus-vector-names");
- if (err < 0 || err != bus_pdata->num_usecases) {
- dev_err(dev, "%s: qcom,bus-vector-names not specified correctly %d\n",
- __func__, err);
- goto out;
- }
-
- host->bus_vote.client_handle = msm_bus_scale_register_client(bus_pdata);
- if (!host->bus_vote.client_handle) {
- dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
- __func__);
- err = -EFAULT;
- goto out;
- }
-
- /* cache the vote index for minimum and maximum bandwidth */
- host->bus_vote.min_bw_vote = ufs_qcom_get_bus_vote(host, "MIN");
- host->bus_vote.max_bw_vote = ufs_qcom_get_bus_vote(host, "MAX");
-
- host->bus_vote.max_bus_bw.show = show_ufs_to_mem_max_bus_bw;
- host->bus_vote.max_bus_bw.store = store_ufs_to_mem_max_bus_bw;
- sysfs_attr_init(&host->bus_vote.max_bus_bw.attr);
- host->bus_vote.max_bus_bw.attr.name = "max_bus_bw";
- host->bus_vote.max_bus_bw.attr.mode = S_IRUGO | S_IWUSR;
- err = device_create_file(dev, &host->bus_vote.max_bus_bw);
-out:
- return err;
-}
-#else /* CONFIG_MSM_BUS_SCALING */
-static int ufs_qcom_update_bus_bw_vote(struct ufs_qcom_host *host)
-{
- return 0;
-}
-
-static int ufs_qcom_set_bus_vote(struct ufs_hba *host, bool on)
-{
- return 0;
-}
-
-static int ufs_qcom_bus_register(struct ufs_qcom_host *host)
-{
- return 0;
-}
-#endif /* CONFIG_MSM_BUS_SCALING */
-
static void ufs_qcom_dev_ref_clk_ctrl(struct ufs_qcom_host *host, bool enable)
{
if (host->dev_ref_clk_ctrl_mmio &&
@@ -976,7 +764,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
/* cache the power mode parameters to use internally */
memcpy(&host->dev_req_params,
dev_req_params, sizeof(*dev_req_params));
- ufs_qcom_update_bus_bw_vote(host);
/* disable the device ref clock if entered PWM mode */
if (ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -1107,9 +894,7 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
switch (status) {
case PRE_CHANGE:
- if (on) {
- err = ufs_qcom_set_bus_vote(hba, true);
- } else {
+ if (!on) {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
@@ -1121,8 +906,6 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* enable the device ref clock for HS mode*/
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
- } else {
- err = ufs_qcom_set_bus_vote(hba, false);
}
break;
}
@@ -1264,10 +1047,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out_variant_clear;
}
- err = ufs_qcom_bus_register(host);
- if (err)
- goto out_variant_clear;
-
ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
&host->hw_ver.minor, &host->hw_ver.step);
@@ -1307,7 +1086,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
if (err)
goto out_variant_clear;
- ufs_qcom_set_bus_vote(hba, true);
ufs_qcom_setup_clocks(hba, true, POST_CHANGE);
if (hba->dev->id < MAX_UFS_QCOM_HOSTS)
@@ -1446,7 +1224,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
dev_req_params->pwr_rx,
dev_req_params->hs_rate,
false);
- ufs_qcom_update_bus_bw_vote(host);
}
out:
@@ -1614,9 +1391,6 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
*/
}
mask <<= offset;
-
- pm_runtime_get_sync(host->hba->dev);
- ufshcd_hold(host->hba, false);
ufshcd_rmwl(host->hba, TEST_BUS_SEL,
(u32)host->testbus.select_major << 19,
REG_UFS_CFG1);
@@ -1629,50 +1403,16 @@ int ufs_qcom_testbus_config(struct ufs_qcom_host *host)
* committed before returning.
*/
mb();
- ufshcd_release(host->hba);
- pm_runtime_put_sync(host->hba->dev);
return 0;
}
-static void ufs_qcom_testbus_read(struct ufs_hba *hba)
-{
- ufshcd_dump_regs(hba, UFS_TEST_BUS, 4, "UFS_TEST_BUS ");
-}
-
-static void ufs_qcom_print_unipro_testbus(struct ufs_hba *hba)
-{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- u32 *testbus = NULL;
- int i, nminor = 256, testbus_len = nminor * sizeof(u32);
-
- testbus = kmalloc(testbus_len, GFP_KERNEL);
- if (!testbus)
- return;
-
- host->testbus.select_major = TSTBUS_UNIPRO;
- for (i = 0; i < nminor; i++) {
- host->testbus.select_minor = i;
- ufs_qcom_testbus_config(host);
- testbus[i] = ufshcd_readl(hba, UFS_TEST_BUS);
- }
- print_hex_dump(KERN_ERR, "UNIPRO_TEST_BUS ", DUMP_PREFIX_OFFSET,
- 16, 4, testbus, testbus_len, false);
- kfree(testbus);
-}
-
static void ufs_qcom_dump_dbg_regs(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, REG_UFS_SYS1CLK_1US, 16 * 4,
"HCI Vendor Specific Registers ");
- /* sleep a bit intermittently as we are dumping too much data */
ufs_qcom_print_hw_debug_reg_all(hba, NULL, ufs_qcom_dump_regs_wrapper);
- udelay(1000);
- ufs_qcom_testbus_read(hba);
- udelay(1000);
- ufs_qcom_print_unipro_testbus(hba);
- udelay(1000);
}
/**
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 97247d17e258..3f4922743b3e 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -174,16 +174,6 @@ static inline void ufs_qcom_deassert_reset(struct ufs_hba *hba)
mb();
}
-struct ufs_qcom_bus_vote {
- uint32_t client_handle;
- uint32_t curr_vote;
- int min_bw_vote;
- int max_bw_vote;
- int saved_vote;
- bool is_max_bw_needed;
- struct device_attribute max_bus_bw;
-};
-
/* Host controller hardware version: major.minor.step */
struct ufs_hw_version {
u16 step;
@@ -216,7 +206,6 @@ struct ufs_qcom_host {
struct phy *generic_phy;
struct ufs_hba *hba;
- struct ufs_qcom_bus_vote bus_vote;
struct ufs_pa_layer_attr dev_req_params;
struct clk *rx_l0_sync_clk;
struct clk *tx_l0_sync_clk;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 2d71d232a69d..bdcd27faa054 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -16,6 +16,7 @@ static const char *ufschd_uic_link_state_to_string(
case UIC_LINK_OFF_STATE: return "OFF";
case UIC_LINK_ACTIVE_STATE: return "ACTIVE";
case UIC_LINK_HIBERN8_STATE: return "HIBERN8";
+ case UIC_LINK_BROKEN_STATE: return "BROKEN";
default: return "UNKNOWN";
}
}
@@ -145,12 +146,19 @@ static u32 ufshcd_us_to_ahit(unsigned int timer)
static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
+ u32 ahit;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
- return snprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(hba->ahit));
+ pm_runtime_get_sync(hba->dev);
+ ufshcd_hold(hba, false);
+ ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ ufshcd_release(hba);
+ pm_runtime_put_sync(hba->dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(ahit));
}
static ssize_t auto_hibern8_store(struct device *dev,
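
The show handler above now reads the live AHIT register rather than the cached
hba->ahit. For reference, the AHIT layout (per UFSHCI: a 10-bit idle timer value
plus a 3-bit decimal scale) can be decoded as in this self-contained sketch; the
mask names are local to the example and only assumed to match the spec:

	#include <linux/bitfield.h>	/* FIELD_GET(), GENMASK() */

	#define AH8_TIMER_MASK	GENMASK(9, 0)	/* idle timer value */
	#define AH8_SCALE_MASK	GENMASK(12, 10)	/* timer unit = 10^scale us */

	/* Convert an AHIT register value to microseconds */
	static u32 ahit_to_us(u32 ahit)
	{
		u32 timer = FIELD_GET(AH8_TIMER_MASK, ahit);
		u32 scale = FIELD_GET(AH8_SCALE_MASK, ahit);

		while (scale--)
			timer *= 10;	/* 1us, 10us, 100us, 1ms, ... */

		return timer;
	}
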
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index d2edbd960ebf..07310b12a5dc 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -59,7 +59,7 @@ static int ufshcd_crypto_keyslot_program(struct blk_keyslot_manager *ksm,
u8 data_unit_mask = key->crypto_cfg.data_unit_size / 512;
int i;
int cap_idx = -1;
- union ufs_crypto_cfg_entry cfg = { 0 };
+ union ufs_crypto_cfg_entry cfg = {};
int err;
BUILD_BUG_ON(UFS_CRYPTO_KEY_SIZE_INVALID != 0);
@@ -100,7 +100,7 @@ static int ufshcd_clear_keyslot(struct ufs_hba *hba, int slot)
* Clear the crypto cfg on the device. Clearing CFGE
* might not be sufficient, so just clear the entire cfg.
*/
- union ufs_crypto_cfg_entry cfg = { 0 };
+ union ufs_crypto_cfg_entry cfg = {};
return ufshcd_program_key(hba, &cfg, slot);
}
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index 5a95a7bfbab0..df3a564c3e33 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -13,6 +13,14 @@
#include "ufshcd.h"
#include <linux/pci.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_qos.h>
+#include <linux/debugfs.h>
+
+struct intel_host {
+ u32 active_ltr;
+ u32 idle_ltr;
+ struct dentry *debugfs_root;
+};
static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
@@ -44,20 +52,134 @@ static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
return err;
}
+#define INTEL_ACTIVELTR 0x804
+#define INTEL_IDLELTR 0x808
+
+#define INTEL_LTR_REQ BIT(15)
+#define INTEL_LTR_SCALE_MASK GENMASK(11, 10)
+#define INTEL_LTR_SCALE_1US (2 << 10)
+#define INTEL_LTR_SCALE_32US (3 << 10)
+#define INTEL_LTR_VALUE_MASK GENMASK(9, 0)
+
+static void intel_cache_ltr(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+ host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
+}
+
+static void intel_ltr_set(struct device *dev, s32 val)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ struct intel_host *host = ufshcd_get_variant(hba);
+ u32 ltr;
+
+ pm_runtime_get_sync(dev);
+
+ /*
+	 * Program the latency tolerance (LTR) according to what has been
+	 * asked by the PM QoS layer, or disable it if we were passed a
+	 * negative value or PM_QOS_LATENCY_ANY.
+ */
+ ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
+
+ if (val == PM_QOS_LATENCY_ANY || val < 0) {
+ ltr &= ~INTEL_LTR_REQ;
+ } else {
+ ltr |= INTEL_LTR_REQ;
+ ltr &= ~INTEL_LTR_SCALE_MASK;
+ ltr &= ~INTEL_LTR_VALUE_MASK;
+
+ if (val > INTEL_LTR_VALUE_MASK) {
+ val >>= 5;
+ if (val > INTEL_LTR_VALUE_MASK)
+ val = INTEL_LTR_VALUE_MASK;
+ ltr |= INTEL_LTR_SCALE_32US | val;
+ } else {
+ ltr |= INTEL_LTR_SCALE_1US | val;
+ }
+ }
+
+ if (ltr == host->active_ltr)
+ goto out;
+
+ writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
+ writel(ltr, hba->mmio_base + INTEL_IDLELTR);
+
+	/* Cache the values into the intel_host structure */
+ intel_cache_ltr(hba);
+out:
+ pm_runtime_put(dev);
+}
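
The encoding above packs a latency requirement into a 10-bit value field plus a
scale field: requests up to INTEL_LTR_VALUE_MASK are expressed in 1 us units;
anything larger is divided by 32 (val >>= 5), clamped, and expressed in 32 us
units. A condensed, illustration-only restatement of that encoding step:

	/* Encode a latency request (us) the way intel_ltr_set() does */
	static u32 intel_ltr_encode(s32 val)
	{
		u32 ltr = INTEL_LTR_REQ;

		if (val > INTEL_LTR_VALUE_MASK) {
			val >>= 5;	/* fall back to 32 us granularity */
			if (val > INTEL_LTR_VALUE_MASK)
				val = INTEL_LTR_VALUE_MASK;
			ltr |= INTEL_LTR_SCALE_32US | val;
		} else {
			ltr |= INTEL_LTR_SCALE_1US | val;
		}

		return ltr;
	}
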
+
+static void intel_ltr_expose(struct device *dev)
+{
+ dev->power.set_latency_tolerance = intel_ltr_set;
+ dev_pm_qos_expose_latency_tolerance(dev);
+}
+
+static void intel_ltr_hide(struct device *dev)
+{
+ dev_pm_qos_hide_latency_tolerance(dev);
+ dev->power.set_latency_tolerance = NULL;
+}
+
+static void intel_add_debugfs(struct ufs_hba *hba)
+{
+ struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ intel_cache_ltr(hba);
+
+ host->debugfs_root = dir;
+ debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
+ debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
+}
+
+static void intel_remove_debugfs(struct ufs_hba *hba)
+{
+ struct intel_host *host = ufshcd_get_variant(hba);
+
+ debugfs_remove_recursive(host->debugfs_root);
+}
+
+static int ufs_intel_common_init(struct ufs_hba *hba)
+{
+ struct intel_host *host;
+
+ host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
+ if (!host)
+ return -ENOMEM;
+ ufshcd_set_variant(hba, host);
+ intel_ltr_expose(hba->dev);
+ intel_add_debugfs(hba);
+ return 0;
+}
+
+static void ufs_intel_common_exit(struct ufs_hba *hba)
+{
+ intel_remove_debugfs(hba);
+ intel_ltr_hide(hba->dev);
+}
+
static int ufs_intel_ehl_init(struct ufs_hba *hba)
{
hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
- return 0;
+ return ufs_intel_common_init(hba);
}
static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
.name = "intel-pci",
+ .init = ufs_intel_common_init,
+ .exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
};
static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
.name = "intel-pci",
.init = ufs_intel_ehl_init,
+ .exit = ufs_intel_common_exit,
.link_startup_notify = ufs_intel_link_startup_notify,
};
@@ -162,6 +284,8 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
+ pci_set_drvdata(pdev, hba);
+
hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;
err = ufshcd_init(hba, mmio_base, pdev->irq);
@@ -171,7 +295,6 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return err;
}
- pci_set_drvdata(pdev, hba);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_allow(&pdev->dev);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1d157ff58d81..b8f573a02713 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -15,6 +15,7 @@
#include <linux/of.h>
#include <linux/bitfield.h>
#include <linux/blk-pm.h>
+#include <linux/blkdev.h>
#include "ufshcd.h"
#include "ufs_quirks.h"
#include "unipro.h"
@@ -35,8 +36,8 @@
/* NOP OUT retries waiting for NOP IN response */
#define NOP_OUT_RETRIES 10
-/* Timeout after 30 msecs if NOP OUT hangs without response */
-#define NOP_OUT_TIMEOUT 30 /* msecs */
+/* Timeout after 50 msecs if NOP OUT hangs without response */
+#define NOP_OUT_TIMEOUT 50 /* msecs */
/* Query request retries */
#define QUERY_REQ_RETRIES 3
@@ -73,6 +74,9 @@
/* Default value of wait time before gating device ref clock */
#define UFSHCD_REF_CLK_GATING_WAIT_US 0xFF /* microsecs */
+/* Polling time to wait for fDeviceInit */
+#define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */
+
#define ufshcd_toggle_vreg(_dev, _vreg, _on) \
({ \
int _ret; \
@@ -125,7 +129,8 @@ enum {
UFSHCD_STATE_RESET,
UFSHCD_STATE_ERROR,
UFSHCD_STATE_OPERATIONAL,
- UFSHCD_STATE_EH_SCHEDULED,
+ UFSHCD_STATE_EH_SCHEDULED_FATAL,
+ UFSHCD_STATE_EH_SCHEDULED_NON_FATAL,
};
/* UFSHCD error handling flags */
@@ -141,6 +146,7 @@ enum {
UFSHCD_UIC_NL_ERROR = (1 << 3), /* Network layer error */
UFSHCD_UIC_TL_ERROR = (1 << 4), /* Transport Layer error */
UFSHCD_UIC_DME_ERROR = (1 << 5), /* DME error */
+ UFSHCD_UIC_PA_GENERIC_ERROR = (1 << 6), /* Generic PA error */
};
#define ufshcd_set_eh_in_progress(h) \
@@ -228,6 +234,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
static int ufshcd_change_power_mode(struct ufs_hba *hba,
struct ufs_pa_layer_attr *pwr_mode);
+static void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+ struct ufs_vreg *vreg);
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
@@ -411,15 +423,6 @@ static void ufshcd_print_err_hist(struct ufs_hba *hba,
static void ufshcd_print_host_regs(struct ufs_hba *hba)
{
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: ");
- dev_err(hba->dev, "hba->ufs_version = 0x%x, hba->capabilities = 0x%x\n",
- hba->ufs_version, hba->capabilities);
- dev_err(hba->dev,
- "hba->outstanding_reqs = 0x%x, hba->outstanding_tasks = 0x%x\n",
- (u32)hba->outstanding_reqs, (u32)hba->outstanding_tasks);
- dev_err(hba->dev,
- "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt = %d\n",
- ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
- hba->ufs_stats.hibern8_exit_cnt);
ufshcd_print_err_hist(hba, &hba->ufs_stats.pa_err, "pa_err");
ufshcd_print_err_hist(hba, &hba->ufs_stats.dl_err, "dl_err");
@@ -438,8 +441,6 @@ static void ufshcd_print_host_regs(struct ufs_hba *hba)
ufshcd_print_err_hist(hba, &hba->ufs_stats.host_reset, "host_reset");
ufshcd_print_err_hist(hba, &hba->ufs_stats.task_abort, "task_abort");
- ufshcd_print_clk_freqs(hba);
-
ufshcd_vops_dbg_register_dump(hba);
}
@@ -474,6 +475,9 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt)
prdt_length = le16_to_cpu(
lrbp->utr_descriptor_ptr->prd_table_length);
+ if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN)
+ prdt_length /= sizeof(struct ufshcd_sg_entry);
+
dev_err(hba->dev,
"UPIU[%d] - PRDT - %d entries phys@0x%llx\n",
tag, prdt_length,
@@ -499,6 +503,8 @@ static void ufshcd_print_tmrs(struct ufs_hba *hba, unsigned long bitmap)
static void ufshcd_print_host_state(struct ufs_hba *hba)
{
+ struct scsi_device *sdev_ufs = hba->sdev_ufs_device;
+
dev_err(hba->dev, "UFS Host state=%d\n", hba->ufshcd_state);
dev_err(hba->dev, "outstanding reqs=0x%lx tasks=0x%lx\n",
hba->outstanding_reqs, hba->outstanding_tasks);
@@ -511,12 +517,24 @@ static void ufshcd_print_host_state(struct ufs_hba *hba)
dev_err(hba->dev, "Auto BKOPS=%d, Host self-block=%d\n",
hba->auto_bkops_enabled, hba->host->host_self_blocked);
dev_err(hba->dev, "Clk gate=%d\n", hba->clk_gating.state);
+ dev_err(hba->dev,
+ "last_hibern8_exit_tstamp at %lld us, hibern8_exit_cnt=%d\n",
+ ktime_to_us(hba->ufs_stats.last_hibern8_exit_tstamp),
+ hba->ufs_stats.hibern8_exit_cnt);
+ dev_err(hba->dev, "last intr at %lld us, last intr status=0x%x\n",
+ ktime_to_us(hba->ufs_stats.last_intr_ts),
+ hba->ufs_stats.last_intr_status);
dev_err(hba->dev, "error handling flags=0x%x, req. abort count=%d\n",
hba->eh_flags, hba->req_abort_count);
- dev_err(hba->dev, "Host capabilities=0x%x, caps=0x%x\n",
- hba->capabilities, hba->caps);
+ dev_err(hba->dev, "hba->ufs_version=0x%x, Host capabilities=0x%x, caps=0x%x\n",
+ hba->ufs_version, hba->capabilities, hba->caps);
dev_err(hba->dev, "quirks=0x%x, dev. quirks=0x%x\n", hba->quirks,
hba->dev_quirks);
+ if (sdev_ufs)
+ dev_err(hba->dev, "UFS dev info: %.8s %.16s rev %.4s\n",
+ sdev_ufs->vendor, sdev_ufs->model, sdev_ufs->rev);
+
+ ufshcd_print_clk_freqs(hba);
}
/**
@@ -1569,11 +1587,6 @@ int ufshcd_hold(struct ufs_hba *hba, bool async)
spin_lock_irqsave(hba->host->host_lock, flags);
hba->clk_gating.active_reqs++;
- if (ufshcd_eh_in_progress(hba)) {
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- return 0;
- }
-
start:
switch (hba->clk_gating.state) {
case CLKS_ON:
@@ -1653,6 +1666,7 @@ static void ufshcd_gate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.gate_work.work);
unsigned long flags;
+ int ret;
spin_lock_irqsave(hba->host->host_lock, flags);
/*
@@ -1679,8 +1693,11 @@ static void ufshcd_gate_work(struct work_struct *work)
/* put the link into hibern8 mode before turning off clocks */
if (ufshcd_can_hibern8_during_gating(hba)) {
- if (ufshcd_uic_hibern8_enter(hba)) {
+ ret = ufshcd_uic_hibern8_enter(hba);
+ if (ret) {
hba->clk_gating.state = CLKS_ON;
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
goto out;
@@ -1725,11 +1742,10 @@ static void __ufshcd_release(struct ufs_hba *hba)
hba->clk_gating.active_reqs--;
- if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
- || hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
- || ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks
- || hba->active_uic_cmd || hba->uic_async_done
- || ufshcd_eh_in_progress(hba))
+ if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended ||
+ hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+ ufshcd_any_tag_in_use(hba) || hba->outstanding_tasks ||
+ hba->active_uic_cmd || hba->uic_async_done)
return;
hba->clk_gating.state = REQ_CLKS_OFF;
@@ -1842,6 +1858,8 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
if (!ufshcd_is_clkgating_allowed(hba))
return;
+ hba->clk_gating.state = CLKS_ON;
+
hba->clk_gating.delay_ms = 150;
INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
@@ -2394,12 +2412,13 @@ static inline void ufshcd_prepare_utp_nop_upiu(struct ufshcd_lrb *lrbp)
}
/**
- * ufshcd_comp_devman_upiu - UFS Protocol Information Unit(UPIU)
+ * ufshcd_compose_devman_upiu - UFS Protocol Information Unit(UPIU)
* for Device Management Purposes
* @hba: per adapter instance
* @lrbp: pointer to local reference block
*/
-static int ufshcd_comp_devman_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
+static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
+ struct ufshcd_lrb *lrbp)
{
u8 upiu_flags;
int ret = 0;
@@ -2509,34 +2528,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (!down_read_trylock(&hba->clk_scaling_lock))
return SCSI_MLQUEUE_HOST_BUSY;
- spin_lock_irqsave(hba->host->host_lock, flags);
- switch (hba->ufshcd_state) {
- case UFSHCD_STATE_OPERATIONAL:
- break;
- case UFSHCD_STATE_EH_SCHEDULED:
- case UFSHCD_STATE_RESET:
- err = SCSI_MLQUEUE_HOST_BUSY;
- goto out_unlock;
- case UFSHCD_STATE_ERROR:
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- default:
- dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
- __func__, hba->ufshcd_state);
- set_host_byte(cmd, DID_BAD_TARGET);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
-
- /* if error handling is in progress, don't issue commands */
- if (ufshcd_eh_in_progress(hba)) {
- set_host_byte(cmd, DID_ERROR);
- cmd->scsi_done(cmd);
- goto out_unlock;
- }
- spin_unlock_irqrestore(hba->host->host_lock, flags);
-
hba->req_abort_count = 0;
err = ufshcd_hold(hba, true);
@@ -2544,7 +2535,8 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
err = SCSI_MLQUEUE_HOST_BUSY;
goto out;
}
- WARN_ON(hba->clk_gating.state != CLKS_ON);
+ WARN_ON(ufshcd_is_clkgating_allowed(hba) &&
+ (hba->clk_gating.state != CLKS_ON));
lrbp = &hba->lrb[tag];
@@ -2571,11 +2563,51 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
- /* issue command to the controller */
spin_lock_irqsave(hba->host->host_lock, flags);
+ switch (hba->ufshcd_state) {
+ case UFSHCD_STATE_OPERATIONAL:
+ case UFSHCD_STATE_EH_SCHEDULED_NON_FATAL:
+ break;
+ case UFSHCD_STATE_EH_SCHEDULED_FATAL:
+ /*
+	 * pm_runtime_get_sync() is used at the error handling preparation
+	 * stage. If a scsi cmd, e.g. the SSU cmd, is sent from the hba's
+	 * PM ops, it can never finish if we let the SCSI layer keep
+	 * retrying it, which gets the err handler stuck forever. Neither
+	 * can we let the scsi cmd pass through, because UFS is in a bad
+	 * state and the scsi cmd may eventually time out, which would
+	 * block the err handler for too long. So, just fail scsi cmds
+	 * sent from PM ops; the err handler can recover PM errors anyway.
+ */
+ if (hba->pm_op_in_progress) {
+ hba->force_reset = true;
+ set_host_byte(cmd, DID_BAD_TARGET);
+ goto out_compl_cmd;
+ }
+ fallthrough;
+ case UFSHCD_STATE_RESET:
+ err = SCSI_MLQUEUE_HOST_BUSY;
+ goto out_compl_cmd;
+ case UFSHCD_STATE_ERROR:
+ set_host_byte(cmd, DID_ERROR);
+ goto out_compl_cmd;
+ default:
+ dev_WARN_ONCE(hba->dev, 1, "%s: invalid state %d\n",
+ __func__, hba->ufshcd_state);
+ set_host_byte(cmd, DID_BAD_TARGET);
+ goto out_compl_cmd;
+ }
ufshcd_send_command(hba, tag);
-out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
+ goto out;
+
+out_compl_cmd:
+ scsi_dma_unmap(lrbp->cmd);
+ lrbp->cmd = NULL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_release(hba);
+ if (!err)
+ cmd->scsi_done(cmd);
out:
up_read(&hba->clk_scaling_lock);
return err;
@@ -2593,7 +2625,7 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba,
ufshcd_prepare_lrbp_crypto(NULL, lrbp);
hba->dev_cmd.type = cmd_type;
- return ufshcd_comp_devman_upiu(hba, lrbp);
+ return ufshcd_compose_devman_upiu(hba, lrbp);
}
static int
@@ -3747,6 +3779,10 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ufshcd_add_delay_before_dme_cmd(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ufshcd_is_link_broken(hba)) {
+ ret = -ENOLINK;
+ goto out_unlock;
+ }
hba->uic_async_done = &uic_async_done;
if (ufshcd_readl(hba, REG_INTERRUPT_ENABLE) & UIC_COMMAND_COMPL) {
ufshcd_disable_intr(hba, UIC_COMMAND_COMPL);
@@ -3794,6 +3830,11 @@ out:
hba->uic_async_done = NULL;
if (reenable_intr)
ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+ if (ret) {
+ ufshcd_set_link_broken(hba);
+ ufshcd_schedule_eh_work(hba);
+ }
+out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
mutex_unlock(&hba->uic_cmd_mutex);
@@ -3863,7 +3904,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
}
EXPORT_SYMBOL_GPL(ufshcd_link_recovery);
-static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
{
int ret;
struct uic_command uic_cmd = {0};
@@ -3876,45 +3917,16 @@ static int __ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
trace_ufshcd_profile_hibern8(dev_name(hba->dev), "enter",
ktime_to_us(ktime_sub(ktime_get(), start)), ret);
- if (ret) {
- int err;
-
+ if (ret)
dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d\n",
__func__, ret);
-
- /*
- * If link recovery fails then return error code returned from
- * ufshcd_link_recovery().
- * If link recovery succeeds then return -EAGAIN to attempt
- * hibern8 enter retry again.
- */
- err = ufshcd_link_recovery(hba);
- if (err) {
- dev_err(hba->dev, "%s: link recovery failed", __func__);
- ret = err;
- } else {
- ret = -EAGAIN;
- }
- } else
+ else
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_ENTER,
POST_CHANGE);
return ret;
}
-static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
-{
- int ret = 0, retries;
-
- for (retries = UIC_HIBERN8_ENTER_RETRIES; retries > 0; retries--) {
- ret = __ufshcd_uic_hibern8_enter(hba);
- if (!ret)
- goto out;
- }
-out:
- return ret;
-}
-
int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
{
struct uic_command uic_cmd = {0};
@@ -3931,7 +3943,6 @@ int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
if (ret) {
dev_err(hba->dev, "%s: hibern8 exit failed. ret = %d\n",
__func__, ret);
- ret = ufshcd_link_recovery(hba);
} else {
ufshcd_vops_hibern8_notify(hba, UIC_CMD_DME_HIBER_EXIT,
POST_CHANGE);
@@ -3972,7 +3983,7 @@ void ufshcd_auto_hibern8_enable(struct ufs_hba *hba)
{
unsigned long flags;
- if (!ufshcd_is_auto_hibern8_supported(hba) || !hba->ahit)
+ if (!ufshcd_is_auto_hibern8_supported(hba))
return;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -4065,7 +4076,8 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
int ret;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ if (!hba->force_pmc &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
@@ -4175,9 +4187,9 @@ EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
*/
static int ufshcd_complete_dev_init(struct ufs_hba *hba)
{
- int i;
int err;
bool flag_res = true;
+ ktime_t timeout;
err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
QUERY_FLAG_IDN_FDEVICEINIT, 0, NULL);
@@ -4188,20 +4200,26 @@ static int ufshcd_complete_dev_init(struct ufs_hba *hba)
goto out;
}
- /* poll for max. 1000 iterations for fDeviceInit flag to clear */
- for (i = 0; i < 1000 && !err && flag_res; i++)
- err = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_READ_FLAG,
- QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
+	/* Poll for the fDeviceInit flag to be cleared */
+ timeout = ktime_add_ms(ktime_get(), FDEVICEINIT_COMPL_TIMEOUT);
+ do {
+ err = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+ QUERY_FLAG_IDN_FDEVICEINIT, 0, &flag_res);
+ if (!flag_res)
+ break;
+ usleep_range(5000, 10000);
+ } while (ktime_before(ktime_get(), timeout));
- if (err)
+ if (err) {
dev_err(hba->dev,
- "%s reading fDeviceInit flag failed with error %d\n",
- __func__, err);
- else if (flag_res)
+ "%s reading fDeviceInit flag failed with error %d\n",
+ __func__, err);
+ } else if (flag_res) {
dev_err(hba->dev,
- "%s fDeviceInit was not cleared by the device\n",
- __func__);
-
+ "%s fDeviceInit was not cleared by the device\n",
+ __func__);
+ err = -EBUSY;
+ }
out:
return err;
}
@@ -4258,10 +4276,8 @@ int ufshcd_make_hba_operational(struct ufs_hba *hba)
dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
- goto out;
}
-out:
return err;
}
EXPORT_SYMBOL_GPL(ufshcd_make_hba_operational);
@@ -4495,6 +4511,8 @@ link_startup:
if (ret)
goto out;
+	/* Clear UECPA once, in case a LINERESET happened during LINK_STARTUP */
+ ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
ret = ufshcd_make_hba_operational(hba);
out:
if (ret) {
@@ -5299,6 +5317,9 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
+ if (hba->quirks & UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL)
+ return;
+
if (enable)
ufshcd_wb_buf_flush_enable(hba);
else
@@ -5544,16 +5565,129 @@ static bool ufshcd_quirk_dl_nac_errors(struct ufs_hba *hba)
hba->saved_err &= ~UIC_ERROR;
/* clear NAC error */
hba->saved_uic_err &= ~UFSHCD_UIC_DL_NAC_RECEIVED_ERROR;
- if (!hba->saved_uic_err) {
+ if (!hba->saved_uic_err)
err_handling = false;
- goto out;
- }
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
return err_handling;
}
+/* host lock must be held before calling this func */
+static inline bool ufshcd_is_saved_err_fatal(struct ufs_hba *hba)
+{
+ return (hba->saved_uic_err & UFSHCD_UIC_DL_PA_INIT_ERROR) ||
+ (hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK));
+}
+
+/* host lock must be held before calling this func */
+static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
+{
+ /* handle fatal errors only when link is not in error state */
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR) {
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba))
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_FATAL;
+ else
+ hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED_NON_FATAL;
+ queue_work(hba->eh_wq, &hba->eh_work);
+ }
+}
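
Callers of ufshcd_schedule_eh_work() must hold the host lock and update the saved
error state (or force_reset) beforehand; a typical call site in this series looks
like:

	spin_lock_irqsave(hba->host->host_lock, flags);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
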
+
+static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
+{
+ pm_runtime_get_sync(hba->dev);
+ if (pm_runtime_suspended(hba->dev)) {
+ /*
+ * Don't assume anything of pm_runtime_get_sync(), if
+ * resume fails, irq and clocks can be OFF, and powers
+ * can be OFF or in LPM.
+ */
+ ufshcd_setup_hba_vreg(hba, true);
+ ufshcd_enable_irq(hba);
+ ufshcd_setup_vreg(hba, true);
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+ ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+ ufshcd_hold(hba, false);
+ if (!ufshcd_is_clkgating_allowed(hba))
+ ufshcd_setup_clocks(hba, true);
+ ufshcd_release(hba);
+ ufshcd_vops_resume(hba, UFS_RUNTIME_PM);
+ } else {
+ ufshcd_hold(hba, false);
+ if (hba->clk_scaling.is_allowed) {
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
+ ufshcd_suspend_clkscaling(hba);
+ }
+ }
+}
+
+static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
+{
+ ufshcd_release(hba);
+ if (hba->clk_scaling.is_allowed)
+ ufshcd_resume_clkscaling(hba);
+ pm_runtime_put(hba->dev);
+}
+
+static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
+{
+ return (hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ (!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
+ ufshcd_is_link_broken(hba))));
+}
+
+#ifdef CONFIG_PM
+static void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+ struct Scsi_Host *shost = hba->host;
+ struct scsi_device *sdev;
+ struct request_queue *q;
+ int ret;
+
+ /*
+	 * Set the RPM status of the hba device to RPM_ACTIVE;
+	 * this also clears its runtime error.
+ */
+ ret = pm_runtime_set_active(hba->dev);
+ /*
+	 * If the hba device had a runtime error, we also need to resume
+	 * the scsi devices under the hba, in case any of them failed to
+	 * resume due to the hba runtime resume failure. This unblocks
+	 * blk_queue_enter() if there are bios waiting inside it.
+ */
+ if (!ret) {
+ shost_for_each_device(sdev, shost) {
+ q = sdev->request_queue;
+ if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
+ q->rpm_status == RPM_SUSPENDING))
+ pm_request_resume(q->dev);
+ }
+ }
+}
+#else
+static inline void ufshcd_recover_pm_error(struct ufs_hba *hba)
+{
+}
+#endif
+
+static bool ufshcd_is_pwr_mode_restore_needed(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->pwr_info;
+ u32 mode;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+
+ if (pwr_info->pwr_rx != ((mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK))
+ return true;
+
+ if (pwr_info->pwr_tx != (mode & PWRMODE_MASK))
+ return true;
+
+ return false;
+}
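
This check leans on the UniPro PA_PWRMODE encoding, where the TX power mode
occupies the low bits and the RX power mode sits PWRMODE_RX_OFFSET bits above it,
each PWRMODE_MASK wide. A small illustrative decode using the same constants:

	/* Split a PA_PWRMODE attribute into RX and TX power modes */
	static void pa_pwrmode_decode(u32 mode, u32 *rx, u32 *tx)
	{
		*rx = (mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK;
		*tx = mode & PWRMODE_MASK;
	}
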
+
/**
* ufshcd_err_handler - handle UFS errors that require s/w attention
* @work: pointer to work structure
@@ -5562,23 +5696,36 @@ static void ufshcd_err_handler(struct work_struct *work)
{
struct ufs_hba *hba;
unsigned long flags;
- u32 err_xfer = 0;
- u32 err_tm = 0;
- int err = 0;
+ bool err_xfer = false;
+ bool err_tm = false;
+ int err = 0, pmc_err;
int tag;
- bool needs_reset = false;
+ bool needs_reset = false, needs_restore = false;
hba = container_of(work, struct ufs_hba, eh_work);
- pm_runtime_get_sync(hba->dev);
- ufshcd_hold(hba, false);
-
spin_lock_irqsave(hba->host->host_lock, flags);
- if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ if (ufshcd_err_handling_should_stop(hba)) {
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ return;
+ }
+ ufshcd_set_eh_in_progress(hba);
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_err_handling_prepare(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ ufshcd_scsi_block_requests(hba);
+ /*
+	 * A full reset and restore might have happened while preparation
+	 * was in progress; double-check whether we should stop.
+ */
+ if (ufshcd_err_handling_should_stop(hba)) {
+ if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
goto out;
-
+ }
hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
/* Complete requests that have door-bell cleared by h/w */
ufshcd_complete_requests(hba);
@@ -5590,30 +5737,61 @@ static void ufshcd_err_handler(struct work_struct *work)
/* release the lock as ufshcd_quirk_dl_nac_errors() may sleep */
ret = ufshcd_quirk_dl_nac_errors(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!ret)
+ if (!ret && !hba->force_reset && ufshcd_is_link_active(hba))
goto skip_err_handling;
}
- if ((hba->saved_err & INT_FATAL_ERRORS) ||
- (hba->saved_err & UFSHCD_UIC_HIBERN8_MASK) ||
+
+ if (hba->force_reset || ufshcd_is_link_broken(hba) ||
+ ufshcd_is_saved_err_fatal(hba) ||
((hba->saved_err & UIC_ERROR) &&
- (hba->saved_uic_err & (UFSHCD_UIC_DL_PA_INIT_ERROR |
- UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
- UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
+ (hba->saved_uic_err & (UFSHCD_UIC_DL_NAC_RECEIVED_ERROR |
+ UFSHCD_UIC_DL_TCx_REPLAY_ERROR))))
needs_reset = true;
+ if ((hba->saved_err & (INT_FATAL_ERRORS | UFSHCD_UIC_HIBERN8_MASK)) ||
+ (hba->saved_uic_err &&
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
+ bool pr_prdt = !!(hba->saved_err & SYSTEM_BUS_FATAL_ERROR);
+
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ ufshcd_print_host_state(hba);
+ ufshcd_print_pwr_info(hba);
+ ufshcd_print_host_regs(hba);
+ ufshcd_print_tmrs(hba, hba->outstanding_tasks);
+ ufshcd_print_trs(hba, hba->outstanding_reqs, pr_prdt);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
+
/*
	 * If a host reset is required, skip forcefully clearing the
	 * pending transfers because they will be cleared during the
	 * host reset and restore.
*/
if (needs_reset)
- goto skip_pending_xfer_clear;
+ goto do_reset;
+ /*
+	 * If a LINERESET was caught, UFS might have been put into PWM mode;
+	 * check whether a power mode restore is needed.
+ */
+ if (hba->saved_uic_err & UFSHCD_UIC_PA_GENERIC_ERROR) {
+ hba->saved_uic_err &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
+ if (!hba->saved_uic_err)
+ hba->saved_err &= ~UIC_ERROR;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ if (ufshcd_is_pwr_mode_restore_needed(hba))
+ needs_restore = true;
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (!hba->saved_err && !needs_restore)
+ goto skip_err_handling;
+ }
+
+ hba->silence_err_logs = true;
/* release lock as clear command might sleep */
spin_unlock_irqrestore(hba->host->host_lock, flags);
/* Clear pending transfer requests */
for_each_set_bit(tag, &hba->outstanding_reqs, hba->nutrs) {
- if (ufshcd_clear_cmd(hba, tag)) {
+ if (ufshcd_try_to_abort_task(hba, tag)) {
err_xfer = true;
goto lock_skip_pending_xfer_clear;
}
@@ -5632,11 +5810,38 @@ lock_skip_pending_xfer_clear:
/* Complete the requests that are cleared by s/w */
ufshcd_complete_requests(hba);
+ hba->silence_err_logs = false;
- if (err_xfer || err_tm)
+ if (err_xfer || err_tm) {
needs_reset = true;
+ goto do_reset;
+ }
-skip_pending_xfer_clear:
+ /*
+	 * After all reqs and tasks are cleared from the doorbell,
+	 * it is safe to restore the power mode.
+ */
+ if (needs_restore) {
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+ /*
+ * Hold the scaling lock just in case dev cmds
+ * are sent via bsg and/or sysfs.
+ */
+ down_write(&hba->clk_scaling_lock);
+ hba->force_pmc = true;
+ pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+ if (pmc_err) {
+ needs_reset = true;
+ dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
+ __func__, pmc_err);
+ }
+ hba->force_pmc = false;
+ ufshcd_print_pwr_info(hba);
+ up_write(&hba->clk_scaling_lock);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ }
+
+do_reset:
/* Fatal errors need reset */
if (needs_reset) {
unsigned long max_doorbells = (1UL << hba->nutrs) - 1;
@@ -5652,38 +5857,31 @@ skip_pending_xfer_clear:
__ufshcd_transfer_req_compl(hba,
(1UL << (hba->nutrs - 1)));
+ hba->force_reset = false;
spin_unlock_irqrestore(hba->host->host_lock, flags);
err = ufshcd_reset_and_restore(hba);
+ if (err)
+ dev_err(hba->dev, "%s: reset and restore failed with err %d\n",
+ __func__, err);
+ else
+ ufshcd_recover_pm_error(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (err) {
- dev_err(hba->dev, "%s: reset and restore failed\n",
- __func__);
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- /*
- * Inform scsi mid-layer that we did reset and allow to handle
- * Unit Attention properly.
- */
- scsi_report_bus_reset(hba->host, 0);
- hba->saved_err = 0;
- hba->saved_uic_err = 0;
}
skip_err_handling:
if (!needs_reset) {
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
if (hba->saved_err || hba->saved_uic_err)
dev_err_ratelimited(hba->dev, "%s: exit: saved_err 0x%x saved_uic_err 0x%x",
__func__, hba->saved_err, hba->saved_uic_err);
}
- ufshcd_clear_eh_in_progress(hba);
-
out:
+ ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
- ufshcd_release(hba);
- pm_runtime_put_sync(hba->dev);
+ ufshcd_err_handling_unprepare(hba);
}
/**
@@ -5699,17 +5897,33 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
u32 reg;
irqreturn_t retval = IRQ_NONE;
- /* PHY layer lane error */
+ /* PHY layer error */
reg = ufshcd_readl(hba, REG_UIC_ERROR_CODE_PHY_ADAPTER_LAYER);
- /* Ignore LINERESET indication, as this is not an error */
if ((reg & UIC_PHY_ADAPTER_LAYER_ERROR) &&
- (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)) {
+ (reg & UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK)) {
+ ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
/*
* To know whether this error is fatal or not, DB timeout
* must be checked but this error is handled separately.
*/
- dev_dbg(hba->dev, "%s: UIC Lane error reported\n", __func__);
- ufshcd_update_reg_hist(&hba->ufs_stats.pa_err, reg);
+ if (reg & UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK)
+ dev_dbg(hba->dev, "%s: UIC Lane error reported\n",
+ __func__);
+
+ /* Got a LINERESET indication. */
+ if (reg & UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR) {
+ struct uic_command *cmd = NULL;
+
+ hba->uic_error |= UFSHCD_UIC_PA_GENERIC_ERROR;
+ if (hba->uic_async_done && hba->active_uic_cmd)
+ cmd = hba->active_uic_cmd;
+ /*
+			 * Ignore the LINERESET during a power mode change
+			 * operation via the DME_SET command.
+ */
+ if (cmd && (cmd->command == UIC_CMD_DME_SET))
+ hba->uic_error &= ~UFSHCD_UIC_PA_GENERIC_ERROR;
+ }
retval |= IRQ_HANDLED;
}
@@ -5813,6 +6027,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->errors, ufshcd_get_upmcrs(hba));
ufshcd_update_reg_hist(&hba->ufs_stats.auto_hibern8_err,
hba->errors);
+ ufshcd_set_link_broken(hba);
queue_eh_work = true;
}
@@ -5824,30 +6039,18 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba)
hba->saved_err |= hba->errors;
hba->saved_uic_err |= hba->uic_error;
- /* handle fatal errors only when link is functional */
- if (hba->ufshcd_state == UFSHCD_STATE_OPERATIONAL) {
- /* block commands from scsi mid-layer */
- ufshcd_scsi_block_requests(hba);
-
- hba->ufshcd_state = UFSHCD_STATE_EH_SCHEDULED;
-
- /* dump controller state before resetting */
- if (hba->saved_err & (INT_FATAL_ERRORS | UIC_ERROR)) {
- bool pr_prdt = !!(hba->saved_err &
- SYSTEM_BUS_FATAL_ERROR);
-
- dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
+ /* dump controller state before resetting */
+ if ((hba->saved_err & (INT_FATAL_ERRORS)) ||
+ (hba->saved_uic_err &&
+ (hba->saved_uic_err != UFSHCD_UIC_PA_GENERIC_ERROR))) {
+ dev_err(hba->dev, "%s: saved_err 0x%x saved_uic_err 0x%x\n",
__func__, hba->saved_err,
hba->saved_uic_err);
-
- ufshcd_print_host_regs(hba);
- ufshcd_print_pwr_info(hba);
- ufshcd_print_tmrs(hba, hba->outstanding_tasks);
- ufshcd_print_trs(hba, hba->outstanding_reqs,
- pr_prdt);
- }
- schedule_work(&hba->eh_work);
+ ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
+ "host_regs: ");
+ ufshcd_print_pwr_info(hba);
}
+ ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
}
/*
@@ -5951,6 +6154,8 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
spin_lock(hba->host->host_lock);
intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
+ hba->ufs_stats.last_intr_status = intr_status;
+ hba->ufs_stats.last_intr_ts = ktime_get();
/*
* There could be max of hba->nutrs reqs in flight and in worst case
@@ -6383,7 +6588,7 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
}
/**
- * ufshcd_abort - abort a specific command
+ * ufshcd_try_to_abort_task - abort a specific task
- * @cmd: SCSI command pointer
+ * @hba: Pointer to adapter instance
+ * @tag: Task tag/index of the command to be aborted
*
 * Abort the pending command in the device by sending UFS_ABORT_TASK task management
@@ -6392,6 +6597,80 @@ static void ufshcd_set_req_abort_skip(struct ufs_hba *hba, unsigned long bitmap)
* issued. To avoid that, first issue UFS_QUERY_TASK to check if the command is
* really issued and then try to abort it.
*
+ * Returns zero on success, non-zero on failure
+ */
+static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag)
+{
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
+ int err = 0;
+ int poll_cnt;
+ u8 resp = 0xF;
+ u32 reg;
+
+ for (poll_cnt = 100; poll_cnt; poll_cnt--) {
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_QUERY_TASK, &resp);
+ if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
+ /* cmd pending in the device */
+ dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
+ __func__, tag);
+ break;
+ } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ /*
+ * cmd not pending in the device, check if it is
+ * in transition.
+ */
+ dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
+ __func__, tag);
+ reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ if (reg & (1 << tag)) {
+ /* sleep for max. 200us to stabilize */
+ usleep_range(100, 200);
+ continue;
+ }
+ /* command completed already */
+ dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
+ __func__, tag);
+ goto out;
+ } else {
+ dev_err(hba->dev,
+ "%s: no response from device. tag = %d, err %d\n",
+ __func__, tag, err);
+ if (!err)
+ err = resp; /* service response error */
+ goto out;
+ }
+ }
+
+ if (!poll_cnt) {
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
+ UFS_ABORT_TASK, &resp);
+ if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
+ if (!err) {
+ err = resp; /* service response error */
+ dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
+ __func__, tag, err);
+ }
+ goto out;
+ }
+
+ err = ufshcd_clear_cmd(hba, tag);
+ if (err)
+ dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
+ __func__, tag, err);
+
+out:
+ return err;
+}
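
Both the SCSI eh_abort handler and the error handler now funnel through this
helper; completing the aborted request is left to the caller, roughly as in the
ufshcd_abort() rework below:

	err = ufshcd_try_to_abort_task(hba, tag);
	if (!err) {
		spin_lock_irqsave(host->host_lock, flags);
		__ufshcd_transfer_req_compl(hba, (1UL << tag));
		spin_unlock_irqrestore(host->host_lock, flags);
	}
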
+
+/**
+ * ufshcd_abort - scsi host template eh_abort_handler callback
+ * @cmd: SCSI command pointer
+ *
* Returns SUCCESS/FAILED
*/
static int ufshcd_abort(struct scsi_cmnd *cmd)
@@ -6401,8 +6680,6 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
unsigned long flags;
unsigned int tag;
int err = 0;
- int poll_cnt;
- u8 resp = 0xF;
struct ufshcd_lrb *lrbp;
u32 reg;
@@ -6467,79 +6744,17 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
}
/* Skip task abort in case previous aborts failed and report failure */
- if (lrbp->req_abort_skip) {
+ if (lrbp->req_abort_skip)
err = -EIO;
- goto out;
- }
-
- for (poll_cnt = 100; poll_cnt; poll_cnt--) {
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_QUERY_TASK, &resp);
- if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_SUCCEEDED) {
- /* cmd pending in the device */
- dev_err(hba->dev, "%s: cmd pending in the device. tag = %d\n",
- __func__, tag);
- break;
- } else if (!err && resp == UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- /*
- * cmd not pending in the device, check if it is
- * in transition.
- */
- dev_err(hba->dev, "%s: cmd at tag %d not pending in the device.\n",
- __func__, tag);
- reg = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- if (reg & (1 << tag)) {
- /* sleep for max. 200us to stabilize */
- usleep_range(100, 200);
- continue;
- }
- /* command completed already */
- dev_err(hba->dev, "%s: cmd at tag %d successfully cleared from DB.\n",
- __func__, tag);
- goto cleanup;
- } else {
- dev_err(hba->dev,
- "%s: no response from device. tag = %d, err %d\n",
- __func__, tag, err);
- if (!err)
- err = resp; /* service response error */
- goto out;
- }
- }
-
- if (!poll_cnt) {
- err = -EBUSY;
- goto out;
- }
-
- err = ufshcd_issue_tm_cmd(hba, lrbp->lun, lrbp->task_tag,
- UFS_ABORT_TASK, &resp);
- if (err || resp != UPIU_TASK_MANAGEMENT_FUNC_COMPL) {
- if (!err) {
- err = resp; /* service response error */
- dev_err(hba->dev, "%s: issued. tag = %d, err %d\n",
- __func__, tag, err);
- }
- goto out;
- }
-
- err = ufshcd_clear_cmd(hba, tag);
- if (err) {
- dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n",
- __func__, tag, err);
- goto out;
- }
+ else
+ err = ufshcd_try_to_abort_task(hba, tag);
+ if (!err) {
cleanup:
- scsi_dma_unmap(cmd);
-
- spin_lock_irqsave(host->host_lock, flags);
- ufshcd_outstanding_req_clear(hba, tag);
- hba->lrb[tag].cmd = NULL;
- spin_unlock_irqrestore(host->host_lock, flags);
-
+ spin_lock_irqsave(host->host_lock, flags);
+ __ufshcd_transfer_req_compl(hba, (1UL << tag));
+ spin_unlock_irqrestore(host->host_lock, flags);
out:
- if (!err) {
err = SUCCESS;
} else {
dev_err(hba->dev, "%s: failed with err %d\n", __func__, err);
@@ -6592,8 +6807,6 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
/* Establish the link again and restore the device */
err = ufshcd_probe_hba(hba, false);
- if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
- err = -EIO;
out:
if (err)
dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6612,9 +6825,23 @@ out:
*/
static int ufshcd_reset_and_restore(struct ufs_hba *hba)
{
+ u32 saved_err;
+ u32 saved_uic_err;
int err = 0;
+ unsigned long flags;
int retries = MAX_HOST_RESET_RETRIES;
+ /*
+	 * This is a fresh start; cache and clear the saved errors first,
+	 * in case new errors are generated during the reset and restore.
+ */
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ saved_err = hba->saved_err;
+ saved_uic_err = hba->saved_uic_err;
+ hba->saved_err = 0;
+ hba->saved_uic_err = 0;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
do {
/* Reset the attached device */
ufshcd_vops_device_reset(hba);
@@ -6622,6 +6849,18 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
err = ufshcd_host_reset_and_restore(hba);
} while (err && --retries);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ /*
+	 * Inform the scsi mid-layer that we did a reset and allow it to
+	 * handle Unit Attention properly.
+ */
+ scsi_report_bus_reset(hba->host, 0);
+ if (err) {
+ hba->saved_err |= saved_err;
+ hba->saved_uic_err |= saved_uic_err;
+ }
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
+
return err;
}
@@ -6633,48 +6872,25 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
*/
static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
- int err;
+ int err = SUCCESS;
unsigned long flags;
struct ufs_hba *hba;
hba = shost_priv(cmd->device->host);
- ufshcd_hold(hba, false);
- /*
- * Check if there is any race with fatal error handling.
- * If so, wait for it to complete. Even though fatal error
- * handling does reset and restore in some cases, don't assume
- * anything out of it. We are just avoiding race here.
- */
- do {
- spin_lock_irqsave(hba->host->host_lock, flags);
- if (!(work_pending(&hba->eh_work) ||
- hba->ufshcd_state == UFSHCD_STATE_RESET ||
- hba->ufshcd_state == UFSHCD_STATE_EH_SCHEDULED))
- break;
- spin_unlock_irqrestore(hba->host->host_lock, flags);
- dev_dbg(hba->dev, "%s: reset in progress\n", __func__);
- flush_work(&hba->eh_work);
- } while (1);
-
- hba->ufshcd_state = UFSHCD_STATE_RESET;
- ufshcd_set_eh_in_progress(hba);
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ hba->force_reset = true;
+ ufshcd_schedule_eh_work(hba);
+ dev_err(hba->dev, "%s: reset in progress - 1\n", __func__);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- err = ufshcd_reset_and_restore(hba);
+ flush_work(&hba->eh_work);
spin_lock_irqsave(hba->host->host_lock, flags);
- if (!err) {
- err = SUCCESS;
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
- } else {
+ if (hba->ufshcd_state == UFSHCD_STATE_ERROR)
err = FAILED;
- hba->ufshcd_state = UFSHCD_STATE_ERROR;
- }
- ufshcd_clear_eh_in_progress(hba);
spin_unlock_irqrestore(hba->host->host_lock, flags);
- ufshcd_release(hba);
return err;
}
@@ -7395,6 +7611,7 @@ out:
static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
{
int ret;
+ unsigned long flags;
ktime_t start = ktime_get();
ret = ufshcd_link_startup(hba);
@@ -7459,14 +7676,17 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool async)
*/
ufshcd_set_active_icc_lvl(hba);
- /* set the state as operational after switching to desired gear */
- hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
-
ufshcd_wb_config(hba);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
out:
+ spin_lock_irqsave(hba->host->host_lock, flags);
+ if (ret)
+ hba->ufshcd_state = UFSHCD_STATE_ERROR;
+ else if (hba->ufshcd_state == UFSHCD_STATE_RESET)
+ hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+ spin_unlock_irqrestore(hba->host->host_lock, flags);
trace_ufshcd_init(dev_name(hba->dev), ret,
ktime_to_us(ktime_sub(ktime_get(), start)),
@@ -7606,12 +7826,10 @@ static int ufshcd_config_vreg(struct device *dev,
if (vreg->min_uV && vreg->max_uV) {
min_uV = on ? vreg->min_uV : 0;
ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
- if (ret) {
+ if (ret)
dev_err(dev,
"%s: %s set voltage failed, err=%d\n",
__func__, name, ret);
- goto out;
- }
}
}
out:
@@ -7674,8 +7892,6 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
goto out;
ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
- if (ret)
- goto out;
out:
if (ret) {
@@ -7721,10 +7937,8 @@ static int ufshcd_init_vreg(struct ufs_hba *hba)
goto out;
ret = ufshcd_get_vreg(dev, info->vccq);
- if (ret)
- goto out;
-
- ret = ufshcd_get_vreg(dev, info->vccq2);
+ if (!ret)
+ ret = ufshcd_get_vreg(dev, info->vccq2);
out:
return ret;
}
@@ -7868,12 +8082,7 @@ static int ufshcd_variant_hba_init(struct ufs_hba *hba)
err = ufshcd_vops_setup_regulators(hba, true);
if (err)
- goto out_exit;
-
- goto out;
-
-out_exit:
- ufshcd_vops_exit(hba);
+ ufshcd_vops_exit(hba);
out:
if (err)
dev_err(hba->dev, "%s: variant %s init failed err %d\n",
@@ -8073,10 +8282,13 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
if (req_link_state == UIC_LINK_HIBERN8_STATE) {
ret = ufshcd_uic_hibern8_enter(hba);
- if (!ret)
+ if (!ret) {
ufshcd_set_link_hibern8(hba);
- else
+ } else {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
goto out;
+ }
}
/*
* If autobkops is enabled, link can't be turned off because
@@ -8092,8 +8304,11 @@ static int ufshcd_link_state_transition(struct ufs_hba *hba,
* unipro. But putting the link in hibern8 is much faster.
*/
ret = ufshcd_uic_hibern8_enter(hba);
- if (ret)
+ if (ret) {
+ dev_err(hba->dev, "%s: hibern8 enter failed %d\n",
+ __func__, ret);
goto out;
+ }
/*
* Change controller state to "reset state" which
* should also put the link in off/reset state
@@ -8331,8 +8546,11 @@ disable_clks:
/* If link is active, device ref_clk can't be switched off */
__ufshcd_setup_clocks(hba, false, true);
- hba->clk_gating.state = CLKS_OFF;
- trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
@@ -8410,10 +8628,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ufshcd_is_link_hibern8(hba)) {
ret = ufshcd_uic_hibern8_exit(hba);
- if (!ret)
+ if (!ret) {
ufshcd_set_link_active(hba);
- else
+ } else {
+ dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+ __func__, ret);
goto vendor_suspend;
+ }
} else if (ufshcd_is_link_off(hba)) {
/*
* A full initialization of the host and the device is
@@ -8472,6 +8693,11 @@ disable_irq_and_vops_clks:
if (hba->clk_scaling.is_allowed)
ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
+ if (ufshcd_is_clkgating_allowed(hba)) {
+ hba->clk_gating.state = CLKS_OFF;
+ trace_ufshcd_clk_gating(dev_name(hba->dev),
+ hba->clk_gating.state);
+ }
out:
hba->pm_op_in_progress = 0;
if (ret)
@@ -8782,6 +9008,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
int err;
struct Scsi_Host *host = hba->host;
struct device *dev = hba->dev;
+ char eh_wq_name[sizeof("ufs_eh_wq_00")];
if (!mmio_base) {
dev_err(hba->dev,
@@ -8843,6 +9070,15 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
hba->max_pwr_info.is_valid = false;
/* Initialize work queues */
+ snprintf(eh_wq_name, sizeof(eh_wq_name), "ufs_eh_wq_%d",
+ hba->host->host_no);
+ hba->eh_wq = create_singlethread_workqueue(eh_wq_name);
+ if (!hba->eh_wq) {
+ dev_err(hba->dev, "%s: failed to create eh workqueue\n",
+ __func__);
+ err = -ENOMEM;
+ goto out_disable;
+ }
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
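
The new per-host error-handler workqueue takes its name from a fixed-size buffer: sizeof("ufs_eh_wq_00") reserves exactly enough room for the prefix, a two-digit host number, and the terminating NUL, and snprintf() truncates rather than overflows if host_no ever needs more digits. create_singlethread_workqueue() itself is kernel API, so only the naming trick is shown below as a runnable userspace sketch:

#include <stdio.h>

int main(void)
{
	char name[sizeof("ufs_eh_wq_00")];	/* 13 bytes, NUL included */

	snprintf(name, sizeof(name), "ufs_eh_wq_%d", 7);
	printf("%s\n", name);		/* "ufs_eh_wq_7" */

	snprintf(name, sizeof(name), "ufs_eh_wq_%d", 123);
	printf("%s\n", name);		/* truncated to "ufs_eh_wq_12" */
	return 0;
}
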
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 363589c0bd37..47eb1430274c 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -90,6 +90,7 @@ enum uic_link_state {
UIC_LINK_OFF_STATE = 0, /* Link powered down or disabled */
UIC_LINK_ACTIVE_STATE = 1, /* Link is in Fast/Slow/Sleep state */
UIC_LINK_HIBERN8_STATE = 2, /* Link is in Hibernate state */
+ UIC_LINK_BROKEN_STATE = 3, /* Link is in broken state */
};
#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
@@ -97,11 +98,15 @@ enum uic_link_state {
UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_is_link_broken(hba) ((hba)->uic_link_state == \
+ UIC_LINK_BROKEN_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_broken(hba) ((hba)->uic_link_state = \
+ UIC_LINK_BROKEN_STATE)
#define ufshcd_set_ufs_dev_active(h) \
((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
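
UIC_LINK_BROKEN_STATE gives the error handler a way to mark a dead link so later suspend/resume paths can demand a full reset instead of a plain hibern8 exit. A stripped-down, compilable sketch of the state/macro pattern; the shortened names here are illustrative, not the real ufshcd identifiers:

#include <stdio.h>

enum uic_link_state { LINK_OFF, LINK_ACTIVE, LINK_HIBERN8, LINK_BROKEN };

struct hba { enum uic_link_state uic_link_state; };

#define is_link_broken(h)  ((h)->uic_link_state == LINK_BROKEN)
#define set_link_broken(h) ((h)->uic_link_state = LINK_BROKEN)

int main(void)
{
	struct hba hba = { .uic_link_state = LINK_ACTIVE };

	set_link_broken(&hba);	/* e.g. after a fatal PHY adapter error */
	if (is_link_broken(&hba))
		printf("link broken: schedule a full host reset\n");
	return 0;
}
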
@@ -409,6 +414,8 @@ struct ufs_err_reg_hist {
/**
* struct ufs_stats - keeps usage/err statistics
+ * @last_intr_status: record the last interrupt status.
+ * @last_intr_ts: record the last interrupt timestamp.
* @hibern8_exit_cnt: Counter to keep track of number of exits,
* reset this after link-startup.
* @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
@@ -428,6 +435,9 @@ struct ufs_err_reg_hist {
* @tsk_abort: tracks task abort events
*/
struct ufs_stats {
+ u32 last_intr_status;
+ ktime_t last_intr_ts;
+
u32 hibern8_exit_cnt;
ktime_t last_hibern8_exit_tstamp;
@@ -526,6 +536,12 @@ enum ufshcd_quirks {
* auto-hibernate capability but it doesn't work.
*/
UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8 = 1 << 11,
+
+ /*
+ * This quirk requires skipping manual flush control for WriteBooster
+ */
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+
};
enum ufshcd_caps {
@@ -617,12 +633,15 @@ struct ufs_hba_variant_params {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
+ * @eh_wq: Workqueue on which eh_work runs
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
* @errors: HBA errors
* @uic_error: UFS interconnect layer error status
* @saved_err: sticky error mask
* @saved_uic_err: sticky UIC error mask
+ * @force_reset: flag to force eh_work to perform a full reset
+ * @force_pmc: flag to force a power mode change
* @silence_err_logs: flag to silence error logs
* @dev_cmd: ufs device management command information
* @last_dme_cmd_tstamp: time stamp of the last completed DME command
@@ -711,6 +730,7 @@ struct ufs_hba {
bool is_powered;
/* Work Queues */
+ struct workqueue_struct *eh_wq;
struct work_struct eh_work;
struct work_struct eeh_work;
@@ -720,6 +740,8 @@ struct ufs_hba {
u32 saved_err;
u32 saved_uic_err;
struct ufs_stats ufs_stats;
+ bool force_reset;
+ bool force_pmc;
bool silence_err_logs;
/* Device management request data */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index ba31b090f784..6795e1f0e8f8 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -171,6 +171,7 @@ enum {
#define UIC_PHY_ADAPTER_LAYER_ERROR 0x80000000
#define UIC_PHY_ADAPTER_LAYER_ERROR_CODE_MASK 0x1F
#define UIC_PHY_ADAPTER_LAYER_LANE_ERR_MASK 0xF
+#define UIC_PHY_ADAPTER_LAYER_GENERIC_ERROR 0x10
/* UECDL - Host UIC Error Code Data Link Layer 3Ch */
#define UIC_DATA_LINK_LAYER_ERROR 0x80000000
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 4ee64782fd48..f6b52ce36de6 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -205,6 +205,9 @@ enum {
UNCHANGED = 7,
};
+#define PWRMODE_MASK 0xF
+#define PWRMODE_RX_OFFSET 4
+
/* PA TX/RX Frequency Series */
enum {
PA_HS_MODE_A = 1,
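
PWRMODE_MASK and PWRMODE_RX_OFFSET describe how the PA_PWRMode attribute packs two values into one byte: the RX power mode in the upper nibble and the TX mode in the lower. A small sketch of the pack/unpack arithmetic, using values from the power-mode enum just above:

#include <stdio.h>

#define PWRMODE_MASK      0xF
#define PWRMODE_RX_OFFSET 4

int main(void)
{
	unsigned int rx = 1, tx = 4;	/* FAST_MODE rx, FASTAUTO_MODE tx */
	unsigned int mode = (rx << PWRMODE_RX_OFFSET) | (tx & PWRMODE_MASK);

	printf("packed=0x%02x rx=%u tx=%u\n", mode,
	       (mode >> PWRMODE_RX_OFFSET) & PWRMODE_MASK,
	       mode & PWRMODE_MASK);	/* packed=0x14 rx=1 tx=4 */
	return 0;
}
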
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 3b1803432090..b9c86a7e3b97 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -284,7 +284,12 @@ static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
switch (virtio32_to_cpu(vscsi->vdev, event->reason)) {
case VIRTIO_SCSI_EVT_RESET_RESCAN:
- scsi_add_device(shost, 0, target, lun);
+ if (lun == 0) {
+ scsi_scan_target(&shost->shost_gendev, 0, target,
+ SCAN_WILD_CARD, SCSI_SCAN_INITIAL);
+ } else {
+ scsi_add_device(shost, 0, target, lun);
+ }
break;
case VIRTIO_SCSI_EVT_RESET_REMOVED:
sdev = scsi_device_lookup(shost, 0, target, lun);
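
The virtio-scsi change widens a rescan event on LUN 0 into a wildcard scan of the whole target: LUN 0 is the one LUN guaranteed to exist, so a hypervisor can report against it to announce that other LUNs appeared. A sketch of just that dispatch decision, with hypothetical print-only stand-ins for scsi_scan_target()/scsi_add_device():

#include <stdio.h>

/* Hypothetical stand-ins for the kernel scan primitives. */
static void scan_whole_target(unsigned int tgt)
{
	printf("wildcard scan of every LUN on target %u\n", tgt);
}

static void add_single_device(unsigned int tgt, unsigned int lun)
{
	printf("probe only target %u, LUN %u\n", tgt, lun);
}

static void handle_rescan(unsigned int target, unsigned int lun)
{
	if (lun == 0)
		scan_whole_target(target);	/* other LUNs may be new */
	else
		add_single_device(target, lun);
}

int main(void)
{
	handle_rescan(2, 0);
	handle_rescan(2, 5);
	return 0;
}
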
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7b56fe9f1062..f77e5eee6b80 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4529,7 +4529,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_closesession(conn);
break;
}
- ret = 0;
break;
case ISCSI_LOGOUT_REASON_CLOSE_CONNECTION:
if (conn->cid == cmd->logout_cid) {
@@ -4540,7 +4539,6 @@ int iscsit_logout_post_handler(
iscsit_logout_post_handler_samecid(conn);
break;
}
- ret = 0;
} else {
switch (cmd->logout_response) {
case ISCSI_LOGOUT_SUCCESS:
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 9b7592350502..4c960b66de8e 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -177,9 +177,12 @@ struct tcmu_cmd {
/* Can't use se_cmd when cleaning up expired cmds, because if
cmd has been completed then accessing se_cmd is off limits */
uint32_t dbi_cnt;
+ uint32_t dbi_bidi_cnt;
uint32_t dbi_cur;
uint32_t *dbi;
+ uint32_t data_len_bidi;
+
unsigned long deadline;
#define TCMU_CMD_BIT_EXPIRED 0
@@ -242,7 +245,7 @@ static int tcmu_set_global_max_data_area(const char *str,
static int tcmu_get_global_max_data_area(char *buffer,
const struct kernel_param *kp)
{
- return sprintf(buffer, "%d", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
+ return sprintf(buffer, "%d\n", TCMU_BLOCKS_TO_MBS(tcmu_global_max_blocks));
}
static const struct kernel_param_ops tcmu_global_max_data_area_op = {
@@ -492,15 +495,16 @@ static void tcmu_cmd_free_data(struct tcmu_cmd *tcmu_cmd, uint32_t len)
clear_bit(tcmu_cmd->dbi[i], udev->data_bitmap);
}
-static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static inline int tcmu_get_empty_block(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd,
+ int prev_dbi, int *iov_cnt)
{
struct page *page;
int ret, dbi;
dbi = find_first_zero_bit(udev->data_bitmap, udev->dbi_thresh);
if (dbi == udev->dbi_thresh)
- return false;
+ return -1;
page = radix_tree_lookup(&udev->data_blocks, dbi);
if (!page) {
@@ -524,24 +528,30 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev,
set_bit(dbi, udev->data_bitmap);
tcmu_cmd_set_dbi(tcmu_cmd, dbi);
- return true;
+ if (dbi != prev_dbi + 1)
+ *iov_cnt += 1;
+
+ return dbi;
err_insert:
__free_page(page);
err_alloc:
atomic_dec(&global_db_count);
- return false;
+ return -1;
}
-static bool tcmu_get_empty_blocks(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd)
+static int tcmu_get_empty_blocks(struct tcmu_dev *udev,
+ struct tcmu_cmd *tcmu_cmd, int dbi_cnt)
{
- int i;
+ /* start value chosen so that dbi + 1 is never a valid dbi */
+ int dbi = -2;
+ int i, iov_cnt = 0;
- for (i = tcmu_cmd->dbi_cur; i < tcmu_cmd->dbi_cnt; i++) {
- if (!tcmu_get_empty_block(udev, tcmu_cmd))
- return false;
+ for (i = 0; i < dbi_cnt; i++) {
+ dbi = tcmu_get_empty_block(udev, tcmu_cmd, dbi, &iov_cnt);
+ if (dbi < 0)
+ return -1;
}
- return true;
+ return iov_cnt;
}
static inline struct page *
@@ -558,25 +568,58 @@ static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd)
kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
}
-static inline size_t tcmu_cmd_get_data_length(struct tcmu_cmd *tcmu_cmd)
+static inline void tcmu_cmd_set_block_cnts(struct tcmu_cmd *cmd)
{
- struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
- size_t data_length = round_up(se_cmd->data_length, DATA_BLOCK_SIZE);
+ int i, len;
+ struct se_cmd *se_cmd = cmd->se_cmd;
+
+ cmd->dbi_cnt = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
if (se_cmd->se_cmd_flags & SCF_BIDI) {
BUG_ON(!(se_cmd->t_bidi_data_sg && se_cmd->t_bidi_data_nents));
- data_length += round_up(se_cmd->t_bidi_data_sg->length,
- DATA_BLOCK_SIZE);
+ for (i = 0, len = 0; i < se_cmd->t_bidi_data_nents; i++)
+ len += se_cmd->t_bidi_data_sg[i].length;
+ cmd->dbi_bidi_cnt = DIV_ROUND_UP(len, DATA_BLOCK_SIZE);
+ cmd->dbi_cnt += cmd->dbi_bidi_cnt;
+ cmd->data_len_bidi = len;
}
+}
+
+static int new_block_to_iov(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int prev_dbi, int *remain)
+{
+ /* Get the next dbi */
+ int dbi = tcmu_cmd_get_dbi(cmd);
+ /* Do not add more than DATA_BLOCK_SIZE to iov */
+ int len = min_t(int, DATA_BLOCK_SIZE, *remain);
- return data_length;
+ *remain -= len;
+ /*
+ * The following code will gather and map the blocks to the same iovec
+ * when the blocks are all next to each other.
+ */
+ if (dbi != prev_dbi + 1) {
+ /* dbi is not next to previous dbi, so start new iov */
+ if (prev_dbi >= 0)
+ (*iov)++;
+ /* write offset relative to mb_addr */
+ (*iov)->iov_base = (void __user *)
+ (udev->data_off + dbi * DATA_BLOCK_SIZE);
+ }
+ (*iov)->iov_len += len;
+
+ return dbi;
}
-static inline uint32_t tcmu_cmd_get_block_cnt(struct tcmu_cmd *tcmu_cmd)
+static void tcmu_setup_iovs(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ struct iovec **iov, int data_length)
{
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* start value chosen so that dbi + 1 is never a valid dbi */
+ int dbi = -2;
- return data_length / DATA_BLOCK_SIZE;
+ /* We prepare the IOVs for DMA_FROM_DEVICE transfer direction */
+ while (data_length > 0)
+ dbi = new_block_to_iov(udev, cmd, iov, dbi, &data_length);
}
static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
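
new_block_to_iov() above merges data blocks into iovecs: a block whose index follows the previous one extends the current iovec, any gap opens a new one, and the initial prev value of -2 guarantees the first block never looks contiguous. A self-contained userspace rendering of that rule; block size, indices, and the base offset are made-up values:

#include <stdio.h>
#include <sys/uio.h>

#define BLOCK_SIZE 4096	/* stand-in for DATA_BLOCK_SIZE */

/*
 * Consecutive block indices extend the current iovec; a gap opens a new
 * one.  Simplification: every block contributes a full BLOCK_SIZE here,
 * while tcmu caps the last block at the remaining data length.
 */
static int blocks_to_iovs(const int *dbis, int nr, size_t base,
			  struct iovec *iov, int max_iov)
{
	int i, cnt = 0, prev = -2;	/* prev + 1 never matches a dbi */

	for (i = 0; i < nr; i++) {
		if (dbis[i] != prev + 1) {	/* gap: new iovec */
			if (cnt == max_iov)
				return -1;
			iov[cnt].iov_base = (void *)(base +
					(size_t)dbis[i] * BLOCK_SIZE);
			iov[cnt].iov_len = 0;
			cnt++;
		}
		iov[cnt - 1].iov_len += BLOCK_SIZE;
		prev = dbis[i];
	}
	return cnt;
}

int main(void)
{
	const int dbis[] = { 3, 4, 5, 9, 10 };	/* two contiguous runs */
	struct iovec iov[8];
	int i, n = blocks_to_iovs(dbis, 5, 0x10000, iov, 8);

	for (i = 0; i < n; i++)	/* prints 2 iovecs: 3 and 2 blocks */
		printf("iov[%d]: base=%p len=%zu\n", i,
		       iov[i].iov_base, iov[i].iov_len);
	return 0;
}
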
@@ -593,8 +636,7 @@ static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
+ tcmu_cmd_set_block_cnts(tcmu_cmd);
tcmu_cmd->dbi = kcalloc(tcmu_cmd->dbi_cnt, sizeof(uint32_t),
GFP_NOIO);
if (!tcmu_cmd->dbi) {
@@ -644,46 +686,22 @@ static inline size_t head_to_end(size_t head, size_t size)
return size - head;
}
-static inline void new_iov(struct iovec **iov, int *iov_cnt)
-{
- struct iovec *iovec;
-
- if (*iov_cnt != 0)
- (*iov)++;
- (*iov_cnt)++;
-
- iovec = *iov;
- memset(iovec, 0, sizeof(struct iovec));
-}
-
#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
-/* offset is relative to mb_addr */
-static inline size_t get_block_offset_user(struct tcmu_dev *dev,
- int dbi, int remaining)
-{
- return dev->data_off + dbi * DATA_BLOCK_SIZE +
- DATA_BLOCK_SIZE - remaining;
-}
-
-static inline size_t iov_tail(struct iovec *iov)
-{
- return (size_t)iov->iov_base + iov->iov_len;
-}
-
-static void scatter_data_area(struct tcmu_dev *udev,
- struct tcmu_cmd *tcmu_cmd, struct scatterlist *data_sg,
- unsigned int data_nents, struct iovec **iov,
- int *iov_cnt, bool copy_data)
+static void scatter_data_area(struct tcmu_dev *udev, struct tcmu_cmd *tcmu_cmd,
+ struct iovec **iov)
{
- int i, dbi;
+ struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
+ /* start value chosen so that dbi + 1 is never a valid dbi */
+ int i, dbi = -2;
int block_remaining = 0;
+ int data_len = se_cmd->data_length;
void *from, *to = NULL;
- size_t copy_bytes, to_offset, offset;
+ size_t copy_bytes, offset;
struct scatterlist *sg;
- struct page *page;
+ struct page *page = NULL;
- for_each_sg(data_sg, sg, data_nents, i) {
+ for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
int sg_remaining = sg->length;
from = kmap_atomic(sg_page(sg)) + sg->offset;
while (sg_remaining > 0) {
@@ -693,50 +711,19 @@ static void scatter_data_area(struct tcmu_dev *udev,
kunmap_atomic(to);
}
- block_remaining = DATA_BLOCK_SIZE;
- dbi = tcmu_cmd_get_dbi(tcmu_cmd);
+ /* get next dbi and add to IOVs */
+ dbi = new_block_to_iov(udev, tcmu_cmd, iov, dbi,
+ &data_len);
page = tcmu_get_block_page(udev, dbi);
to = kmap_atomic(page);
+ block_remaining = DATA_BLOCK_SIZE;
}
- /*
- * Covert to virtual offset of the ring data area.
- */
- to_offset = get_block_offset_user(udev, dbi,
- block_remaining);
-
- /*
- * The following code will gather and map the blocks
- * to the same iovec when the blocks are all next to
- * each other.
- */
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
- if (*iov_cnt != 0 &&
- to_offset == iov_tail(*iov)) {
- /*
- * Will append to the current iovec, because
- * the current block page is next to the
- * previous one.
- */
- (*iov)->iov_len += copy_bytes;
- } else {
- /*
- * Will allocate a new iovec because we are
- * first time here or the current block page
- * is not next to the previous one.
- */
- new_iov(iov, iov_cnt);
- (*iov)->iov_base = (void __user *)to_offset;
- (*iov)->iov_len = copy_bytes;
- }
-
- if (copy_data) {
- offset = DATA_BLOCK_SIZE - block_remaining;
- memcpy(to + offset,
- from + sg->length - sg_remaining,
- copy_bytes);
- }
+ offset = DATA_BLOCK_SIZE - block_remaining;
+ memcpy(to + offset, from + sg->length - sg_remaining,
+ copy_bytes);
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
@@ -767,13 +754,12 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
data_sg = se_cmd->t_data_sg;
data_nents = se_cmd->t_data_nents;
} else {
-
/*
* For bidi case, the first count blocks are for Data-Out
* buffer blocks, and before gathering the Data-In buffer
- * the Data-Out buffer blocks should be discarded.
+ * the Data-Out buffer blocks should be skipped.
*/
- count = DIV_ROUND_UP(se_cmd->data_length, DATA_BLOCK_SIZE);
+ count = cmd->dbi_cnt - cmd->dbi_bidi_cnt;
data_sg = se_cmd->t_bidi_data_sg;
data_nents = se_cmd->t_bidi_data_nents;
@@ -821,17 +807,13 @@ static inline size_t spc_bitmap_free(unsigned long *bitmap, uint32_t thresh)
}
/*
- * We can't queue a command until we have space available on the cmd ring *and*
- * space available on the data area.
+ * We can't queue a command until we have space available on the cmd ring.
*
* Called with ring lock held.
*/
-static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- size_t cmd_size, size_t data_needed)
+static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size)
{
struct tcmu_mailbox *mb = udev->mb_addr;
- uint32_t blocks_needed = (data_needed + DATA_BLOCK_SIZE - 1)
- / DATA_BLOCK_SIZE;
size_t space, cmd_needed;
u32 cmd_head;
@@ -854,29 +836,54 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
udev->cmdr_last_cleaned, udev->cmdr_size);
return false;
}
+ return true;
+}
- if (!data_needed)
- return true;
+/*
+ * We have to allocate data buffers before we can queue a command.
+ * Returns -1 on error (not enough space) or the number of iovs needed on success.
+ *
+ * Called with ring lock held.
+ */
+static int tcmu_alloc_data_space(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
+ int *iov_bidi_cnt)
+{
+ int space, iov_cnt = 0, ret = 0;
+
+ if (!cmd->dbi_cnt)
+ goto wr_iov_cnts;
/* try to check and get the data blocks as needed */
space = spc_bitmap_free(udev->data_bitmap, udev->dbi_thresh);
- if ((space * DATA_BLOCK_SIZE) < data_needed) {
+ if (space < cmd->dbi_cnt) {
unsigned long blocks_left =
(udev->max_blocks - udev->dbi_thresh) + space;
- if (blocks_left < blocks_needed) {
- pr_debug("no data space: only %lu available, but ask for %zu\n",
+ if (blocks_left < cmd->dbi_cnt) {
+ pr_debug("no data space: only %lu available, but ask for %lu\n",
blocks_left * DATA_BLOCK_SIZE,
- data_needed);
- return false;
+ cmd->dbi_cnt * DATA_BLOCK_SIZE);
+ return -1;
}
- udev->dbi_thresh += blocks_needed;
+ udev->dbi_thresh += cmd->dbi_cnt;
if (udev->dbi_thresh > udev->max_blocks)
udev->dbi_thresh = udev->max_blocks;
}
- return tcmu_get_empty_blocks(udev, cmd);
+ iov_cnt = tcmu_get_empty_blocks(udev, cmd,
+ cmd->dbi_cnt - cmd->dbi_bidi_cnt);
+ if (iov_cnt < 0)
+ return -1;
+
+ if (cmd->dbi_bidi_cnt) {
+ ret = tcmu_get_empty_blocks(udev, cmd, cmd->dbi_bidi_cnt);
+ if (ret < 0)
+ return -1;
+ }
+wr_iov_cnts:
+ *iov_bidi_cnt = ret;
+ return iov_cnt + ret;
}
static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt)
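
Because the data blocks are now allocated before the ring entry is reserved, the iov count passed to tcmu_cmd_get_base_cmd_size() is exact and the old reserve-worst-case-then-recalculate dance disappears. The sizing itself is plain flexible-array arithmetic; a simplified model follows, where the struct layout is illustrative only (the real one is struct tcmu_cmd_entry in the uapi header):

#include <stdio.h>
#include <stddef.h>
#include <sys/uio.h>

/* Illustrative stand-in only; the real layout is struct tcmu_cmd_entry
 * in include/uapi/linux/target_core_user.h. */
struct entry {
	unsigned long hdr[4];	/* fixed header */
	struct iovec iov[];	/* one slot per discontiguous data run */
};

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

int main(void)
{
	size_t iov_cnt = 3;	/* exact once the blocks are allocated */
	size_t cdb_len = 16;
	size_t base = offsetof(struct entry, iov) +
		      iov_cnt * sizeof(struct iovec);
	size_t total = ALIGN_UP(base + cdb_len, 64);	/* op alignment */

	printf("base=%zu total=%zu\n", base, total);
	return 0;
}
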
@@ -986,11 +993,11 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
struct tcmu_mailbox *mb = udev->mb_addr;
struct tcmu_cmd_entry *entry;
struct iovec *iov;
- int iov_cnt, cmd_id;
+ int iov_cnt, iov_bidi_cnt, cmd_id;
uint32_t cmd_head;
uint64_t cdb_off;
- bool copy_to_data_area;
- size_t data_length = tcmu_cmd_get_data_length(tcmu_cmd);
+ /* size of data buffer needed */
+ size_t data_length = (size_t)tcmu_cmd->dbi_cnt * DATA_BLOCK_SIZE;
*scsi_err = TCM_NO_SENSE;
@@ -1004,42 +1011,54 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return -1;
}
+ if (!list_empty(&udev->qfull_queue))
+ goto queue;
+
+ if (data_length > udev->data_size) {
+ pr_warn("TCMU: Request of size %zu is too big for %zu data area\n",
+ data_length, udev->data_size);
+ *scsi_err = TCM_INVALID_CDB_FIELD;
+ return -1;
+ }
+
+ iov_cnt = tcmu_alloc_data_space(udev, tcmu_cmd, &iov_bidi_cnt);
+ if (iov_cnt < 0)
+ goto free_and_queue;
+
/*
* Must be a certain minimum size for response sense info, but
* also may be larger if the iov array is large.
- *
- * We prepare as many iovs as possbile for potential uses here,
- * because it's expensive to tell how many regions are freed in
- * the bitmap & global data pool, as the size calculated here
- * will only be used to do the checks.
- *
- * The size will be recalculated later as actually needed to save
- * cmd area memories.
*/
- base_command_size = tcmu_cmd_get_base_cmd_size(tcmu_cmd->dbi_cnt);
+ base_command_size = tcmu_cmd_get_base_cmd_size(iov_cnt);
command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
- if (!list_empty(&udev->qfull_queue))
- goto queue;
-
- if ((command_size > (udev->cmdr_size / 2)) ||
- data_length > udev->data_size) {
- pr_warn("TCMU: Request of size %zu/%zu is too big for %u/%zu "
- "cmd ring/data area\n", command_size, data_length,
- udev->cmdr_size, udev->data_size);
+ if (command_size > (udev->cmdr_size / 2)) {
+ pr_warn("TCMU: Request of size %zu is too big for %u cmd ring\n",
+ command_size, udev->cmdr_size);
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
*scsi_err = TCM_INVALID_CDB_FIELD;
return -1;
}
- if (!is_ring_space_avail(udev, tcmu_cmd, command_size, data_length)) {
+ if (!is_ring_space_avail(udev, command_size))
/*
* Don't leave commands partially setup because the unmap
* thread might need the blocks to make forward progress.
*/
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
- tcmu_cmd_reset_dbi_cur(tcmu_cmd);
- goto queue;
+ goto free_and_queue;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ *scsi_err = TCM_OUT_OF_RESOURCES;
+ return -1;
}
+ tcmu_cmd->cmd_id = cmd_id;
+
+ pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
+ tcmu_cmd, udev->name);
cmd_head = ring_insert_padding(udev, command_size);
@@ -1047,52 +1066,29 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- /* Handle allocating space from the data area */
+ /* prepare iov list and copy data to data area if necessary */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
iov = &entry->req.iov[0];
- iov_cnt = 0;
- copy_to_data_area = (se_cmd->data_direction == DMA_TO_DEVICE
- || se_cmd->se_cmd_flags & SCF_BIDI);
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_data_sg,
- se_cmd->t_data_nents, &iov, &iov_cnt,
- copy_to_data_area);
- entry->req.iov_cnt = iov_cnt;
+
+ if (se_cmd->data_direction == DMA_TO_DEVICE ||
+ se_cmd->se_cmd_flags & SCF_BIDI)
+ scatter_data_area(udev, tcmu_cmd, &iov);
+ else
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, se_cmd->data_length);
+
+ entry->req.iov_cnt = iov_cnt - iov_bidi_cnt;
/* Handle BIDI commands */
- iov_cnt = 0;
if (se_cmd->se_cmd_flags & SCF_BIDI) {
iov++;
- scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg,
- se_cmd->t_bidi_data_nents, &iov, &iov_cnt,
- false);
- }
- entry->req.iov_bidi_cnt = iov_cnt;
-
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
- if (cmd_id < 0) {
- pr_err("tcmu: Could not allocate cmd id.\n");
-
- tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
- *scsi_err = TCM_OUT_OF_RESOURCES;
- return -1;
+ tcmu_setup_iovs(udev, tcmu_cmd, &iov, tcmu_cmd->data_len_bidi);
+ entry->req.iov_bidi_cnt = iov_bidi_cnt;
}
- tcmu_cmd->cmd_id = cmd_id;
-
- pr_debug("allocated cmd id %u for cmd %p dev %s\n", tcmu_cmd->cmd_id,
- tcmu_cmd, udev->name);
tcmu_setup_cmd_timer(tcmu_cmd, udev->cmd_time_out, &udev->cmd_timer);
entry->hdr.cmd_id = tcmu_cmd->cmd_id;
- /*
- * Recalaulate the command's base size and size according
- * to the actual needs
- */
- base_command_size = tcmu_cmd_get_base_cmd_size(entry->req.iov_cnt +
- entry->req.iov_bidi_cnt);
- command_size = tcmu_cmd_get_cmd_size(tcmu_cmd, base_command_size);
-
tcmu_hdr_set_len(&entry->hdr.len_op, command_size);
/* All offsets relative to mb_addr, not start of entry! */
@@ -1111,6 +1107,10 @@ static int queue_cmd_ring(struct tcmu_cmd *tcmu_cmd, sense_reason_t *scsi_err)
return 0;
+free_and_queue:
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cur);
+ tcmu_cmd_reset_dbi_cur(tcmu_cmd);
+
queue:
if (add_to_qfull_queue(tcmu_cmd)) {
*scsi_err = TCM_OUT_OF_RESOURCES;
@@ -1145,7 +1145,7 @@ queue_tmr_ring(struct tcmu_dev *udev, struct tcmu_tmr *tmr)
cmd_size = round_up(sizeof(*entry) + id_list_sz, TCMU_OP_ALIGN_SIZE);
if (!list_empty(&udev->tmr_queue) ||
- !is_ring_space_avail(udev, NULL, cmd_size, 0)) {
+ !is_ring_space_avail(udev, cmd_size)) {
list_add_tail(&tmr->queue_entry, &udev->tmr_queue);
pr_debug("adding tmr %p on dev %s to TMR ring space wait queue\n",
tmr, udev->name);