author     Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 10:24:58 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-02-22 10:24:58 -0800
commit     bdb39c9509e6d31943cb29dbb6ccd1b64013fb98 (patch)
tree       36bf88ee1db29c69f0e488b7f537b2907ebff095
parent     325b764089c9bef2be45354db4f15e5b12ae406d (diff)
parent     d2aacd36a8e00bc1813841b482e3933acb1ea0b5 (diff)
download   linux-next-bdb39c9509e6d31943cb29dbb6ccd1b64013fb98.tar.gz
Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi
Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, ibmvfc, qla2xxx,
  hisi_sas, pm80xx) plus the removal of the gdth driver (which is bound to
  cause conflicts with a trivial change somewhere). The only big major rework
  of note is the one from Hannes trying to clean up our result handling code
  in the drivers to make it consistent"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (194 commits)
  scsi: MAINTAINERS: Adjust to reflect gdth scsi driver removal
  scsi: ufs: Give clk scaling min gear a value
  scsi: lpfc: Fix 'physical' typos
  scsi: megaraid_mbox: Fix spelling of 'allocated'
  scsi: qla2xxx: Simplify the calculation of variables
  scsi: message: fusion: Fix 'physical' typos
  scsi: target: core: Change ASCQ for residual write
  scsi: target: core: Signal WRITE residuals
  scsi: target: core: Set residuals for 4Kn devices
  scsi: hisi_sas: Add trace FIFO debugfs support
  scsi: hisi_sas: Flush workqueue in hisi_sas_v3_remove()
  scsi: hisi_sas: Enable debugfs support by default
  scsi: hisi_sas: Don't check .nr_hw_queues in hisi_sas_task_prep()
  scsi: hisi_sas: Remove deferred probe check in hisi_sas_v2_probe()
  scsi: lpfc: Add auto select on IRQ_POLL
  scsi: ncr53c8xx: Fix typos
  scsi: lpfc: Fix ancient double free
  scsi: qla2xxx: Fix some memory corruption
  scsi: qla2xxx: Remove redundant NULL check
  scsi: megaraid: Fix ifnullfree.cocci warnings
  ...
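The result-handling rework mentioned above moves drivers toward building cmd->result from a host byte plus a SCSI status byte in one place rather than open-coded shifts scattered through each driver. A minimal sketch of that pattern, assuming the generic set_host_byte() helper and a hypothetical completion function (illustrative only, not code from this series):

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

/* Hypothetical LLDD completion path (sketch): report transport and SCSI
 * status through cmd->result in one place. */
static void example_complete_cmd(struct scsi_cmnd *cmd, bool transport_err,
				 u8 scsi_status)
{
	if (transport_err) {
		set_host_byte(cmd, DID_ERROR);	/* transport-level failure */
	} else {
		cmd->result = DID_OK << 16;	/* transport OK */
		cmd->result |= scsi_status;	/* SAM status from the device */
	}
	cmd->scsi_done(cmd);			/* hand back to the midlayer */
}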
-rw-r--r--  Documentation/ABI/testing/sysfs-driver-ufs | 11
-rw-r--r--  Documentation/kbuild/makefiles.rst | 4
-rw-r--r--  Documentation/process/magic-number.rst | 2
-rw-r--r--  Documentation/scsi/libsas.rst | 9
-rw-r--r--  Documentation/scsi/scsi-parameters.rst | 3
-rw-r--r--  Documentation/userspace-api/ioctl/ioctl-number.rst | 1
-rw-r--r--  MAINTAINERS | 8
-rw-r--r--  drivers/message/fusion/lsi/mpi_cnfg.h | 2
-rw-r--r--  drivers/message/fusion/lsi/mpi_history.txt | 2
-rw-r--r--  drivers/s390/scsi/zfcp_fc.h | 1
-rw-r--r--  drivers/scsi/3w-9xxx.c | 56
-rw-r--r--  drivers/scsi/3w-9xxx.h | 156
-rw-r--r--  drivers/scsi/3w-sas.c | 52
-rw-r--r--  drivers/scsi/3w-sas.h | 118
-rw-r--r--  drivers/scsi/3w-xxxx.c | 251
-rw-r--r--  drivers/scsi/3w-xxxx.h | 199
-rw-r--r--  drivers/scsi/Kconfig | 16
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 173
-rw-r--r--  drivers/scsi/advansys.c | 84
-rw-r--r--  drivers/scsi/aha1542.c | 133
-rw-r--r--  drivers/scsi/aha1542.h | 33
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx.h | 36
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_core.c | 257
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.c | 20
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm.h | 37
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_osm_pci.c | 6
-rw-r--r--  drivers/scsi/aic7xxx/aic79xx_proc.c | 13
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_93cx6.c | 4
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_core.c | 263
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.c | 88
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_osm.h | 39
-rw-r--r--  drivers/scsi/aic7xxx/aic7xxx_proc.c | 15
-rw-r--r--  drivers/scsi/aic7xxx/aiclib.h | 15
-rw-r--r--  drivers/scsi/aic7xxx/scsi_message.h | 41
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_scb.c | 24
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 14
-rw-r--r--  drivers/scsi/atp870u.c | 451
-rw-r--r--  drivers/scsi/atp870u.h | 14
-rw-r--r--  drivers/scsi/bfa/bfa_fc.h | 15
-rw-r--r--  drivers/scsi/bfa/bfa_fcpim.c | 2
-rw-r--r--  drivers/scsi/bfa/bfad_im.c | 2
-rw-r--r--  drivers/scsi/dc395x.c | 28
-rw-r--r--  drivers/scsi/dc395x.h | 38
-rw-r--r--  drivers/scsi/dpt_i2o.c | 2
-rw-r--r--  drivers/scsi/esp_scsi.c | 23
-rw-r--r--  drivers/scsi/gdth.c | 4322
-rw-r--r--  drivers/scsi/gdth.h | 981
-rw-r--r--  drivers/scsi/gdth_ioctl.h | 251
-rw-r--r--  drivers/scsi/gdth_proc.c | 586
-rw-r--r--  drivers/scsi/gdth_proc.h | 18
-rw-r--r--  drivers/scsi/hisi_sas/Kconfig | 6
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas.h | 18
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_main.c | 48
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v1_hw.c | 7
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v2_hw.c | 19
-rw-r--r--  drivers/scsi/hisi_sas/hisi_sas_v3_hw.c | 264
-rw-r--r--  drivers/scsi/hpsa.c | 4
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 1256
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.h | 91
-rw-r--r--  drivers/scsi/initio.c | 64
-rw-r--r--  drivers/scsi/initio.h | 25
-rw-r--r--  drivers/scsi/ips.c | 9
-rw-r--r--  drivers/scsi/isci/port.c | 11
-rw-r--r--  drivers/scsi/isci/request.c | 4
-rw-r--r--  drivers/scsi/libsas/sas_event.c | 27
-rw-r--r--  drivers/scsi/libsas/sas_init.c | 19
-rw-r--r--  drivers/scsi/libsas/sas_internal.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 9
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 6
-rw-r--r--  drivers/scsi/lpfc/lpfc_disc.h | 15
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 49
-rw-r--r--  drivers/scsi/lpfc/lpfc_hbadisc.c | 36
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 241
-rw-r--r--  drivers/scsi/lpfc/lpfc_mbox.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 21
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvme.c | 45
-rw-r--r--  drivers/scsi/lpfc/lpfc_nvmet.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 59
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 141
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_vport.c | 2
-rw-r--r--  drivers/scsi/mac53c94.c | 1
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 2
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 3
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 4
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 25
-rw-r--r--  drivers/scsi/ncr53c8xx.c | 83
-rw-r--r--  drivers/scsi/ncr53c8xx.h | 16
-rw-r--r--  drivers/scsi/nsp32.c | 2
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.c | 12
-rw-r--r--  drivers/scsi/pcmcia/nsp_cs.h | 11
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 69
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 20
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.c | 21
-rw-r--r--  drivers/scsi/pm8001/pm8001_sas.h | 2
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.c | 280
-rw-r--r--  drivers/scsi/pm8001/pm80xx_hwi.h | 17
-rw-r--r--  drivers/scsi/qedf/qedf_main.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 12
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 342
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 5
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 83
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 29
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 1
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 245
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 87
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_nvme.c | 93
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 27
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 1
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 2
-rw-r--r--  drivers/scsi/scsi_debug.c | 2
-rw-r--r--  drivers/scsi/scsi_error.c | 23
-rw-r--r--  drivers/scsi/scsi_lib.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_fc.c | 118
-rw-r--r--  drivers/scsi/stex.c | 25
-rw-r--r--  drivers/scsi/storvsc_drv.c | 60
-rw-r--r--  drivers/scsi/ufs/Kconfig | 14
-rw-r--r--  drivers/scsi/ufs/Makefile | 13
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.c | 56
-rw-r--r--  drivers/scsi/ufs/ufs-debugfs.h | 22
-rw-r--r--  drivers/scsi/ufs/ufs-exynos.c | 9
-rw-r--r--  drivers/scsi/ufs/ufs-mediatek.c | 1
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 18
-rw-r--r--  drivers/scsi/ufs/ufs-sysfs.c | 175
-rw-r--r--  drivers/scsi/ufs/ufs.h | 52
-rw-r--r--  drivers/scsi/ufs/ufshcd-crypto.c | 4
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 568
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 41
-rw-r--r--  drivers/scsi/wd33c93.c | 6
-rw-r--r--  drivers/target/iscsi/iscsi_target.c | 20
-rw-r--r--  drivers/target/iscsi/iscsi_target.h | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_erl0.c | 2
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.c | 11
-rw-r--r--  drivers/target/iscsi/iscsi_target_util.h | 2
-rw-r--r--  drivers/target/target_core_alua.c | 2
-rw-r--r--  drivers/target/target_core_file.c | 2
-rw-r--r--  drivers/target/target_core_transport.c | 65
-rw-r--r--  include/scsi/libsas.h | 9
-rw-r--r--  include/scsi/scsi.h | 2
-rw-r--r--  include/scsi/scsi_cmnd.h | 5
-rw-r--r--  include/scsi/scsi_host.h | 6
-rw-r--r--  include/scsi/scsi_transport_fc.h | 4
-rw-r--r--  include/target/target_core_base.h | 1
-rw-r--r--  include/trace/events/ufs.h | 108
154 files changed, 5488 insertions, 9021 deletions
diff --git a/Documentation/ABI/testing/sysfs-driver-ufs b/Documentation/ABI/testing/sysfs-driver-ufs
index 75ccc5c62b3c..d1bc23cb6a9d 100644
--- a/Documentation/ABI/testing/sysfs-driver-ufs
+++ b/Documentation/ABI/testing/sysfs-driver-ufs
@@ -1161,3 +1161,14 @@ Description: This entry shows the configured size of WriteBooster buffer.
0400h corresponds to 4GB.
The file is read only.
+
+What: /sys/bus/platform/drivers/ufshcd/*/wb_on
+Date: January 2021
+Contact: Bean Huo <beanhuo@micron.com>
+Description: This node is used to set or display whether UFS WriteBooster is
+ enabled. Echo 0 to this file to disable UFS WriteBooster or 1 to
+ enable it. The WriteBooster is enabled after power-on/reset,
+ however, it will be disabled/enable while CLK scaling down/up
+ (if the platform supports UFSHCD_CAP_CLK_SCALING). For a
+ platform that doesn't support UFSHCD_CAP_CLK_SCALING, we can
+ disable/enable WriteBooster through this sysfs node.
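A minimal user-space sketch of the ABI described above, assuming a hypothetical device instance in the sysfs path (the real node lives under /sys/bus/platform/drivers/ufshcd/*/wb_on):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write "0" or "1" to the wb_on node to disable/enable WriteBooster. */
static int set_wb_on(const char *node, int enable)
{
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror("open wb_on");
		return -1;
	}
	if (write(fd, enable ? "1" : "0", 1) != 1) {
		perror("write wb_on");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* hypothetical device instance; adjust to the node on your system */
	return set_wb_on("/sys/bus/platform/drivers/ufshcd/1d84000.ufshc/wb_on", 1) ? 1 : 0;
}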
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 910c6303c7ea..de669c2f1fcd 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -461,10 +461,8 @@ more details, with real examples.
# drivers/scsi/Makefile
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
- CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ \
- -DGDTH_STATISTICS
- These two lines specify compilation flags for aha152x.o and gdth.o.
+ This line specify compilation flags for aha152x.o.
$(AFLAGS_$@) is a similar feature for source files in assembly
languages.
diff --git a/Documentation/process/magic-number.rst b/Documentation/process/magic-number.rst
index c6dfe060ec2f..fa5a62f4150c 100644
--- a/Documentation/process/magic-number.rst
+++ b/Documentation/process/magic-number.rst
@@ -99,7 +99,6 @@ USB_SERIAL_PORT_MAGIC 0x7301 usb_serial_port ``drivers/usb/se
CG_MAGIC 0x00090255 ufs_cylinder_group ``include/linux/ufs_fs.h``
RPORT_MAGIC 0x00525001 r_port ``drivers/char/rocket_int.h``
LSEMAGIC 0x05091998 lse ``drivers/fc4/fc.c``
-GDTIOCTL_MAGIC 0x06030f07 gdth_iowr_str ``drivers/scsi/gdth_ioctl.h``
RIEBL_MAGIC 0x09051990 ``drivers/net/atarilance.c``
NBD_REQUEST_MAGIC 0x12560953 nbd_request ``include/linux/nbd.h``
RED_MAGIC2 0x170fc2a5 (any) ``mm/slab.c``
@@ -142,7 +141,6 @@ PWC_MAGIC 0x89DC10AB pwc_device ``drivers/usb/me
NBD_REPLY_MAGIC 0x96744668 nbd_reply ``include/linux/nbd.h``
ENI155_MAGIC 0xa54b872d midway_eprom ``drivers/atm/eni.h``
CODA_MAGIC 0xC0DAC0DA coda_file_info ``fs/coda/coda_fs_i.h``
-DPMEM_MAGIC 0xc0ffee11 gdt_pci_sram ``drivers/scsi/gdth.h``
YAM_MAGIC 0xF10A7654 yam_port ``drivers/net/hamradio/yam.c``
CCB_MAGIC 0xf2691ad2 ccb ``drivers/scsi/ncr53c8xx.c``
QUEUE_MAGIC_FREE 0xf7e1c9a3 queue_entry ``drivers/scsi/arm/queue.c``
diff --git a/Documentation/scsi/libsas.rst b/Documentation/scsi/libsas.rst
index 7216b5d25800..6589dfefbc02 100644
--- a/Documentation/scsi/libsas.rst
+++ b/Documentation/scsi/libsas.rst
@@ -189,13 +189,8 @@ num_phys
The event interface::
/* LLDD calls these to notify the class of an event. */
- void (*notify_ha_event)(struct sas_ha_struct *, enum ha_event);
- void (*notify_port_event)(struct sas_phy *, enum port_event);
- void (*notify_phy_event)(struct sas_phy *, enum phy_event);
-
-When sas_register_ha() returns, those are set and can be
-called by the LLDD to notify the SAS layer of such events
-the SAS layer.
+ void sas_notify_port_event(struct sas_phy *, enum port_event, gfp_t);
+ void sas_notify_phy_event(struct sas_phy *, enum phy_event, gfp_t);
The port notification::
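A minimal sketch of the new calling convention, assuming a hypothetical phy-up handler and the LLDD-side struct asd_sas_phy used by in-tree callers (treat the exact types as an assumption); the explicit gfp_t argument lets callers in atomic context request GFP_ATOMIC:

#include <linux/gfp.h>
#include <scsi/libsas.h>

/* Hypothetical phy-up interrupt handler in an LLDD (sketch only). */
static void example_phy_up_irq(struct asd_sas_phy *sas_phy)
{
	/* Hard IRQ context: sleeping allocations are not allowed here. */
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}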
diff --git a/Documentation/scsi/scsi-parameters.rst b/Documentation/scsi/scsi-parameters.rst
index dea5803f5c62..c42c55e1e25e 100644
--- a/Documentation/scsi/scsi-parameters.rst
+++ b/Documentation/scsi/scsi-parameters.rst
@@ -38,9 +38,6 @@ parameters may be changed at runtime by the command
See drivers/scsi/BusLogic.c, comment before function
BusLogic_ParseDriverOptions().
- gdth= [HW,SCSI]
- See header of drivers/scsi/gdth.c.
-
gvp11= [HW,SCSI]
ips= [HW,SCSI] Adaptec / IBM ServeRAID controller
diff --git a/Documentation/userspace-api/ioctl/ioctl-number.rst b/Documentation/userspace-api/ioctl/ioctl-number.rst
index b5231d7f9200..d02ba2f67df8 100644
--- a/Documentation/userspace-api/ioctl/ioctl-number.rst
+++ b/Documentation/userspace-api/ioctl/ioctl-number.rst
@@ -157,7 +157,6 @@ Code Seq# Include File Comments
'I' all linux/isdn.h conflict!
'I' 00-0F drivers/isdn/divert/isdn_divert.h conflict!
'I' 40-4F linux/mISDNif.h conflict!
-'J' 00-1F drivers/scsi/gdth_ioctl.h
'K' all linux/kd.h
'L' 00-1F linux/loop.h conflict!
'L' 10-1F drivers/scsi/mpt3sas/mpt3sas_ctl.h conflict!
diff --git a/MAINTAINERS b/MAINTAINERS
index 7ab5f04d4e5b..42f929b4d34b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7363,13 +7363,6 @@ M: Kieran Bingham <kbingham@kernel.org>
S: Supported
F: scripts/gdb/
-GDT SCSI DISK ARRAY CONTROLLER DRIVER
-M: Achim Leubner <achim_leubner@adaptec.com>
-L: linux-scsi@vger.kernel.org
-S: Supported
-W: http://www.icp-vortex.com/
-F: drivers/scsi/gdt*
-
GEMTEK FM RADIO RECEIVER DRIVER
M: Hans Verkuil <hverkuil@xs4all.nl>
L: linux-media@vger.kernel.org
@@ -8864,7 +8857,6 @@ F: drivers/mfd/intel_pmc_bxt.c
F: include/linux/mfd/intel_pmc_bxt.h
INTEL C600 SERIES SAS CONTROLLER DRIVER
-M: Intel SCU Linux support <intel-linux-scu@intel.com>
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
L: linux-scsi@vger.kernel.org
S: Supported
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
index 178f414ea8f9..3770cb1cff7d 100644
--- a/drivers/message/fusion/lsi/mpi_cnfg.h
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -313,7 +313,7 @@
* define.
* Added BIOS Page 4 structure.
* Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
- * Physcial Disk Page 1.
+ * Physical Disk Page 1.
* 01-15-07 01.05.17 Added additional bit defines for ExtFlags field of
* Manufacturing Page 4.
* Added Solid State Drives Supported bit to IOC Page 6
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
index fa9249b4971a..2f76204fa1b0 100644
--- a/drivers/message/fusion/lsi/mpi_history.txt
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -513,7 +513,7 @@ mpi_cnfg.h
* define.
* Added BIOS Page 4 structure.
* Added MPI_RAID_PHYS_DISK1_PATH_MAX define for RAID
- * Physcial Disk Page 1.
+ * Physical Disk Page 1.
* 01-15-07 01.05.17 Added additional bit defines for ExtFlags field of
* Manufacturing Page 4.
* Added Solid State Drives Supported bit to IOC Page 6
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 6902ae1f8e4f..8aaf409ce9cb 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -275,7 +275,6 @@ void zfcp_fc_eval_fcp_rsp(struct fcp_resp_with_ext *fcp_rsp,
u32 sense_len, resid;
u8 rsp_flags;
- set_msg_byte(scsi, COMMAND_COMPLETE);
scsi->result |= fcp_rsp->resp.fr_status;
rsp_flags = fcp_rsp->resp.fr_flags;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index b4718a1b2bd6..b96e82de4237 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -128,14 +128,14 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
@@ -171,7 +171,7 @@ static ssize_t twa_show_stats(struct device *dev,
"Last sector count: %4d\n"
"Max sector count: %4d\n"
"SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
+ "AEN's: %4d\n",
TW_DRIVER_VERSION,
tw_dev->posted_request_count,
tw_dev->max_posted_request_count,
@@ -190,7 +190,7 @@ static ssize_t twa_show_stats(struct device *dev,
/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
.attr = {
- .name = "stats",
+ .name = "stats",
.mode = S_IRUGO,
},
.show = twa_show_stats
@@ -242,7 +242,7 @@ static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
/* Keep reading the queue in case there are more aen's */
if (twa_aen_read_queue(tw_dev, request_id))
goto out2;
- else {
+ else {
retval = 0;
goto out;
}
@@ -497,7 +497,7 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
param->parameter_size_bytes = cpu_to_le16(4);
- /* Convert system time in UTC to local time seconds since last
+ /* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
@@ -729,7 +729,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
/* Now copy in the command packet response */
memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
-
+
/* Now complete the io */
spin_lock_irqsave(tw_dev->host->host_lock, flags);
tw_dev->posted_request_count--;
@@ -766,7 +766,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
if (tw_dev->aen_clobber) {
tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
tw_dev->aen_clobber = 0;
- } else
+ } else
tw_ioctl->driver_command.status = 0;
event_index = tw_dev->error_index;
} else {
@@ -1067,8 +1067,8 @@ static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
command_packet = &full_command_packet->command.oldcommand;
command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
- command_packet->size = TW_COMMAND_SIZE;
- command_packet->request_id = request_id;
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
command_packet->byte6_offset.block_count = cpu_to_le16(1);
/* Now setup the param */
@@ -1106,14 +1106,14 @@ static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result)
{
TW_Command_Full *full_command_packet;
@@ -1124,7 +1124,7 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
full_command_packet = tw_dev->command_packet_virt[request_id];
memset(full_command_packet, 0, sizeof(TW_Command_Full));
full_command_packet->header.header_desc.size_header = 128;
-
+
tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
@@ -1142,7 +1142,7 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
- } else
+ } else
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
/* Send command packet to the board */
@@ -1455,7 +1455,7 @@ out:
/* This function will poll the status register for a flag */
static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
{
- u32 status_reg_value;
+ u32 status_reg_value;
unsigned long before;
int retval = 1;
@@ -1770,7 +1770,7 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save done function into scsi_cmnd struct */
SCpnt->scsi_done = done;
-
+
/* Get a free request id */
twa_get_request_id(tw_dev, &request_id);
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index d88cd3499bd5..d3f479324527 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -49,8 +49,8 @@
/* AEN string type */
typedef struct TAG_twa_message_type {
- unsigned int code;
- char* text;
+ unsigned int code;
+ char* text;
} twa_message_type;
/* AEN strings */
@@ -263,9 +263,9 @@ static twa_message_type twa_error_table[] = {
#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
-#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
-#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
-#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
/* Status register bit definitions */
#define TW_STATUS_MAJOR_VERSION_MASK 0xF0000000
@@ -284,25 +284,25 @@ static twa_message_type twa_error_table[] = {
#define TW_STATUS_COMMAND_QUEUE_EMPTY 0x00001000
#define TW_STATUS_EXPECTED_BITS 0x00002000
#define TW_STATUS_UNEXPECTED_BITS 0x00F00000
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0000
/* PCI related defines */
#define TW_PCI_CLEAR_PARITY_ERRORS 0xc100
#define TW_PCI_CLEAR_PCI_ABORT 0x2000
/* Command packet opcodes used by the driver */
-#define TW_OP_INIT_CONNECTION 0x1
-#define TW_OP_GET_PARAM 0x12
-#define TW_OP_SET_PARAM 0x13
-#define TW_OP_EXECUTE_SCSI 0x10
+#define TW_OP_INIT_CONNECTION 0x1
+#define TW_OP_GET_PARAM 0x12
+#define TW_OP_SET_PARAM 0x13
+#define TW_OP_EXECUTE_SCSI 0x10
#define TW_OP_DOWNLOAD_FIRMWARE 0x16
-#define TW_OP_RESET 0x1C
+#define TW_OP_RESET 0x1C
/* Asynchronous Event Notification (AEN) codes used by the driver */
-#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
-#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_ERROR 0x1
#define TW_AEN_SEVERITY_DEBUG 0x4
#define TW_AEN_NOT_RETRIEVED 0x1
#define TW_AEN_RETRIEVED 0x2
@@ -323,9 +323,9 @@ static twa_message_type twa_error_table[] = {
/* Misc defines */
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
-#define TW_SECTOR_SIZE 512
-#define TW_ALIGNMENT_9000 4 /* 4 bytes */
-#define TW_ALIGNMENT_9000_SGL 0x3
+#define TW_SECTOR_SIZE 512
+#define TW_ALIGNMENT_9000 4 /* 4 bytes */
+#define TW_ALIGNMENT_9000_SGL 0x3
#define TW_MAX_UNITS 16
#define TW_MAX_UNITS_9650SE 32
#define TW_INIT_MESSAGE_CREDITS 0x100
@@ -338,7 +338,7 @@ static twa_message_type twa_error_table[] = {
#define TW_BASE_FW_SRL 24
#define TW_BASE_FW_BRANCH 0
#define TW_BASE_FW_BUILD 1
-#define TW_FW_SRL_LUNS_SUPPORTED 28
+#define TW_FW_SRL_LUNS_SUPPORTED 28
#define TW_Q_LENGTH 256
#define TW_Q_START 0
#define TW_MAX_SLOT 32
@@ -346,19 +346,19 @@ static twa_message_type twa_error_table[] = {
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_RESPONSE_DRAIN 256
#define TW_MAX_AEN_DRAIN 255
-#define TW_IN_RESET 2
+#define TW_IN_RESET 2
#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
-#define TW_MAX_SECTORS 256
-#define TW_AEN_WAIT_TIME 1000
-#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
-#define TW_MAX_CDB_LEN 16
-#define TW_ISR_DONT_COMPLETE 2
-#define TW_ISR_DONT_RESULT 3
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_COMMAND_OFFSET 128 /* 128 bytes */
-#define TW_VERSION_TABLE 0x0402
+#define TW_MAX_SECTORS 256
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_MAX_CDB_LEN 16
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
#define TW_TIMEKEEP_TABLE 0x040A
#define TW_INFORMATION_TABLE 0x0403
#define TW_PARAM_FWVER 3
@@ -367,22 +367,22 @@ static twa_message_type twa_error_table[] = {
#define TW_PARAM_BIOSVER_LENGTH 16
#define TW_PARAM_PORTCOUNT 3
#define TW_PARAM_PORTCOUNT_LENGTH 1
-#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */
-#define TW_MAX_SENSE_LENGTH 256
-#define TW_EVENT_SOURCE_AEN 0x1000
-#define TW_EVENT_SOURCE_COMMAND 0x1001
-#define TW_EVENT_SOURCE_PCHIP 0x1002
-#define TW_EVENT_SOURCE_DRIVER 0x1003
+#define TW_MIN_SGL_LENGTH 0x200 /* 512 bytes */
+#define TW_MAX_SENSE_LENGTH 256
+#define TW_EVENT_SOURCE_AEN 0x1000
+#define TW_EVENT_SOURCE_COMMAND 0x1001
+#define TW_EVENT_SOURCE_PCHIP 0x1002
+#define TW_EVENT_SOURCE_DRIVER 0x1003
#define TW_IOCTL_GET_COMPATIBILITY_INFO 0x101
-#define TW_IOCTL_GET_LAST_EVENT 0x102
-#define TW_IOCTL_GET_FIRST_EVENT 0x103
-#define TW_IOCTL_GET_NEXT_EVENT 0x104
-#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105
-#define TW_IOCTL_GET_LOCK 0x106
-#define TW_IOCTL_RELEASE_LOCK 0x107
-#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108
+#define TW_IOCTL_GET_LAST_EVENT 0x102
+#define TW_IOCTL_GET_FIRST_EVENT 0x103
+#define TW_IOCTL_GET_NEXT_EVENT 0x104
+#define TW_IOCTL_GET_PREVIOUS_EVENT 0x105
+#define TW_IOCTL_GET_LOCK 0x106
+#define TW_IOCTL_RELEASE_LOCK 0x107
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108
#define TW_IOCTL_ERROR_STATUS_NOT_LOCKED 0x1001 // Not locked
-#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked
+#define TW_IOCTL_ERROR_STATUS_LOCKED 0x1002 // Already locked
#define TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS 0x1003 // No more events
#define TW_IOCTL_ERROR_STATUS_AEN_CLOBBER 0x1004 // AEN clobber occurred
#define TW_IOCTL_ERROR_OS_EFAULT -EFAULT // Bad address
@@ -397,12 +397,12 @@ static twa_message_type twa_error_table[] = {
#define TW_SENSE_DATA_LENGTH 18
#define TW_STATUS_CHECK_CONDITION 2
#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
-#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_ERROR_UNIT_OFFLINE 0x128
#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
-#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6
+#define TW_MESSAGE_SOURCE_LINUX_DRIVER 6
#define TW_DRIVER TW_MESSAGE_SOURCE_LINUX_DRIVER
-#define TW_MESSAGE_SOURCE_LINUX_OS 9
+#define TW_MESSAGE_SOURCE_LINUX_OS 9
#define TW_OS TW_MESSAGE_SOURCE_LINUX_OS
#ifndef PCI_DEVICE_ID_3WARE_9000
#define PCI_DEVICE_ID_3WARE_9000 0x1002
@@ -434,24 +434,38 @@ static twa_message_type twa_error_table[] = {
#define TW_RESID_OUT(x) ((x >> 4) & 0xff)
/* request_id: 12, lun: 4 */
-#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_REQ_LUN_IN(lun, request_id) \
+ (((lun << 12) & 0xf000) | (request_id & 0xfff))
#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
/* Macros */
#define TW_CONTROL_REG_ADDR(x) (x->base_addr)
#define TW_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0x4)
-#define TW_COMMAND_QUEUE_REG_ADDR(x) (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
-#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x20)
-#define TW_RESPONSE_QUEUE_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + 0xC)
-#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) ((unsigned char __iomem *)x->base_addr + 0x30)
-#define TW_CLEAR_ALL_INTERRUPTS(x) (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_ATTENTION_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_HOST_INTERRUPT(x) (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_DISABLE_INTERRUPTS(x) (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_MASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_UNMASK_COMMAND_INTERRUPT(x) (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \
+#define TW_COMMAND_QUEUE_REG_ADDR(x) \
+ (sizeof(dma_addr_t) > 4 ? ((unsigned char __iomem *)x->base_addr + 0x20) : ((unsigned char __iomem *)x->base_addr + 0x8))
+#define TW_COMMAND_QUEUE_REG_ADDR_LARGE(x) \
+ ((unsigned char __iomem *)x->base_addr + 0x20)
+#define TW_RESPONSE_QUEUE_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + 0xC)
+#define TW_RESPONSE_QUEUE_REG_ADDR_LARGE(x) \
+ ((unsigned char __iomem *)x->base_addr + 0x30)
+#define TW_CLEAR_ALL_INTERRUPTS(x) \
+ (writel(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) \
+ (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) \
+ (writel(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) \
+ (writel(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \
+ (writel(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) \
+ (writel(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) \
+ (writel(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (writel(TW_CONTROL_ISSUE_SOFT_RESET | \
TW_CONTROL_CLEAR_HOST_INTERRUPT | \
TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
TW_CONTROL_MASK_COMMAND_INTERRUPT | \
@@ -586,7 +600,7 @@ typedef struct TAG_TW_Ioctl_Driver_Command {
typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
- char padding[488];
+ char padding[488];
TW_Command_Full firmware_command;
char data_buffer[1];
} TW_Ioctl_Buf_Apache;
@@ -634,10 +648,10 @@ typedef struct TAG_TW_Compatibility_Info
#pragma pack()
typedef struct TAG_TW_Device_Extension {
- u32 __iomem *base_addr;
- unsigned long *generic_buffer_virt[TW_Q_LENGTH];
- dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
- TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ u32 __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
struct pci_dev *tw_pci_dev;
struct scsi_cmnd *srb[TW_Q_LENGTH];
@@ -647,10 +661,10 @@ typedef struct TAG_TW_Device_Extension {
unsigned char pending_queue[TW_Q_LENGTH];
unsigned char pending_head;
unsigned char pending_tail;
- int state[TW_Q_LENGTH];
+ int state[TW_Q_LENGTH];
unsigned int posted_request_count;
unsigned int max_posted_request_count;
- unsigned int pending_request_count;
+ unsigned int pending_request_count;
unsigned int max_pending_request_count;
unsigned int max_sgl_entries;
unsigned int sgl_entries;
@@ -661,12 +675,12 @@ typedef struct TAG_TW_Device_Extension {
struct Scsi_Host *host;
long flags;
int reset_print;
- TW_Event *event_queue[TW_Q_LENGTH];
- unsigned char error_index;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
unsigned char event_queue_wrapped;
- unsigned int error_sequence_id;
- int ioctl_sem_lock;
- ktime_t ioctl_time;
+ unsigned int error_sequence_id;
+ int ioctl_sem_lock;
+ ktime_t ioctl_time;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index b8f1848ecef2..3db0e42e9aa7 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -120,7 +120,7 @@ static struct bin_attribute twl_sysfs_aen_read_attr = {
.attr = {
.name = "3ware_aen_read",
.mode = S_IRUSR,
- },
+ },
.size = 0,
.read = twl_sysfs_aen_read
};
@@ -151,7 +151,7 @@ static struct bin_attribute twl_sysfs_compat_info_attr = {
.attr = {
.name = "3ware_compat_info",
.mode = S_IRUSR,
- },
+ },
.size = 0,
.read = twl_sysfs_compat_info
};
@@ -174,7 +174,7 @@ static ssize_t twl_show_stats(struct device *dev,
"Last sector count: %4d\n"
"Max sector count: %4d\n"
"SCSI Host Resets: %4d\n"
- "AEN's: %4d\n",
+ "AEN's: %4d\n",
TW_DRIVER_VERSION,
tw_dev->posted_request_count,
tw_dev->max_posted_request_count,
@@ -191,7 +191,7 @@ static ssize_t twl_show_stats(struct device *dev,
/* stats sysfs attribute initializer */
static struct device_attribute twl_host_stats_attr = {
.attr = {
- .name = "3ware_stats",
+ .name = "3ware_stats",
.mode = S_IRUGO,
},
.show = twl_show_stats
@@ -432,7 +432,7 @@ static void twl_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
param->parameter_size_bytes = cpu_to_le16(4);
- /* Convert system time in UTC to local time seconds since last
+ /* Convert system time in UTC to local time seconds since last
Sunday 12:00AM */
local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
@@ -483,7 +483,7 @@ static int twl_aen_complete(TW_Device_Extension *tw_dev, int request_id)
/* Keep reading the queue in case there are more aen's */
if (twl_aen_read_queue(tw_dev, request_id))
goto out2;
- else {
+ else {
retval = 0;
goto out;
}
@@ -548,7 +548,7 @@ static int twl_poll_response(TW_Device_Extension *tw_dev, int request_id, int se
msleep(50);
}
retval = 0;
-out:
+out:
return retval;
} /* End twl_poll_response() */
@@ -802,7 +802,7 @@ static long twl_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
/* Now copy in the command packet response */
memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
-
+
/* Now complete the io */
spin_lock_irqsave(tw_dev->host->host_lock, flags);
tw_dev->posted_request_count--;
@@ -879,7 +879,7 @@ static int twl_fill_sense(TW_Device_Extension *tw_dev, int i, int request_id, in
tw_dev->host->host_no,
TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
header->status_block.error,
- error_str,
+ error_str,
header->err_specific_desc);
else
printk(KERN_WARNING "3w-sas: ERROR: (0x%02X:0x%04X): %s:%s.\n",
@@ -937,8 +937,8 @@ static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
command_packet = &full_command_packet->command.oldcommand;
command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
- command_packet->size = TW_COMMAND_SIZE;
- command_packet->request_id = request_id;
+ command_packet->size = TW_COMMAND_SIZE;
+ command_packet->request_id = request_id;
command_packet->byte6_offset.block_count = cpu_to_le16(1);
/* Now setup the param */
@@ -968,14 +968,14 @@ static void *twl_get_param(TW_Device_Extension *tw_dev, int request_id, int tabl
/* This function will send an initconnection command to controller */
static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
- u32 set_features, unsigned short current_fw_srl,
- unsigned short current_fw_arch_id,
- unsigned short current_fw_branch,
- unsigned short current_fw_build,
- unsigned short *fw_on_ctlr_srl,
- unsigned short *fw_on_ctlr_arch_id,
- unsigned short *fw_on_ctlr_branch,
- unsigned short *fw_on_ctlr_build,
+ u32 set_features, unsigned short current_fw_srl,
+ unsigned short current_fw_arch_id,
+ unsigned short current_fw_branch,
+ unsigned short current_fw_build,
+ unsigned short *fw_on_ctlr_srl,
+ unsigned short *fw_on_ctlr_arch_id,
+ unsigned short *fw_on_ctlr_branch,
+ unsigned short *fw_on_ctlr_build,
u32 *init_connect_result)
{
TW_Command_Full *full_command_packet;
@@ -986,7 +986,7 @@ static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
full_command_packet = tw_dev->command_packet_virt[request_id];
memset(full_command_packet, 0, sizeof(TW_Command_Full));
full_command_packet->header.header_desc.size_header = 128;
-
+
tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
tw_initconnect->request_id = request_id;
@@ -1004,7 +1004,7 @@ static int twl_initconnection(TW_Device_Extension *tw_dev, int message_credits,
tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
- } else
+ } else
tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
/* Send command packet to the board */
@@ -1211,7 +1211,7 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
if (!error)
cmd->result = (DID_OK << 16);
-
+
/* Report residual bytes for single sgl */
if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
@@ -1245,7 +1245,7 @@ static int twl_poll_register(TW_Device_Extension *tw_dev, void *reg, u32 value,
reg_value = readl(reg);
before = jiffies;
- while ((reg_value & value) != result) {
+ while ((reg_value & value) != result) {
reg_value = readl(reg);
if (time_after(jiffies, before + HZ * seconds))
goto out;
@@ -1470,7 +1470,7 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save done function into scsi_cmnd struct */
SCpnt->scsi_done = done;
-
+
/* Get a free request id */
twl_get_request_id(tw_dev, &request_id);
@@ -1524,7 +1524,7 @@ static void twl_shutdown(struct pci_dev *pdev)
tw_dev = (TW_Device_Extension *)host->hostdata;
- if (tw_dev->online)
+ if (tw_dev->online)
__twl_shutdown(tw_dev);
} /* End twl_shutdown() */
@@ -1675,7 +1675,7 @@ static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
/* Re-enable interrupts on the card */
TWL_UNMASK_INTERRUPTS(tw_dev);
-
+
/* Finally, scan the host */
scsi_scan_host(host);
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index 05e77d84c16d..b0508039a280 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -52,17 +52,17 @@ static char *twl_aen_severity_table[] =
};
/* Liberator register offsets */
-#define TWL_STATUS 0x0 /* Status */
-#define TWL_HIBDB 0x20 /* Inbound doorbell */
-#define TWL_HISTAT 0x30 /* Host interrupt status */
-#define TWL_HIMASK 0x34 /* Host interrupt mask */
+#define TWL_STATUS 0x0 /* Status */
+#define TWL_HIBDB 0x20 /* Inbound doorbell */
+#define TWL_HISTAT 0x30 /* Host interrupt status */
+#define TWL_HIMASK 0x34 /* Host interrupt mask */
#define TWL_HOBDB 0x9C /* Outbound doorbell */
-#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
-#define TWL_SCRPD3 0xBC /* Scratchpad */
-#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
-#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
-#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
-#define TWL_HOBQPH 0xCC /* Host outbound Q high */
+#define TWL_HOBDBC 0xA0 /* Outbound doorbell clear */
+#define TWL_SCRPD3 0xBC /* Scratchpad */
+#define TWL_HIBQPL 0xC0 /* Host inbound Q low */
+#define TWL_HIBQPH 0xC4 /* Host inbound Q high */
+#define TWL_HOBQPL 0xC8 /* Host outbound Q low */
+#define TWL_HOBQPH 0xCC /* Host outbound Q high */
#define TWL_HISTATUS_VALID_INTERRUPT 0xC
#define TWL_HISTATUS_ATTENTION_INTERRUPT 0x4
#define TWL_HISTATUS_RESPONSE_INTERRUPT 0x8
@@ -80,12 +80,12 @@ static char *twl_aen_severity_table[] =
#define TW_OP_EXECUTE_SCSI 0x10
/* Asynchronous Event Notification (AEN) codes used by the driver */
-#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_QUEUE_EMPTY 0x0000
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_SYNC_TIME_WITH_HOST 0x031
-#define TW_AEN_SEVERITY_ERROR 0x1
-#define TW_AEN_SEVERITY_DEBUG 0x4
-#define TW_AEN_NOT_RETRIEVED 0x1
+#define TW_AEN_SEVERITY_ERROR 0x1
+#define TW_AEN_SEVERITY_DEBUG 0x4
+#define TW_AEN_NOT_RETRIEVED 0x1
/* Command state defines */
#define TW_S_INITIAL 0x1 /* Initial state */
@@ -101,7 +101,7 @@ static char *twl_aen_severity_table[] =
#define TW_CURRENT_DRIVER_BRANCH 0
/* Misc defines */
-#define TW_SECTOR_SIZE 512
+#define TW_SECTOR_SIZE 512
#define TW_MAX_UNITS 32
#define TW_INIT_MESSAGE_CREDITS 0x100
#define TW_INIT_COMMAND_PACKET_SIZE 0x3
@@ -116,15 +116,15 @@ static char *twl_aen_severity_table[] =
#define TW_MAX_RESET_TRIES 2
#define TW_MAX_CMDS_PER_LUN 254
#define TW_MAX_AEN_DRAIN 255
-#define TW_IN_RESET 2
+#define TW_IN_RESET 2
#define TW_USING_MSI 3
#define TW_IN_ATTENTION_LOOP 4
-#define TW_MAX_SECTORS 256
-#define TW_MAX_CDB_LEN 16
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
-#define TW_COMMAND_OFFSET 128 /* 128 bytes */
-#define TW_VERSION_TABLE 0x0402
+#define TW_MAX_SECTORS 256
+#define TW_MAX_CDB_LEN 16
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_COMMAND_OFFSET 128 /* 128 bytes */
+#define TW_VERSION_TABLE 0x0402
#define TW_TIMEKEEP_TABLE 0x040A
#define TW_INFORMATION_TABLE 0x0403
#define TW_PARAM_FWVER 3
@@ -136,15 +136,15 @@ static char *twl_aen_severity_table[] =
#define TW_PARAM_PHY_SUMMARY_TABLE 1
#define TW_PARAM_PHYCOUNT 2
#define TW_PARAM_PHYCOUNT_LENGTH 1
-#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
+#define TW_IOCTL_FIRMWARE_PASS_THROUGH 0x108 // Used by smartmontools
#define TW_ALLOCATION_LENGTH 128
#define TW_SENSE_DATA_LENGTH 18
#define TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED 0x10a
#define TW_ERROR_INVALID_FIELD_IN_CDB 0x10d
-#define TW_ERROR_UNIT_OFFLINE 0x128
+#define TW_ERROR_UNIT_OFFLINE 0x128
#define TW_MESSAGE_SOURCE_CONTROLLER_ERROR 3
#define TW_MESSAGE_SOURCE_CONTROLLER_EVENT 4
-#define TW_DRIVER 6
+#define TW_DRIVER 6
#ifndef PCI_DEVICE_ID_3WARE_9750
#define PCI_DEVICE_ID_3WARE_9750 0x1010
#endif
@@ -167,25 +167,41 @@ static char *twl_aen_severity_table[] =
#define TW_NOTMFA_OUT(x) (x & 0x1)
/* request_id: 12, lun: 4 */
-#define TW_REQ_LUN_IN(lun, request_id) (((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_REQ_LUN_IN(lun, request_id) \
+ (((lun << 12) & 0xf000) | (request_id & 0xfff))
#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
/* Register access macros */
-#define TWL_STATUS_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
-#define TWL_HOBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
-#define TWL_HOBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
-#define TWL_HOBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
-#define TWL_HOBDBC_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
-#define TWL_HIMASK_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
-#define TWL_HISTAT_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
-#define TWL_HIBQPH_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
-#define TWL_HIBQPL_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
-#define TWL_HIBDB_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
-#define TWL_SCRPD3_REG_ADDR(x) ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
-#define TWL_MASK_INTERRUPTS(x) (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
-#define TWL_UNMASK_INTERRUPTS(x) (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
-#define TWL_CLEAR_DB_INTERRUPT(x) (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
-#define TWL_SOFT_RESET(x) (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
+#define TWL_STATUS_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_STATUS)
+#define TWL_HOBQPL_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBQPL)
+#define TWL_HOBQPH_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBQPH)
+#define TWL_HOBDB_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBDB)
+#define TWL_HOBDBC_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HOBDBC)
+#define TWL_HIMASK_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIMASK)
+#define TWL_HISTAT_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HISTAT)
+#define TWL_HIBQPH_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBQPH)
+#define TWL_HIBQPL_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBQPL)
+#define TWL_HIBDB_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_HIBDB)
+#define TWL_SCRPD3_REG_ADDR(x) \
+ ((unsigned char __iomem *)x->base_addr + TWL_SCRPD3)
+#define TWL_MASK_INTERRUPTS(x) \
+ (writel(~0, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_UNMASK_INTERRUPTS(x) \
+ (writel(~TWL_HISTATUS_VALID_INTERRUPT, TWL_HIMASK_REG_ADDR(tw_dev)))
+#define TWL_CLEAR_DB_INTERRUPT(x) \
+ (writel(~0, TWL_HOBDBC_REG_ADDR(tw_dev)))
+#define TWL_SOFT_RESET(x) \
+ (writel(TWL_ISSUE_SOFT_RESET, TWL_HIBDB_REG_ADDR(tw_dev)))
/* Macros */
#define TW_PRINTK(h,a,b,c) { \
@@ -317,7 +333,7 @@ typedef struct TAG_TW_Ioctl_Driver_Command {
typedef struct TAG_TW_Ioctl_Apache {
TW_Ioctl_Driver_Command driver_command;
- char padding[488];
+ char padding[488];
TW_Command_Full firmware_command;
char data_buffer[1];
} TW_Ioctl_Buf_Apache;
@@ -352,10 +368,10 @@ typedef struct TAG_TW_Compatibility_Info
#pragma pack()
typedef struct TAG_TW_Device_Extension {
- void __iomem *base_addr;
- unsigned long *generic_buffer_virt[TW_Q_LENGTH];
- dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
- TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
+ void __iomem *base_addr;
+ unsigned long *generic_buffer_virt[TW_Q_LENGTH];
+ dma_addr_t generic_buffer_phys[TW_Q_LENGTH];
+ TW_Command_Full *command_packet_virt[TW_Q_LENGTH];
dma_addr_t command_packet_phys[TW_Q_LENGTH];
TW_Command_Apache_Header *sense_buffer_virt[TW_Q_LENGTH];
dma_addr_t sense_buffer_phys[TW_Q_LENGTH];
@@ -364,7 +380,7 @@ typedef struct TAG_TW_Device_Extension {
unsigned char free_queue[TW_Q_LENGTH];
unsigned char free_head;
unsigned char free_tail;
- int state[TW_Q_LENGTH];
+ int state[TW_Q_LENGTH];
unsigned int posted_request_count;
unsigned int max_posted_request_count;
unsigned int max_sgl_entries;
@@ -375,9 +391,9 @@ typedef struct TAG_TW_Device_Extension {
unsigned int aen_count;
struct Scsi_Host *host;
long flags;
- TW_Event *event_queue[TW_Q_LENGTH];
- unsigned char error_index;
- unsigned int error_sequence_id;
+ TW_Event *event_queue[TW_Q_LENGTH];
+ unsigned char error_index;
+ unsigned int error_sequence_id;
int chrdev_request_id;
wait_queue_head_t ioctl_wqueue;
struct mutex ioctl_lock;
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index fb6444d0409c..d90b9fca4aea 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1,52 +1,52 @@
-/*
+/*
3w-xxxx.c -- 3ware Storage Controller device driver for Linux.
Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
Copyright (C) 1999-2010 3ware Inc.
- Kernel compatibility By: Andre Hedrick <andre@suse.com>
+ Kernel compatibility By: Andre Hedrick <andre@suse.com>
Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
-
+
Further tiny build fixes and trivial hoovering Alan Cox
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- NO WARRANTY
- THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- solely responsible for determining the appropriateness of using and
- distributing the Program and assumes all risks associated with its
- exercise of rights under this Agreement, including but not limited to
- the risks and costs of program errors, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- DISCLAIMER OF LIABILITY
- NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Bugs/Comments/Suggestions should be mailed to:
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
aradford@gmail.com
@@ -70,7 +70,7 @@
1.02.00.003 - Fix tw_interrupt() to report error to scsi layer when
controller status is non-zero.
Added handling of request_sense opcode.
- Fix possible null pointer dereference in
+ Fix possible null pointer dereference in
tw_reset_device_extension()
1.02.00.004 - Add support for device id of 3ware 7000 series controllers.
Make tw_setfeature() call with interrupts disabled.
@@ -239,7 +239,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev);
/* This function will check the status register for unexpected bits */
static int tw_check_bits(u32 status_reg_value)
{
- if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) {
+ if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS) {
dprintk(KERN_WARNING "3w-xxxx: tw_check_bits(): No expected bits (0x%x).\n", status_reg_value);
return 1;
}
@@ -291,7 +291,7 @@ static int tw_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value, int
}
return 1;
}
-
+
return 0;
} /* End tw_decode_bits() */
@@ -390,7 +390,7 @@ static int tw_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
} else {
tw_dev->pending_tail = tw_dev->pending_tail + 1;
}
- }
+ }
TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
return 1;
}
@@ -403,7 +403,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
int i;
TW_Command *command;
- dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n");
+ dprintk(KERN_WARNING "3w-xxxx: tw_decode_sense()\n");
command = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
printk(KERN_WARNING "3w-xxxx: scsi%d: Command failed: status = 0x%x, flags = 0x%x, unit #%d.\n", tw_dev->host->host_no, command->status, command->flags, TW_UNIT_OUT(command->unit__hostid));
@@ -443,10 +443,10 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
} /* End tw_decode_sense() */
/* This function will report controller error status */
-static int tw_check_errors(TW_Device_Extension *tw_dev)
+static int tw_check_errors(TW_Device_Extension *tw_dev)
{
u32 status_reg_value;
-
+
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
if (TW_STATUS_ERRORS(status_reg_value) || tw_check_bits(status_reg_value)) {
@@ -458,7 +458,7 @@ static int tw_check_errors(TW_Device_Extension *tw_dev)
} /* End tw_check_errors() */
/* This function will empty the response que */
-static void tw_empty_response_que(TW_Device_Extension *tw_dev)
+static void tw_empty_response_que(TW_Device_Extension *tw_dev)
{
u32 status_reg_value, response_que_value;
@@ -525,7 +525,7 @@ static ssize_t tw_show_stats(struct device *dev, struct device_attribute *attr,
/* Create sysfs 'stats' entry */
static struct device_attribute tw_host_stats_attr = {
.attr = {
- .name = "stats",
+ .name = "stats",
.mode = S_IRUGO,
},
.show = tw_show_stats
@@ -538,7 +538,7 @@ static struct device_attribute *tw_host_attrs[] = {
};
/* This function will read the aen queue from the isr */
-static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
+static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
TW_Param *param;
@@ -604,7 +604,7 @@ static int tw_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
} /* End tw_aen_read_queue() */
/* This function will complete an aen request from the isr */
-static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
+static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
TW_Param *param;
unsigned short aen;
@@ -628,7 +628,7 @@ static int tw_aen_complete(TW_Device_Extension *tw_dev, int request_id)
if ((tw_aen_string[aen & 0xff][strlen(tw_aen_string[aen & 0xff])-1]) == '#') {
printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s%d.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff], aen >> 8);
} else {
- if (aen != 0x0)
+ if (aen != 0x0)
printk(KERN_WARNING "3w-xxxx: scsi%d: AEN: %s.\n", tw_dev->host->host_no, tw_aen_string[aen & 0xff]);
}
} else {
@@ -746,7 +746,7 @@ static int tw_aen_drain_queue(TW_Device_Extension *tw_dev)
printk(KERN_WARNING "3w-xxxx: tw_aen_drain_queue(): Unexpected request id.\n");
return 1;
}
-
+
if (command_packet->status != 0) {
if (command_packet->flags != TW_AEN_TABLE_UNDEFINED) {
/* Bad response */
@@ -908,7 +908,7 @@ static long tw_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long a
/* Hardware can only do multiple of 512 byte transfers */
data_buffer_length_adjusted = (data_buffer_length + 511) & ~511;
-
+
/* Now allocate ioctl buf memory */
cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_New_Ioctl) - 1, &dma_handle, GFP_KERNEL);
if (cpu_addr == NULL) {
@@ -1075,7 +1075,7 @@ static void tw_free_device_extension(TW_Device_Extension *tw_dev)
} /* End tw_free_device_extension() */
/* This function will send an initconnection command to controller */
-static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
+static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
{
unsigned long command_que_value;
TW_Command *command_packet;
@@ -1105,10 +1105,10 @@ static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
printk(KERN_WARNING "3w-xxxx: tw_initconnection(): Bad command packet physical address.\n");
return 1;
}
-
+
/* Send command packet to the board */
outl(command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
-
+
/* Poll for completion */
if (tw_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, 30) == 0) {
response_queue.value = inl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
@@ -1130,7 +1130,7 @@ static int tw_initconnection(TW_Device_Extension *tw_dev, int message_credits)
/* Set a value in the features table */
static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
- unsigned char *val)
+ unsigned char *val)
{
TW_Param *param;
TW_Command *command_packet;
@@ -1139,7 +1139,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
unsigned long command_que_value;
unsigned long param_value;
- /* Initialize SetParam command packet */
+ /* Initialize SetParam command packet */
if (tw_dev->command_packet_virtual_address[request_id] == NULL) {
printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet virtual address.\n");
return 1;
@@ -1169,7 +1169,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
command_packet->request_id = request_id;
command_packet->byte6.parameter_count = 1;
- command_que_value = tw_dev->command_packet_physical_address[request_id];
+ command_que_value = tw_dev->command_packet_physical_address[request_id];
if (command_que_value == 0) {
printk(KERN_WARNING "3w-xxxx: tw_setfeature(): Bad command packet physical address.\n");
return 1;
@@ -1199,7 +1199,7 @@ static int tw_setfeature(TW_Device_Extension *tw_dev, int parm, int param_size,
} /* End tw_setfeature() */
/* This function will reset a controller */
-static int tw_reset_sequence(TW_Device_Extension *tw_dev)
+static int tw_reset_sequence(TW_Device_Extension *tw_dev)
{
int error = 0;
int tries = 0;
@@ -1298,7 +1298,7 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
/* Abort all requests that are in progress */
for (i=0;i<TW_Q_LENGTH;i++) {
- if ((tw_dev->state[i] != TW_S_FINISHED) &&
+ if ((tw_dev->state[i] != TW_S_FINISHED) &&
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
srb = tw_dev->srb[i];
@@ -1339,11 +1339,11 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
/* This function returns unit geometry in cylinders/heads/sectors */
static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev,
- sector_t capacity, int geom[])
+ sector_t capacity, int geom[])
{
int heads, sectors, cylinders;
TW_Device_Extension *tw_dev;
-
+
dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam()\n");
tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
@@ -1358,7 +1358,7 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
}
dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_biosparam(): heads = %d, sectors = %d, cylinders = %d\n", heads, sectors, cylinders);
- geom[0] = heads;
+ geom[0] = heads;
geom[1] = sectors;
geom[2] = cylinders;
@@ -1366,7 +1366,7 @@ static int tw_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev
} /* End tw_scsi_biosparam() */
/* This is the new scsi eh reset function */
-static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
+static int tw_scsi_eh_reset(struct scsi_cmnd *SCpnt)
{
TW_Device_Extension *tw_dev=NULL;
int retval = FAILED;
@@ -1554,7 +1554,7 @@ static int tw_scsiop_mode_sense(TW_Device_Extension *tw_dev, int request_id)
/* Now try to post the command packet */
tw_post_command_packet(tw_dev, request_id);
-
+
return 0;
} /* End tw_scsiop_mode_sense() */
@@ -1575,16 +1575,16 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
flags = (char *)&(param->data[0]);
memset(request_buffer, 0, sizeof(request_buffer));
- request_buffer[0] = 0xf; /* mode data length */
- request_buffer[1] = 0; /* default medium type */
- request_buffer[2] = 0x10; /* dpo/fua support on */
- request_buffer[3] = 0; /* no block descriptors */
- request_buffer[4] = 0x8; /* caching page */
- request_buffer[5] = 0xa; /* page length */
+ request_buffer[0] = 0xf; /* mode data length */
+ request_buffer[1] = 0; /* default medium type */
+ request_buffer[2] = 0x10; /* dpo/fua support on */
+ request_buffer[3] = 0; /* no block descriptors */
+ request_buffer[4] = 0x8; /* caching page */
+ request_buffer[5] = 0xa; /* page length */
if (*flags & 0x1)
- request_buffer[6] = 0x5; /* WCE on, RCD on */
+ request_buffer[6] = 0x5; /* WCE on, RCD on */
else
- request_buffer[6] = 0x1; /* WCE off, RCD on */
+ request_buffer[6] = 0x1; /* WCE off, RCD on */
tw_transfer_internal(tw_dev, request_id, request_buffer,
sizeof(request_buffer));
@@ -1592,7 +1592,7 @@ static int tw_scsiop_mode_sense_complete(TW_Device_Extension *tw_dev, int reques
} /* End tw_scsiop_mode_sense_complete() */
/* This function handles scsi read_capacity commands */
-static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
+static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
{
TW_Param *param;
TW_Command *command_packet;
@@ -1624,8 +1624,8 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
}
param = (TW_Param *)tw_dev->alignment_virtual_address[request_id];
memset(param, 0, sizeof(TW_Sector));
- param->table_id = TW_UNIT_INFORMATION_TABLE_BASE +
- tw_dev->srb[request_id]->device->id;
+ param->table_id = TW_UNIT_INFORMATION_TABLE_BASE +
+ tw_dev->srb[request_id]->device->id;
param->parameter_id = 4; /* unitcapacity parameter */
param->parameter_size_bytes = 4;
param_value = tw_dev->alignment_physical_address[request_id];
@@ -1633,7 +1633,7 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_capacity(): Bad alignment physical address.\n");
return 1;
}
-
+
command_packet->byte8.param.sgl[0].address = param_value;
command_packet->byte8.param.sgl[0].length = sizeof(TW_Sector);
command_que_value = tw_dev->command_packet_physical_address[request_id];
@@ -1644,7 +1644,7 @@ static int tw_scsiop_read_capacity(TW_Device_Extension *tw_dev, int request_id)
/* Now try to post the command to the board */
tw_post_command_packet(tw_dev, request_id);
-
+
return 0;
} /* End tw_scsiop_read_capacity() */
@@ -1666,7 +1666,7 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
}
param_data = &(param->data[0]);
- capacity = (param_data[3] << 24) | (param_data[2] << 16) |
+ capacity = (param_data[3] << 24) | (param_data[2] << 16) |
(param_data[1] << 8) | param_data[0];
/* Subtract one sector to fix get last sector ioctl */
@@ -1692,7 +1692,7 @@ static int tw_scsiop_read_capacity_complete(TW_Device_Extension *tw_dev, int req
} /* End tw_scsiop_read_capacity_complete() */
/* This function handles scsi read or write commands */
-static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
+static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
{
TW_Command *command_packet;
unsigned long command_que_value;
@@ -1742,12 +1742,12 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
lba = ((u32)srb->cmnd[2] << 24) | ((u32)srb->cmnd[3] << 16) | ((u32)srb->cmnd[4] << 8) | (u32)srb->cmnd[5];
num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
}
-
+
/* Update sector statistic */
tw_dev->sector_count = num_sectors;
if (tw_dev->sector_count > tw_dev->max_sector_count)
tw_dev->max_sector_count = tw_dev->sector_count;
-
+
dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): lba = 0x%x num_sectors = 0x%x\n", lba, num_sectors);
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
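Not from the patch: the hunk above extracts the LBA and sector count from a 10-byte READ/WRITE CDB; the 6-byte form handled earlier in the same function is not visible here. The sketch below shows both layouts as defined by SBC (kernel types and the READ_6/READ_10 opcode constants from <scsi/scsi_proto.h> are assumed); cdb_to_lba_example() is a made-up helper, not the driver's code, and the "0 means 256 blocks" rule is the spec's convention, which a given driver may or may not honour.

/*
 * Illustrative only: READ(6)/WRITE(6) pack a 21-bit LBA into bytes 1-3
 * and an 8-bit length into byte 4; READ(10)/WRITE(10) carry a 32-bit
 * LBA in bytes 2-5 and a 16-bit length in bytes 7-8, all big-endian.
 */
static void cdb_to_lba_example(const unsigned char *cdb, u32 *lba, u32 *num_sectors)
{
	if (cdb[0] == READ_6 || cdb[0] == WRITE_6) {
		*lba = ((u32)(cdb[1] & 0x1f) << 16) |
		       ((u32)cdb[2] << 8) | (u32)cdb[3];
		*num_sectors = cdb[4] ? cdb[4] : 256;	/* 0 means 256 blocks per the spec */
	} else {					/* READ_10 / WRITE_10 */
		*lba = ((u32)cdb[2] << 24) | ((u32)cdb[3] << 16) |
		       ((u32)cdb[4] << 8) | (u32)cdb[5];
		*num_sectors = ((u32)cdb[7] << 8) | (u32)cdb[8];
	}
}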
@@ -1772,7 +1772,7 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
dprintk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Bad command packet physical address.\n");
return 1;
}
-
+
/* Now try to post the command to the board */
tw_post_command_packet(tw_dev, request_id);
@@ -1933,7 +1933,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
/* Save done function into struct scsi_cmnd */
SCpnt->scsi_done = done;
-
+
/* Queue the command and get a request id */
tw_state_request_start(tw_dev, &request_id);
@@ -1941,48 +1941,47 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
tw_dev->srb[request_id] = SCpnt;
switch (*command) {
- case READ_10:
- case READ_6:
- case WRITE_10:
- case WRITE_6:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n");
- retval = tw_scsiop_read_write(tw_dev, request_id);
- break;
- case TEST_UNIT_READY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n");
- retval = tw_scsiop_test_unit_ready(tw_dev, request_id);
- break;
- case INQUIRY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n");
- retval = tw_scsiop_inquiry(tw_dev, request_id);
- break;
- case READ_CAPACITY:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n");
- retval = tw_scsiop_read_capacity(tw_dev, request_id);
- break;
- case REQUEST_SENSE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n");
- retval = tw_scsiop_request_sense(tw_dev, request_id);
- break;
- case MODE_SENSE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n");
- retval = tw_scsiop_mode_sense(tw_dev, request_id);
- break;
- case SYNCHRONIZE_CACHE:
- dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n");
- retval = tw_scsiop_synchronize_cache(tw_dev, request_id);
- break;
- case TW_IOCTL:
- printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n");
- break;
- default:
- printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
- tw_dev->state[request_id] = TW_S_COMPLETED;
- tw_state_request_finish(tw_dev, request_id);
- SCpnt->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
- scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
- done(SCpnt);
- retval = 0;
+ case READ_10:
+ case READ_6:
+ case WRITE_10:
+ case WRITE_6:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ/WRITE.\n");
+ retval = tw_scsiop_read_write(tw_dev, request_id);
+ break;
+ case TEST_UNIT_READY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught TEST_UNIT_READY.\n");
+ retval = tw_scsiop_test_unit_ready(tw_dev, request_id);
+ break;
+ case INQUIRY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught INQUIRY.\n");
+ retval = tw_scsiop_inquiry(tw_dev, request_id);
+ break;
+ case READ_CAPACITY:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught READ_CAPACITY.\n");
+ retval = tw_scsiop_read_capacity(tw_dev, request_id);
+ break;
+ case REQUEST_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught REQUEST_SENSE.\n");
+ retval = tw_scsiop_request_sense(tw_dev, request_id);
+ break;
+ case MODE_SENSE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught MODE_SENSE.\n");
+ retval = tw_scsiop_mode_sense(tw_dev, request_id);
+ break;
+ case SYNCHRONIZE_CACHE:
+ dprintk(KERN_NOTICE "3w-xxxx: tw_scsi_queue(): caught SYNCHRONIZE_CACHE.\n");
+ retval = tw_scsiop_synchronize_cache(tw_dev, request_id);
+ break;
+ case TW_IOCTL:
+ printk(KERN_WARNING "3w-xxxx: SCSI_IOCTL_SEND_COMMAND deprecated, please update your 3ware tools.\n");
+ break;
+ default:
+ printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ tw_state_request_finish(tw_dev, request_id);
+ scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
+ done(SCpnt);
+ retval = 0;
}
if (retval) {
tw_dev->state[request_id] = TW_S_COMPLETED;
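Not from the patch: the reindented default case above now only fills the sense buffer and no longer sets the old (DRIVER_SENSE << 24) result. For readers unfamiliar with the call, the sketch below shows how scsi_build_sense_buffer() is generally used; reject_unknown_opcode() is a made-up wrapper, and the result handling shown is the usual pairing, not necessarily what 3w-xxxx ends up doing after this series.

/*
 * Illustrative only: scsi_build_sense_buffer(desc, buf, key, asc, ascq)
 * fills the command's sense buffer; desc=1 selects descriptor-format
 * sense, ASC 0x20/ASCQ 0x00 is "INVALID COMMAND OPERATION CODE".
 */
static void reject_unknown_opcode(struct scsi_cmnd *cmd,
				  void (*done)(struct scsi_cmnd *))
{
	scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
				0x20, 0x0);
	/* a CHECK CONDITION status usually accompanies valid sense data */
	cmd->result = SAM_STAT_CHECK_CONDITION;
	done(cmd);
}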
@@ -1997,7 +1996,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
static DEF_SCSI_QCMD(tw_scsi_queue)
/* This function is the interrupt service routine */
-static irqreturn_t tw_interrupt(int irq, void *dev_instance)
+static irqreturn_t tw_interrupt(int irq, void *dev_instance)
{
int request_id;
u32 status_reg_value;
@@ -2073,7 +2072,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
}
}
/* If there are no more pending requests, we mask command interrupt */
- if (tw_dev->pending_request_count == 0)
+ if (tw_dev->pending_request_count == 0)
TW_MASK_COMMAND_INTERRUPT(tw_dev);
}
@@ -2174,7 +2173,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
tw_dev->posted_request_count--;
}
}
-
+
/* Check for valid status after each drain */
status_reg_value = inl(TW_STATUS_REG_ADDR(tw_dev));
if (tw_check_bits(status_reg_value)) {
@@ -2244,7 +2243,7 @@ static struct scsi_host_template driver_template = {
.this_id = -1,
.sg_tablesize = TW_MAX_SGL_LENGTH,
.max_sectors = TW_MAX_SECTORS,
- .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
+ .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
.shost_attrs = tw_host_attrs,
.emulated = 1,
.no_write_same = 1,
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index bd87fbacfbc7..e8f3f081b7d8 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -1,9 +1,9 @@
-/*
+/*
3w-xxxx.h -- 3ware Storage Controller device driver for Linux.
-
+
Written By: Adam Radford <aradford@gmail.com>
Modifications By: Joel Jacobson <linux@3ware.com>
- Arnaldo Carvalho de Melo <acme@conectiva.com.br>
+ Arnaldo Carvalho de Melo <acme@conectiva.com.br>
Brad Strand <linux@3ware.com>
Copyright (C) 1999-2010 3ware Inc.
@@ -15,39 +15,39 @@
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- NO WARRANTY
- THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- solely responsible for determining the appropriateness of using and
- distributing the Program and assumes all risks associated with its
- exercise of rights under this Agreement, including but not limited to
- the risks and costs of program errors, damage to or loss of data,
- programs or equipment, and unavailability or interruption of operations.
-
- DISCLAIMER OF LIABILITY
- NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the Free Software
- Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-
- Bugs/Comments/Suggestions should be mailed to:
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ NO WARRANTY
+ THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ solely responsible for determining the appropriateness of using and
+ distributing the Program and assumes all risks associated with its
+ exercise of rights under this Agreement, including but not limited to
+ the risks and costs of program errors, damage to or loss of data,
+ programs or equipment, and unavailability or interruption of operations.
+
+ DISCLAIMER OF LIABILITY
+ NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+ Bugs/Comments/Suggestions should be mailed to:
aradford@gmail.com
-
+
For more information, goto:
http://www.lsi.com
*/
@@ -99,21 +99,21 @@ static char *tw_aen_string[] = {
static unsigned char tw_sense_table[][4] =
{
/* Codes for newer firmware */
- // ATA Error SCSI Error
- {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field
- {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command
- {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found
- {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error
- {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error
- {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error
- {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command
- {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command
- {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready
- {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready
-
- /* Codes for older firmware */
- // 3ware Error SCSI Error
- {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command
+ // ATA Error SCSI Error
+ {0x01, 0x03, 0x13, 0x00}, // Address mark not found Address mark not found for data field
+ {0x04, 0x0b, 0x00, 0x00}, // Aborted command Aborted command
+ {0x10, 0x0b, 0x14, 0x00}, // ID not found Recorded entity not found
+ {0x40, 0x03, 0x11, 0x00}, // Uncorrectable ECC error Unrecovered read error
+ {0x61, 0x04, 0x00, 0x00}, // Device fault Hardware error
+ {0x84, 0x0b, 0x47, 0x00}, // Data CRC error SCSI parity error
+ {0xd0, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0xd1, 0x0b, 0x00, 0x00}, // Device busy Aborted command
+ {0x37, 0x02, 0x04, 0x00}, // Unit offline Not ready
+ {0x09, 0x02, 0x04, 0x00}, // Unrecovered disk error Not ready
+
+ /* Codes for older firmware */
+ // 3ware Error SCSI Error
+ {0x51, 0x0b, 0x00, 0x00} // Unspecified Aborted command
};
/* Control register bit definitions */
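Not from the patch: each row of the tw_sense_table reindented above reads {ATA error, sense key, ASC, ASCQ}. The real translation lives in tw_decode_sense(), which is not shown in this hunk; tw_lookup_sense_example() below is a made-up helper that only illustrates how the four columns are meant to be consumed.

/* Illustrative only: map a firmware/ATA error code to SCSI sense data. */
static int tw_lookup_sense_example(unsigned char ata_error, unsigned char *key,
				   unsigned char *asc, unsigned char *ascq)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(tw_sense_table); i++) {
		if (tw_sense_table[i][0] == ata_error) {
			*key  = tw_sense_table[i][1];
			*asc  = tw_sense_table[i][2];
			*ascq = tw_sense_table[i][3];
			return 0;
		}
	}
	return 1;	/* unknown code: caller falls back to a generic error */
}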
@@ -128,9 +128,9 @@ static unsigned char tw_sense_table[][4] =
#define TW_CONTROL_ENABLE_INTERRUPTS 0x00000080
#define TW_CONTROL_DISABLE_INTERRUPTS 0x00000040
#define TW_CONTROL_ISSUE_HOST_INTERRUPT 0x00000020
-#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
-#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
-#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
+#define TW_CONTROL_CLEAR_PARITY_ERROR 0x00800000
+#define TW_CONTROL_CLEAR_QUEUE_ERROR 0x00400000
+#define TW_CONTROL_CLEAR_PCI_ABORT 0x00100000
#define TW_CONTROL_CLEAR_SBUF_WRITE_ERROR 0x00000008
/* Status register bit definitions */
@@ -152,8 +152,8 @@ static unsigned char tw_sense_table[][4] =
#define TW_STATUS_CLEARABLE_BITS 0x00D00000
#define TW_STATUS_EXPECTED_BITS 0x00002000
#define TW_STATUS_UNEXPECTED_BITS 0x00F00008
-#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
-#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
+#define TW_STATUS_SBUF_WRITE_ERROR 0x00000008
+#define TW_STATUS_VALID_INTERRUPT 0x00DF0008
/* RESPONSE QUEUE BIT DEFINITIONS */
#define TW_RESPONSE_ID_MASK 0x00000FF0
@@ -179,33 +179,33 @@ static unsigned char tw_sense_table[][4] =
#define TW_OP_SECTOR_INFO 0x1a
#define TW_OP_AEN_LISTEN 0x1c
#define TW_OP_FLUSH_CACHE 0x0e
-#define TW_CMD_PACKET 0x1d
+#define TW_CMD_PACKET 0x1d
#define TW_CMD_PACKET_WITH_DATA 0x1f
/* Asynchronous Event Notification (AEN) Codes */
#define TW_AEN_QUEUE_EMPTY 0x0000
-#define TW_AEN_SOFT_RESET 0x0001
+#define TW_AEN_SOFT_RESET 0x0001
#define TW_AEN_DEGRADED_MIRROR 0x0002
#define TW_AEN_CONTROLLER_ERROR 0x0003
#define TW_AEN_REBUILD_FAIL 0x0004
#define TW_AEN_REBUILD_DONE 0x0005
-#define TW_AEN_QUEUE_FULL 0x00ff
+#define TW_AEN_QUEUE_FULL 0x00ff
#define TW_AEN_TABLE_UNDEFINED 0x15
#define TW_AEN_APORT_TIMEOUT 0x0009
#define TW_AEN_DRIVE_ERROR 0x000A
-#define TW_AEN_SMART_FAIL 0x000F
-#define TW_AEN_SBUF_FAIL 0x0024
+#define TW_AEN_SMART_FAIL 0x000F
+#define TW_AEN_SBUF_FAIL 0x0024
/* Misc defines */
#define TW_ALIGNMENT_6000 64 /* 64 bytes */
-#define TW_ALIGNMENT_7000 4 /* 4 bytes */
+#define TW_ALIGNMENT_7000 4 /* 4 bytes */
#define TW_MAX_UNITS 16
#define TW_COMMAND_ALIGNMENT_MASK 0x1ff
#define TW_INIT_MESSAGE_CREDITS 0x100
#define TW_INIT_COMMAND_PACKET_SIZE 0x3
-#define TW_POLL_MAX_RETRIES 20000
+#define TW_POLL_MAX_RETRIES 20000
#define TW_MAX_SGL_LENGTH 62
-#define TW_ATA_PASS_SGL_MAX 60
+#define TW_ATA_PASS_SGL_MAX 60
#define TW_Q_LENGTH 256
#define TW_Q_START 0
#define TW_MAX_SLOT 32
@@ -216,20 +216,20 @@ static unsigned char tw_sense_table[][4] =
chrdev ioctl, one for
internal aen post */
#define TW_BLOCK_SIZE 0x200 /* 512-byte blocks */
-#define TW_IOCTL 0x80
-#define TW_UNIT_ONLINE 1
-#define TW_IN_INTR 1
-#define TW_IN_RESET 2
-#define TW_IN_CHRDEV_IOCTL 3
-#define TW_MAX_SECTORS 256
+#define TW_IOCTL 0x80
+#define TW_UNIT_ONLINE 1
+#define TW_IN_INTR 1
+#define TW_IN_RESET 2
+#define TW_IN_CHRDEV_IOCTL 3
+#define TW_MAX_SECTORS 256
#define TW_MAX_IOCTL_SECTORS 512
-#define TW_AEN_WAIT_TIME 1000
-#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
-#define TW_ISR_DONT_COMPLETE 2
-#define TW_ISR_DONT_RESULT 3
-#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
-#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
-#define TW_IOCTL_CHRDEV_FREE -1
+#define TW_AEN_WAIT_TIME 1000
+#define TW_IOCTL_WAIT_TIME (1 * HZ) /* 1 second */
+#define TW_ISR_DONT_COMPLETE 2
+#define TW_ISR_DONT_RESULT 3
+#define TW_IOCTL_TIMEOUT 25 /* 25 seconds */
+#define TW_IOCTL_CHRDEV_TIMEOUT 60 /* 60 seconds */
+#define TW_IOCTL_CHRDEV_FREE -1
#define TW_MAX_CDB_LEN 16
/* Bitmask macros to eliminate bitfields */
@@ -250,26 +250,35 @@ static unsigned char tw_sense_table[][4] =
#define TW_STATUS_REG_ADDR(x) (x->base_addr + 0x4)
#define TW_COMMAND_QUEUE_REG_ADDR(x) (x->base_addr + 0x8)
#define TW_RESPONSE_QUEUE_REG_ADDR(x) (x->base_addr + 0xC)
-#define TW_CLEAR_ALL_INTERRUPTS(x) (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_ATTENTION_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_CLEAR_HOST_INTERRUPT(x) (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_DISABLE_INTERRUPTS(x) (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_MASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_UNMASK_COMMAND_INTERRUPT(x) (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
-#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \
- TW_CONTROL_CLEAR_HOST_INTERRUPT | \
- TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
- TW_CONTROL_MASK_COMMAND_INTERRUPT | \
- TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
- TW_CONTROL_CLEAR_ERROR_STATUS | \
- TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
-#define TW_STATUS_ERRORS(x) \
- (((x & TW_STATUS_PCI_ABORT) || \
- (x & TW_STATUS_PCI_PARITY_ERROR) || \
- (x & TW_STATUS_QUEUE_ERROR) || \
- (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \
- (x & TW_STATUS_MICROCONTROLLER_READY))
+#define TW_CLEAR_ALL_INTERRUPTS(x) \
+ (outl(TW_STATUS_VALID_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_ATTENTION_INTERRUPT(x) \
+ (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_CLEAR_HOST_INTERRUPT(x) \
+ (outl(TW_CONTROL_CLEAR_HOST_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_DISABLE_INTERRUPTS(x) \
+ (outl(TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_ENABLE_AND_CLEAR_INTERRUPTS(x) \
+ (outl(TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_UNMASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_ENABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_MASK_COMMAND_INTERRUPT(x) \
+ (outl(TW_CONTROL_MASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_UNMASK_COMMAND_INTERRUPT(x) \
+ (outl(TW_CONTROL_UNMASK_COMMAND_INTERRUPT, TW_CONTROL_REG_ADDR(x)))
+#define TW_SOFT_RESET(x) (outl(TW_CONTROL_ISSUE_SOFT_RESET | \
+ TW_CONTROL_CLEAR_HOST_INTERRUPT | \
+ TW_CONTROL_CLEAR_ATTENTION_INTERRUPT | \
+ TW_CONTROL_MASK_COMMAND_INTERRUPT | \
+ TW_CONTROL_MASK_RESPONSE_INTERRUPT | \
+ TW_CONTROL_CLEAR_ERROR_STATUS | \
+ TW_CONTROL_DISABLE_INTERRUPTS, TW_CONTROL_REG_ADDR(x)))
+#define TW_STATUS_ERRORS(x) \
+ (((x & TW_STATUS_PCI_ABORT) || \
+ (x & TW_STATUS_PCI_PARITY_ERROR) || \
+ (x & TW_STATUS_QUEUE_ERROR) || \
+ (x & TW_STATUS_MICROCONTROLLER_ERROR)) && \
+ (x & TW_STATUS_MICROCONTROLLER_READY))
#ifdef TW_DEBUG
#define dprintk(msg...) printk(msg)
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 701b61ec76ee..06b87c7f6bab 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -669,20 +669,6 @@ config SCSI_FDOMAIN_ISA
To compile this driver as a module, choose M here: the
module will be called fdomain_isa.
-config SCSI_GDTH
- tristate "Intel/ICP (former GDT SCSI Disk Array) RAID Controller support"
- depends on PCI && SCSI
- help
- Formerly called GDT SCSI Disk Array Controller Support.
-
- This is a driver for RAID/SCSI Disk Array Controllers (EISA/ISA/PCI)
- manufactured by Intel Corporation/ICP vortex GmbH. It is documented
- in the kernel source in <file:drivers/scsi/gdth.c> and
- <file:drivers/scsi/gdth.h>.
-
- To compile this driver as a module, choose M here: the
- module will be called gdth.
-
config SCSI_ISCI
tristate "Intel(R) C600 Series Chipset SAS Controller"
depends on PCI && SCSI
@@ -1159,6 +1145,7 @@ config SCSI_LPFC
depends on NVME_TARGET_FC || NVME_TARGET_FC=n
depends on NVME_FC || NVME_FC=n
select CRC_T10DIF
+ select IRQ_POLL
help
This lpfc driver supports the Emulex LightPulse
Family of Fibre Channel PCI host adapters.
@@ -1182,6 +1169,7 @@ config SCSI_SIM710
config SCSI_DC395x
tristate "Tekram DC395(U/UW/F) and DC315(U) SCSI support"
depends on PCI && SCSI
+ select SCSI_SPI_ATTRS
help
This driver supports PCI SCSI host adapters based on the ASIC
TRM-S1040 chip, e.g Tekram DC395(U/UW/F) and DC315(U) variants.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index c00e3dd57990..bc3882f5cc69 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -16,7 +16,6 @@
CFLAGS_aha152x.o = -DAHA152X_STAT -DAUTOCONF
-CFLAGS_gdth.o = # -DDEBUG_GDTH=2 -D__SERIAL__ -D__COM2__ -DGDTH_STATISTICS
obj-$(CONFIG_PCMCIA) += pcmcia/
@@ -103,7 +102,6 @@ obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
-obj-$(CONFIG_SCSI_GDTH) += gdth.o
obj-$(CONFIG_SCSI_INITIO) += initio.o
obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
obj-$(CONFIG_SCSI_QLOGICPTI) += qlogicpti.o
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 31233f6a0274..4ca5e13a26a6 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -556,7 +556,7 @@ static void get_container_name_callback(void *context, struct fib * fibptr)
}
}
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
@@ -1092,7 +1092,7 @@ static void get_container_serial_callback(void *context, struct fib * fibptr)
}
}
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
scsicmd->scsi_done(scsicmd);
@@ -1191,8 +1191,7 @@ static int aac_bounds_32(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba)
if (lba & 0xffffffff00000000LL) {
int cid = scmd_id(cmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2364,13 +2363,11 @@ static void io_callback(void *context, struct fib * fibptr)
readreply = (struct aac_read_reply *)fib_data(fibptr);
switch (le32_to_cpu(readreply->status)) {
case ST_OK:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
dev->fsa_dev[cid].sense_data.sense_key = NO_SENSE;
break;
case ST_NOT_READY:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data, NOT_READY,
SENCODE_BECOMING_READY, ASENCODE_BECOMING_READY, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
@@ -2378,8 +2375,7 @@ static void io_callback(void *context, struct fib * fibptr)
SCSI_SENSE_BUFFERSIZE));
break;
case ST_MEDERR:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data, MEDIUM_ERROR,
SENCODE_UNRECOVERED_READ_ERROR, ASENCODE_NO_SENSE, 0, 0);
memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
@@ -2391,8 +2387,7 @@ static void io_callback(void *context, struct fib * fibptr)
printk(KERN_WARNING "io_callback: io failed, status = %d\n",
le32_to_cpu(readreply->status));
#endif
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2467,8 +2462,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
cid = scmd_id(scsicmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2500,7 +2494,7 @@ static int aac_read(struct scsi_cmnd * scsicmd)
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
scsicmd->scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
aac_fib_free(cmd_fibcontext);
@@ -2559,8 +2553,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
if ((lba + count) > (dev->fsa_dev[scmd_id(scsicmd)].size)) {
cid = scmd_id(scsicmd);
dprintk((KERN_DEBUG "aacraid: Illegal lba\n"));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2592,7 +2585,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
/*
* For some reason, the Fib didn't queue, return QUEUE_FULL
*/
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_TASK_SET_FULL;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_TASK_SET_FULL;
scsicmd->scsi_done(scsicmd);
aac_fib_complete(cmd_fibcontext);
@@ -2615,8 +2608,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
synchronizereply = fib_data(fibptr);
if (le32_to_cpu(synchronizereply->status) == CT_OK)
- cmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ cmd->result = DID_OK << 16 | SAM_STAT_GOOD;
else {
struct scsi_device *sdev = cmd->device;
struct aac_dev *dev = fibptr->dev;
@@ -2624,8 +2616,7 @@ static void synchronize_callback(void *context, struct fib *fibptr)
printk(KERN_WARNING
"synchronize_callback: synchronize failed, status = %d\n",
le32_to_cpu(synchronizereply->status));
- cmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ cmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
@@ -2699,7 +2690,7 @@ static void aac_start_stop_callback(void *context, struct fib *fibptr)
BUG_ON(fibptr == NULL);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
aac_fib_complete(fibptr);
aac_fib_free(fibptr);
@@ -2716,8 +2707,7 @@ static int aac_start_stop(struct scsi_cmnd *scsicmd)
if (!(aac->supplement_adapter_info.supported_options2 &
AAC_OPTION_POWER_MANAGEMENT)) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
scsicmd->scsi_done(scsicmd);
return 0;
}
@@ -2848,7 +2838,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(scsicmd->cmnd[0] != TEST_UNIT_READY))
{
dprintk((KERN_WARNING "Only INQUIRY & TUR command supported for controller, rcvd = 0x%x.\n", scsicmd->cmnd[0]));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
@@ -2877,8 +2867,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case SYNCHRONIZE_CACHE:
if (((aac_cache & 6) == 6) && dev->cache_protected) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
/* Issue FIB to tell Firmware to flush its cache */
@@ -2907,9 +2896,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
arr[1] = scsicmd->cmnd[2];
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x80) {
/* unit serial number page */
arr[3] = setinqserial(dev, &arr[4],
@@ -2920,9 +2907,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else if (scsicmd->cmnd[2] == 0x83) {
/* vpd page 0x83 - Device Identification Page */
char *sno = (char *)&inq_data;
@@ -2931,14 +2916,10 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
if (aac_wwn != 2)
return aac_get_container_serial(
scsicmd);
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
} else {
/* vpd page not implemented */
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_CDB_FIELD,
ASENCODE_NO_SENSE, 7, 2);
@@ -2964,8 +2945,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
inq_data.inqd_pdt = INQD_PDT_PROC; /* Processor device */
scsi_sg_copy_from_buffer(scsicmd, &inq_data,
sizeof(inq_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
if (dev->in_reset)
@@ -3014,8 +2994,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
@@ -3041,8 +3020,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd, cp, sizeof(cp));
/* Do not cache partition table for arrays */
scsicmd->device->removable = 1;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
@@ -3121,8 +3099,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
scsi_sg_copy_from_buffer(scsicmd,
(char *)&mpd,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
case MODE_SENSE_10:
@@ -3199,8 +3176,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
(char *)&mpd10,
mode_buf_length);
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
}
case REQUEST_SENSE:
@@ -3209,8 +3185,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
sizeof(struct sense_data));
memset(&dev->fsa_dev[cid].sense_data, 0,
sizeof(struct sense_data));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -3220,16 +3195,14 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
else
fsa_dev_ptr[cid].locked = 0;
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
/*
* These commands are all No-Ops
*/
case TEST_UNIT_READY:
if (fsa_dev_ptr[cid].sense_data.sense_key == NOT_READY) {
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
NOT_READY, SENCODE_BECOMING_READY,
ASENCODE_BECOMING_READY, 0, 0);
@@ -3246,8 +3219,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
case REZERO_UNIT:
case REASSIGN_BLOCKS:
case SEEK_10:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_GOOD;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_GOOD;
break;
case START_STOP:
@@ -3259,8 +3231,7 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
*/
dprintk((KERN_WARNING "Unhandled SCSI Command: 0x%x.\n",
scsicmd->cmnd[0]));
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
- SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_CHECK_CONDITION;
set_sense(&dev->fsa_dev[cid].sense_data,
ILLEGAL_REQUEST, SENCODE_INVALID_COMMAND,
ASENCODE_INVALID_COMMAND, 0, 0);
@@ -3441,9 +3412,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
le32_to_cpu(srbreply->status));
len = min_t(u32, le32_to_cpu(srbreply->sense_data_size),
SCSI_SENSE_BUFFERSIZE);
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8
- | SAM_STAT_CHECK_CONDITION;
+ scsicmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
memcpy(scsicmd->sense_buffer,
srbreply->sense_data, len);
}
@@ -3455,7 +3424,7 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
case SRB_STATUS_ERROR_RECOVERY:
case SRB_STATUS_PENDING:
case SRB_STATUS_SUCCESS:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
case SRB_STATUS_DATA_OVERRUN:
switch (scsicmd->cmnd[0]) {
@@ -3472,60 +3441,52 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
pr_warn("aacraid: SCSI CMD underflow\n");
else
pr_warn("aacraid: SCSI CMD Data Overrun\n");
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case INQUIRY:
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
default:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
}
break;
case SRB_STATUS_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result = DID_ABORT << 16;
break;
case SRB_STATUS_ABORT_FAILED:
/*
* Not sure about this one - but assuming the
* hba was trying to abort for some reason
*/
- scsicmd->result = DID_ERROR << 16 | ABORT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case SRB_STATUS_PARITY_ERROR:
- scsicmd->result = DID_PARITY << 16
- | MSG_PARITY_ERROR << 8;
+ scsicmd->result = DID_PARITY << 16;
break;
case SRB_STATUS_NO_DEVICE:
case SRB_STATUS_INVALID_PATH_ID:
case SRB_STATUS_INVALID_TARGET_ID:
case SRB_STATUS_INVALID_LUN:
case SRB_STATUS_SELECTION_TIMEOUT:
- scsicmd->result = DID_NO_CONNECT << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case SRB_STATUS_COMMAND_TIMEOUT:
case SRB_STATUS_TIMEOUT:
- scsicmd->result = DID_TIME_OUT << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_TIME_OUT << 16;
break;
case SRB_STATUS_BUSY:
- scsicmd->result = DID_BUS_BUSY << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_BUS_BUSY << 16;
break;
case SRB_STATUS_BUS_RESET:
- scsicmd->result = DID_RESET << 16
- | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_RESET << 16;
break;
case SRB_STATUS_MESSAGE_REJECTED:
- scsicmd->result = DID_ERROR << 16
- | MESSAGE_REJECT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case SRB_STATUS_REQUEST_FLUSHED:
case SRB_STATUS_ERROR:
@@ -3561,19 +3522,14 @@ static void aac_srb_callback(void *context, struct fib * fibptr)
|| (scsicmd->cmnd[0] == ATA_16)) {
if (scsicmd->cmnd[2] & (0x01 << 5)) {
- scsicmd->result = DID_OK << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_OK << 16;
} else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_ERROR << 16;
}
} else {
- scsicmd->result = DID_ERROR << 16
- | COMMAND_COMPLETE << 8;
- break;
+ scsicmd->result = DID_ERROR << 16;
}
+ break;
}
if (le32_to_cpu(srbreply->scsi_status)
== SAM_STAT_CHECK_CONDITION) {
@@ -3609,7 +3565,7 @@ static void hba_resp_task_complete(struct aac_dev *dev,
switch (err->status) {
case SAM_STAT_GOOD:
- scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
{
@@ -3620,19 +3576,19 @@ static void hba_resp_task_complete(struct aac_dev *dev,
if (len)
memcpy(scsicmd->sense_buffer,
err->sense_response_buf, len);
- scsicmd->result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_OK << 16;
break;
}
case SAM_STAT_BUSY:
- scsicmd->result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_BUS_BUSY << 16;
break;
case SAM_STAT_TASK_ABORTED:
- scsicmd->result |= DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result |= DID_ABORT << 16;
break;
case SAM_STAT_RESERVATION_CONFLICT:
case SAM_STAT_TASK_SET_FULL:
default:
- scsicmd->result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result |= DID_ERROR << 16;
break;
}
}
@@ -3652,27 +3608,26 @@ static void hba_resp_task_failure(struct aac_dev *dev,
dev->hba_map[bus][cid].devtype = AAC_DEVTYPE_ARC_RAW;
dev->hba_map[bus][cid].rmw_nexus = 0xffffffff;
}
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
}
case HBA_RESP_STAT_IO_ERROR:
case HBA_RESP_STAT_NO_PATH_TO_DEVICE:
- scsicmd->result = DID_OK << 16 |
- COMMAND_COMPLETE << 8 | SAM_STAT_BUSY;
+ scsicmd->result = DID_OK << 16 | SAM_STAT_BUSY;
break;
case HBA_RESP_STAT_IO_ABORTED:
- scsicmd->result = DID_ABORT << 16 | ABORT << 8;
+ scsicmd->result = DID_ABORT << 16;
break;
case HBA_RESP_STAT_INVALID_DEVICE:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case HBA_RESP_STAT_UNDERRUN:
/* UNDERRUN is OK */
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
case HBA_RESP_STAT_OVERRUN:
default:
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
}
}
@@ -3705,7 +3660,7 @@ void aac_hba_callback(void *context, struct fib *fibptr)
if (fibptr->flags & FIB_CONTEXT_FLAG_FASTRESP) {
/* fast response */
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
goto out;
}
@@ -3717,17 +3672,17 @@ void aac_hba_callback(void *context, struct fib *fibptr)
hba_resp_task_failure(dev, scsicmd, err);
break;
case HBA_RESP_SVCRES_TMF_REJECTED:
- scsicmd->result = DID_ERROR << 16 | MESSAGE_REJECT << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
case HBA_RESP_SVCRES_TMF_LUN_INVALID:
- scsicmd->result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_NO_CONNECT << 16;
break;
case HBA_RESP_SVCRES_TMF_COMPLETE:
case HBA_RESP_SVCRES_TMF_SUCCEEDED:
- scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_OK << 16;
break;
default:
- scsicmd->result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ scsicmd->result = DID_ERROR << 16;
break;
}
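Not from the patch: the aachba.c hunks above all drop COMMAND_COMPLETE << 8 from scsicmd->result, part of the series-wide result-handling cleanup. As a reading aid, the sketch below shows the 32-bit result word being edited; compose_result() is a made-up helper, not a kernel API.

/*
 * Illustrative only: bits 0-7 of scsi_cmnd::result carry the SAM status,
 * bits 16-23 the DID_* host byte; the message byte in bits 8-15 is what
 * these hunks stop setting.
 */
static inline u32 compose_result(u8 host_byte, u8 sam_status)
{
	return ((u32)host_byte << 16) | sam_status;
}

/* before: DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD
 * after:  compose_result(DID_OK, SAM_STAT_GOOD), i.e. DID_OK << 16 | SAM_STAT_GOOD */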
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index b1e97f75b0ba..ec5627890809 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -2085,12 +2085,6 @@ do { \
#define ASC_BUSY 0
#define ASC_ERROR (-1)
-/* struct scsi_cmnd function return codes */
-#define STATUS_BYTE(byte) (byte)
-#define MSG_BYTE(byte) ((byte) << 8)
-#define HOST_BYTE(byte) ((byte) << 16)
-#define DRIVER_BYTE(byte) ((byte) << 24)
-
#define ASC_STATS(shost, counter) ASC_STATS_ADD(shost, counter, 1)
#ifndef ADVANSYS_STATS
#define ASC_STATS_ADD(shost, counter, count)
@@ -5986,10 +5980,10 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
/*
* 'done_status' contains the command's ending status.
*/
+ scp->result = 0;
switch (scsiqp->done_status) {
case QD_NO_ERROR:
ASC_DBG(2, "QD_NO_ERROR\n");
- scp->result = 0;
/*
* Check for an underrun condition.
@@ -6010,47 +6004,33 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
ASC_DBG(2, "QD_WITH_ERROR\n");
switch (scsiqp->host_status) {
case QHSTA_NO_ERROR:
+ set_status_byte(scp, scsiqp->scsi_status);
if (scsiqp->scsi_status == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- /*
- * Note: The 'status_byte()' macro used by
- * target drivers defined in scsi.h shifts the
- * status byte returned by host drivers right
- * by 1 bit. This is why target drivers also
- * use right shifted status byte definitions.
- * For instance target drivers use
- * CHECK_CONDITION, defined to 0x1, instead of
- * the SCSI defined check condition value of
- * 0x2. Host drivers are supposed to return
- * the status byte as it is defined by SCSI.
- */
- scp->result = DRIVER_BYTE(DRIVER_SENSE) |
- STATUS_BYTE(scsiqp->scsi_status);
- } else {
- scp->result = STATUS_BYTE(scsiqp->scsi_status);
+ set_driver_byte(scp, DRIVER_SENSE);
}
break;
default:
/* Some other QHSTA error occurred. */
ASC_DBG(1, "host_status 0x%x\n", scsiqp->host_status);
- scp->result = HOST_BYTE(DID_BAD_TARGET);
+ set_host_byte(scp, DID_BAD_TARGET);
break;
}
break;
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
- scp->result =
- HOST_BYTE(DID_ABORT) | STATUS_BYTE(scsiqp->scsi_status);
+ set_status_byte(scp, scsiqp->scsi_status);
+ set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_status 0x%x\n", scsiqp->done_status);
- scp->result =
- HOST_BYTE(DID_ERROR) | STATUS_BYTE(scsiqp->scsi_status);
+ set_status_byte(scp, scsiqp->scsi_status);
+ set_host_byte(scp, DID_ERROR);
break;
}
@@ -6752,10 +6732,10 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
/*
* 'qdonep' contains the command's ending status.
*/
+ scp->result = 0;
switch (qdonep->d3.done_stat) {
case QD_NO_ERROR:
ASC_DBG(2, "QD_NO_ERROR\n");
- scp->result = 0;
/*
* Check for an underrun condition.
@@ -6775,51 +6755,35 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_DBG(2, "QD_WITH_ERROR\n");
switch (qdonep->d3.host_stat) {
case QHSTA_NO_ERROR:
+ set_status_byte(scp, qdonep->d3.scsi_stat);
if (qdonep->d3.scsi_stat == SAM_STAT_CHECK_CONDITION) {
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
- /*
- * Note: The 'status_byte()' macro used by
- * target drivers defined in scsi.h shifts the
- * status byte returned by host drivers right
- * by 1 bit. This is why target drivers also
- * use right shifted status byte definitions.
- * For instance target drivers use
- * CHECK_CONDITION, defined to 0x1, instead of
- * the SCSI defined check condition value of
- * 0x2. Host drivers are supposed to return
- * the status byte as it is defined by SCSI.
- */
- scp->result = DRIVER_BYTE(DRIVER_SENSE) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
- } else {
- scp->result = STATUS_BYTE(qdonep->d3.scsi_stat);
+ set_driver_byte(scp, DRIVER_SENSE);
}
break;
default:
/* QHSTA error occurred */
ASC_DBG(1, "host_stat 0x%x\n", qdonep->d3.host_stat);
- scp->result = HOST_BYTE(DID_BAD_TARGET);
+ set_host_byte(scp, DID_BAD_TARGET);
break;
}
break;
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
- scp->result =
- HOST_BYTE(DID_ABORT) | MSG_BYTE(qdonep->d3.
- scsi_msg) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
+ set_status_byte(scp, qdonep->d3.scsi_stat);
+ set_msg_byte(scp, qdonep->d3.scsi_msg);
+ set_host_byte(scp, DID_ABORT);
break;
default:
ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
- scp->result =
- HOST_BYTE(DID_ERROR) | MSG_BYTE(qdonep->d3.
- scsi_msg) |
- STATUS_BYTE(qdonep->d3.scsi_stat);
+ set_status_byte(scp, qdonep->d3.scsi_stat);
+ set_msg_byte(scp, qdonep->d3.scsi_msg);
+ set_host_byte(scp, DID_ERROR);
break;
}
@@ -7558,7 +7522,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
"sg_tablesize %d\n", use_sg,
scp->device->host->sg_tablesize);
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
return ASC_ERROR;
}
@@ -7566,7 +7530,7 @@ static int asc_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
use_sg * sizeof(struct asc_sg_list), GFP_ATOMIC);
if (!asc_sg_head) {
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_SOFT_ERROR);
+ set_host_byte(scp, DID_SOFT_ERROR);
return ASC_ERROR;
}
@@ -7809,7 +7773,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
"ADV_MAX_SG_LIST %d\n", use_sg,
scp->device->host->sg_tablesize);
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
reqp->cmndp = NULL;
scp->host_scribble = NULL;
@@ -7821,7 +7785,7 @@ adv_build_req(struct asc_board *boardp, struct scsi_cmnd *scp,
ret = adv_get_sglist(boardp, reqp, scsiqp, scp, use_sg);
if (ret != ADV_SUCCESS) {
scsi_dma_unmap(scp);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
reqp->cmndp = NULL;
scp->host_scribble = NULL;
@@ -8518,13 +8482,13 @@ static int asc_execute_scsi_cmnd(struct scsi_cmnd *scp)
scmd_printk(KERN_ERR, scp, "ExeScsiQueue() ASC_ERROR, "
"err_code 0x%x\n", err_code);
ASC_STATS(scp->device->host, exe_error);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
break;
default:
scmd_printk(KERN_ERR, scp, "ExeScsiQueue() unknown, "
"err_code 0x%x\n", err_code);
ASC_STATS(scp->device->host, exe_unknown);
- scp->result = HOST_BYTE(DID_ERROR);
+ set_host_byte(scp, DID_ERROR);
break;
}
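Not from the patch: the advansys hunks above replace the driver-local HOST_BYTE()/STATUS_BYTE()/DRIVER_BYTE() shifts with the generic set_*_byte() helpers from <scsi/scsi_cmnd.h>, which the patch itself already uses. The sketch below shows the resulting style; report_abort_example() is a made-up wrapper.

/*
 * Illustrative only: each helper writes its byte into the corresponding
 * field of scp->result, so the caller no longer open-codes the shifts.
 */
static void report_abort_example(struct scsi_cmnd *scp, u8 scsi_status)
{
	scp->result = 0;			/* start from a clean result word   */
	set_status_byte(scp, scsi_status);	/* bits 0-7:  SAM status            */
	set_host_byte(scp, DID_ABORT);		/* bits 16-23: host (DID_*) byte    */
}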
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index e0d8cca1c70b..21aab9f5b117 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -119,8 +119,10 @@ static int aha1542_out(unsigned int base, u8 *buf, int len)
return 0;
}
-/* Only used at boot time, so we do not need to worry about latency as much
- here */
+/*
+ * Only used at boot time, so we do not need to worry about latency as much
+ * here
+ */
static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout)
{
@@ -142,35 +144,43 @@ static int makecode(unsigned hosterr, unsigned scsierr)
break;
case 0x11: /* Selection time out-The initiator selection or target
- reselection was not complete within the SCSI Time out period */
+ * reselection was not complete within the SCSI Time out period
+ */
hosterr = DID_TIME_OUT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
- than was allocated by the Data Length field or the sum of the
- Scatter / Gather Data Length fields. */
+ * than was allocated by the Data Length field or the sum of the
+ * Scatter / Gather Data Length fields.
+ */
case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was
- invalid. This usually indicates a software failure. */
+ * invalid. This usually indicates a software failure.
+ */
case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid.
- This usually indicates a software failure. */
+ * This usually indicates a software failure.
+ */
case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set
- of linked CCB's does not specify the same logical unit number as
- the first. */
+ * of linked CCB's does not specify the same logical unit number as
+ * the first.
+ */
case 0x18: /* Invalid Target Direction received from Host-The direction of a
- Target Mode CCB was invalid. */
+ * Target Mode CCB was invalid.
+ */
case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was
- received to service data transfer between the same target LUN
- and initiator SCSI ID in the same direction. */
+ * received to service data transfer between the same target LUN
+ * and initiator SCSI ID in the same direction.
+ */
case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero
- length segment or invalid segment list boundaries was received.
- A CCB parameter was invalid. */
+ * length segment or invalid segment list boundaries was received.
+ * A CCB parameter was invalid.
+ */
#ifdef DEBUG
printk("Aha1542: %x %x\n", hosterr, scsierr);
#endif
@@ -178,9 +188,10 @@ static int makecode(unsigned hosterr, unsigned scsierr)
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
- phase sequence was requested by the target. The host adapter
- will generate a SCSI Reset Condition, notifying the host with
- a SCRD interrupt */
+ * phase sequence was requested by the target. The host adapter
+ * will generate a SCSI Reset Condition, notifying the host with
+ * a SCRD interrupt
+ */
hosterr = DID_RESET;
break;
default:
@@ -216,8 +227,10 @@ static int aha1542_test_port(struct Scsi_Host *sh)
if (inb(INTRFLAGS(sh->io_port)) & INTRMASK)
return 0;
- /* Perform a host adapter inquiry instead so we do not need to set
- up the mailboxes ahead of time */
+ /*
+ * Perform a host adapter inquiry instead so we do not need to set
+ * up the mailboxes ahead of time
+ */
aha1542_outb(sh->io_port, CMD_INQUIRY);
@@ -292,10 +305,12 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
while (1) {
flag = inb(INTRFLAGS(sh->io_port));
- /* Check for unusual interrupts. If any of these happen, we should
- probably do something special, but for now just printing a message
- is sufficient. A SCSI reset detected is something that we really
- need to deal with in some way. */
+ /*
+ * Check for unusual interrupts. If any of these happen, we should
+ * probably do something special, but for now just printing a message
+ * is sufficient. A SCSI reset detected is something that we really
+ * need to deal with in some way.
+ */
if (flag & ~MBIF) {
if (flag & MBOA)
printk("MBOF ");
@@ -355,9 +370,11 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
}
my_done = tmp_cmd->scsi_done;
aha1542_free_cmd(tmp_cmd);
- /* Fetch the sense data, and tuck it away, in the required slot. The
- Adaptec automatically fetches it, and there is no guarantee that
- we will still have it in the cdb when we come back */
+ /*
+ * Fetch the sense data, and tuck it away, in the required slot. The
+ * Adaptec automatically fetches it, and there is no guarantee that
+ * we will still have it in the cdb when we come back
+ */
if (ccb[mbo].tarstat == 2)
memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen],
SCSI_SENSE_BUFFERSIZE);
@@ -383,7 +400,8 @@ static irqreturn_t aha1542_interrupt(int irq, void *dev_id)
#endif
tmp_cmd->result = errstatus;
aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as
- far as queuecommand is concerned */
+ * far as queuecommand is concerned
+ */
my_done(tmp_cmd);
number_serviced++;
};
@@ -433,8 +451,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
goto out_free_chain;
}
- /* Use the outgoing mailboxes in a round-robin fashion, because this
- is how the host adapter will scan for them */
+ /*
+ * Use the outgoing mailboxes in a round-robin fashion, because this
+ * is how the host adapter will scan for them
+ */
spin_lock_irqsave(sh->host_lock, flags);
mbo = aha1542->aha1542_last_mbo_used + 1;
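The reworked comment spells out why the driver walks its outgoing mailboxes round-robin: the adapter scans them in the same order, so starting just past the last slot used keeps host and adapter in step. A standalone sketch of that selection loop (the names below are illustrative, not the driver's):

#include <stddef.h>

#define NUM_MAILBOXES 8		/* stand-in for the driver's AHA1542_MAILBOXES */

/*
 * Return the first free outgoing mailbox, scanning round-robin from the
 * slot after the last one used (the order the adapter itself scans in),
 * or -1 if every slot is busy.  busy[] stands in for aha1542->int_cmds[].
 */
static int pick_mailbox(void * const busy[], int last_used)
{
	int i, mbo = last_used + 1;

	if (mbo >= NUM_MAILBOXES)
		mbo = 0;

	for (i = 0; i < NUM_MAILBOXES; i++) {
		if (busy[mbo] == NULL)
			return mbo;		/* free slot found */
		if (++mbo >= NUM_MAILBOXES)
			mbo = 0;		/* wrap like the hardware scan */
	}
	return -1;				/* all mailboxes in use */
}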
@@ -453,7 +473,8 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
panic("Unable to find empty mailbox for aha1542.\n");
aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from
- screwing with this cdb. */
+ * screwing with this cdb.
+ */
aha1542->aha1542_last_mbo_used = mbo;
@@ -565,8 +586,10 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
sh->dma_channel = 0;
break;
case 0:
- /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
- Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. */
+ /*
+ * This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel.
+ * Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this.
+ */
sh->dma_channel = 0xFF;
break;
default:
@@ -600,8 +623,10 @@ static int aha1542_getconfig(struct Scsi_Host *sh)
return 0;
}
-/* This function should only be called for 1542C boards - we can detect
- the special firmware settings and unlock the board */
+/*
+ * This function should only be called for 1542C boards - we can detect
+ * the special firmware settings and unlock the board
+ */
static int aha1542_mbenable(struct Scsi_Host *sh)
{
@@ -655,10 +680,11 @@ static int aha1542_query(struct Scsi_Host *sh)
aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */
- /* For an AHA1740 series board, we ignore the board since there is a
- hardware bug which can lead to wrong blocks being returned if the board
- is operating in the 1542 emulation mode. Since there is an extended mode
- driver, we simply ignore the board and let the 1740 driver pick it up.
+ /*
+ * For an AHA1740 series board, we ignore the board since there is a
+ * hardware bug which can lead to wrong blocks being returned if the board
+ * is operating in the 1542 emulation mode. Since there is an extended mode
+ * driver, we simply ignore the board and let the 1740 driver pick it up.
*/
if (inquiry_result[0] == 0x43) {
@@ -666,8 +692,10 @@ static int aha1542_query(struct Scsi_Host *sh)
return 1;
};
- /* Always call this - boards that do not support extended bios translation
- will ignore the command, and we will set the proper default */
+ /*
+ * Always call this - boards that do not support extended bios translation
+ * will ignore the command, and we will set the proper default
+ */
aha1542->bios_translation = aha1542_mbenable(sh);
@@ -877,8 +905,9 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
panic("Unable to find empty mailbox for aha1542.\n");
aha1542->int_cmds[mbo] = cmd; /* This will effectively
- prevent someone else from
- screwing with this cdb. */
+ * prevent someone else from
+ * screwing with this cdb.
+ */
aha1542->aha1542_last_mbo_used = mbo;
@@ -894,9 +923,9 @@ static int aha1542_dev_reset(struct scsi_cmnd *cmd)
ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0;
ccb[mbo].commlinkid = 0;
- /*
- * Now tell the 1542 to flush all pending commands for this
- * target
+ /*
+ * Now tell the 1542 to flush all pending commands for this
+ * target
*/
aha1542_outb(sh->io_port, CMD_START_SCSI);
spin_unlock_irqrestore(sh->host_lock, flags);
@@ -915,7 +944,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
int i;
spin_lock_irqsave(sh->host_lock, flags);
- /*
+ /*
* This does a scsi reset for all devices on the bus.
* In principle, we could also reset the 1542 - should
* we do this? Try this first, and we can add that later
@@ -939,7 +968,7 @@ static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd)
/*
* Now try to pick up the pieces. For all pending commands,
* free any internal data structures, and basically clear things
- * out. We do not try and restart any commands or anything -
+ * out. We do not try and restart any commands or anything -
* the strategy handler takes care of that crap.
*/
shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no);
@@ -1008,10 +1037,10 @@ static struct scsi_host_template driver_template = {
.eh_bus_reset_handler = aha1542_bus_reset,
.eh_host_reset_handler = aha1542_host_reset,
.bios_param = aha1542_biosparam,
- .can_queue = AHA1542_MAILBOXES,
+ .can_queue = AHA1542_MAILBOXES,
.this_id = 7,
.sg_tablesize = 16,
- .unchecked_isa_dma = 1,
+ .unchecked_isa_dma = 1,
};
static int aha1542_isa_match(struct device *pdev, unsigned int ndev)
@@ -1062,8 +1091,10 @@ static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *i
io[indx] = pnp_port_start(pdev, 0);
- /* The card can be queried for its DMA, we have
- the DMA set up that is enough */
+ /*
+ * The card can be queried for its DMA channel; having the
+ * DMA already set up is enough
+ */
dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]);
}
diff --git a/drivers/scsi/aha1542.h b/drivers/scsi/aha1542.h
index f5b0d210fb3c..92a5f9896ace 100644
--- a/drivers/scsi/aha1542.h
+++ b/drivers/scsi/aha1542.h
@@ -78,23 +78,28 @@ static inline void any2scsi(u8 *p, u32 v)
#define MAX_CDB 12
#define MAX_SENSE 14
-struct ccb { /* Command Control Block 5.3 */
- u8 op; /* Command Control Block Operation Code */
- u8 idlun; /* op=0,2:Target Id, op=1:Initiator Id */
- /* Outbound data transfer, length is checked*/
- /* Inbound data transfer, length is checked */
- /* Logical Unit Number */
+/* Command Control Block (CCB), 5.3 */
+struct ccb {
+ u8 op; /* Command Control Block Operation Code: */
+ /* 0x00: SCSI Initiator CCB, 0x01: SCSI Target CCB, */
+ /* 0x02: SCSI Initiator CCB with Scatter/Gather, */
+ /* 0x81: SCSI Bus Device Reset CCB */
+ u8 idlun; /* Address and Direction Control: */
+ /* Bits 7-5: op=0, 2: Target ID, op=1: Initiator ID */
+ /* Bit 4: Outbound data transfer, length is checked */
+ /* Bit 3: Inbound data transfer, length is checked */
+ /* Bits 2-0: Logical Unit Number */
u8 cdblen; /* SCSI Command Length */
- u8 rsalen; /* Request Sense Allocation Length/Disable */
- u8 datalen[3]; /* Data Length (msb, .., lsb) */
- u8 dataptr[3]; /* Data Pointer */
- u8 linkptr[3]; /* Link Pointer */
+ u8 rsalen; /* Request Sense Allocation Length/Disable Auto Sense */
+ u8 datalen[3]; /* Data Length (MSB, ..., LSB) */
+ u8 dataptr[3]; /* Data Pointer (MSB, ..., LSB) */
+ u8 linkptr[3]; /* Link Pointer (MSB, ..., LSB) */
u8 commlinkid; /* Command Linking Identifier */
- u8 hastat; /* Host Adapter Status (HASTAT) */
- u8 tarstat; /* Target Device Status */
+ u8 hastat; /* Host Adapter Status (HASTAT) */
+ u8 tarstat; /* Target Device Status (TARSTAT) */
u8 reserved[2];
- u8 cdb[MAX_CDB+MAX_SENSE]; /* SCSI Command Descriptor Block */
- /* REQUEST SENSE */
+ u8 cdb[MAX_CDB + MAX_SENSE]; /* SCSI Command Descriptor Block */
+ /* followed by the Auto Sense data */
};
#define AHA1542_REGION_SIZE 4
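With the bit positions now documented - target ID in bits 7-5, the two length-checked direction flags in bits 4 and 3, and the LUN in bits 2-0 - the idlun byte is simple to assemble. A hypothetical helper, shown only to make the layout concrete (the driver assembles this byte inline when it builds a CCB):

#include <stdint.h>

/* Pack the CCB idlun byte as documented above: target ID in bits 7-5,
 * "outbound data transfer, length checked" in bit 4, "inbound data
 * transfer, length checked" in bit 3, and the LUN in bits 2-0. */
static uint8_t ccb_pack_idlun(uint8_t target, uint8_t lun,
			      int out_checked, int in_checked)
{
	return (uint8_t)(((target & 0x7) << 5) |
			 ((out_checked ? 1 : 0) << 4) |
			 ((in_checked  ? 1 : 0) << 3) |
			 (lun & 0x7));
}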
diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h
index dd5dfd4f30a5..77e1d6bb59a3 100644
--- a/drivers/scsi/aic7xxx/aic79xx.h
+++ b/drivers/scsi/aic7xxx/aic79xx.h
@@ -211,7 +211,7 @@ typedef enum {
*/
typedef enum {
AHD_FENONE = 0x00000,
- AHD_WIDE = 0x00001,/* Wide Channel */
+ AHD_WIDE = 0x00001,/* Wide Channel */
AHD_AIC79XXB_SLOWCRC = 0x00002,/* SLOWCRC bit should be set */
AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */
AHD_TARGETMODE = 0x01000,/* Has tested target mode support */
@@ -433,7 +433,7 @@ union initiator_data {
* Target mode version of the shared data SCB segment.
*/
struct target_data {
- uint32_t spare[2];
+ uint32_t spare[2];
uint8_t scsi_status; /* SCSI status to give to initiator */
uint8_t target_phases; /* Bitmap of phases to execute */
uint8_t data_phase; /* Data-In or Data-Out */
@@ -608,9 +608,9 @@ struct scb {
struct ahd_softc *ahd_softc;
scb_flag flags;
struct scb_platform_data *platform_data;
- struct map_node *hscb_map;
- struct map_node *sg_map;
- struct map_node *sense_map;
+ struct map_node *hscb_map;
+ struct map_node *sg_map;
+ struct map_node *sense_map;
void *sg_list;
uint8_t *sense_data;
dma_addr_t sg_list_busaddr;
@@ -674,7 +674,7 @@ struct scb_data {
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
uint8_t identify; /* Identify message */
- uint8_t bytes[22]; /*
+ uint8_t bytes[22]; /*
* Bytes contains any additional message
* bytes terminated by 0xFF. The remainder
* is the cdb to execute.
@@ -712,7 +712,7 @@ struct ahd_tmode_event {
* structure here so we can store arrays of them, etc. in OS neutral
* data structures.
*/
-#ifdef AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
struct ahd_tmode_lstate {
struct cam_path *path;
struct ccb_hdr_slist accept_tios;
@@ -807,11 +807,11 @@ struct ahd_tmode_tstate {
/***************************** Lookup Tables **********************************/
/*
* Phase -> name and message out response
- * to parity errors in each phase table.
+ * to parity errors in each phase table.
*/
struct ahd_phase_table_entry {
- uint8_t phase;
- uint8_t mesg_out; /* Message response to parity errors */
+ uint8_t phase;
+ uint8_t mesg_out; /* Message response to parity errors */
const char *phasemsg;
};
@@ -844,7 +844,7 @@ struct seeprom_config {
#define CFBS_ENABLED 0x04
#define CFBS_DISABLED_SCAN 0x08
#define CFENABLEDV 0x0010 /* Perform Domain Validation */
-#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
+#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
#define CFSPARITY 0x0040 /* SCSI parity */
#define CFEXTEND 0x0080 /* extended translation enabled */
#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */
@@ -858,7 +858,7 @@ struct seeprom_config {
/*
* Host Adapter Control Bits
*/
- uint16_t adapter_control; /* word 17 */
+ uint16_t adapter_control; /* word 17 */
#define CFAUTOTERM 0x0001 /* Perform Auto termination */
#define CFSTERM 0x0002 /* SCSI low byte termination */
#define CFWSTERM 0x0004 /* SCSI high byte termination */
@@ -867,7 +867,7 @@ struct seeprom_config {
#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */
#define CFSTPWLEVEL 0x0040 /* Termination level control */
#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */
-#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
+#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
#define CFCLUSTERENB 0x8000 /* Cluster Enable */
/*
@@ -881,7 +881,7 @@ struct seeprom_config {
/*
* Maximum targets
*/
- uint16_t max_targets; /* word 19 */
+ uint16_t max_targets; /* word 19 */
#define CFMAXTARG 0x00ff /* maximum targets */
#define CFBOOTLUN 0x0f00 /* Lun to boot from */
#define CFBOOTID 0xf000 /* Target to boot from */
@@ -941,7 +941,7 @@ struct vpd_config {
#define FLX_ROMSTAT_EE_2MBx8 0x2
#define FLX_ROMSTAT_EE_4MBx8 0x3
#define FLX_ROMSTAT_EE_16MBx8 0x4
-#define CURSENSE_ENB 0x1
+#define CURSENSE_ENB 0x1
#define FLXADDR_FLEXSTAT 0x2
#define FLX_FSTAT_BUSY 0x1
#define FLXADDR_CURRENT_STAT 0x4
@@ -1051,8 +1051,8 @@ struct ahd_completion
};
struct ahd_softc {
- bus_space_tag_t tags[2];
- bus_space_handle_t bshs[2];
+ bus_space_tag_t tags[2];
+ bus_space_handle_t bshs[2];
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
@@ -1243,7 +1243,7 @@ struct ahd_softc {
u_int int_coalescing_threshold;
u_int int_coalescing_stop_threshold;
- uint16_t user_discenable;/* Disconnection allowed */
+ uint16_t user_discenable;/* Disconnection allowed */
uint16_t user_tagenable;/* Tagged Queuing allowed */
};
diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c
index c55b5880eb7e..3e3100dbfda3 100644
--- a/drivers/scsi/aic7xxx/aic79xx_core.c
+++ b/drivers/scsi/aic7xxx/aic79xx_core.c
@@ -57,7 +57,7 @@ static const char *const ahd_chip_names[] =
* Hardware error codes.
*/
struct ahd_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
@@ -73,16 +73,16 @@ static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors);
static const struct ahd_phase_table_entry ahd_phase_table[] =
{
- { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
- { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
- { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
- { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
- { P_COMMAND, MSG_NOOP, "in Command phase" },
- { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
- { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_DATAOUT, NOP, "in Data-out phase" },
+ { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" },
+ { P_DATAOUT_DT, NOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" },
+ { P_COMMAND, NOP, "in Command phase" },
+ { P_MESGOUT, NOP, "in Message-out phase" },
+ { P_STATUS, INITIATOR_ERROR, "in Status phase" },
{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
- { P_BUSFREE, MSG_NOOP, "while idle" },
- { 0, MSG_NOOP, "in unknown phase" }
+ { P_BUSFREE, NOP, "while idle" },
+ { 0, NOP, "in unknown phase" }
};
/*
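These renames are part of the series-wide move from the aic-local MSG_* spellings to the generic SPI message names now provided by the SCSI headers (MSG_NOOP becomes NOP, MSG_INITIATOR_DET_ERR becomes INITIATOR_ERROR, MSG_BUS_DEV_RESET becomes TARGET_RESET, and so on); the byte values on the wire do not change. The table itself pairs each bus phase with the single-byte message to send if a parity error is seen in that phase, and lookups fall through to the final catch-all entry - the same shape as ahd_lookup_phase_entry() in this file. A standalone sketch of that shape, with the standard SPI message values spelled out locally and illustrative struct/function names:

#include <stdint.h>
#include <stddef.h>

#define NOP              0x08	/* standard SPI single-byte messages */
#define INITIATOR_ERROR  0x05
#define MSG_PARITY_ERROR 0x09

struct phase_entry {
	uint8_t     phase;	/* bus phase bits as read from the chip */
	uint8_t     mesg_out;	/* response to a parity error in that phase */
	const char *phasemsg;
};

/* Linear scan that falls back to the last, catch-all entry (assumes
 * the table has at least one entry). */
static const struct phase_entry *
lookup_phase(const struct phase_entry *table, size_t n, uint8_t phase)
{
	size_t i;

	for (i = 0; i + 1 < n; i++)
		if (table[i].phase == phase)
			break;
	return &table[i];
}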
@@ -113,7 +113,7 @@ static void ahd_free_tstate(struct ahd_softc *ahd,
u_int scsi_id, char channel, int force);
#endif
static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
- struct ahd_initiator_tinfo *,
+ struct ahd_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
@@ -170,7 +170,7 @@ static void ahd_setup_target_msgin(struct ahd_softc *ahd,
static u_int ahd_sglist_size(struct ahd_softc *ahd);
static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
static bus_dmamap_callback_t
- ahd_dmamap_cb;
+ ahd_dmamap_cb;
static void ahd_initialize_hscbs(struct ahd_softc *ahd);
static int ahd_init_scbdata(struct ahd_softc *ahd);
static void ahd_fini_scbdata(struct ahd_softc *ahd);
@@ -268,7 +268,7 @@ static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused);
static void ahd_handle_hwerrint(struct ahd_softc *ahd);
static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
static void ahd_handle_scsiint(struct ahd_softc *ahd,
- u_int intstat);
+ u_int intstat);
/************************ Sequencer Execution Control *************************/
void
@@ -1126,7 +1126,7 @@ ahd_restart(struct ahd_softc *ahd)
/* No more pending messages */
ahd_clear_msg_state(ahd);
ahd_outb(ahd, SCSISIGO, 0); /* De-assert BSY */
- ahd_outb(ahd, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahd_outb(ahd, MSG_OUT, NOP); /* No message to send */
ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
ahd_outb(ahd, SEQINTCTL, 0);
ahd_outb(ahd, LASTPHASE, P_BUSFREE);
@@ -1203,7 +1203,7 @@ ahd_flush_qoutfifo(struct ahd_softc *ahd)
while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
u_int fifo_mode;
u_int i;
-
+
scbid = ahd_inw(ahd, GSFIFO);
scb = ahd_lookup_scb(ahd, scbid);
if (scb == NULL) {
@@ -1326,7 +1326,7 @@ rescan_fifos:
while (!SCBID_IS_NULL(scbid)) {
uint8_t *hscb_ptr;
u_int i;
-
+
ahd_set_scbptr(ahd, scbid);
next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
scb = ahd_lookup_scb(ahd, scbid);
@@ -1991,7 +1991,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
struct scb *scb;
u_int scb_index;
-
+
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
@@ -2007,7 +2007,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
*/
ahd_assert_atn(ahd);
ahd_outb(ahd, MSG_OUT, HOST_MSG);
- ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_buf[0] = ABORT_TASK;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -2094,8 +2094,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
ahd->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahd->msgin_index = 0;
- }
- else
+ } else
ahd_setup_target_msgin(ahd,
&devinfo,
scb);
@@ -2136,7 +2135,7 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
ahd_dump_card_state(ahd);
- ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahd->msgout_buf[0] = TARGET_RESET;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -2338,9 +2337,9 @@ ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
;
ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
- SCB_GET_CHANNEL(ahd, scb),
- SCB_GET_LUN(scb), SCB_GET_TAG(scb),
- ROLE_INITIATOR, /*status*/0,
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, /*status*/0,
SEARCH_REMOVE);
}
break;
@@ -2692,16 +2691,16 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
lastphase = ahd_inb(ahd, LASTPHASE);
curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
perrdiag = ahd_inb(ahd, PERRDIAG);
- msg_out = MSG_INITIATOR_DET_ERR;
+ msg_out = INITIATOR_ERROR;
ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
-
+
/*
* Try to find the SCB associated with this error.
*/
silent = FALSE;
if (lqistat1 == 0
|| (lqistat1 & LQICRCI_NLQ) != 0) {
- if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
+ if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
ahd_set_active_fifo(ahd);
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid);
@@ -2818,20 +2817,20 @@ ahd_handle_transmission_error(struct ahd_softc *ahd)
ahd_lookup_phase_entry(curphase)->phasemsg);
ahd_inb(ahd, SCSIDAT);
}
-
+
if (curphase == P_MESGIN)
msg_out = MSG_PARITY_ERROR;
}
/*
- * We've set the hardware to assert ATN if we
+ * We've set the hardware to assert ATN if we
* get a parity error on "in" phases, so all we
* need to do is stuff the message buffer with
* the appropriate message. "In" phases have set
- * mesg_out to something other than MSG_NOP.
+ * mesg_out to something other than NOP.
*/
ahd->send_msg_perror = msg_out;
- if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR)
+ if (scb != NULL && msg_out == INITIATOR_ERROR)
scb->flags |= SCB_TRANSMISSION_ERROR;
ahd_outb(ahd, MSG_OUT, HOST_MSG);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
@@ -3051,8 +3050,8 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
u_int tag;
tag = SCB_LIST_NULL;
- if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE)
- || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK, TRUE)
+ || ahd_sent_msg(ahd, AHDMSG_1B, ABORT_TASK_SET, TRUE)) {
int found;
int sent_msg;
@@ -3067,9 +3066,9 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
ahd_print_path(ahd, scb);
printk("SCB %d - Abort%s Completed.\n",
SCB_GET_TAG(scb),
- sent_msg == MSG_ABORT_TAG ? "" : " Tag");
+ sent_msg == ABORT_TASK ? "" : " Tag");
- if (sent_msg == MSG_ABORT_TAG)
+ if (sent_msg == ABORT_TASK)
tag = SCB_GET_TAG(scb);
if ((scb->flags & SCB_EXTERNAL_RESET) != 0) {
@@ -3094,12 +3093,12 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printk("found == 0x%x\n", found);
printerror = 0;
} else if (ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_BUS_DEV_RESET, TRUE)) {
+ TARGET_RESET, TRUE)) {
ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT, "Bus Device Reset",
/*verbose_level*/0);
printerror = 0;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, FALSE)
&& ppr_busfree == 0) {
struct ahd_initiator_tinfo *tinfo;
struct ahd_tmode_tstate *tstate;
@@ -3152,7 +3151,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
}
printerror = 0;
}
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, FALSE)
&& ppr_busfree == 0) {
/*
* Negotiation Rejected. Go-narrow and
@@ -3177,7 +3176,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
ahd_qinfifo_requeue_tail(ahd, scb);
}
printerror = 0;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE)
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, FALSE)
&& ppr_busfree == 0) {
/*
* Negotiation Rejected. Go-async and
@@ -3205,7 +3204,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0
&& ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_INITIATOR_DET_ERR, TRUE)) {
+ INITIATOR_ERROR, TRUE)) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
@@ -3214,7 +3213,7 @@ ahd_handle_nonpkt_busfree(struct ahd_softc *ahd)
printerror = 0;
} else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE)
&& ahd_sent_msg(ahd, AHDMSG_1B,
- MSG_MESSAGE_REJECT, TRUE)) {
+ MESSAGE_REJECT, TRUE)) {
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
@@ -3368,7 +3367,7 @@ proto_violation_reset:
ahd_outb(ahd, MSG_OUT, HOST_MSG);
if (scb == NULL) {
ahd_print_devinfo(ahd, &devinfo);
- ahd->msgout_buf[0] = MSG_ABORT_TASK;
+ ahd->msgout_buf[0] = ABORT_TASK;
ahd->msgout_len = 1;
ahd->msgout_index = 0;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -3446,7 +3445,6 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
cs = ahd->critical_sections;
for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
@@ -3472,8 +3470,8 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
if (stepping == FALSE) {
first_instr = seqaddr;
- ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
- simode0 = ahd_inb(ahd, SIMODE0);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ simode0 = ahd_inb(ahd, SIMODE0);
simode3 = ahd_inb(ahd, SIMODE3);
lqimode0 = ahd_inb(ahd, LQIMODE0);
lqimode1 = ahd_inb(ahd, LQIMODE1);
@@ -3515,7 +3513,7 @@ ahd_clear_critical_section(struct ahd_softc *ahd)
ahd_outb(ahd, LQOMODE1, lqomode1);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
- ahd_outb(ahd, SIMODE1, simode1);
+ ahd_outb(ahd, SIMODE1, simode1);
/*
* SCSIINT seems to glitch occasionally when
* the interrupt masks are restored. Clear SCSIINT
@@ -3553,7 +3551,7 @@ ahd_clear_intstat(struct ahd_softc *ahd)
ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
- |CLRIOERR|CLROVERRUN);
+ |CLRIOERR|CLROVERRUN);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
}
@@ -3689,7 +3687,7 @@ ahd_devlimited_syncrate(struct ahd_softc *ahd,
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
@@ -3720,7 +3718,7 @@ ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
&& *period > AHD_SYNCRATE_MIN_DT)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
-
+
if (*period > AHD_SYNCRATE_MIN)
*period = 0;
@@ -4083,7 +4081,7 @@ ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
ahd_outb(ahd, NEGOADDR, devinfo->target);
period = tinfo->period;
offset = tinfo->offset;
- memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
+ memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
con_opts = 0;
@@ -4391,7 +4389,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
} else if (scb == NULL) {
printk("%s: WARNING. No pending message for "
"I_T msgin. Issuing NO-OP\n", ahd_name(ahd));
- ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
+ ahd->msgout_buf[ahd->msgout_index++] = NOP;
ahd->msgout_len++;
ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
return;
@@ -4417,7 +4415,7 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
}
if (scb->flags & SCB_DEVICE_RESET) {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahd->msgout_buf[ahd->msgout_index++] = TARGET_RESET;
ahd->msgout_len++;
ahd_print_path(ahd, scb);
printk("Bus Device Reset Message Sent\n");
@@ -4432,9 +4430,9 @@ ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
} else if ((scb->flags & SCB_ABORT) != 0) {
if ((scb->hscb->control & TAG_ENB) != 0) {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
+ ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK;
} else {
- ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
+ ahd->msgout_buf[ahd->msgout_index++] = ABORT_TASK_SET;
}
ahd->msgout_len++;
ahd_print_path(ahd, scb);
@@ -4666,7 +4664,7 @@ ahd_clear_msg_state(struct ahd_softc *ahd)
*/
ahd_outb(ahd, CLRSINT1, CLRATNO);
}
- ahd_outb(ahd, MSG_OUT, MSG_NOOP);
+ ahd_outb(ahd, MSG_OUT, NOP);
ahd_outb(ahd, SEQ_FLAGS2,
ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
ahd_restore_modes(ahd, saved_modes);
@@ -4747,7 +4745,7 @@ reswitch:
* with a busfree.
*/
if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
- && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
+ && ahd->send_msg_perror == INITIATOR_ERROR)
ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;
ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
@@ -4849,7 +4847,7 @@ reswitch:
#endif
ahd_assert_atn(ahd);
}
- } else
+ } else
ahd->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
@@ -4952,7 +4950,7 @@ reswitch:
*/
return;
}
-
+
ahd->msgin_index++;
/*
@@ -5025,7 +5023,7 @@ ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
index = 0;
while (index < ahd->msgout_len) {
- if (ahd->msgout_buf[index] == MSG_EXTENDED) {
+ if (ahd->msgout_buf[index] == EXTENDED_MESSAGE) {
u_int end_index;
end_index = index + 1 + ahd->msgout_buf[index + 1];
@@ -5039,8 +5037,8 @@ ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
found = TRUE;
}
index = end_index;
- } else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
- && ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+ } else if (ahd->msgout_buf[index] >= SIMPLE_QUEUE_TAG
+ && ahd->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) {
/* Skip tag type and tag id or residue param*/
index += 2;
@@ -5091,36 +5089,36 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
* extended message type.
*/
switch (ahd->msgin_buf[0]) {
- case MSG_DISCONNECT:
- case MSG_SAVEDATAPOINTER:
- case MSG_CMDCOMPLETE:
- case MSG_RESTOREPOINTERS:
- case MSG_IGN_WIDE_RESIDUE:
+ case DISCONNECT:
+ case SAVE_POINTERS:
+ case COMMAND_COMPLETE:
+ case RESTORE_POINTERS:
+ case IGNORE_WIDE_RESIDUE:
/*
* End our message loop as these are messages
* the sequencer handles on its own.
*/
done = MSGLOOP_TERMINATED;
break;
- case MSG_MESSAGE_REJECT:
+ case MESSAGE_REJECT:
response = ahd_handle_msg_reject(ahd, devinfo);
fallthrough;
- case MSG_NOOP:
+ case NOP:
done = MSGLOOP_MSGCOMPLETE;
break;
- case MSG_EXTENDED:
+ case EXTENDED_MESSAGE:
{
/* Wait for enough of the message to begin validation */
if (ahd->msgin_index < 2)
break;
switch (ahd->msgin_buf[2]) {
- case MSG_EXT_SDTR:
+ case EXTENDED_SDTR:
{
u_int period;
u_int ppr_options;
u_int offset;
u_int saved_offset;
-
+
if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
reject = TRUE;
break;
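The checks above rely on the standard extended-message framing: byte 0 is EXTENDED_MESSAGE, byte 1 the payload length, byte 2 the extended code (EXTENDED_SDTR here), followed by the period and offset bytes. A small validation sketch under those rules (constants spelled out locally; the helper is not a driver function):

#include <stdint.h>
#include <stddef.h>

#define EXTENDED_MESSAGE 0x01	/* extended message header byte */
#define EXTENDED_SDTR    0x01	/* extended message code for SDTR */
#define SDTR_LEN         0x03	/* SDTR payload: code, period, offset */

/* Return 1 if buf (len valid bytes so far) holds a complete, well-formed
 * SDTR message, 0 if more bytes are needed, -1 if it should be rejected. */
static int sdtr_complete(const uint8_t *buf, size_t len)
{
	if (len < 2)
		return 0;			/* need header + length */
	if (buf[0] != EXTENDED_MESSAGE)
		return -1;
	if (len < 3)
		return 0;			/* wait for the code byte */
	if (buf[2] != EXTENDED_SDTR || buf[1] != SDTR_LEN)
		return -1;			/* reject, as the driver does */
	return len >= 2u + buf[1];		/* payload fully received? */
}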
@@ -5162,7 +5160,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
* and didn't have to fall down to async
* transfers.
*/
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, TRUE)) {
/* We started it */
if (saved_offset != offset) {
/* Went too low - force async */
@@ -5189,7 +5187,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_WDTR:
+ case EXTENDED_WDTR:
{
u_int bus_width;
u_int saved_width;
@@ -5223,7 +5221,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
saved_width, bus_width);
}
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, TRUE)) {
/*
* Don't send a WDTR back to the
* target, since we asked first.
@@ -5285,7 +5283,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_PPR:
+ case EXTENDED_PPR:
{
u_int period;
u_int offset;
@@ -5340,7 +5338,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_validate_offset(ahd, tinfo, period, &offset,
bus_width, devinfo->role);
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, TRUE)) {
/*
* If we are unable to do any of the
* requested options (we went too low),
@@ -5403,7 +5401,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
#ifdef AHD_TARGET_MODE
- case MSG_BUS_DEV_RESET:
+ case TARGET_RESET:
ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
CAM_BDR_SENT,
"Bus Device Reset Received",
@@ -5411,9 +5409,9 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_restart(ahd);
done = MSGLOOP_TERMINATED;
break;
- case MSG_ABORT_TAG:
- case MSG_ABORT:
- case MSG_CLEAR_QUEUE:
+ case ABORT_TASK:
+ case ABORT_TASK_SET:
+ case CLEAR_TASK_SET:
{
int tag;
@@ -5423,7 +5421,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
tag = SCB_LIST_NULL;
- if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
+ if (ahd->msgin_buf[0] == ABORT_TASK)
tag = ahd_inb(ahd, INITIATOR_TAG);
ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
devinfo->lun, tag, ROLE_TARGET,
@@ -5447,7 +5445,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
break;
}
#endif
- case MSG_QAS_REQUEST:
+ case QAS_REQUEST:
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
printk("%s: QAS request. SCSISIGI == 0x%x\n",
@@ -5455,7 +5453,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
#endif
ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
fallthrough;
- case MSG_TERM_IO_PROC:
+ case TERMINATE_IO_PROC:
default:
reject = TRUE;
break;
@@ -5467,7 +5465,7 @@ ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
*/
ahd->msgout_index = 0;
ahd->msgout_len = 1;
- ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ ahd->msgout_buf[0] = MESSAGE_REJECT;
done = MSGLOOP_MSGCOMPLETE;
response = TRUE;
}
@@ -5506,8 +5504,8 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
/* Might be necessary */
last_msg = ahd_inb(ahd, LAST_MSG);
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
- if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) {
+ if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_PPR, /*full*/TRUE)
&& tinfo->goal.period <= AHD_SYNCRATE_PACED) {
/*
* Target may not like our SPI-4 PPR Options.
@@ -5544,7 +5542,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd_build_transfer_msg(ahd, devinfo);
ahd->msgout_index = 0;
response = 1;
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) {
/* note 8bit xfers */
printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
@@ -5569,7 +5567,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
ahd->msgout_index = 0;
response = 1;
}
- } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ } else if (ahd_sent_msg(ahd, AHDMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) {
/* note asynch xfers and clear flag */
ahd_set_syncrate(ahd, devinfo, /*period*/0,
/*offset*/0, /*ppr_options*/0,
@@ -5579,13 +5577,13 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
"Using asynchronous transfers\n",
ahd_name(ahd), devinfo->channel,
devinfo->target, devinfo->lun);
- } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
int tag_type;
int mask;
- tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+ tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
- if (tag_type == MSG_SIMPLE_TASK) {
+ if (tag_type == SIMPLE_QUEUE_TAG) {
printk("(%s:%c:%d:%d): refuses tagged commands. "
"Performing non-tagged I/O\n", ahd_name(ahd),
devinfo->channel, devinfo->target, devinfo->lun);
@@ -5595,7 +5593,7 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
printk("(%s:%c:%d:%d): refuses %s tagged commands. "
"Performing simple queue tagged I/O only\n",
ahd_name(ahd), devinfo->channel, devinfo->target,
- devinfo->lun, tag_type == MSG_ORDERED_TASK
+ devinfo->lun, tag_type == ORDERED_QUEUE_TAG
? "ordered" : "head of queue");
ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
mask = ~0x03;
@@ -5607,9 +5605,9 @@ ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
*/
ahd_outb(ahd, SCB_CONTROL,
ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
- scb->hscb->control &= mask;
+ scb->hscb->control &= mask;
ahd_set_transaction_tag(scb, /*enabled*/FALSE,
- /*type*/MSG_SIMPLE_TASK);
+ /*type*/SIMPLE_QUEUE_TAG);
ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
ahd_assert_atn(ahd);
ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
@@ -5816,7 +5814,7 @@ ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
-
+
scb_index = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scb_index);
@@ -5924,7 +5922,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
continue;
ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid,
- MSG_BUS_DEV_RESET, /*arg*/0);
+ TARGET_RESET, /*arg*/0);
ahd_send_lstate_events(ahd, lstate);
}
}
@@ -5938,7 +5936,7 @@ ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
/*ppr_options*/0, AHD_TRANS_CUR,
/*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahd_send_async(ahd, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
@@ -5954,11 +5952,11 @@ ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahd->msgout_index = 0;
ahd->msgout_len = 0;
@@ -6091,7 +6089,7 @@ ahd_softc_init(struct ahd_softc *ahd)
{
ahd->unpause = 0;
- ahd->pause = PAUSE;
+ ahd->pause = PAUSE;
return (0);
}
@@ -6204,7 +6202,7 @@ ahd_reset(struct ahd_softc *ahd, int reinit)
u_int sxfrctl1;
int wait;
uint32_t cmd;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
@@ -6444,7 +6442,7 @@ ahd_init_scbdata(struct ahd_softc *ahd)
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
@@ -6962,7 +6960,7 @@ ahd_controller_info(struct ahd_softc *ahd, char *buf)
static const char *channel_strings[] = {
"Primary Low",
"Primary High",
- "Secondary Low",
+ "Secondary Low",
"Secondary High"
};
@@ -7234,7 +7232,7 @@ ahd_chip_init(struct ahd_softc *ahd)
} else {
sxfrctl1 |= ahd->seltime;
}
-
+
ahd_outb(ahd, SXFRCTL0, DFON);
ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
@@ -7490,7 +7488,7 @@ ahd_chip_init(struct ahd_softc *ahd)
ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
-
+
/* Tell the sequencer of our initial queue positions */
ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
@@ -7884,7 +7882,7 @@ void __maybe_unused
ahd_resume(struct ahd_softc *ahd)
{
ahd_reset(ahd, /*reinit*/TRUE);
- ahd_intr_enable(ahd, TRUE);
+ ahd_intr_enable(ahd, TRUE);
ahd_restart(ahd);
}
@@ -7925,7 +7923,7 @@ ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
u_int scbid;
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
scbid = ahd_inw_scbram(ahd, scb_offset);
ahd_set_scbptr(ahd, saved_scbptr);
@@ -7937,7 +7935,7 @@ ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
ahd_outw(ahd, scb_offset, scbid);
ahd_set_scbptr(ahd, saved_scbptr);
@@ -7990,7 +7988,7 @@ ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
target = SCB_GET_TARGET(ahd, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahd, scb);
-
+
ahd_search_qinfifo(ahd, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
@@ -8031,7 +8029,7 @@ ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
} else {
prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
- ahd_sync_scb(ahd, prev_scb,
+ ahd_sync_scb(ahd, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
@@ -8331,7 +8329,7 @@ ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
int lun, u_int tag, role_t role, uint32_t status,
- ahd_search_action action, u_int *list_head,
+ ahd_search_action action, u_int *list_head,
u_int *list_tail, u_int tid)
{
struct scb *scb;
@@ -8789,7 +8787,7 @@ ahd_stat_timer(struct timer_list *t)
struct ahd_softc *ahd = from_timer(ahd, t, stat_timer);
u_long s;
int enint_coal;
-
+
ahd_lock(ahd, &s);
enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
@@ -8834,7 +8832,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
* operations are on data structures that the sequencer
* is not touching once the queue is frozen.
*/
- hscb = scb->hscb;
+ hscb = scb->hscb;
if (ahd_is_paused(ahd)) {
paused = 1;
@@ -8913,7 +8911,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
break;
}
}
- if (siu->status == SCSI_STATUS_OK)
+ if (siu->status == SAM_STAT_GOOD)
ahd_set_transaction_status(scb,
CAM_REQ_CMP_ERR);
}
@@ -8927,8 +8925,8 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
ahd_done(ahd, scb);
break;
}
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
{
struct ahd_devinfo devinfo;
struct ahd_dma_seg *sg;
@@ -9018,7 +9016,7 @@ ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
ahd_queue_scb(ahd, scb);
break;
}
- case SCSI_STATUS_OK:
+ case SAM_STAT_GOOD:
printk("%s: Interrupted for status of 0???\n",
ahd_name(ahd));
fallthrough;
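This is the other half of the cleanup: the driver-private SCSI_STATUS_* names give way to the standard SAM_STAT_* definitions, whose values are the SAM status codes themselves, so the dispatch behaves as before. A compact userspace sketch of switching on those values, written out for reference (local names are used to avoid shadowing the kernel macros):

#include <stdint.h>
#include <stdio.h>

/* SAM status codes, mirroring SAM_STAT_* in <scsi/scsi_proto.h>. */
enum {
	SAM_GOOD		= 0x00,
	SAM_CHECK_CONDITION	= 0x02,
	SAM_BUSY		= 0x08,
	SAM_COMMAND_TERMINATED	= 0x22,	/* obsolete in later SAM revisions */
	SAM_TASK_SET_FULL	= 0x28,
};

static void handle_status(uint8_t status)
{
	switch (status) {
	case SAM_CHECK_CONDITION:
	case SAM_COMMAND_TERMINATED:
		puts("fetch sense data");	/* driver issues REQUEST SENSE */
		break;
	case SAM_TASK_SET_FULL:
		puts("throttle queue depth");	/* see the aic79xx_osm.c hunks below */
		break;
	case SAM_GOOD:
	default:
		puts("complete the command");	/* nothing special to do here */
		break;
	}
}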
@@ -9108,7 +9106,7 @@ ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
@@ -9160,7 +9158,7 @@ ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate,
- (lstate->event_r_idx - lstate->event_w_idx);
if (event_type == EVENT_TYPE_BUS_RESET
- || event_type == MSG_BUS_DEV_RESET) {
+ || event_type == TARGET_RESET) {
/*
* Any earlier events are irrelevant, so reset our buffer.
* This has the effect of allowing us to deal with reset
@@ -9291,7 +9289,7 @@ ahd_loadseq(struct ahd_softc *ahd)
/*
* Setup downloadable constant table.
- *
+ *
* The computation for the S/G prefetch variables is
* a bit complicated. We would like to always fetch
* in terms of cachelined sized increments. However,
@@ -9380,7 +9378,7 @@ ahd_loadseq(struct ahd_softc *ahd)
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
@@ -9615,7 +9613,7 @@ ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
+
break;
}
if (entry >= num_entries)
@@ -9652,7 +9650,7 @@ ahd_dump_card_state(struct ahd_softc *ahd)
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
"%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
- ahd_name(ahd),
+ ahd_name(ahd),
ahd_inw(ahd, CURADDR),
ahd_build_mode_state(ahd, ahd->saved_src_mode,
ahd->saved_dst_mode));
@@ -9768,7 +9766,6 @@ ahd_dump_card_state(struct ahd_softc *ahd)
}
printk("\n");
-
printk("Sequencer DMA-Up and Complete list: ");
scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
i = 0;
@@ -9946,7 +9943,7 @@ ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf,
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
-
+
error = ahd_wait_seeprom(ahd);
if (error)
break;
@@ -10001,7 +9998,7 @@ ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf,
ahd_outw(ahd, SEEDAT, *buf++);
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
-
+
retval = ahd_wait_seeprom(ahd);
if (retval)
break;
@@ -10106,7 +10103,7 @@ ahd_acquire_seeprom(struct ahd_softc *ahd)
error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
if (error != 0
- || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
+ || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
return (0);
return (1);
#endif
@@ -10248,7 +10245,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
our_id = ahd->our_id;
if (ccb->ccb_h.target_id != our_id) {
if ((ahd->features & AHD_MULTI_TID) != 0
- && (ahd->flags & AHD_INITIATORROLE) != 0) {
+ && (ahd->flags & AHD_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
@@ -10435,7 +10432,7 @@ ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb)
}
ahd_lock(ahd, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
@@ -10699,7 +10696,7 @@ ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd)
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index d413b1c5fdc5..4f7102f8eeb0 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -1602,10 +1602,10 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
if ((dev->flags & (AHD_DEV_Q_TAGGED|AHD_DEV_Q_BASIC)) != 0) {
if (dev->commands_since_idle_or_otag == AHD_OTAG_THRESH
&& (dev->flags & AHD_DEV_Q_TAGGED) != 0) {
- hscb->control |= MSG_ORDERED_TASK;
+ hscb->control |= ORDERED_QUEUE_TAG;
dev->commands_since_idle_or_otag = 0;
} else {
- hscb->control |= MSG_SIMPLE_TASK;
+ hscb->control |= SIMPLE_QUEUE_TAG;
}
}
@@ -1834,7 +1834,7 @@ ahd_done(struct ahd_softc *ahd, struct scb *scb)
if (dev->openings == 1
&& ahd_get_transaction_status(scb) == CAM_REQ_CMP
- && ahd_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ && ahd_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL)
dev->tag_success_count++;
/*
* Some devices deal with temporary internal resource
@@ -1891,8 +1891,8 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
switch (ahd_get_scsi_status(scb)) {
default:
break;
- case SCSI_STATUS_CHECK_COND:
- case SCSI_STATUS_CMD_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
{
struct scsi_cmnd *cmd;
@@ -1947,7 +1947,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
}
break;
}
- case SCSI_STATUS_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
/*
* By the time the core driver has returned this
* command, all other commands that were queued
@@ -1993,7 +1993,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
dev->last_queuefull_same_count = 0;
}
ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
- ahd_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahd_set_scsi_status(scb, SAM_STAT_GOOD);
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
@@ -2007,7 +2007,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
ahd_platform_set_tags(ahd, sdev, &devinfo,
(dev->flags & AHD_DEV_Q_BASIC)
? AHD_QUEUE_BASIC : AHD_QUEUE_TAGGED);
- ahd_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ ahd_set_scsi_status(scb, SAM_STAT_BUSY);
}
}
@@ -2039,8 +2039,8 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
scsi_status = ahd_cmd_get_scsi_status(cmd);
switch(scsi_status) {
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
if ((cmd->result >> 24) != DRIVER_SENSE) {
do_fallback = 1;
} else {
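The (cmd->result >> 24) test above reads the driver byte of the packed SCSI result word to see whether valid sense data accompanied the failure (DRIVER_SENSE is 0x08). A sketch of the classic layout that test relies on, with illustrative helper names:

#include <stdint.h>

/* Classic Linux SCSI result word layout:
 *   bits  0-7  : SCSI status byte (SAM_STAT_*)
 *   bits  8-15 : message byte (legacy)
 *   bits 16-23 : host byte (DID_*)
 *   bits 24-31 : driver byte (DRIVER_*, e.g. DRIVER_SENSE)
 */
static inline uint8_t result_status(uint32_t result) { return result & 0xff; }
static inline uint8_t result_host(uint32_t result)   { return (result >> 16) & 0xff; }
static inline uint8_t result_driver(uint32_t result) { return (result >> 24) & 0xff; }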
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index 8a8b7ae7aed3..35ec24f28d2c 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -242,7 +242,7 @@ struct ahd_linux_device {
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
@@ -256,7 +256,7 @@ struct ahd_linux_device {
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
@@ -340,11 +340,11 @@ struct ahd_platform_data {
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHD_NUM_TARGETS];
+ struct scsi_target *starget[AHD_NUM_TARGETS];
spinlock_t spin_lock;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
@@ -497,29 +497,6 @@ int ahd_proc_write_seeprom(struct Scsi_Host *, char *, int);
int ahd_linux_show_info(struct seq_file *,struct Scsi_Host *);
/*********************** Transaction Access Wrappers **************************/
-static inline void ahd_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
-static inline void ahd_set_transaction_status(struct scb *, uint32_t);
-static inline void ahd_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
-static inline void ahd_set_scsi_status(struct scb *, uint32_t);
-static inline uint32_t ahd_cmd_get_transaction_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahd_get_transaction_status(struct scb *);
-static inline uint32_t ahd_cmd_get_scsi_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahd_get_scsi_status(struct scb *);
-static inline void ahd_set_transaction_tag(struct scb *, int, u_int);
-static inline u_long ahd_get_transfer_length(struct scb *);
-static inline int ahd_get_transfer_dir(struct scb *);
-static inline void ahd_set_residual(struct scb *, u_long);
-static inline void ahd_set_sense_residual(struct scb *scb, u_long resid);
-static inline u_long ahd_get_residual(struct scb *);
-static inline u_long ahd_get_sense_residual(struct scb *);
-static inline int ahd_perform_autosense(struct scb *);
-static inline uint32_t ahd_get_sense_bufsize(struct ahd_softc *,
- struct scb *);
-static inline void ahd_notify_xfer_settings_change(struct ahd_softc *,
- struct ahd_devinfo *);
-static inline void ahd_platform_scb_free(struct ahd_softc *ahd,
- struct scb *scb);
-static inline void ahd_freeze_scb(struct scb *scb);
static inline
void ahd_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
@@ -655,9 +632,9 @@ static inline void
ahd_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
index 07b670b80f1b..b92e2e3c358a 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm_pci.c
@@ -45,8 +45,8 @@
/* Define the macro locally since it's different for different class of chips.
*/
-#define ID(x) \
- ID2C(x), \
+#define ID(x) \
+ ID2C(x), \
ID2C(IDIROC(x))
static const struct pci_device_id ahd_linux_pci_id_table[] = {
@@ -367,7 +367,7 @@ ahd_pci_map_int(struct ahd_softc *ahd)
IRQF_SHARED, "aic79xx", ahd);
if (!error)
ahd->platform_data->irq = ahd->dev_softc->irq;
-
+
return (-error);
}
diff --git a/drivers/scsi/aic7xxx/aic79xx_proc.c b/drivers/scsi/aic7xxx/aic79xx_proc.c
index add2da581d66..746d0ca2a657 100644
--- a/drivers/scsi/aic7xxx/aic79xx_proc.c
+++ b/drivers/scsi/aic7xxx/aic79xx_proc.c
@@ -100,17 +100,17 @@ ahd_format_transinfo(struct seq_file *m, struct ahd_transinfo *tinfo)
seq_puts(m, "Renegotiation Pending\n");
return;
}
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahd_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
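The arithmetic above starts from 3300 (the asynchronous fallback, printed as 3.300MB/s), substitutes the negotiated synchronous rate in KB/s when an offset is set, doubles it per width bit, and then splits the result into MB and KB for display. A worked standalone example, assuming for illustration that ahd_calc_syncsrate() would return 80000 for an Ultra160 period factor:

#include <stdio.h>

/* Reproduces the formatting arithmetic above.  freq is the value
 * ahd_calc_syncsrate() is assumed to return in KB/s (80000 is used
 * below as an assumption); width is 0 for 8-bit, 1 for 16-bit. */
static void print_rate(int freq, int width)
{
	int speed = (freq != 0) ? freq : 3300;	/* async fallback */
	int mb;

	speed *= 1 << width;			/* a wide bus doubles the rate */
	mb = speed / 1000;
	if (mb > 0)
		printf("%d.%03dMB/s transfers\n", mb, speed % 1000);
	else
		printf("%dKB/s transfers\n", speed);
}

int main(void)
{
	print_rate(80000, 1);	/* -> "160.000MB/s transfers" */
	print_rate(0, 0);	/* -> "3.300MB/s transfers"   */
	return 0;
}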
@@ -242,7 +242,8 @@ ahd_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
u_int start_addr;
if (ahd->seep_config == NULL) {
- ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config),
+ GFP_ATOMIC);
if (ahd->seep_config == NULL) {
printk("aic79xx: Unable to allocate serial "
"eeprom buffer. Write failing\n");
diff --git a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
index cc9e41967ce4..11ddffbcc2f3 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_93cx6.c
@@ -73,8 +73,8 @@
* add other 93Cx6 functions.
*/
struct seeprom_cmd {
- uint8_t len;
- uint8_t bits[11];
+ uint8_t len;
+ uint8_t bits[11];
};
/* Short opcodes for the c46 */
diff --git a/drivers/scsi/aic7xxx/aic7xxx_core.c b/drivers/scsi/aic7xxx/aic7xxx_core.c
index b1b852fe940b..4b04ab8908f8 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_core.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_core.c
@@ -66,7 +66,7 @@ static const char *const ahc_chip_names[] = {
* Hardware error codes.
*/
struct ahc_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
@@ -84,16 +84,16 @@ static const u_int num_errors = ARRAY_SIZE(ahc_hard_errors);
static const struct ahc_phase_table_entry ahc_phase_table[] =
{
- { P_DATAOUT, MSG_NOOP, "in Data-out phase" },
- { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" },
- { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" },
- { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" },
- { P_COMMAND, MSG_NOOP, "in Command phase" },
- { P_MESGOUT, MSG_NOOP, "in Message-out phase" },
- { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" },
+ { P_DATAOUT, NOP, "in Data-out phase" },
+ { P_DATAIN, INITIATOR_ERROR, "in Data-in phase" },
+ { P_DATAOUT_DT, NOP, "in DT Data-out phase" },
+ { P_DATAIN_DT, INITIATOR_ERROR, "in DT Data-in phase" },
+ { P_COMMAND, NOP, "in Command phase" },
+ { P_MESGOUT, NOP, "in Message-out phase" },
+ { P_STATUS, INITIATOR_ERROR, "in Status phase" },
{ P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" },
- { P_BUSFREE, MSG_NOOP, "while idle" },
- { 0, MSG_NOOP, "in unknown phase" }
+ { P_BUSFREE, NOP, "while idle" },
+ { 0, NOP, "in unknown phase" }
};
/*
@@ -142,7 +142,7 @@ static void ahc_free_tstate(struct ahc_softc *ahc,
#endif
static const struct ahc_syncrate*
ahc_devlimited_syncrate(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *,
+ struct ahc_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
@@ -195,7 +195,7 @@ static void ahc_setup_target_msgin(struct ahc_softc *ahc,
struct scb *scb);
#endif
-static bus_dmamap_callback_t ahc_dmamap_cb;
+static bus_dmamap_callback_t ahc_dmamap_cb;
static void ahc_build_free_scb_list(struct ahc_softc *ahc);
static int ahc_init_scbdata(struct ahc_softc *ahc);
static void ahc_fini_scbdata(struct ahc_softc *ahc);
@@ -815,7 +815,7 @@ ahc_restart(struct ahc_softc *ahc)
ahc_clear_msg_state(ahc);
ahc_outb(ahc, SCSISIGO, 0); /* De-assert BSY */
- ahc_outb(ahc, MSG_OUT, MSG_NOOP); /* No message to send */
+ ahc_outb(ahc, MSG_OUT, NOP); /* No message to send */
ahc_outb(ahc, SXFRCTL1, ahc_inb(ahc, SXFRCTL1) & ~BITBUCKET);
ahc_outb(ahc, LASTPHASE, P_BUSFREE);
ahc_outb(ahc, SAVED_SCSIID, 0xFF);
@@ -978,7 +978,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
{
struct scb *scb;
struct ahc_devinfo devinfo;
-
+
ahc_fetch_devinfo(ahc, &devinfo);
/*
@@ -1022,7 +1022,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
goto unpause;
}
- hscb = scb->hscb;
+ hscb = scb->hscb;
/* Don't want to clobber the original sense code */
if ((scb->flags & SCB_SENSE) != 0) {
@@ -1041,12 +1041,12 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc_freeze_scb(scb);
ahc_set_scsi_status(scb, hscb->shared_data.status.scsi_status);
switch (hscb->shared_data.status.scsi_status) {
- case SCSI_STATUS_OK:
+ case SAM_STAT_GOOD:
printk("%s: Interrupted for status of 0???\n",
ahc_name(ahc));
break;
- case SCSI_STATUS_CMD_TERMINATED:
- case SCSI_STATUS_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
{
struct ahc_dma_seg *sg;
struct scsi_sense *sc;
@@ -1071,7 +1071,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
&tstate);
tinfo = &targ_info->curr;
sg = scb->sg_list;
- sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
+ sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
/*
* Save off the residual if there is one.
*/
@@ -1117,8 +1117,8 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
* errors will be reported before any data
* phases occur.
*/
- if (ahc_get_residual(scb)
- == ahc_get_transfer_length(scb)) {
+ if (ahc_get_residual(scb)
+ == ahc_get_transfer_length(scb)) {
ahc_update_neg_request(ahc, &devinfo,
tstate, targ_info,
AHC_NEG_IF_NON_ASYNC);
@@ -1129,7 +1129,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
scb->flags |= SCB_AUTO_NEGOTIATE;
}
hscb->cdb_len = sizeof(*sc);
- hscb->dataptr = sg->addr;
+ hscb->dataptr = sg->addr;
hscb->datacnt = sg->len;
hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
hscb->sgptr = ahc_htole32(hscb->sgptr);
@@ -1179,7 +1179,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("SXFRCTL0 == 0x%x\n", ahc_inb(ahc, SXFRCTL0));
printk("SEQCTL == 0x%x\n", ahc_inb(ahc, SEQCTL));
ahc_dump_card_state(ahc);
- ahc->msgout_buf[0] = MSG_BUS_DEV_RESET;
+ ahc->msgout_buf[0] = TARGET_RESET;
ahc->msgout_len = 1;
ahc->msgout_index = 0;
ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -1187,13 +1187,13 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc_assert_atn(ahc);
break;
}
- case SEND_REJECT:
+ case SEND_REJECT:
{
u_int rejbyte = ahc_inb(ahc, ACCUM);
printk("%s:%c:%d: Warning - unknown message received from "
- "target (0x%x). Rejecting\n",
+ "target (0x%x). Rejecting\n",
ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
- break;
+ break;
}
case PROTO_VIOLATION:
{
@@ -1286,8 +1286,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
ahc->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahc->msgin_index = 0;
- }
- else
+ } else
ahc_setup_target_msgin(ahc,
&devinfo,
scb);
@@ -1359,7 +1358,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
if (scb != NULL)
ahc_set_transaction_status(scb,
CAM_UNCOR_PARITY);
- ahc_reset_channel(ahc, devinfo.channel,
+ ahc_reset_channel(ahc, devinfo.channel,
/*init reset*/TRUE);
}
} else {
@@ -1391,7 +1390,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("data overrun detected %s."
" Tag == 0x%x.\n",
ahc_phase_table[i].phasemsg,
- scb->hscb->tag);
+ scb->hscb->tag);
ahc_print_path(ahc, scb);
printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
@@ -1402,7 +1401,7 @@ ahc_handle_seqint(struct ahc_softc *ahc, u_int intstat)
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len)
& AHC_SG_LEN_MASK);
@@ -1549,7 +1548,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
if (status == 0 && status0 == 0) {
if ((ahc->features & AHC_TWIN) != 0) {
/* Try the other channel */
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
status = ahc_inb(ahc, SSTAT1)
& (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
intr_channel = (cur_channel == 'A') ? 'B' : 'A';
@@ -1595,7 +1594,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
printk("%s: Someone reset channel %c\n",
ahc_name(ahc), intr_channel);
if (intr_channel != cur_channel)
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
} else if ((status & SCSIPERR) != 0) {
/*
@@ -1684,17 +1683,17 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
* data direction, so ignore the value
* in the phase table.
*/
- mesg_out = MSG_INITIATOR_DET_ERR;
+ mesg_out = INITIATOR_ERROR;
}
/*
- * We've set the hardware to assert ATN if we
- * get a parity error on "in" phases, so all we
+ * We've set the hardware to assert ATN if we
+ * get a parity error on "in" phases, so all we
* need to do is stuff the message buffer with
* the appropriate message. "In" phases have set
* mesg_out to something other than MSG_NOP.
*/
- if (mesg_out != MSG_NOOP) {
+ if (mesg_out != NOP) {
if (ahc->msg_type != MSG_TYPE_NONE)
ahc->send_msg_perror = TRUE;
else
@@ -1818,10 +1817,10 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
u_int tag;
tag = SCB_LIST_NULL;
- if (ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT_TAG, TRUE)
- || ahc_sent_msg(ahc, AHCMSG_1B, MSG_ABORT, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK, TRUE)
+ || ahc_sent_msg(ahc, AHCMSG_1B, ABORT_TASK_SET, TRUE)) {
if (ahc->msgout_buf[ahc->msgout_index - 1]
- == MSG_ABORT_TAG)
+ == ABORT_TASK)
tag = scb->hscb->tag;
ahc_print_path(ahc, scb);
printk("SCB %d - Abort%s Completed.\n",
@@ -1833,7 +1832,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
CAM_REQ_ABORTED);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_1B,
- MSG_BUS_DEV_RESET, TRUE)) {
+ TARGET_RESET, TRUE)) {
ahc_compile_devinfo(&devinfo,
initiator_role_id,
target,
@@ -1846,7 +1845,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
/*verbose_level*/0);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_PPR, FALSE)) {
+ EXTENDED_PPR, FALSE)) {
struct ahc_initiator_tinfo *tinfo;
struct ahc_tmode_tstate *tstate;
@@ -1865,7 +1864,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
ahc_qinfifo_requeue_tail(ahc, scb);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_WDTR, FALSE)) {
+ EXTENDED_WDTR, FALSE)) {
/*
* Negotiation Rejected. Go-narrow and
* retry command.
@@ -1877,7 +1876,7 @@ ahc_handle_scsiint(struct ahc_softc *ahc, u_int intstat)
ahc_qinfifo_requeue_tail(ahc, scb);
printerror = 0;
} else if (ahc_sent_msg(ahc, AHCMSG_EXT,
- MSG_EXT_SDTR, FALSE)) {
+ EXTENDED_SDTR, FALSE)) {
/*
* Negotiation Rejected. Go-async and
* retry command.
@@ -1986,7 +1985,7 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
| (ahc_inb(ahc, SEQADDR1) << 8);
/*
- * Seqaddr represents the next instruction to execute,
+ * Seqaddr represents the next instruction to execute,
* so we are really executing the instruction just
* before it.
*/
@@ -1994,7 +1993,6 @@ ahc_clear_critical_section(struct ahc_softc *ahc)
seqaddr -= 1;
cs = ahc->critical_sections;
for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
@@ -2064,7 +2062,7 @@ ahc_clear_intstat(struct ahc_softc *ahc)
CLRREQINIT);
ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
- ahc_flush_device_writes(ahc);
+ ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRINT, CLRSCSIINT);
ahc_flush_device_writes(ahc);
}
@@ -2101,7 +2099,7 @@ ahc_print_scb(struct scb *scb)
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len));
}
@@ -2223,7 +2221,7 @@ ahc_devlimited_syncrate(struct ahc_softc *ahc,
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= transinfo->ppr_options;
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
@@ -2655,9 +2653,9 @@ ahc_set_tags(struct ahc_softc *ahc, struct scsi_cmnd *cmd,
{
struct scsi_device *sdev = cmd->device;
- ahc_platform_set_tags(ahc, sdev, devinfo, alg);
- ahc_send_async(ahc, devinfo->channel, devinfo->target,
- devinfo->lun, AC_TRANSFER_NEG);
+ ahc_platform_set_tags(ahc, sdev, devinfo, alg);
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG);
}
/*
@@ -2756,9 +2754,9 @@ ahc_fetch_devinfo(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
role = ROLE_INITIATOR;
if (role == ROLE_TARGET
- && (ahc->features & AHC_MULTI_TID) != 0
- && (ahc_inb(ahc, SEQ_FLAGS)
- & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
+ && (ahc->features & AHC_MULTI_TID) != 0
+ && (ahc_inb(ahc, SEQ_FLAGS)
+ & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
/* We were selected, so pull our id from TARGIDIN */
our_id = ahc_inb(ahc, TARGIDIN) & OID;
} else if ((ahc->features & AHC_ULTRA2) != 0)
@@ -2880,7 +2878,7 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
}
if (scb->flags & SCB_DEVICE_RESET) {
- ahc->msgout_buf[ahc->msgout_index++] = MSG_BUS_DEV_RESET;
+ ahc->msgout_buf[ahc->msgout_index++] = TARGET_RESET;
ahc->msgout_len++;
ahc_print_path(ahc, scb);
printk("Bus Device Reset Message Sent\n");
@@ -2894,9 +2892,9 @@ ahc_setup_initiator_msgout(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
} else if ((scb->flags & SCB_ABORT) != 0) {
if ((scb->hscb->control & TAG_ENB) != 0)
- ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT_TAG;
+ ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK;
else
- ahc->msgout_buf[ahc->msgout_index++] = MSG_ABORT;
+ ahc->msgout_buf[ahc->msgout_index++] = ABORT_TASK_SET;
ahc->msgout_len++;
ahc_print_path(ahc, scb);
printk("Abort%s Message Sent\n",
@@ -3106,7 +3104,7 @@ ahc_clear_msg_state(struct ahc_softc *ahc)
*/
ahc_outb(ahc, CLRSINT1, CLRATNO);
}
- ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+ ahc_outb(ahc, MSG_OUT, NOP);
ahc_outb(ahc, SEQ_FLAGS2,
ahc_inb(ahc, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
}
@@ -3192,7 +3190,7 @@ proto_violation_reset:
ahc_outb(ahc, MSG_OUT, HOST_MSG);
if (scb == NULL) {
ahc_print_devinfo(ahc, &devinfo);
- ahc->msgout_buf[0] = MSG_ABORT_TASK;
+ ahc->msgout_buf[0] = ABORT_TASK;
ahc->msgout_len = 1;
ahc->msgout_index = 0;
ahc->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
@@ -3366,7 +3364,7 @@ reswitch:
#endif
ahc_assert_atn(ahc);
}
- } else
+ } else
ahc->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
@@ -3459,7 +3457,7 @@ reswitch:
*/
return;
}
-
+
ahc->msgin_index++;
/*
@@ -3520,7 +3518,7 @@ ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
index = 0;
while (index < ahc->msgout_len) {
- if (ahc->msgout_buf[index] == MSG_EXTENDED) {
+ if (ahc->msgout_buf[index] == EXTENDED_MESSAGE) {
u_int end_index;
end_index = index + 1 + ahc->msgout_buf[index + 1];
@@ -3534,8 +3532,8 @@ ahc_sent_msg(struct ahc_softc *ahc, ahc_msgtype type, u_int msgval, int full)
found = TRUE;
}
index = end_index;
- } else if (ahc->msgout_buf[index] >= MSG_SIMPLE_TASK
- && ahc->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {
+ } else if (ahc->msgout_buf[index] >= SIMPLE_QUEUE_TAG
+ && ahc->msgout_buf[index] <= IGNORE_WIDE_RESIDUE) {
/* Skip tag type and tag id or residue param*/
index += 2;
@@ -3586,37 +3584,37 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
* extended message type.
*/
switch (ahc->msgin_buf[0]) {
- case MSG_DISCONNECT:
- case MSG_SAVEDATAPOINTER:
- case MSG_CMDCOMPLETE:
- case MSG_RESTOREPOINTERS:
- case MSG_IGN_WIDE_RESIDUE:
+ case DISCONNECT:
+ case SAVE_POINTERS:
+ case COMMAND_COMPLETE:
+ case RESTORE_POINTERS:
+ case IGNORE_WIDE_RESIDUE:
/*
* End our message loop as these are messages
* the sequencer handles on its own.
*/
done = MSGLOOP_TERMINATED;
break;
- case MSG_MESSAGE_REJECT:
+ case MESSAGE_REJECT:
response = ahc_handle_msg_reject(ahc, devinfo);
fallthrough;
- case MSG_NOOP:
+ case NOP:
done = MSGLOOP_MSGCOMPLETE;
break;
- case MSG_EXTENDED:
+ case EXTENDED_MESSAGE:
{
/* Wait for enough of the message to begin validation */
if (ahc->msgin_index < 2)
break;
switch (ahc->msgin_buf[2]) {
- case MSG_EXT_SDTR:
+ case EXTENDED_SDTR:
{
const struct ahc_syncrate *syncrate;
u_int period;
u_int ppr_options;
u_int offset;
u_int saved_offset;
-
+
if (ahc->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
reject = TRUE;
break;
@@ -3650,7 +3648,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc->msgin_buf[3], saved_offset,
period, offset);
}
- ahc_set_syncrate(ahc, devinfo,
+ ahc_set_syncrate(ahc, devinfo,
syncrate, period,
offset, ppr_options,
AHC_TRANS_ACTIVE|AHC_TRANS_GOAL,
@@ -3661,7 +3659,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
* and didn't have to fall down to async
* transfers.
*/
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, TRUE)) {
/* We started it */
if (saved_offset != offset) {
/* Went too low - force async */
@@ -3688,7 +3686,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_WDTR:
+ case EXTENDED_WDTR:
{
u_int bus_width;
u_int saved_width;
@@ -3722,7 +3720,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
saved_width, bus_width);
}
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, TRUE)) {
/*
* Don't send a WDTR back to the
* target, since we asked first.
@@ -3784,7 +3782,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
done = MSGLOOP_MSGCOMPLETE;
break;
}
- case MSG_EXT_PPR:
+ case EXTENDED_PPR:
{
const struct ahc_syncrate *syncrate;
u_int period;
@@ -3844,7 +3842,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
&offset, bus_width,
devinfo->role);
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, TRUE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, TRUE)) {
/*
* If we are unable to do any of the
* requested options (we went too low),
@@ -3908,7 +3906,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
#ifdef AHC_TARGET_MODE
- case MSG_BUS_DEV_RESET:
+ case TARGET_RESET:
ahc_handle_devreset(ahc, devinfo,
CAM_BDR_SENT,
"Bus Device Reset Received",
@@ -3916,9 +3914,9 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc_restart(ahc);
done = MSGLOOP_TERMINATED;
break;
- case MSG_ABORT_TAG:
- case MSG_ABORT:
- case MSG_CLEAR_QUEUE:
+ case ABORT_TASK:
+ case ABORT_TASK_SET:
+ case CLEAR_QUEUE_TASK_SET:
{
int tag;
@@ -3928,7 +3926,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
tag = SCB_LIST_NULL;
- if (ahc->msgin_buf[0] == MSG_ABORT_TAG)
+ if (ahc->msgin_buf[0] == ABORT_TASK)
tag = ahc_inb(ahc, INITIATOR_TAG);
ahc_abort_scbs(ahc, devinfo->target, devinfo->channel,
devinfo->lun, tag, ROLE_TARGET,
@@ -3952,7 +3950,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
break;
}
#endif
- case MSG_TERM_IO_PROC:
+ case TERMINATE_IO_PROC:
default:
reject = TRUE;
break;
@@ -3964,7 +3962,7 @@ ahc_parse_msg(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
ahc->msgout_index = 0;
ahc->msgout_len = 1;
- ahc->msgout_buf[0] = MSG_MESSAGE_REJECT;
+ ahc->msgout_buf[0] = MESSAGE_REJECT;
done = MSGLOOP_MSGCOMPLETE;
response = TRUE;
}
@@ -4003,7 +4001,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
/* Might be necessary */
last_msg = ahc_inb(ahc, LAST_MSG);
- if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
+ if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_PPR, /*full*/FALSE)) {
/*
* Target does not support the PPR message.
* Attempt to negotiate SPI-2 style.
@@ -4022,7 +4020,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc_build_transfer_msg(ahc, devinfo);
ahc->msgout_index = 0;
response = 1;
- } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_WDTR, /*full*/FALSE)) {
/* note 8bit xfers */
printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
@@ -4047,7 +4045,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
ahc->msgout_index = 0;
response = 1;
}
- } else if (ahc_sent_msg(ahc, AHCMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
+ } else if (ahc_sent_msg(ahc, AHCMSG_EXT, EXTENDED_SDTR, /*full*/FALSE)) {
/* note asynch xfers and clear flag */
ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL, /*period*/0,
/*offset*/0, /*ppr_options*/0,
@@ -4057,13 +4055,13 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
"Using asynchronous transfers\n",
ahc_name(ahc), devinfo->channel,
devinfo->target, devinfo->lun);
- } else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
+ } else if ((scb->hscb->control & SIMPLE_QUEUE_TAG) != 0) {
int tag_type;
int mask;
- tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);
+ tag_type = (scb->hscb->control & SIMPLE_QUEUE_TAG);
- if (tag_type == MSG_SIMPLE_TASK) {
+ if (tag_type == SIMPLE_QUEUE_TAG) {
printk("(%s:%c:%d:%d): refuses tagged commands. "
"Performing non-tagged I/O\n", ahc_name(ahc),
devinfo->channel, devinfo->target, devinfo->lun);
@@ -4073,7 +4071,7 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
printk("(%s:%c:%d:%d): refuses %s tagged commands. "
"Performing simple queue tagged I/O only\n",
ahc_name(ahc), devinfo->channel, devinfo->target,
- devinfo->lun, tag_type == MSG_ORDERED_TASK
+ devinfo->lun, tag_type == ORDERED_QUEUE_TAG
? "ordered" : "head of queue");
ahc_set_tags(ahc, scb->io_ctx, devinfo, AHC_QUEUE_BASIC);
mask = ~0x03;
@@ -4085,9 +4083,9 @@ ahc_handle_msg_reject(struct ahc_softc *ahc, struct ahc_devinfo *devinfo)
*/
ahc_outb(ahc, SCB_CONTROL,
ahc_inb(ahc, SCB_CONTROL) & mask);
- scb->hscb->control &= mask;
+ scb->hscb->control &= mask;
ahc_set_transaction_tag(scb, /*enabled*/FALSE,
- /*type*/MSG_SIMPLE_TASK);
+ /*type*/SIMPLE_QUEUE_TAG);
ahc_outb(ahc, MSG_OUT, MSG_IDENTIFYFLAG);
ahc_assert_atn(ahc);
@@ -4324,7 +4322,7 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
continue;
ahc_queue_lstate_event(ahc, lstate, devinfo->our_scsiid,
- MSG_BUS_DEV_RESET, /*arg*/0);
+ TARGET_RESET, /*arg*/0);
ahc_send_lstate_events(ahc, lstate);
}
}
@@ -4338,7 +4336,7 @@ ahc_handle_devreset(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
/*period*/0, /*offset*/0, /*ppr_options*/0,
AHC_TRANS_CUR, /*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahc_send_async(ahc, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
@@ -4355,11 +4353,11 @@ ahc_setup_target_msgin(struct ahc_softc *ahc, struct ahc_devinfo *devinfo,
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahc->msgout_index = 0;
ahc->msgout_len = 0;
@@ -4432,7 +4430,7 @@ ahc_softc_init(struct ahc_softc *ahc)
ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
else
ahc->unpause = 0;
- ahc->pause = ahc->unpause | PAUSE;
+ ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
@@ -4554,7 +4552,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
u_int sxfrctl1_a, sxfrctl1_b;
int error;
int wait;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
@@ -4643,7 +4641,7 @@ ahc_reset(struct ahc_softc *ahc, int reinit)
*/
error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
- else
+ else
ahc_dumpseq(ahc);
#endif
@@ -4708,7 +4706,7 @@ ahc_build_free_scb_list(struct ahc_softc *ahc)
/* Set the next pointer */
if ((ahc->flags & AHC_PAGESCBS) != 0)
ahc_outb(ahc, SCB_NEXT, i+1);
- else
+ else
ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
/* Make the tag number, SCSIID, and lun invalid */
@@ -4861,7 +4859,7 @@ ahc_init_scbdata(struct ahc_softc *ahc)
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
@@ -5004,7 +5002,7 @@ ahc_controller_info(struct ahc_softc *ahc, char *buf)
len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
buf += len;
if ((ahc->features & AHC_TWIN) != 0)
- len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
+ len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
"B SCSI Id=%d, primary %c, ",
ahc->our_id, ahc->our_id_b,
(ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
@@ -5140,7 +5138,7 @@ ahc_chip_init(struct ahc_softc *ahc)
ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
-
+
if ((ahc->features & AHC_HS_MAILBOX) != 0)
ahc_outb(ahc, HS_MAILBOX, 0);
@@ -5170,7 +5168,7 @@ ahc_chip_init(struct ahc_softc *ahc)
ahc_outb(ahc, DISCONNECTED_SCBH, SCB_LIST_NULL);
/* Message out buffer starts empty */
- ahc_outb(ahc, MSG_OUT, MSG_NOOP);
+ ahc_outb(ahc, MSG_OUT, NOP);
/*
* Setup the allowed SCSI Sequences based on operational mode.
@@ -5271,7 +5269,7 @@ ahc_init(struct ahc_softc *ahc)
*/
if ((ahc->flags & AHC_USEDEFAULTS) != 0)
ahc->our_id = ahc->our_id_b = 7;
-
+
/*
* Default to allowing initiator operations.
*/
@@ -5289,7 +5287,7 @@ ahc_init(struct ahc_softc *ahc)
* DMA tag for our command fifos and other data in system memory
* the card's sequencer must be able to access. For initiator
* roles, we need to allocate space for the qinfifo and qoutfifo.
- * The qinfifo and qoutfifo are composed of 256 1 byte elements.
+ * The qinfifo and qoutfifo are composed of 256 1 byte elements.
* When providing for the target mode role, we must additionally
* provide space for the incoming target command fifo and an extra
* byte to deal with a dma bug in some chip versions.
@@ -5398,7 +5396,7 @@ ahc_init(struct ahc_softc *ahc)
&& (ahc->flags & AHC_INITIATORROLE) != 0)
ahc->flags |= AHC_RESET_BUS_A;
- ultraenb = 0;
+ ultraenb = 0;
tagenable = ALL_TARGETS_MASK;
/* Grab the disconnection disable table and invert it for our needs */
@@ -5494,9 +5492,9 @@ ahc_init(struct ahc_softc *ahc)
&& (ultraenb & mask) != 0) {
/* Treat 10MHz as a non-ultra speed */
scsirate &= ~SXFR;
- ultraenb &= ~mask;
+ ultraenb &= ~mask;
}
- tinfo->user.period =
+ tinfo->user.period =
ahc_find_period(ahc, scsirate,
(ultraenb & mask)
? AHC_SYNCRATE_ULTRA
@@ -5622,7 +5620,7 @@ ahc_resume(struct ahc_softc *ahc)
{
ahc_reset(ahc, /*reinit*/TRUE);
- ahc_intr_enable(ahc, TRUE);
+ ahc_intr_enable(ahc, TRUE);
ahc_restart(ahc);
return (0);
}
@@ -5639,7 +5637,7 @@ ahc_index_busy_tcl(struct ahc_softc *ahc, u_int tcl)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
@@ -5659,7 +5657,7 @@ ahc_unbusy_tcl(struct ahc_softc *ahc, u_int tcl)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
@@ -5677,7 +5675,7 @@ ahc_busy_tcl(struct ahc_softc *ahc, u_int tcl, u_int scbid)
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
@@ -5735,7 +5733,7 @@ ahc_freeze_devq(struct ahc_softc *ahc, struct scb *scb)
target = SCB_GET_TARGET(ahc, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahc, scb);
-
+
ahc_search_qinfifo(ahc, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
@@ -5773,7 +5771,7 @@ ahc_qinfifo_requeue(struct ahc_softc *ahc, struct scb *prev_scb,
ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
} else {
prev_scb->hscb->next = scb->hscb->tag;
- ahc_sync_scb(ahc, prev_scb,
+ ahc_sync_scb(ahc, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
@@ -5989,7 +5987,6 @@ ahc_search_qinfifo(struct ahc_softc *ahc, int target, char channel,
break;
}
} else {
-
prev = next;
next = ahc_inb(ahc, SCB_NEXT);
}
@@ -6235,7 +6232,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
/* update the waiting list */
if (prev == SCB_LIST_NULL) {
/* First in the list */
- ahc_outb(ahc, WAITING_SCBH, next);
+ ahc_outb(ahc, WAITING_SCBH, next);
/*
* Ensure we aren't attempting to perform
@@ -6244,7 +6241,7 @@ ahc_rem_wscb(struct ahc_softc *ahc, u_int scbpos, u_int prev)
ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
} else {
/*
- * Select the scb that pointed to us
+ * Select the scb that pointed to us
* and update its next pointer.
*/
ahc_outb(ahc, SCBPTR, prev);
@@ -6638,7 +6635,7 @@ ahc_calc_residual(struct ahc_softc *ahc, struct scb *scb)
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
@@ -6690,7 +6687,7 @@ ahc_queue_lstate_event(struct ahc_softc *ahc, struct ahc_tmode_lstate *lstate,
- (lstate->event_r_idx - lstate->event_w_idx);
if (event_type == EVENT_TYPE_BUS_RESET
- || event_type == MSG_BUS_DEV_RESET) {
+ || event_type == TARGET_RESET) {
/*
* Any earlier events are irrelevant, so reset our buffer.
* This has the effect of allowing us to deal with reset
@@ -6857,7 +6854,7 @@ ahc_loadseq(struct ahc_softc *ahc)
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
@@ -7085,7 +7082,6 @@ ahc_print_register(const ahc_reg_parse_entry_t *table, u_int num_entries,
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
break;
}
if (entry >= num_entries)
@@ -7199,7 +7195,7 @@ ahc_dump_card_state(struct ahc_softc *ahc)
scb_index = ahc_inb(ahc, SCB_NEXT);
}
printk("\n");
-
+
ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
printk("QOUTFIFO entries: ");
qoutpos = ahc->qoutfifonext;
@@ -7376,7 +7372,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
if ((ahc->features & AHC_MULTIROLE) != 0) {
if ((ahc->features & AHC_MULTI_TID) != 0
- && (ahc->flags & AHC_INITIATORROLE) != 0) {
+ && (ahc->flags & AHC_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
@@ -7527,7 +7523,6 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
targid_mask |= target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
-
ahc_update_scsiid(ahc, targid_mask);
} else {
u_int our_id;
@@ -7592,7 +7587,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
}
ahc_lock(ahc, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
@@ -7651,7 +7646,7 @@ ahc_handle_en_lun(struct ahc_softc *ahc, struct cam_sim *sim, union ccb *ccb)
targid_mask &= ~target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1,
- (targid_mask >> 8));
+ (targid_mask >> 8));
ahc_update_scsiid(ahc, targid_mask);
}
}
@@ -7780,7 +7775,7 @@ ahc_run_tqinfifo(struct ahc_softc *ahc, int paused)
ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
} else {
if (!paused)
- ahc_pause(ahc);
+ ahc_pause(ahc);
ahc_outb(ahc, KERNEL_TQINPOS,
ahc->tqinfifonext & HOST_TQINPOS);
if (!paused)
@@ -7879,7 +7874,7 @@ ahc_handle_target_cmd(struct ahc_softc *ahc, struct target_cmd *cmd)
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
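
The aic7xxx_core.c hunks above combine whitespace cleanup with a switch from the driver-private MSG_* message bytes to the generic SPI message names used by the rest of the SCSI midlayer (the private copies are deleted from scsi_message.h later in this diff). A minimal sketch of how the renamed constants are used, assuming the generic definitions live in include/scsi/scsi.h; the helper names below are illustrative only and not part of the patch:

/*
 * Sketch only: the renamed message bytes keep their values, so tests like
 * the abort/reset handling above are behaviourally unchanged.
 */
#include <scsi/scsi.h>	/* assumed home of ABORT_TASK, TARGET_RESET, NOP, ... */

static inline int aic_msg_is_abort(unsigned int msg)
{
	/* MSG_ABORT_TAG (0x0d) -> ABORT_TASK, MSG_ABORT (0x06) -> ABORT_TASK_SET */
	return msg == ABORT_TASK || msg == ABORT_TASK_SET;
}

static inline int aic_msg_is_target_reset(unsigned int msg)
{
	/* MSG_BUS_DEV_RESET (0x0c) -> TARGET_RESET */
	return msg == TARGET_RESET;
}
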
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 7bba961d1ae0..d33f5a00bf0b 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -1,3 +1,4 @@
+
/*
* Adaptec AIC7xxx device driver for Linux.
*
@@ -452,7 +453,7 @@ ahc_insb(struct ahc_softc * ahc, long port, uint8_t *array, int count)
static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
- struct ahc_dma_seg *sg,
+ struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
static void
@@ -571,7 +572,7 @@ ahc_linux_target_alloc(struct scsi_target *starget)
target_offset = starget->id;
if (starget->channel != 0)
target_offset += 8;
-
+
if (starget->channel)
our_id = ahc->our_id_b;
@@ -597,18 +598,18 @@ ahc_linux_target_alloc(struct scsi_target *starget)
ultra = 0;
flags &= ~CFXFER;
}
-
+
if ((ahc->features & AHC_ULTRA2) != 0) {
scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
} else {
scsirate = (flags & CFXFER) << 4;
- maxsync = ultra ? AHC_SYNCRATE_ULTRA :
+ maxsync = ultra ? AHC_SYNCRATE_ULTRA :
AHC_SYNCRATE_FAST;
}
spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
if (!(flags & CFSYNCH))
spi_max_offset(starget) = 0;
- spi_min_period(starget) =
+ spi_min_period(starget) =
ahc_find_period(ahc, scsirate, maxsync);
}
ahc_compile_devinfo(&devinfo, our_id, starget->id,
@@ -657,7 +658,7 @@ ahc_linux_slave_alloc(struct scsi_device *sdev)
* a tagged queuing capable device.
*/
dev->maxtags = 0;
-
+
spi_period(starget) = 0;
return 0;
@@ -1219,8 +1220,8 @@ ahc_platform_free(struct ahc_softc *ahc)
starget = ahc->platform_data->starget[i];
if (starget != NULL) {
ahc->platform_data->starget[i] = NULL;
- }
- }
+ }
+ }
if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
free_irq(ahc->platform_data->irq, ahc);
@@ -1267,7 +1268,7 @@ ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
default:
case AHC_QUEUE_NONE:
now_queuing = 0;
- break;
+ break;
case AHC_QUEUE_BASIC:
now_queuing = AHC_DEV_Q_BASIC;
break;
@@ -1468,10 +1469,10 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
hscb->scsioffset = tinfo->curr.offset;
if ((tstate->ultraenb & mask) != 0)
hscb->control |= ULTRAENB;
-
+
if ((ahc->user_discenable & mask) != 0)
hscb->control |= DISCENB;
-
+
if ((tstate->auto_negotiate & mask) != 0) {
scb->flags |= SCB_AUTO_NEGOTIATE;
scb->hscb->control |= MK_MESSAGE;
@@ -1480,10 +1481,10 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
&& (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
- hscb->control |= MSG_ORDERED_TASK;
+ hscb->control |= ORDERED_QUEUE_TAG;
dev->commands_since_idle_or_otag = 0;
} else {
- hscb->control |= MSG_SIMPLE_TASK;
+ hscb->control |= SIMPLE_QUEUE_TAG;
}
}
@@ -1531,7 +1532,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
*/
scb->hscb->sgptr =
ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
+
/*
* Copy the first SG into the "current"
* data pointer area.
@@ -1551,7 +1552,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
dev->commands_issued++;
if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
dev->commands_since_idle_or_otag++;
-
+
scb->flags |= SCB_ACTIVE;
if (untagged_q) {
TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
@@ -1572,7 +1573,7 @@ ahc_linux_isr(int irq, void *dev_id)
int ours;
ahc = (struct ahc_softc *) dev_id;
- ahc_lock(ahc, &flags);
+ ahc_lock(ahc, &flags);
ours = ahc_intr(ahc);
ahc_unlock(ahc, &flags);
return IRQ_RETVAL(ours);
@@ -1647,22 +1648,22 @@ ahc_send_async(struct ahc_softc *ahc, char channel,
spi_display_xfer_agreement(starget);
break;
}
- case AC_SENT_BDR:
+ case AC_SENT_BDR:
{
WARN_ON(lun != CAM_LUN_WILDCARD);
scsi_report_device_reset(ahc->platform_data->host,
channel - 'A', target);
break;
}
- case AC_BUS_RESET:
+ case AC_BUS_RESET:
if (ahc->platform_data->host != NULL) {
scsi_report_bus_reset(ahc->platform_data->host,
channel - 'A');
}
- break;
- default:
- panic("ahc_send_async: Unexpected async event");
- }
+ break;
+ default:
+ panic("ahc_send_async: Unexpected async event");
+ }
}
/*
@@ -1759,7 +1760,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
if (dev->openings == 1
&& ahc_get_transaction_status(scb) == CAM_REQ_CMP
- && ahc_get_scsi_status(scb) != SCSI_STATUS_QUEUE_FULL)
+ && ahc_get_scsi_status(scb) != SAM_STAT_TASK_SET_FULL)
dev->tag_success_count++;
/*
* Some devices deal with temporary internal resource
@@ -1802,7 +1803,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
sdev->sdev_target->id, sdev->lun,
sdev->sdev_target->channel == 0 ? 'A' : 'B',
ROLE_INITIATOR);
-
+
/*
* We don't currently trust the mid-layer to
* properly deal with queue full or busy. So,
@@ -1816,8 +1817,8 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
switch (ahc_get_scsi_status(scb)) {
default:
break;
- case SCSI_STATUS_CHECK_COND:
- case SCSI_STATUS_CMD_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
+ case SAM_STAT_COMMAND_TERMINATED:
{
struct scsi_cmnd *cmd;
@@ -1855,7 +1856,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
}
break;
}
- case SCSI_STATUS_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
{
/*
* By the time the core driver has returned this
@@ -1899,7 +1900,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
dev->last_queuefull_same_count = 0;
}
ahc_set_transaction_status(scb, CAM_REQUEUE_REQ);
- ahc_set_scsi_status(scb, SCSI_STATUS_OK);
+ ahc_set_scsi_status(scb, SAM_STAT_GOOD);
ahc_platform_set_tags(ahc, sdev, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
@@ -1910,7 +1911,7 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
* as if the target returned BUSY SCSI status.
*/
dev->openings = 1;
- ahc_set_scsi_status(scb, SCSI_STATUS_BUSY);
+ ahc_set_scsi_status(scb, SAM_STAT_BUSY);
ahc_platform_set_tags(ahc, sdev, &devinfo,
(dev->flags & AHC_DEV_Q_BASIC)
? AHC_QUEUE_BASIC : AHC_QUEUE_TAGGED);
@@ -2108,7 +2109,7 @@ ahc_linux_queue_recovery_cmd(struct scsi_cmnd *cmd, scb_flag flag)
/* Any SCB for this device will do for a target reset */
LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
- if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
+ if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR))
@@ -2329,7 +2330,7 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2361,7 +2362,8 @@ static void ahc_linux_set_period(struct scsi_target *starget, int period)
ppr_options &= MSG_EXT_PPR_QAS_REQ;
}
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2373,7 +2375,7 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2386,7 +2388,8 @@ static void ahc_linux_set_offset(struct scsi_target *starget, int offset)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
if (offset != 0) {
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
period = tinfo->goal.period;
ppr_options = tinfo->goal.ppr_options;
}
@@ -2401,7 +2404,7 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2422,7 +2425,8 @@ static void ahc_linux_set_dt(struct scsi_target *starget, int dt)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2439,7 +2443,7 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2455,7 +2459,8 @@ static void ahc_linux_set_qas(struct scsi_target *starget, int qas)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2467,7 +2472,7 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
@@ -2483,7 +2488,8 @@ static void ahc_linux_set_iu(struct scsi_target *starget, int iu)
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
@@ -2499,7 +2505,7 @@ static void ahc_linux_get_signalling(struct Scsi_Host *shost)
if (!(ahc->features & AHC_ULTRA2)) {
/* non-LVD chipset, may not have SBLKCTL reg */
- spi_signalling(shost) =
+ spi_signalling(shost) =
ahc->features & AHC_HVD ?
SPI_SIGNAL_HVD :
SPI_SIGNAL_SE;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index f8489078f003..53240f53b654 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -258,7 +258,7 @@ struct ahc_linux_device {
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
@@ -272,7 +272,7 @@ struct ahc_linux_device {
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
@@ -351,16 +351,16 @@ struct ahc_platform_data {
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHC_NUM_TARGETS];
+ struct scsi_target *starget[AHC_NUM_TARGETS];
spinlock_t spin_lock;
u_int qfrozen;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- resource_size_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
void ahc_delay(long);
@@ -515,29 +515,6 @@ int ahc_linux_show_info(struct seq_file *, struct Scsi_Host *);
/*************************** Domain Validation ********************************/
/*********************** Transaction Access Wrappers *************************/
-static inline void ahc_cmd_set_transaction_status(struct scsi_cmnd *, uint32_t);
-static inline void ahc_set_transaction_status(struct scb *, uint32_t);
-static inline void ahc_cmd_set_scsi_status(struct scsi_cmnd *, uint32_t);
-static inline void ahc_set_scsi_status(struct scb *, uint32_t);
-static inline uint32_t ahc_cmd_get_transaction_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahc_get_transaction_status(struct scb *);
-static inline uint32_t ahc_cmd_get_scsi_status(struct scsi_cmnd *cmd);
-static inline uint32_t ahc_get_scsi_status(struct scb *);
-static inline void ahc_set_transaction_tag(struct scb *, int, u_int);
-static inline u_long ahc_get_transfer_length(struct scb *);
-static inline int ahc_get_transfer_dir(struct scb *);
-static inline void ahc_set_residual(struct scb *, u_long);
-static inline void ahc_set_sense_residual(struct scb *scb, u_long resid);
-static inline u_long ahc_get_residual(struct scb *);
-static inline u_long ahc_get_sense_residual(struct scb *);
-static inline int ahc_perform_autosense(struct scb *);
-static inline uint32_t ahc_get_sense_bufsize(struct ahc_softc *,
- struct scb *);
-static inline void ahc_notify_xfer_settings_change(struct ahc_softc *,
- struct ahc_devinfo *);
-static inline void ahc_platform_scb_free(struct ahc_softc *ahc,
- struct scb *scb);
-static inline void ahc_freeze_scb(struct scb *scb);
static inline
void ahc_cmd_set_transaction_status(struct scsi_cmnd *cmd, uint32_t status)
@@ -671,9 +648,9 @@ static inline void
ahc_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
diff --git a/drivers/scsi/aic7xxx/aic7xxx_proc.c b/drivers/scsi/aic7xxx/aic7xxx_proc.c
index 18459605d991..4bc9e2dfccf6 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_proc.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_proc.c
@@ -97,17 +97,17 @@ ahc_format_transinfo(struct seq_file *m, struct ahc_transinfo *tinfo)
u_int freq;
u_int mb;
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahc_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
@@ -234,7 +234,7 @@ ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
if ((ahc->chip & AHC_VL) != 0) {
sd.sd_control_offset = SEECTL_2840;
sd.sd_status_offset = STATUS_2840;
- sd.sd_dataout_offset = STATUS_2840;
+ sd.sd_dataout_offset = STATUS_2840;
sd.sd_chip = C46;
sd.sd_MS = 0;
sd.sd_RDY = EEPROM_TF;
@@ -255,7 +255,8 @@ ahc_proc_write_seeprom(struct Scsi_Host *shost, char *buffer, int length)
u_int start_addr;
if (ahc->seep_config == NULL) {
- ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config),
+ GFP_ATOMIC);
if (ahc->seep_config == NULL) {
printk("aic7xxx: Unable to allocate serial "
"eeprom buffer. Write failing\n");
diff --git a/drivers/scsi/aic7xxx/aiclib.h b/drivers/scsi/aic7xxx/aiclib.h
index f8fd198aafbc..ba08eb3c4e3b 100644
--- a/drivers/scsi/aic7xxx/aiclib.h
+++ b/drivers/scsi/aic7xxx/aiclib.h
@@ -117,21 +117,6 @@ struct scsi_sense_data
#define SSD_FULL_SIZE sizeof(struct scsi_sense_data)
};
-/*
- * Status Byte
- */
-#define SCSI_STATUS_OK 0x00
-#define SCSI_STATUS_CHECK_COND 0x02
-#define SCSI_STATUS_COND_MET 0x04
-#define SCSI_STATUS_BUSY 0x08
-#define SCSI_STATUS_INTERMED 0x10
-#define SCSI_STATUS_INTERMED_COND_MET 0x14
-#define SCSI_STATUS_RESERV_CONFLICT 0x18
-#define SCSI_STATUS_CMD_TERMINATED 0x22 /* Obsolete in SAM-2 */
-#define SCSI_STATUS_QUEUE_FULL 0x28
-#define SCSI_STATUS_ACA_ACTIVE 0x30
-#define SCSI_STATUS_TASK_ABORTED 0x40
-
/************************* Large Disk Handling ********************************/
static inline int
aic_sector_div(sector_t capacity, int heads, int sectors)
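
The aiclib.h hunk above removes the driver's private status-byte table; the aic7xxx_osm.c hunks earlier in this diff switch the users over to the midlayer's SAM_STAT_* values, which encode the same bytes. A minimal sketch of the correspondence, assuming the SAM_STAT_* definitions come from include/scsi/scsi_proto.h; the helper names are illustrative only:

/* Sketch only: old SCSI_STATUS_* value on the left, SAM_STAT_* name now used
 * by aic7xxx_osm.c on the right; the numeric codes are identical.
 */
#include <scsi/scsi_proto.h>	/* assumed home of SAM_STAT_* */

static inline int aic_status_needs_sense(unsigned int status)
{
	/* SCSI_STATUS_CHECK_COND (0x02)     -> SAM_STAT_CHECK_CONDITION
	 * SCSI_STATUS_CMD_TERMINATED (0x22) -> SAM_STAT_COMMAND_TERMINATED
	 */
	return status == SAM_STAT_CHECK_CONDITION ||
	       status == SAM_STAT_COMMAND_TERMINATED;
}

static inline int aic_status_is_queue_full(unsigned int status)
{
	/* SCSI_STATUS_QUEUE_FULL (0x28) -> SAM_STAT_TASK_SET_FULL */
	return status == SAM_STAT_TASK_SET_FULL;
}
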
diff --git a/drivers/scsi/aic7xxx/scsi_message.h b/drivers/scsi/aic7xxx/scsi_message.h
index 75811e245ec7..a7515c3039ed 100644
--- a/drivers/scsi/aic7xxx/scsi_message.h
+++ b/drivers/scsi/aic7xxx/scsi_message.h
@@ -3,44 +3,6 @@
* $FreeBSD: src/sys/cam/scsi/scsi_message.h,v 1.2 2000/05/01 20:21:29 peter Exp $
*/
-/* Messages (1 byte) */ /* I/T (M)andatory or (O)ptional */
-#define MSG_CMDCOMPLETE 0x00 /* M/M */
-#define MSG_TASK_COMPLETE 0x00 /* M/M */ /* SPI3 Terminology */
-#define MSG_EXTENDED 0x01 /* O/O */
-#define MSG_SAVEDATAPOINTER 0x02 /* O/O */
-#define MSG_RESTOREPOINTERS 0x03 /* O/O */
-#define MSG_DISCONNECT 0x04 /* O/O */
-#define MSG_INITIATOR_DET_ERR 0x05 /* M/M */
-#define MSG_ABORT 0x06 /* O/M */
-#define MSG_ABORT_TASK_SET 0x06 /* O/M */ /* SPI3 Terminology */
-#define MSG_MESSAGE_REJECT 0x07 /* M/M */
-#define MSG_NOOP 0x08 /* M/M */
-#define MSG_PARITY_ERROR 0x09 /* M/M */
-#define MSG_LINK_CMD_COMPLETE 0x0a /* O/O */
-#define MSG_LINK_CMD_COMPLETEF 0x0b /* O/O */
-#define MSG_BUS_DEV_RESET 0x0c /* O/M */
-#define MSG_TARGET_RESET 0x0c /* O/M */ /* SPI3 Terminology */
-#define MSG_ABORT_TAG 0x0d /* O/O */
-#define MSG_ABORT_TASK 0x0d /* O/O */ /* SPI3 Terminology */
-#define MSG_CLEAR_QUEUE 0x0e /* O/O */
-#define MSG_CLEAR_TASK_SET 0x0e /* O/O */ /* SPI3 Terminology */
-#define MSG_INIT_RECOVERY 0x0f /* O/O */ /* Deprecated in SPI3 */
-#define MSG_REL_RECOVERY 0x10 /* O/O */ /* Deprecated in SPI3 */
-#define MSG_TERM_IO_PROC 0x11 /* O/O */ /* Deprecated in SPI3 */
-#define MSG_CLEAR_ACA 0x16 /* O/O */ /* SPI3 */
-#define MSG_LOGICAL_UNIT_RESET 0x17 /* O/O */ /* SPI3 */
-#define MSG_QAS_REQUEST 0x55 /* O/O */ /* SPI3 */
-
-/* Messages (2 byte) */
-#define MSG_SIMPLE_Q_TAG 0x20 /* O/O */
-#define MSG_SIMPLE_TASK 0x20 /* O/O */ /* SPI3 Terminology */
-#define MSG_HEAD_OF_Q_TAG 0x21 /* O/O */
-#define MSG_HEAD_OF_QUEUE_TASK 0x21 /* O/O */ /* SPI3 Terminology */
-#define MSG_ORDERED_Q_TAG 0x22 /* O/O */
-#define MSG_ORDERED_TASK 0x22 /* O/O */ /* SPI3 Terminology */
-#define MSG_IGN_WIDE_RESIDUE 0x23 /* O/O */
-#define MSG_ACA_TASK 0x24 /* 0/0 */ /* SPI3 */
-
/* Identify message */ /* M/M */
#define MSG_IDENTIFYFLAG 0x80
#define MSG_IDENTIFY_DISCFLAG 0x40
@@ -49,16 +11,13 @@
#define MSG_IDENTIFY_LUNMASK 0x3F
/* Extended messages (opcode and length) */
-#define MSG_EXT_SDTR 0x01
#define MSG_EXT_SDTR_LEN 0x03
-#define MSG_EXT_WDTR 0x03
#define MSG_EXT_WDTR_LEN 0x02
#define MSG_EXT_WDTR_BUS_8_BIT 0x00
#define MSG_EXT_WDTR_BUS_16_BIT 0x01
#define MSG_EXT_WDTR_BUS_32_BIT 0x02 /* Deprecated in SPI3 */
-#define MSG_EXT_PPR 0x04 /* SPI3 */
#define MSG_EXT_PPR_LEN 0x06
#define MSG_EXT_PPR_PCOMP_EN 0x80
#define MSG_EXT_PPR_RTI 0x40
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 13677973da5c..68214a58b160 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -68,7 +68,6 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
struct done_list_struct *dl)
{
struct asd_ha_struct *asd_ha = ascb->ha;
- struct sas_ha_struct *sas_ha = &asd_ha->sas_ha;
int phy_id = dl->status_block[0] & DL_PHY_MASK;
struct asd_phy *phy = &asd_ha->phys[phy_id];
@@ -81,7 +80,8 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
ASD_DPRINTK("phy%d: device unplugged\n", phy_id);
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
break;
case CURRENT_OOB_DONE:
/* hot plugged device */
@@ -89,12 +89,13 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
get_lrate_mode(phy, oob_mode);
ASD_DPRINTK("phy%d device plugged: lrate:0x%x, proto:0x%x\n",
phy_id, phy->sas_phy.linkrate, phy->sas_phy.iproto);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
break;
case CURRENT_SPINUP_HOLD:
/* hot plug SATA, no COMWAKE sent */
asd_turn_led(asd_ha, phy_id, 1);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case CURRENT_GTO_TIMEOUT:
case CURRENT_OOB_ERROR:
@@ -102,7 +103,7 @@ static void asd_phy_event_tasklet(struct asd_ascb *ascb,
dl->status_block[1]);
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
}
}
@@ -222,7 +223,6 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
int edb_el = edb_id + ascb->edb_index;
struct asd_dma_tok *edb = ascb->ha->seq.edb_arr[edb_el];
struct asd_phy *phy = &ascb->ha->phys[phy_id];
- struct sas_ha_struct *sas_ha = phy->sas_phy.ha;
u16 size = ((dl->status_block[3] & 7) << 8) | dl->status_block[2];
size = min(size, (u16) sizeof(phy->frame_rcvd));
@@ -234,7 +234,7 @@ static void asd_bytes_dmaed_tasklet(struct asd_ascb *ascb,
spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
asd_dump_frame_rcvd(phy, dl);
asd_form_port(ascb->ha, phy);
- sas_ha->notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(&phy->sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}
static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
@@ -270,7 +270,7 @@ static void asd_link_reset_err_tasklet(struct asd_ascb *ascb,
asd_turn_led(asd_ha, phy_id, 0);
sas_phy_disconnected(sas_phy);
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR, GFP_ATOMIC);
if (retries_left == 0) {
int num = 1;
@@ -315,7 +315,8 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = ffs(cont);
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy,PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case LmUNKNOWNP:
@@ -336,7 +337,8 @@ static void asd_primitive_rcvd_tasklet(struct asd_ascb *ascb,
/* The sequencer disables all phys on that port.
* We have to re-enable the phys ourselves. */
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET,
+ GFP_ATOMIC);
break;
default:
@@ -567,7 +569,7 @@ static void escb_tasklet_complete(struct asd_ascb *ascb,
/* the device is gone */
sas_phy_disconnected(sas_phy);
asd_deform_port(asd_ha, phy);
- sas_ha->notify_port_event(sas_phy, PORTE_TIMER_EVENT);
+ sas_notify_port_event(sas_phy, PORTE_TIMER_EVENT, GFP_ATOMIC);
break;
default:
ASD_DPRINTK("%s: phy%d: unknown event:0x%x\n", __func__,
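
The aic94xx_scb.c hunks above reflect the libsas change in this series: event notification no longer goes through the notify_phy_event/notify_port_event function pointers in struct sas_ha_struct but through the exported sas_notify_phy_event()/sas_notify_port_event() helpers, which take an explicit gfp_t. A minimal usage sketch, assuming tasklet (atomic) context as in the callers above; the function name is illustrative only:

/* Sketch only: reporting loss of signal to libsas from atomic context. */
#include <linux/gfp.h>
#include <scsi/libsas.h>

static void example_report_loss_of_signal(struct asd_sas_phy *sas_phy)
{
	sas_phy_disconnected(sas_phy);
	/* The gfp_t argument is the new part; GFP_ATOMIC because the aic94xx
	 * callers run from tasklets and must not sleep.
	 */
	sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}
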
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index 9a912fd0f70b..248a5bfad153 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -144,12 +144,6 @@
#define VER_MINOR 0
#define VER_PATCH 6
-#ifndef ABORT_TAG
-#define ABORT_TAG 0xd
-#else
-#error "Yippee! ABORT TAG is now defined! Remove this error!"
-#endif
-
#ifdef USE_DMAC
/*
* DMAC setup parameters
@@ -1490,8 +1484,8 @@ void acornscsi_message(AS_Host *host)
}
switch (message[0]) {
- case ABORT:
- case ABORT_TAG:
+ case ABORT_TASK_SET:
+ case ABORT_TASK:
case COMMAND_COMPLETE:
if (host->scsi.phase != PHASE_STATUSIN) {
printk(KERN_ERR "scsi%d.%c: command complete following non-status in phase?\n",
@@ -1596,10 +1590,6 @@ void acornscsi_message(AS_Host *host)
}
break;
- case QUEUE_FULL:
- /* TODO: target queue is full */
- break;
-
case SIMPLE_QUEUE_TAG:
/* tag queue reconnect... message[1] = queue tag. Print something to indicate something happened! */
printk("scsi%d.%c: reconnect queue tag %02X\n",
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index c6a752309dda..da6ca2b153d8 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -42,7 +42,8 @@
static struct scsi_host_template atp870u_template;
static void send_s870(struct atp_unit *dev,unsigned char c);
-static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode);
+static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
+ unsigned char lvdmode);
static inline void atp_writeb_base(struct atp_unit *atp, u8 reg, u8 val)
{
@@ -137,16 +138,17 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
}
if ((j & 0x80) == 0)
return IRQ_NONE;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_intr_handle enter\n");
-#endif
+#endif
dev->in_int[c] = 1;
cmdp = atp_readb_io(dev, c, 0x10);
if (dev->working[c] != 0) {
if (is885(dev)) {
if ((atp_readb_io(dev, c, 0x16) & 0x80) == 0)
- atp_writeb_io(dev, c, 0x16, (atp_readb_io(dev, c, 0x16) | 0x80));
- }
+ atp_writeb_io(dev, c, 0x16,
+ (atp_readb_io(dev, c, 0x16) | 0x80));
+ }
if ((atp_readb_pci(dev, c, 0x00) & 0x08) != 0)
{
for (k=0; k < 1000; k++) {
@@ -157,9 +159,9 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
}
}
atp_writeb_pci(dev, c, 0, 0x00);
-
+
i = atp_readb_io(dev, c, 0x17);
-
+
if (is885(dev))
atp_writeb_pci(dev, c, 2, 0x06);
@@ -185,44 +187,51 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->r1f[c][target_id] |= j;
#ifdef ED_DBGP
printk("atp870u_intr_handle status = %x\n",i);
-#endif
+#endif
if (i == 0x85) {
if ((dev->last_cmd[c] & 0xf0) != 0x40) {
dev->last_cmd[c] = 0xff;
}
if (is885(dev)) {
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
if (dev->id[c][target_id].last_len != adrcnt) {
k = dev->id[c][target_id].last_len;
- k -= adrcnt;
- dev->id[c][target_id].tran_len = k;
+ k -= adrcnt;
+ dev->id[c][target_id].tran_len = k;
dev->id[c][target_id].last_len = adrcnt;
}
#ifdef ED_DBGP
- printk("dev->id[c][target_id].last_len = %d dev->id[c][target_id].tran_len = %d\n",dev->id[c][target_id].last_len,dev->id[c][target_id].tran_len);
-#endif
+ printk("dev->id[c][target_id].last_len = %d "
+ "dev->id[c][target_id].tran_len = %d\n",
+ dev->id[c][target_id].last_len,
+ dev->id[c][target_id].tran_len);
+#endif
}
/*
* Flip wide
- */
+ */
if (dev->wide_id[c] != 0) {
atp_writeb_io(dev, c, 0x1b, 0x01);
while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01)
atp_writeb_io(dev, c, 0x1b, 0x01);
- }
+ }
/*
* Issue more commands
*/
- spin_lock_irqsave(dev->host->host_lock, flags);
- if (((dev->quhd[c] != dev->quend[c]) || (dev->last_cmd[c] != 0xff)) &&
+ spin_lock_irqsave(dev->host->host_lock, flags);
+ if (((dev->quhd[c] != dev->quend[c]) ||
+ (dev->last_cmd[c] != 0xff)) &&
(dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870\n");
-#endif
+#endif
send_s870(dev,c);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
@@ -232,7 +241,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("Status 0x85 return\n");
-#endif
+#endif
return IRQ_HANDLED;
}
@@ -247,9 +256,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->last_cmd[c] = 0xff;
}
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
k = dev->id[c][target_id].last_len;
k -= adrcnt;
dev->id[c][target_id].tran_len = k;
@@ -262,17 +274,16 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (is885(dev)) {
if ((i == 0x4c) || (i == 0x4d) || (i == 0x8c) || (i == 0x8d)) {
- if ((i == 0x4c) || (i == 0x8c))
- i=0x48;
- else
- i=0x49;
- }
-
+ if ((i == 0x4c) || (i == 0x8c))
+ i=0x48;
+ else
+ i=0x49;
+ }
}
if ((i == 0x80) || (i == 0x8f)) {
#ifdef ED_DBGP
printk(KERN_DEBUG "Device reselect\n");
-#endif
+#endif
lun = 0;
if (cmdp == 0x44 || i == 0x80)
lun = atp_readb_io(dev, c, 0x1d) & 0x07;
@@ -283,11 +294,14 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (cmdp == 0x41) {
#ifdef ED_DBGP
printk("cmdp = 0x41\n");
-#endif
+#endif
adrcnt = 0;
- ((unsigned char *) &adrcnt)[2] = atp_readb_io(dev, c, 0x12);
- ((unsigned char *) &adrcnt)[1] = atp_readb_io(dev, c, 0x13);
- ((unsigned char *) &adrcnt)[0] = atp_readb_io(dev, c, 0x14);
+ ((unsigned char *) &adrcnt)[2] =
+ atp_readb_io(dev, c, 0x12);
+ ((unsigned char *) &adrcnt)[1] =
+ atp_readb_io(dev, c, 0x13);
+ ((unsigned char *) &adrcnt)[0] =
+ atp_readb_io(dev, c, 0x14);
k = dev->id[c][target_id].last_len;
k -= adrcnt;
dev->id[c][target_id].tran_len = k;
@@ -298,7 +312,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
} else {
#ifdef ED_DBGP
printk("cmdp != 0x41\n");
-#endif
+#endif
atp_writeb_io(dev, c, 0x10, 0x46);
dev->id[c][target_id].dirct = 0x00;
atp_writeb_io(dev, c, 0x12, 0x00);
@@ -330,13 +344,13 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (is885(dev))
atp_writeb_io(dev, c, 0x10, 0x45);
workreq = dev->id[c][target_id].curr_req;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
scmd_printk(KERN_DEBUG, workreq, "CDB");
for (l = 0; l < workreq->cmd_len; l++)
printk(KERN_DEBUG " %x",workreq->cmnd[l]);
printk("\n");
-#endif
-
+#endif
+
atp_writeb_io(dev, c, 0x0f, lun);
atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp);
adrcnt = dev->id[c][target_id].tran_len;
@@ -345,9 +359,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x12, ((unsigned char *) &k)[2]);
atp_writeb_io(dev, c, 0x13, ((unsigned char *) &k)[1]);
atp_writeb_io(dev, c, 0x14, ((unsigned char *) &k)[0]);
-#ifdef ED_DBGP
- printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k, atp_readb_io(dev, c, 0x14), atp_readb_io(dev, c, 0x13), atp_readb_io(dev, c, 0x12));
-#endif
+#ifdef ED_DBGP
+ printk("k %x, k[0] 0x%x k[1] 0x%x k[2] 0x%x\n", k,
+ atp_readb_io(dev, c, 0x14),
+ atp_readb_io(dev, c, 0x13),
+ atp_readb_io(dev, c, 0x12));
+#endif
/* Remap wide */
j = target_id;
if (target_id > 7) {
@@ -357,26 +374,39 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
j |= dev->id[c][target_id].dirct;
atp_writeb_io(dev, c, 0x15, j);
atp_writeb_io(dev, c, 0x16, 0x80);
-
- /* enable 32 bit fifo transfer */
+
+ /* enable 32 bit fifo transfer */
if (is885(dev)) {
i = atp_readb_pci(dev, c, 1) & 0xf3;
- //j=workreq->cmnd[0];
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
+ //j=workreq->cmnd[0];
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10)) {
i |= 0x0c;
}
atp_writeb_pci(dev, c, 1, i);
} else if (is880(dev)) {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3b,
+ (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
else
- atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f);
- } else {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
+ atp_writeb_base(dev, 0x3b,
+ atp_readb_base(dev, 0x3b) & 0x3f);
+ } else {
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3a,
+ (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
else
- atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3);
- }
+ atp_writeb_base(dev, 0x3a,
+ atp_readb_base(dev, 0x3a) & 0xf3);
+ }
j = 0;
id = 1;
id = id << target_id;
@@ -394,12 +424,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("dev->id[c][target_id].last_len = 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
#ifdef ED_DBGP
printk("target_id = %d adrcnt = %d\n",target_id,adrcnt);
-#endif
+#endif
prd = dev->id[c][target_id].prd_pos;
while (adrcnt != 0) {
id = ((unsigned short int *)prd)[2];
@@ -409,8 +439,8 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
k = id;
}
if (k > adrcnt) {
- ((unsigned short int *)prd)[2] = (unsigned short int)
- (k - adrcnt);
+ ((unsigned short int *)prd)[2] =
+ (unsigned short int)(k - adrcnt);
((unsigned long *)prd)[0] += adrcnt;
adrcnt = 0;
dev->id[c][target_id].prd_pos = prd;
@@ -421,11 +451,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
if (adrcnt == 0) {
dev->id[c][target_id].prd_pos = prd;
}
- }
+ }
}
atp_writel_pci(dev, c, 0x04, dev->id[c][target_id].prdaddr);
#ifdef ED_DBGP
- printk("dev->id[%d][%d].prdaddr 0x%8x\n", c, target_id, dev->id[c][target_id].prdaddr);
+ printk("dev->id[%d][%d].prdaddr 0x%8x\n",
+ c, target_id, dev->id[c][target_id].prdaddr);
#endif
if (!is885(dev)) {
atp_writeb_pci(dev, c, 2, 0x06);
@@ -440,7 +471,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("status 0x80 return dirct != 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
atp_writeb_io(dev, c, 0x18, 0x08);
@@ -448,7 +479,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
dev->in_int[c] = 0;
#ifdef ED_DBGP
printk("status 0x80 return dirct = 0\n");
-#endif
+#endif
return IRQ_HANDLED;
}
@@ -466,10 +497,10 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
workreq->result = atp_readb_io(dev, c, 0x0f);
if (((dev->r1f[c][target_id] & 0x10) != 0) && is885(dev)) {
printk(KERN_WARNING "AEC67162 CRC ERROR !\n");
- workreq->result = 0x02;
+ workreq->result = SAM_STAT_CHECK_CONDITION;
}
} else
- workreq->result = 0x02;
+ workreq->result = SAM_STAT_CHECK_CONDITION;
if (is885(dev)) {
j = atp_readb_base(dev, 0x29) | 0x01;
@@ -484,7 +515,7 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
(*workreq->scsi_done) (workreq);
#ifdef ED_DBGP
printk("workreq->scsi_done\n");
-#endif
+#endif
/*
* Clear it off the queue
*/
@@ -498,16 +529,17 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x1b, 0x01);
while ((atp_readb_io(dev, c, 0x1b) & 0x01) != 0x01)
atp_writeb_io(dev, c, 0x1b, 0x01);
- }
+ }
/*
* If there is stuff to send and nothing going then send it
*/
spin_lock_irqsave(dev->host->host_lock, flags);
- if (((dev->last_cmd[c] != 0xff) || (dev->quhd[c] != dev->quend[c])) &&
+ if (((dev->last_cmd[c] != 0xff) ||
+ (dev->quhd[c] != dev->quend[c])) &&
(dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870(scsi_done)\n");
-#endif
+#endif
send_s870(dev,c);
}
spin_unlock_irqrestore(dev->host->host_lock, flags);
@@ -528,9 +560,12 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x10, 0x41);
if (is885(dev)) {
k = dev->id[c][target_id].last_len;
- atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]);
- atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]);
- atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]);
+ atp_writeb_io(dev, c, 0x12,
+ ((unsigned char *) (&k))[2]);
+ atp_writeb_io(dev, c, 0x13,
+ ((unsigned char *) (&k))[1]);
+ atp_writeb_io(dev, c, 0x14,
+ ((unsigned char *) (&k))[0]);
dev->id[c][target_id].dirct = 0x00;
} else {
dev->id[c][target_id].dirct = 0x00;
@@ -547,11 +582,15 @@ static irqreturn_t atp870u_intr_handle(int irq, void *dev_id)
atp_writeb_io(dev, c, 0x10, 0x41);
if (is885(dev)) {
k = dev->id[c][target_id].last_len;
- atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&k))[2]);
- atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&k))[1]);
- atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&k))[0]);
+ atp_writeb_io(dev, c, 0x12,
+ ((unsigned char *) (&k))[2]);
+ atp_writeb_io(dev, c, 0x13,
+ ((unsigned char *) (&k))[1]);
+ atp_writeb_io(dev, c, 0x14,
+ ((unsigned char *) (&k))[0]);
}
- atp_writeb_io(dev, c, 0x15, atp_readb_io(dev, c, 0x15) | 0x20);
+ atp_writeb_io(dev, c, 0x15,
+ atp_readb_io(dev, c, 0x15) | 0x20);
dev->id[c][target_id].dirct = 0x20;
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x01);
@@ -591,19 +630,17 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
req_p->sense_buffer[0]=0;
scsi_set_resid(req_p, 0);
if (scmd_channel(req_p) > 1) {
- req_p->result = 0x00040000;
+ req_p->result = DID_BAD_TARGET << 16;
done(req_p);
-#ifdef ED_DBGP
- printk("atp870u_queuecommand : req_p->device->channel > 1\n");
-#endif
+#ifdef ED_DBGP
+ printk("atp870u_queuecommand : req_p->device->channel > 1\n");
+#endif
return 0;
}
host = req_p->device->host;
dev = (struct atp_unit *)&host->hostdata;
-
-
m = 1;
m = m << scmd_id(req_p);
@@ -612,7 +649,7 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
*/
if ((m & dev->active_id[c]) == 0) {
- req_p->result = 0x00040000;
+ req_p->result = DID_BAD_TARGET << 16;
done(req_p);
return 0;
}
@@ -620,14 +657,14 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
if (done) {
req_p->scsi_done = done;
} else {
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk( "atp870u_queuecommand: done can't be NULL\n");
-#endif
+#endif
req_p->result = 0;
done(req_p);
return 0;
}
-
+
/*
* Count new command
*/
@@ -635,7 +672,7 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
if (dev->quend[c] >= qcnt) {
dev->quend[c] = 0;
}
-
+
/*
* Check queue state
*/
@@ -643,27 +680,32 @@ static int atp870u_queuecommand_lck(struct scsi_cmnd *req_p,
if (dev->quend[c] == 0) {
dev->quend[c] = qcnt;
}
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_queuecommand : dev->quhd[c] == dev->quend[c]\n");
-#endif
+#endif
dev->quend[c]--;
- req_p->result = 0x00020000;
- done(req_p);
+ req_p->result = DID_BUS_BUSY << 16;
+ done(req_p);
return 0;
}
dev->quereq[c][dev->quend[c]] = req_p;
-#ifdef ED_DBGP
- printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",dev->ioport[c],atp_readb_io(dev, c, 0x1c),c,dev->in_int[c],c,dev->in_snd[c]);
+#ifdef ED_DBGP
+ printk("dev->ioport[c] = %x atp_readb_io(dev, c, 0x1c) = %x "
+ "dev->in_int[%d] = %d dev->in_snd[%d] = %d\n",
+ dev->ioport[c], atp_readb_io(dev, c, 0x1c), c,
+ dev->in_int[c],c,dev->in_snd[c]);
#endif
- if ((atp_readb_io(dev, c, 0x1c) == 0) && (dev->in_int[c] == 0) && (dev->in_snd[c] == 0)) {
+ if ((atp_readb_io(dev, c, 0x1c) == 0) &&
+ (dev->in_int[c] == 0) &&
+ (dev->in_snd[c] == 0)) {
#ifdef ED_DBGP
printk("Call sent_s870(atp870u_queuecommand)\n");
-#endif
+#endif
send_s870(dev,c);
}
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("atp870u_queuecommand : exit\n");
-#endif
+#endif
return 0;
}
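
The interrupt-handler and queuecommand hunks above swap the magic results 0x02, 0x00040000 and 0x00020000 for SAM_STAT_CHECK_CONDITION, DID_BAD_TARGET << 16 and DID_BUS_BUSY << 16, part of the result-handling cleanup this series carries. A minimal sketch of the layout those names imply — the constants are copied locally so it compiles on its own, and make_result() is an illustrative helper, not a kernel API — showing the host byte in bits 16-23 of scsi_cmnd->result and the SAM status byte in bits 0-7:

#include <assert.h>
#include <stdio.h>

/* Host-byte and SAM status values, copied from the kernel's definitions
 * so the sketch stands alone. */
#define DID_OK          0x00
#define DID_BUS_BUSY    0x02
#define DID_BAD_TARGET  0x04
#define SAM_STAT_GOOD            0x00
#define SAM_STAT_CHECK_CONDITION 0x02

/* Illustrative helper: host byte in bits 16-23, status byte in bits 0-7. */
static unsigned int make_result(unsigned char host_byte,
				unsigned char status_byte)
{
	return (host_byte << 16) | status_byte;
}

int main(void)
{
	/* The old magic numbers decode to the new symbolic forms. */
	assert(make_result(DID_BAD_TARGET, SAM_STAT_GOOD) == 0x00040000);
	assert(make_result(DID_BUS_BUSY, SAM_STAT_GOOD) == 0x00020000);
	assert(make_result(DID_OK, SAM_STAT_CHECK_CONDITION) == 0x02);
	printf("result = (host byte << 16) | status byte\n");
	return 0;
}
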
@@ -674,7 +716,7 @@ static DEF_SCSI_QCMD(atp870u_queuecommand)
* @host: host
*
* On entry there is work queued to be done. We move some of that work to the
- * controller itself.
+ * controller itself.
*
* Caller holds the host lock.
*/
@@ -689,7 +731,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
unsigned long sg_count;
if (dev->in_snd[c] != 0) {
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("cmnd in_snd\n");
#endif
return;
@@ -729,7 +771,8 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
dev->id[c][scmd_id(workreq)].curr_req = workreq;
dev->last_cmd[c] = scmd_id(workreq);
}
- if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 || atp_readb_io(dev, c, 0x1c) != 0) {
+ if ((atp_readb_io(dev, c, 0x1f) & 0xb0) != 0 ||
+ atp_readb_io(dev, c, 0x1c) != 0) {
#ifdef ED_DBGP
printk("Abort to Send\n");
#endif
@@ -744,7 +787,7 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
printk(" %x",workreq->cmnd[i]);
}
printk("\n");
-#endif
+#endif
l = scsi_bufflen(workreq);
if (is885(dev)) {
@@ -752,12 +795,12 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
atp_writeb_base(dev, 0x29, j);
dev->r1f[c][scmd_id(workreq)] = 0;
}
-
+
if (workreq->cmnd[0] == READ_CAPACITY) {
if (l > 8)
l = 8;
}
- if (workreq->cmnd[0] == 0x00) {
+ if (workreq->cmnd[0] == TEST_UNIT_READY) {
l = 0;
}
@@ -796,8 +839,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
* Write the target
*/
atp_writeb_io(dev, c, 0x11, dev->id[c][target_id].devsp);
-#ifdef ED_DBGP
- printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,dev->id[c][target_id].devsp);
+#ifdef ED_DBGP
+ printk("dev->id[%d][%d].devsp = %2x\n",c,target_id,
+ dev->id[c][target_id].devsp);
#endif
sg_count = scsi_dma_map(workreq);
@@ -807,12 +851,12 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
atp_writeb_io(dev, c, 0x12, ((unsigned char *) (&l))[2]);
atp_writeb_io(dev, c, 0x13, ((unsigned char *) (&l))[1]);
atp_writeb_io(dev, c, 0x14, ((unsigned char *) (&l))[0]);
- j = target_id;
+ j = target_id;
dev->id[c][j].last_len = l;
dev->id[c][j].tran_len = 0;
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("dev->id[%2d][%2d].last_len = %d\n",c,j,dev->id[c][j].last_len);
-#endif
+#endif
/*
* Flip the wide bits
*/
@@ -832,8 +876,8 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
if (l == 0) {
if (atp_readb_io(dev, c, 0x1c) == 0) {
#ifdef ED_DBGP
- printk("change SCSI_CMD_REG 0x08\n");
-#endif
+ printk("change SCSI_CMD_REG 0x08\n");
+#endif
atp_writeb_io(dev, c, 0x18, 0x08);
} else
dev->last_cmd[c] |= 0x40;
@@ -854,9 +898,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
scsi_for_each_sg(workreq, sgpnt, sg_count, j) {
bttl = sg_dma_address(sgpnt);
l=sg_dma_len(sgpnt);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk("1. bttl %x, l %x\n",bttl, l);
-#endif
+#endif
while (l > 0x10000) {
(((u16 *) (prd))[i + 3]) = 0x0000;
(((u16 *) (prd))[i + 2]) = 0x0000;
@@ -868,48 +912,65 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
(((u32 *) (prd))[i >> 1]) = cpu_to_le32(bttl);
(((u16 *) (prd))[i + 2]) = cpu_to_le16(l);
(((u16 *) (prd))[i + 3]) = 0;
- i += 0x04;
+ i += 0x04;
}
- (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000);
-#ifdef ED_DBGP
- printk("prd %4x %4x %4x %4x\n",(((unsigned short int *)prd)[0]),(((unsigned short int *)prd)[1]),(((unsigned short int *)prd)[2]),(((unsigned short int *)prd)[3]));
+ (((u16 *) (prd))[i - 1]) = cpu_to_le16(0x8000);
+#ifdef ED_DBGP
+ printk("prd %4x %4x %4x %4x\n",
+ (((unsigned short int *)prd)[0]),
+ (((unsigned short int *)prd)[1]),
+ (((unsigned short int *)prd)[2]),
+ (((unsigned short int *)prd)[3]));
printk("2. bttl %x, l %x\n",bttl, l);
-#endif
+#endif
}
-#ifdef ED_DBGP
- printk("send_s870: prdaddr_2 0x%8x target_id %d\n", dev->id[c][target_id].prdaddr,target_id);
-#endif
+#ifdef ED_DBGP
+ printk("send_s870: prdaddr_2 0x%8x target_id %d\n",
+ dev->id[c][target_id].prdaddr,target_id);
+#endif
dev->id[c][target_id].prdaddr = dev->id[c][target_id].prd_bus;
atp_writel_pci(dev, c, 4, dev->id[c][target_id].prdaddr);
atp_writeb_pci(dev, c, 2, 0x06);
atp_writeb_pci(dev, c, 2, 0x00);
if (is885(dev)) {
j = atp_readb_pci(dev, c, 1) & 0xf3;
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) ||
- (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a)) {
- j |= 0x0c;
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10)) {
+ j |= 0x0c;
}
atp_writeb_pci(dev, c, 1, j);
} else if (is880(dev)) {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3b, (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3b,
+ (atp_readb_base(dev, 0x3b) & 0x3f) | 0xc0);
else
- atp_writeb_base(dev, 0x3b, atp_readb_base(dev, 0x3b) & 0x3f);
- } else {
- if ((workreq->cmnd[0] == 0x08) || (workreq->cmnd[0] == 0x28) || (workreq->cmnd[0] == 0x0a) || (workreq->cmnd[0] == 0x2a))
- atp_writeb_base(dev, 0x3a, (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
+ atp_writeb_base(dev, 0x3b,
+ atp_readb_base(dev, 0x3b) & 0x3f);
+ } else {
+ if ((workreq->cmnd[0] == READ_6) ||
+ (workreq->cmnd[0] == READ_10) ||
+ (workreq->cmnd[0] == WRITE_6) ||
+ (workreq->cmnd[0] == WRITE_10))
+ atp_writeb_base(dev, 0x3a,
+ (atp_readb_base(dev, 0x3a) & 0xf3) | 0x08);
else
- atp_writeb_base(dev, 0x3a, atp_readb_base(dev, 0x3a) & 0xf3);
- }
+ atp_writeb_base(dev, 0x3a,
+ atp_readb_base(dev, 0x3a) & 0xf3);
+ }
if(workreq->sc_data_direction == DMA_TO_DEVICE) {
dev->id[c][target_id].dirct = 0x20;
if (atp_readb_io(dev, c, 0x1c) == 0) {
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x01);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk( "start DMA(to target)\n");
-#endif
+#endif
} else {
dev->last_cmd[c] |= 0x40;
}
@@ -919,9 +980,9 @@ static void send_s870(struct atp_unit *dev,unsigned char c)
if (atp_readb_io(dev, c, 0x1c) == 0) {
atp_writeb_io(dev, c, 0x18, 0x08);
atp_writeb_pci(dev, c, 0, 0x09);
-#ifdef ED_DBGP
+#ifdef ED_DBGP
printk( "start DMA(to host)\n");
-#endif
+#endif
} else {
dev->last_cmd[c] |= 0x40;
}
@@ -1193,7 +1254,9 @@ static void atp870u_free_tables(struct Scsi_Host *host)
for (k = 0; k < 16; k++) {
if (!atp_dev->id[j][k].prd_table)
continue;
- dma_free_coherent(&atp_dev->pdev->dev, 1024, atp_dev->id[j][k].prd_table, atp_dev->id[j][k].prd_bus);
+ dma_free_coherent(&atp_dev->pdev->dev, 1024,
+ atp_dev->id[j][k].prd_table,
+ atp_dev->id[j][k].prd_bus);
atp_dev->id[j][k].prd_table = NULL;
}
}
@@ -1204,35 +1267,38 @@ static int atp870u_init_tables(struct Scsi_Host *host)
struct atp_unit *atp_dev = (struct atp_unit *)&host->hostdata;
int c,k;
for(c=0;c < 2;c++) {
- for(k=0;k<16;k++) {
- atp_dev->id[c][k].prd_table = dma_alloc_coherent(&atp_dev->pdev->dev, 1024, &(atp_dev->id[c][k].prd_bus), GFP_KERNEL);
- if (!atp_dev->id[c][k].prd_table) {
- printk("atp870u_init_tables fail\n");
+ for(k=0;k<16;k++) {
+ atp_dev->id[c][k].prd_table =
+ dma_alloc_coherent(&atp_dev->pdev->dev, 1024,
+ &(atp_dev->id[c][k].prd_bus),
+ GFP_KERNEL);
+ if (!atp_dev->id[c][k].prd_table) {
+ printk("atp870u_init_tables fail\n");
atp870u_free_tables(host);
return -ENOMEM;
}
atp_dev->id[c][k].prdaddr = atp_dev->id[c][k].prd_bus;
atp_dev->id[c][k].devsp=0x20;
atp_dev->id[c][k].devtype = 0x7f;
- atp_dev->id[c][k].curr_req = NULL;
- }
-
- atp_dev->active_id[c] = 0;
- atp_dev->wide_id[c] = 0;
- atp_dev->host_id[c] = 0x07;
- atp_dev->quhd[c] = 0;
- atp_dev->quend[c] = 0;
- atp_dev->last_cmd[c] = 0xff;
- atp_dev->in_snd[c] = 0;
- atp_dev->in_int[c] = 0;
-
- for (k = 0; k < qcnt; k++) {
- atp_dev->quereq[c][k] = NULL;
- }
- for (k = 0; k < 16; k++) {
+ atp_dev->id[c][k].curr_req = NULL;
+ }
+
+ atp_dev->active_id[c] = 0;
+ atp_dev->wide_id[c] = 0;
+ atp_dev->host_id[c] = 0x07;
+ atp_dev->quhd[c] = 0;
+ atp_dev->quend[c] = 0;
+ atp_dev->last_cmd[c] = 0xff;
+ atp_dev->in_snd[c] = 0;
+ atp_dev->in_int[c] = 0;
+
+ for (k = 0; k < qcnt; k++) {
+ atp_dev->quereq[c][k] = NULL;
+ }
+ for (k = 0; k < 16; k++) {
atp_dev->id[c][k].curr_req = NULL;
atp_dev->sp[c][k] = 0x04;
- }
+ }
}
return 0;
}
@@ -1263,7 +1329,8 @@ static void atp870_init(struct Scsi_Host *shpnt)
pci_read_config_byte(pdev, 0x49, &host_id);
- dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-671X PCI Ultra/W SCSI-2/3 "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->ioport[0] = shpnt->io_port;
@@ -1314,7 +1381,8 @@ static void atp880_init(struct Scsi_Host *shpnt)
host_id = atp_readb_base(atpdev, 0x39) >> 4;
- dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-67160 PCI Ultra3 LVD "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->host_id[0] = host_id;
@@ -1393,7 +1461,8 @@ static void atp885_init(struct Scsi_Host *shpnt)
unsigned int n;
unsigned char setupdata[2][16];
- dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD Host Adapter: IO:%lx, IRQ:%d.\n",
+ dev_info(&pdev->dev, "ACARD AEC-67162 PCI Ultra3 LVD "
+ "Host Adapter: IO:%lx, IRQ:%d.\n",
shpnt->io_port, shpnt->irq);
atpdev->ioport[0] = shpnt->io_port + 0x80;
@@ -1413,11 +1482,13 @@ static void atp885_init(struct Scsi_Host *shpnt)
atpdev->global_map[m] = 0;
for (k = 0; k < 4; k++) {
atp_writew_base(atpdev, 0x3c, n++);
- ((u32 *)&setupdata[m][0])[k] = atp_readl_base(atpdev, 0x38);
+ ((u32 *)&setupdata[m][0])[k] =
+ atp_readl_base(atpdev, 0x38);
}
for (k = 0; k < 4; k++) {
atp_writew_base(atpdev, 0x3c, n++);
- ((u32 *)&atpdev->sp[m][0])[k] = atp_readl_base(atpdev, 0x38);
+ ((u32 *)&atpdev->sp[m][0])[k] =
+ atp_readl_base(atpdev, 0x38);
}
n += 8;
}
@@ -1510,17 +1581,17 @@ static int atp870u_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto fail;
if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
- err = -EIO;
- goto disable_device;
- }
+ printk(KERN_ERR "atp870u: DMA mask required but not available.\n");
+ err = -EIO;
+ goto disable_device;
+ }
err = pci_request_regions(pdev, "atp870u");
if (err)
goto disable_device;
pci_set_master(pdev);
- err = -ENOMEM;
+ err = -ENOMEM;
shpnt = scsi_host_alloc(&atp870u_template, sizeof(struct atp_unit));
if (!shpnt)
goto release_region;
@@ -1586,7 +1657,7 @@ static int atp870u_abort(struct scsi_cmnd * SCpnt)
{
unsigned char j, k, c;
struct scsi_cmnd *workrequ;
- struct atp_unit *dev;
+ struct atp_unit *dev;
struct Scsi_Host *host;
host = SCpnt->device->host;
@@ -1655,11 +1726,10 @@ static int atp870u_biosparam(struct scsi_device *disk, struct block_device *dev,
}
static void atp870u_remove (struct pci_dev *pdev)
-{
+{
struct atp_unit *devext = pci_get_drvdata(pdev);
struct Scsi_Host *pshost = devext->host;
-
-
+
scsi_remove_host(pshost);
free_irq(pshost->irq, pshost);
pci_release_regions(pdev);
@@ -1671,23 +1741,23 @@ MODULE_LICENSE("GPL");
static struct scsi_host_template atp870u_template = {
.module = THIS_MODULE,
- .name = "atp870u" /* name */,
+ .name = "atp870u" /* name */,
.proc_name = "atp870u",
.show_info = atp870u_show_info,
- .info = atp870u_info /* info */,
- .queuecommand = atp870u_queuecommand /* queuecommand */,
- .eh_abort_handler = atp870u_abort /* abort */,
- .bios_param = atp870u_biosparam /* biosparm */,
- .can_queue = qcnt /* can_queue */,
- .this_id = 7 /* SCSI ID */,
- .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
+ .info = atp870u_info /* info */,
+ .queuecommand = atp870u_queuecommand /* queuecommand */,
+ .eh_abort_handler = atp870u_abort /* abort */,
+ .bios_param = atp870u_biosparam /* biosparm */,
+ .can_queue = qcnt /* can_queue */,
+ .this_id = 7 /* SCSI ID */,
+ .sg_tablesize = ATP870U_SCATTER /*SG_ALL*/,
.max_sectors = ATP870U_MAX_SECTORS,
};
static struct pci_device_id atp870u_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP885_DEVID) },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
- { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_ARTOP, ATP880_DEVID2) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7610) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612UW) },
{ PCI_DEVICE(PCI_VENDOR_ID_ARTOP, PCI_DEVICE_ID_ARTOP_AEC7612U) },
@@ -1709,7 +1779,8 @@ static struct pci_driver atp870u_driver = {
module_pci_driver(atp870u_driver);
-static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip, unsigned char lvdmode)
+static void atp_is(struct atp_unit *dev, unsigned char c, bool wide_chip,
+ unsigned char lvdmode)
{
unsigned char i, j, k, rmb, n;
unsigned short int m;
@@ -1982,8 +2053,9 @@ u3p_cmd:
m = m << i;
dev->wide_id[c] |= m;
dev->id[c][i].devsp = 0xce;
-#ifdef ED_DBGP
- printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+#ifdef ED_DBGP
+ printk("dev->id[%2d][%2d].devsp = %2x\n",
+ c, i, dev->id[c][i].devsp);
#endif
continue;
}
@@ -2005,7 +2077,8 @@ chg_wide:
while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
cpu_relax();
- if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
+ if (atp_readb_io(dev, c, 0x17) != 0x11 &&
+ atp_readb_io(dev, c, 0x17) != 0x8e)
continue;
while (atp_readb_io(dev, c, 0x17) != 0x8e)
@@ -2109,7 +2182,9 @@ widep_cmd:
m = m << i;
dev->wide_id[c] |= m;
not_wide:
- if ((dev->id[c][i].devtype == 0x00) || (dev->id[c][i].devtype == 0x07) || ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
+ if ((dev->id[c][i].devtype == 0x00) ||
+ (dev->id[c][i].devtype == 0x07) ||
+ ((dev->id[c][i].devtype == 0x05) && ((n & 0x10) != 0))) {
m = 1;
m = m << i;
if ((dev->async[c] & m) != 0) {
@@ -2148,7 +2223,8 @@ set_sync:
while ((atp_readb_io(dev, c, 0x1f) & 0x80) == 0x00)
cpu_relax();
- if (atp_readb_io(dev, c, 0x17) != 0x11 && atp_readb_io(dev, c, 0x17) != 0x8e)
+ if (atp_readb_io(dev, c, 0x17) != 0x11 &&
+ atp_readb_io(dev, c, 0x17) != 0x8e)
continue;
while (atp_readb_io(dev, c, 0x17) != 0x8e)
@@ -2310,7 +2386,8 @@ tar_dcons:
set_syn_ok:
dev->id[c][i].devsp = (dev->id[c][i].devsp & 0x0f) | j;
#ifdef ED_DBGP
- printk("dev->id[%2d][%2d].devsp = %2x\n",c,i,dev->id[c][i].devsp);
+ printk("dev->id[%2d][%2d].devsp = %2x\n",
+ c,i,dev->id[c][i].devsp);
#endif
}
}
diff --git a/drivers/scsi/atp870u.h b/drivers/scsi/atp870u.h
index 75c44399fc88..31f6ab24b5cb 100644
--- a/drivers/scsi/atp870u.h
+++ b/drivers/scsi/atp870u.h
@@ -7,10 +7,10 @@
/* I/O Port */
-#define MAX_CDB 12
-#define MAX_SENSE 14
-#define qcnt 32
-#define ATP870U_SCATTER 128
+#define MAX_CDB 12
+#define MAX_SENSE 14
+#define qcnt 32
+#define ATP870U_SCATTER 128
#define MAX_ADAPTER 8
#define MAX_SCSI_ID 16
@@ -40,7 +40,7 @@ struct atp_unit
unsigned short ultra_map[2];
unsigned short async[2];
unsigned char sp[2][16];
- unsigned char r1f[2][16];
+ unsigned char r1f[2][16];
struct scsi_cmnd *quereq[2][qcnt];
struct atp_id
{
@@ -55,8 +55,8 @@ struct atp_unit
dma_addr_t prdaddr; /* Dynamically updated in driver */
struct scsi_cmnd *curr_req;
} id[2][16];
- struct Scsi_Host *host;
- struct pci_dev *pdev;
+ struct Scsi_Host *host;
+ struct pci_dev *pdev;
unsigned int unit;
};
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index b00fb2409c50..d536270bbe9f 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -33,21 +33,6 @@ struct scsi_cdb_s {
u8 scsi_cdb[SCSI_MAX_CDBLEN];
};
-/* ------------------------------------------------------------
- * SCSI status byte values
- * ------------------------------------------------------------
- */
-#define SCSI_STATUS_GOOD 0x00
-#define SCSI_STATUS_CHECK_CONDITION 0x02
-#define SCSI_STATUS_CONDITION_MET 0x04
-#define SCSI_STATUS_BUSY 0x08
-#define SCSI_STATUS_INTERMEDIATE 0x10
-#define SCSI_STATUS_ICM 0x14 /* intermediate condition met */
-#define SCSI_STATUS_RESERVATION_CONFLICT 0x18
-#define SCSI_STATUS_COMMAND_TERMINATED 0x22
-#define SCSI_STATUS_QUEUE_FULL 0x28
-#define SCSI_STATUS_ACA_ACTIVE 0x30
-
 #define SCSI_MAX_ALLOC_LEN 0xFF /* maximum allocation length */
/*
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 38d1c453074d..7ad22288071b 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -2146,7 +2146,7 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
/*
* setup sense information, if present
*/
- if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
+ if ((m->scsi_status == SAM_STAT_CHECK_CONDITION) &&
m->sns_len) {
sns_len = m->sns_len;
snsinfo = BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 22f06be2606f..6b5841b1c06e 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -106,7 +106,7 @@ bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
struct bfad_itnim_data_s *itnim_data;
struct bfad_itnim_s *itnim;
- cmnd->result = DID_OK << 16 | SCSI_STATUS_GOOD;
+ cmnd->result = DID_OK << 16 | SAM_STAT_GOOD;
/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
if (cmnd->device->host != NULL)
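
The bfa hunks above delete the driver-private SCSI_STATUS_* table and switch bfa_fcpim.c/bfad_im.c to the shared SAM_STAT_* names. A small self-contained check — both value sets are copied here rather than taken from kernel headers — shows the private table duplicated the standard values, so the change is pure de-duplication:

#include <assert.h>
#include <stdio.h>

/* Values from the SCSI_STATUS_* table removed from bfa_fc.h. */
enum { BFA_GOOD = 0x00, BFA_CHECK_CONDITION = 0x02, BFA_BUSY = 0x08,
       BFA_RESERVATION_CONFLICT = 0x18, BFA_QUEUE_FULL = 0x28 };

/* The generic SAM status bytes the driver now uses (values copied here
 * so the sketch builds without kernel headers). */
enum { SAM_STAT_GOOD = 0x00, SAM_STAT_CHECK_CONDITION = 0x02,
       SAM_STAT_BUSY = 0x08, SAM_STAT_RESERVATION_CONFLICT = 0x18,
       SAM_STAT_TASK_SET_FULL = 0x28 };

int main(void)
{
	/* The private table mirrored the standard values, so switching
	 * to SAM_STAT_* changes nothing at runtime. */
	assert(BFA_GOOD == SAM_STAT_GOOD);
	assert(BFA_CHECK_CONDITION == SAM_STAT_CHECK_CONDITION);
	assert(BFA_BUSY == SAM_STAT_BUSY);
	assert(BFA_RESERVATION_CONFLICT == SAM_STAT_RESERVATION_CONFLICT);
	assert(BFA_QUEUE_FULL == SAM_STAT_TASK_SET_FULL);
	printf("bfa's private status table matched SAM_STAT_*\n");
	return 0;
}
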
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 7b522ff345d5..3ea345c12467 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -64,6 +64,7 @@
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_spi.h>
#include "dc395x.h"
@@ -1281,12 +1282,8 @@ static void build_sdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (dcb->sync_offset == 0)
dcb->sync_offset = SYNC_NEGO_OFFSET;
- *ptr++ = MSG_EXTENDED; /* (01h) */
- *ptr++ = 3; /* length */
- *ptr++ = EXTENDED_SDTR; /* (01h) */
- *ptr++ = dcb->min_nego_period; /* Transfer period (in 4ns) */
- *ptr++ = dcb->sync_offset; /* Transfer period (max. REQ/ACK dist) */
- srb->msg_count += 5;
+ srb->msg_count += spi_populate_sync_msg(ptr, dcb->min_nego_period,
+ dcb->sync_offset);
srb->state |= SRB_DO_SYNC_NEGO;
}
@@ -1305,11 +1302,7 @@ static void build_wdtr(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb->msgout_buf[1]);
return;
}
- *ptr++ = MSG_EXTENDED; /* (01h) */
- *ptr++ = 2; /* length */
- *ptr++ = EXTENDED_WDTR; /* (03h) */
- *ptr++ = wide;
- srb->msg_count += 4;
+ srb->msg_count += spi_populate_width_msg(ptr, wide);
srb->state |= SRB_DO_WIDE_NEGO;
}
@@ -1476,7 +1469,7 @@ static u8 start_scsi(struct AdapterCtlBlk* acb, struct DeviceCtlBlk* dcb,
return 1;
}
/* Send Tag id */
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_SIMPLE_QTAG);
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, SIMPLE_QUEUE_TAG);
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, tag_number);
dcb->tag_mask |= tag_mask;
srb->tag_number = tag_number;
@@ -1732,8 +1725,9 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
if (!srb->msg_count) {
dprintkdbg(DBG_0, "msgout_phase1: (0x%p) NOP msg\n",
srb->cmd);
- DC395x_write8(acb, TRM_S1040_SCSI_FIFO, MSG_NOP);
- DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH); /* it's important for atn stop */
+ DC395x_write8(acb, TRM_S1040_SCSI_FIFO, NOP);
+ DC395x_write16(acb, TRM_S1040_SCSI_CONTROL, DO_DATALATCH);
+ /* it's important for atn stop */
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
return;
}
@@ -1741,7 +1735,7 @@ static void msgout_phase1(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
for (i = 0; i < srb->msg_count; i++)
DC395x_write8(acb, TRM_S1040_SCSI_FIFO, *ptr++);
srb->msg_count = 0;
- if (srb->msgout_buf[0] == MSG_ABORT)
+ if (srb->msgout_buf[0] == ABORT_TASK_SET)
srb->state = SRB_ABORT_SENT;
DC395x_write8(acb, TRM_S1040_SCSI_COMMAND, SCMD_FIFO_OUT);
@@ -2538,7 +2532,7 @@ static struct ScsiReqBlk *msgin_qtag(struct AdapterCtlBlk *acb,
srb = acb->tmp_srb;
srb->state = SRB_UNEXPECT_RESEL;
dcb->active_srb = srb;
- srb->msgout_buf[0] = MSG_ABORT_TAG;
+ srb->msgout_buf[0] = ABORT_TASK;
srb->msg_count = 1;
DC395x_ENABLE_MSGOUT;
dprintkl(KERN_DEBUG, "msgin_qtag: Unknown tag %i - abort\n", tag);
@@ -2780,7 +2774,7 @@ static void msgin_phase0(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb,
msgin_reject(acb, srb);
break;
- case MSG_IGNOREWIDE:
+ case IGNORE_WIDE_RESIDUE:
/* Discard wide residual */
dprintkdbg(DBG_0, "msgin_phase0: Ignore Wide Residual!\n");
break;
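
The dc395x.c hunks stop hand-assembling the SDTR and WDTR extended messages and call spi_populate_sync_msg()/spi_populate_width_msg() from scsi_transport_spi instead, adding the helpers' return value to srb->msg_count rather than a literal 5 or 4. The sketch below restates the byte sequence the deleted open-coded versions produced (and that the helpers are expected to emit); it is an illustration with locally defined message constants, not the transport-class source:

#include <assert.h>
#include <stdio.h>

/* SCSI extended-message bytes, copied from the standard values so the
 * sketch is self-contained. */
#define EXTENDED_MESSAGE 0x01
#define EXTENDED_SDTR    0x01
#define EXTENDED_WDTR    0x03

/* Illustrative restatement of an SDTR message; the return value is the
 * number of message bytes written. */
static int sketch_sync_msg(unsigned char *msg, int period, int offset)
{
	msg[0] = EXTENDED_MESSAGE;
	msg[1] = 3;			/* extended message length */
	msg[2] = EXTENDED_SDTR;
	msg[3] = period;		/* transfer period, 4 ns units */
	msg[4] = offset;		/* max REQ/ACK offset */
	return 5;
}

/* Likewise for the 4-byte WDTR message. */
static int sketch_width_msg(unsigned char *msg, int width)
{
	msg[0] = EXTENDED_MESSAGE;
	msg[1] = 2;
	msg[2] = EXTENDED_WDTR;
	msg[3] = width;
	return 4;
}

int main(void)
{
	unsigned char buf[8];

	/* Same byte counts the deleted open-coded versions added
	 * (period/offset values here are arbitrary examples). */
	assert(sketch_sync_msg(buf, 12, 15) == 5);
	assert(sketch_width_msg(buf, 1) == 4);
	printf("SDTR = 5 bytes, WDTR = 4 bytes\n");
	return 0;
}
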
diff --git a/drivers/scsi/dc395x.h b/drivers/scsi/dc395x.h
index 5379a936141a..24a36c046d07 100644
--- a/drivers/scsi/dc395x.h
+++ b/drivers/scsi/dc395x.h
@@ -156,15 +156,6 @@
#define H_ABORT 0x0FF
/* SCSI BUS Status byte codes */
-#define SCSI_STAT_GOOD 0x0 /* Good status */
-#define SCSI_STAT_CHECKCOND 0x02 /* SCSI Check Condition */
-#define SCSI_STAT_CONDMET 0x04 /* Condition Met */
-#define SCSI_STAT_BUSY 0x08 /* Target busy status */
-#define SCSI_STAT_INTER 0x10 /* Intermediate status */
-#define SCSI_STAT_INTERCONDMET 0x14 /* Intermediate condition met */
-#define SCSI_STAT_RESCONFLICT 0x18 /* Reservation conflict */
-#define SCSI_STAT_CMDTERM 0x22 /* Command Terminated */
-#define SCSI_STAT_QUEUEFULL 0x28 /* Queue Full */
#define SCSI_STAT_UNEXP_BUS_F 0xFD /* Unexpect Bus Free */
#define SCSI_STAT_BUS_RST_DETECT 0xFE /* Scsi Bus Reset detected */
#define SCSI_STAT_SEL_TIMEOUT 0xFF /* Selection Time out */
@@ -181,35 +172,6 @@
#define SYNC_NEGO_OFFSET 15
-/* SCSI MSG BYTE */
-#define MSG_COMPLETE 0x00
-#define MSG_EXTENDED 0x01
-#define MSG_SAVE_PTR 0x02
-#define MSG_RESTORE_PTR 0x03
-#define MSG_DISCONNECT 0x04
-#define MSG_INITIATOR_ERROR 0x05
-#define MSG_ABORT 0x06
-#define MSG_REJECT_ 0x07
-#define MSG_NOP 0x08
-#define MSG_PARITY_ERROR 0x09
-#define MSG_LINK_CMD_COMPL 0x0A
-#define MSG_LINK_CMD_COMPL_FLG 0x0B
-#define MSG_BUS_RESET 0x0C
-#define MSG_ABORT_TAG 0x0D
-#define MSG_SIMPLE_QTAG 0x20
-#define MSG_HEAD_QTAG 0x21
-#define MSG_ORDER_QTAG 0x22
-#define MSG_IGNOREWIDE 0x23
-#define MSG_IDENTIFY 0x80
-#define MSG_HOST_ID 0xC0
-
-/* SCSI STATUS BYTE */
-#define STATUS_GOOD 0x00
-#define CHECK_CONDITION_ 0x02
-#define STATUS_BUSY 0x08
-#define STATUS_INTERMEDIATE 0x10
-#define RESERVE_CONFLICT 0x18
-
/* cmd->result */
#define STATUS_MASK_ 0xFF
#define MSG_MASK 0xFF00
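
dc395x.h loses its private MSG_* and duplicate status tables because dc395x.c now uses the generic SCSI message names. A quick standalone check of the mapping — values copied from the deleted defines and the standard message codes, purely illustrative — confirms each substitution in the dc395x.c hunks is value-for-value:

#include <assert.h>
#include <stdio.h>

/* Old private names deleted from dc395x.h. */
enum { MSG_ABORT = 0x06, MSG_NOP = 0x08, MSG_ABORT_TAG = 0x0d,
       MSG_SIMPLE_QTAG = 0x20, MSG_IGNOREWIDE = 0x23 };

/* Generic message codes now used by dc395x.c (values copied locally). */
enum { ABORT_TASK_SET = 0x06, NOP = 0x08, ABORT_TASK = 0x0d,
       SIMPLE_QUEUE_TAG = 0x20, IGNORE_WIDE_RESIDUE = 0x23 };

int main(void)
{
	assert(MSG_ABORT == ABORT_TASK_SET);
	assert(MSG_NOP == NOP);
	assert(MSG_ABORT_TAG == ABORT_TASK);
	assert(MSG_SIMPLE_QTAG == SIMPLE_QUEUE_TAG);
	assert(MSG_IGNOREWIDE == IGNORE_WIDE_RESIDUE);
	printf("private MSG_* values match the generic message codes\n");
	return 0;
}
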
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 4251212acbbe..a18a4a08f049 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2226,7 +2226,7 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
default:
printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
pHba->name, cmd->cmnd[0]);
- cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
+ cmd->result = (DID_ERROR <<16);
cmd->scsi_done(cmd);
return 0;
}
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c
index 43a1fd11df5e..007ccef5d1e2 100644
--- a/drivers/scsi/esp_scsi.c
+++ b/drivers/scsi/esp_scsi.c
@@ -896,7 +896,7 @@ static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
}
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
- struct scsi_cmnd *cmd, unsigned int result)
+ struct scsi_cmnd *cmd, unsigned char host_byte)
{
struct scsi_device *dev = cmd->device;
int tgt = dev->id;
@@ -905,7 +905,10 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
esp->active_cmd = NULL;
esp_unmap_dma(esp, cmd);
esp_free_lun_tag(ent, dev->hostdata);
- cmd->result = result;
+ cmd->result = 0;
+ set_host_byte(cmd, host_byte);
+ if (host_byte == DID_OK)
+ set_status_byte(cmd, ent->status);
if (ent->eh_done) {
complete(ent->eh_done);
@@ -921,7 +924,6 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
*/
cmd->result = ((DRIVER_SENSE << 24) |
(DID_OK << 16) |
- (COMMAND_COMPLETE << 8) |
(SAM_STAT_CHECK_CONDITION << 0));
ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
@@ -944,12 +946,6 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
esp_maybe_execute_command(esp);
}
-static unsigned int compose_result(unsigned int status, unsigned int message,
- unsigned int driver_code)
-{
- return (status | (message << 8) | (driver_code << 16));
-}
-
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
struct scsi_device *dev = ent->cmd->device;
@@ -1244,7 +1240,7 @@ static int esp_finish_select(struct esp *esp)
* all bets are off.
*/
esp_schedule_reset(esp);
- esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
+ esp_cmd_is_done(esp, ent, cmd, DID_ERROR);
return 0;
}
@@ -1289,7 +1285,7 @@ static int esp_finish_select(struct esp *esp)
esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
scsi_esp_cmd(esp, ESP_CMD_ESEL);
- esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
+ esp_cmd_is_done(esp, ent, cmd, DID_BAD_TARGET);
return 1;
}
@@ -1874,10 +1870,7 @@ again:
ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
esp_autosense(esp, ent);
} else {
- esp_cmd_is_done(esp, ent, cmd,
- compose_result(ent->status,
- ent->message,
- DID_OK));
+ esp_cmd_is_done(esp, ent, cmd, DID_OK);
}
} else if (ent->message == DISCONNECT) {
esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
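
The esp_scsi.c hunks retire compose_result() and have esp_cmd_is_done() take only a host byte, building cmd->result through set_host_byte()/set_status_byte() and attaching the target's SAM status only when the host byte is DID_OK. A minimal standalone sketch of that flow — struct cmd and the two setters below are stand-ins with the same bit layout as the kernel helpers, not driver code:

#include <assert.h>
#include <stdio.h>

#define DID_OK    0x00
#define DID_ERROR 0x07
#define SAM_STAT_CHECK_CONDITION 0x02

/* Stand-in for struct scsi_cmnd, keeping only the result word. */
struct cmd { unsigned int result; };

/* Stand-ins for set_host_byte()/set_status_byte(): host byte in
 * bits 16-23, SAM status byte in bits 0-7. */
static void set_host_byte(struct cmd *c, unsigned char host)
{
	c->result = (c->result & 0xff00ffff) | (host << 16);
}

static void set_status_byte(struct cmd *c, unsigned char status)
{
	c->result = (c->result & 0xffffff00) | status;
}

/* Shape of the new completion path: the caller passes a host byte and
 * the target's SAM status is attached only on DID_OK, instead of
 * composing status | (message << 8) | (host << 16) by hand. */
static void complete_cmd(struct cmd *c, unsigned char host_byte,
			 unsigned char sam_status)
{
	c->result = 0;
	set_host_byte(c, host_byte);
	if (host_byte == DID_OK)
		set_status_byte(c, sam_status);
}

int main(void)
{
	struct cmd c;

	complete_cmd(&c, DID_OK, SAM_STAT_CHECK_CONDITION);
	assert(c.result == 0x00000002);
	complete_cmd(&c, DID_ERROR, SAM_STAT_CHECK_CONDITION);
	assert(c.result == 0x00070000);
	printf("result = host byte << 16 | status byte\n");
	return 0;
}
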
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
deleted file mode 100644
index 5d801388680b..000000000000
--- a/drivers/scsi/gdth.c
+++ /dev/null
@@ -1,4322 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/************************************************************************
- * Linux driver for *
- * ICP vortex GmbH: GDT PCI Disk Array Controllers *
- * Intel Corporation: Storage RAID Controllers *
- * *
- * gdth.c *
- * Copyright (C) 1995-06 ICP vortex GmbH, Achim Leubner *
- * Copyright (C) 2002-04 Intel Corporation *
- * Copyright (C) 2003-06 Adaptec Inc. *
- * <achim_leubner@adaptec.com> *
- * *
- * Additions/Fixes: *
- * Boji Tony Kannanthanam <boji.t.kannanthanam@intel.com> *
- * Johannes Dinner <johannes_dinner@adaptec.com> *
- * *
- * *
- * Linux kernel 2.6.x supported *
- * *
- ************************************************************************/
-
-/* All GDT Disk Array Controllers are fully supported by this driver.
- * This includes the PCI SCSI Disk Array Controllers and the
- * PCI Fibre Channel Disk Array Controllers. See gdth.h for a complete
- * list of all controller types.
- *
- * After the optional list of IRQ values, other possible
- * command line options are:
- * disable:Y disable driver
- * disable:N enable driver
- * reserve_mode:0 reserve no drives for the raw service
- * reserve_mode:1 reserve all not init., removable drives
- * reserve_mode:2 reserve all not init. drives
- * reserve_list:h,b,t,l,h,b,t,l,... reserve particular drive(s) with
- * h- controller no., b- channel no.,
- * t- target ID, l- LUN
- * reverse_scan:Y reverse scan order for PCI controllers
- * reverse_scan:N scan PCI controllers like BIOS
- * max_ids:x x - target ID count per channel (1..MAXID)
- * rescan:Y rescan all channels/IDs
- * rescan:N use all devices found until now
- * hdr_channel:x x - number of virtual bus for host drives
- * shared_access:Y disable driver reserve/release protocol to
- * access a shared resource from several nodes,
- * appropriate controller firmware required
- * shared_access:N enable driver reserve/release protocol
- * force_dma32:Y use only 32 bit DMA mode
- * force_dma32:N use 64 bit DMA mode, if supported
- *
- * The default values are: "gdth=disable:N,reserve_mode:1,reverse_scan:N,
- * max_ids:127,rescan:N,hdr_channel:0,
- * shared_access:Y,force_dma32:N".
- * Here is another example: "gdth=reserve_list:0,1,2,0,0,1,3,0,rescan:Y".
- *
- * When loading the gdth driver as a module, the same options are available.
- * You can set the IRQs with "IRQ=...". However, the syntax to specify the
- * options changes slightly. You must replace all ',' between options
- * with ' ' and all ':' with '=' and you must use
- * '1' in place of 'Y' and '0' in place of 'N'.
- *
- * Default: "modprobe gdth disable=0 reserve_mode=1 reverse_scan=0
- * max_ids=127 rescan=0 hdr_channel=0 shared_access=0
- * force_dma32=0"
- * The other example: "modprobe gdth reserve_list=0,1,2,0,0,1,3,0 rescan=1".
- */
-
-/* The meaning of the Scsi_Pointer members in this driver is as follows:
- * ptr: Chaining
- * this_residual: unused
- * buffer: unused
- * dma_handle: unused
- * buffers_residual: unused
- * Status: unused
- * Message: unused
- * have_data_in: unused
- * sent_command: unused
- * phase: unused
- */
-
-/* statistics */
-#define GDTH_STATISTICS
-
-#include <linux/module.h>
-
-#include <linux/version.h>
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/ioport.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/in.h>
-#include <linux/proc_fs.h>
-#include <linux/time.h>
-#include <linux/timer.h>
-#include <linux/dma-mapping.h>
-#include <linux/list.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/reboot.h>
-
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <linux/spinlock.h>
-#include <linux/blkdev.h>
-#include <linux/scatterlist.h>
-
-#include "scsi.h"
-#include <scsi/scsi_host.h>
-#include "gdth.h"
-
-static DEFINE_MUTEX(gdth_mutex);
-static void gdth_delay(int milliseconds);
-static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs);
-static irqreturn_t gdth_interrupt(int irq, void *dev_id);
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
- int gdth_from_wait, int* pIndex);
-static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- struct scsi_cmnd *scp);
-static int gdth_async_event(gdth_ha_str *ha);
-static void gdth_log_event(gdth_evt_data *dvr, char *buffer);
-
-static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority);
-static void gdth_next(gdth_ha_str *ha);
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b);
-static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
- u16 idx, gdth_evt_data *evt);
-static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr);
-static void gdth_readapp_event(gdth_ha_str *ha, u8 application,
- gdth_evt_str *estr);
-static void gdth_clear_events(void);
-
-static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
- char *buffer, u16 count);
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp);
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
- u16 hdrive);
-
-static void gdth_enable_int(gdth_ha_str *ha);
-static int gdth_test_busy(gdth_ha_str *ha);
-static int gdth_get_cmd_index(gdth_ha_str *ha);
-static void gdth_release_event(gdth_ha_str *ha);
-static int gdth_wait(gdth_ha_str *ha, int index,u32 time);
-static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
- u32 p1, u64 p2,u64 p3);
-static int gdth_search_drives(gdth_ha_str *ha);
-static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive);
-
-static const char *gdth_ctr_name(gdth_ha_str *ha);
-
-static int gdth_open(struct inode *inode, struct file *filep);
-static int gdth_close(struct inode *inode, struct file *filep);
-static long gdth_unlocked_ioctl(struct file *filep, unsigned int cmd,
- unsigned long arg);
-
-static void gdth_flush(gdth_ha_str *ha);
-static int gdth_queuecommand(struct Scsi_Host *h, struct scsi_cmnd *cmd);
-static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
- struct gdth_cmndinfo *cmndinfo);
-static void gdth_scsi_done(struct scsi_cmnd *scp);
-
-#ifdef DEBUG_GDTH
-static u8 DebugState = DEBUG_GDTH;
-#define TRACE(a) {if (DebugState==1) {printk a;}}
-#define TRACE2(a) {if (DebugState==1 || DebugState==2) {printk a;}}
-#define TRACE3(a) {if (DebugState!=0) {printk a;}}
-#else /* !DEBUG */
-#define TRACE(a)
-#define TRACE2(a)
-#define TRACE3(a)
-#endif
-
-#ifdef GDTH_STATISTICS
-static u32 max_rq=0, max_index=0, max_sg=0;
-static u32 act_ints=0, act_ios=0, act_stats=0, act_rq=0;
-static struct timer_list gdth_timer;
-#endif
-
-#define PTR2USHORT(a) (u16)(unsigned long)(a)
-#define GDTOFFSOF(a,b) (size_t)&(((a*)0)->b)
-#define INDEX_OK(i,t) ((i)<ARRAY_SIZE(t))
-
-#define BUS_L2P(a,b) ((b)>(a)->virt_bus ? (b-1):(b))
-
-static u8 gdth_polling; /* polling if TRUE */
-static int gdth_ctr_count = 0; /* controller count */
-static LIST_HEAD(gdth_instances); /* controller list */
-static u8 gdth_write_through = FALSE; /* write through */
-static gdth_evt_str ebuffer[MAX_EVENTS]; /* event buffer */
-static int elastidx;
-static int eoldidx;
-static int major;
-
-#define DIN 1 /* IN data direction */
-#define DOU 2 /* OUT data direction */
-#define DNO DIN /* no data transfer */
-#define DUN DIN /* unknown data direction */
-static u8 gdth_direction_tab[0x100] = {
- DNO,DNO,DIN,DIN,DOU,DIN,DIN,DOU,DIN,DUN,DOU,DOU,DUN,DUN,DUN,DIN,
- DNO,DIN,DIN,DOU,DIN,DOU,DNO,DNO,DOU,DNO,DIN,DNO,DIN,DOU,DNO,DUN,
- DIN,DUN,DIN,DUN,DOU,DIN,DUN,DUN,DIN,DIN,DOU,DNO,DUN,DIN,DOU,DOU,
- DOU,DOU,DOU,DNO,DIN,DNO,DNO,DIN,DOU,DOU,DOU,DOU,DIN,DOU,DIN,DOU,
- DOU,DOU,DIN,DIN,DIN,DNO,DUN,DNO,DNO,DNO,DUN,DNO,DOU,DIN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DIN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DIN,DUN,
- DUN,DUN,DUN,DUN,DUN,DNO,DNO,DUN,DIN,DNO,DOU,DUN,DNO,DUN,DOU,DOU,
- DOU,DOU,DOU,DNO,DUN,DIN,DOU,DIN,DIN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DOU,DUN,DUN,DUN,DUN,DUN,
- DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN,DUN
-};
-
-/* LILO and modprobe/insmod parameters */
-/* disable driver flag */
-static int disable __initdata = 0;
-/* reserve flag */
-static int reserve_mode = 1;
-/* reserve list */
-static int reserve_list[MAX_RES_ARGS] =
-{0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,
- 0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff,0xff};
-/* scan order for PCI controllers */
-static int reverse_scan = 0;
-/* virtual channel for the host drives */
-static int hdr_channel = 0;
-/* max. IDs per channel */
-static int max_ids = MAXID;
-/* rescan all IDs */
-static int rescan = 0;
-/* shared access */
-static int shared_access = 1;
-/* 64 bit DMA mode, support for drives > 2 TB, if force_dma32 = 0 */
-static int force_dma32 = 0;
-
-/* parameters for modprobe/insmod */
-module_param(disable, int, 0);
-module_param(reserve_mode, int, 0);
-module_param_array(reserve_list, int, NULL, 0);
-module_param(reverse_scan, int, 0);
-module_param(hdr_channel, int, 0);
-module_param(max_ids, int, 0);
-module_param(rescan, int, 0);
-module_param(shared_access, int, 0);
-module_param(force_dma32, int, 0);
-MODULE_AUTHOR("Achim Leubner");
-MODULE_LICENSE("GPL");
-
-/* ioctl interface */
-static const struct file_operations gdth_fops = {
- .unlocked_ioctl = gdth_unlocked_ioctl,
- .open = gdth_open,
- .release = gdth_close,
- .llseek = noop_llseek,
-};
-
-#include "gdth_proc.h"
-#include "gdth_proc.c"
-
-static gdth_ha_str *gdth_find_ha(int hanum)
-{
- gdth_ha_str *ha;
-
- list_for_each_entry(ha, &gdth_instances, list)
- if (hanum == ha->hanum)
- return ha;
-
- return NULL;
-}
-
-static struct gdth_cmndinfo *gdth_get_cmndinfo(gdth_ha_str *ha)
-{
- struct gdth_cmndinfo *priv = NULL;
- unsigned long flags;
- int i;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (i=0; i<GDTH_MAXCMDS; ++i) {
- if (ha->cmndinfo[i].index == 0) {
- priv = &ha->cmndinfo[i];
- memset(priv, 0, sizeof(*priv));
- priv->index = i+1;
- break;
- }
- }
-
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- return priv;
-}
-
-static void gdth_put_cmndinfo(struct gdth_cmndinfo *priv)
-{
- BUG_ON(!priv);
- priv->index = 0;
-}
-
-static void gdth_delay(int milliseconds)
-{
- if (milliseconds == 0) {
- udelay(1);
- } else {
- mdelay(milliseconds);
- }
-}
-
-static void gdth_scsi_done(struct scsi_cmnd *scp)
-{
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- int internal_command = cmndinfo->internal_command;
-
- TRACE2(("gdth_scsi_done()\n"));
-
- gdth_put_cmndinfo(cmndinfo);
- scp->host_scribble = NULL;
-
- if (internal_command)
- complete((struct completion *)scp->request);
- else
- scp->scsi_done(scp);
-}
-
-static int __gdth_execute(struct scsi_device *sdev, gdth_cmd_str *gdtcmd,
- char *cmnd, int timeout, u32 *info)
-{
- gdth_ha_str *ha = shost_priv(sdev->host);
- struct scsi_cmnd *scp;
- struct gdth_cmndinfo cmndinfo;
- DECLARE_COMPLETION_ONSTACK(wait);
- int rval;
-
- scp = kzalloc(sizeof(*scp), GFP_KERNEL);
- if (!scp)
- return -ENOMEM;
-
- scp->sense_buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
- if (!scp->sense_buffer) {
- kfree(scp);
- return -ENOMEM;
- }
-
- scp->device = sdev;
- memset(&cmndinfo, 0, sizeof(cmndinfo));
-
- /* use request field to save the ptr. to completion struct. */
- scp->request = (struct request *)&wait;
- scp->cmd_len = 12;
- scp->cmnd = cmnd;
- cmndinfo.priority = IOCTL_PRI;
- cmndinfo.internal_cmd_str = gdtcmd;
- cmndinfo.internal_command = 1;
-
- TRACE(("__gdth_execute() cmd 0x%x\n", scp->cmnd[0]));
- __gdth_queuecommand(ha, scp, &cmndinfo);
-
- wait_for_completion(&wait);
-
- rval = cmndinfo.status;
- if (info)
- *info = cmndinfo.info;
- kfree(scp->sense_buffer);
- kfree(scp);
- return rval;
-}
-
-int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info)
-{
- struct scsi_device *sdev = scsi_get_host_dev(shost);
- int rval = __gdth_execute(sdev, gdtcmd, cmnd, timeout, info);
-
- scsi_free_host_dev(sdev);
- return rval;
-}
-
-static void gdth_eval_mapping(u32 size, u32 *cyls, int *heads, int *secs)
-{
- *cyls = size /HEADS/SECS;
- if (*cyls <= MAXCYLS) {
- *heads = HEADS;
- *secs = SECS;
- } else { /* too high for 64*32 */
- *cyls = size /MEDHEADS/MEDSECS;
- if (*cyls <= MAXCYLS) {
- *heads = MEDHEADS;
- *secs = MEDSECS;
- } else { /* too high for 127*63 */
- *cyls = size /BIGHEADS/BIGSECS;
- *heads = BIGHEADS;
- *secs = BIGSECS;
- }
- }
-}
-
-static bool gdth_search_vortex(u16 device)
-{
- if (device <= PCI_DEVICE_ID_VORTEX_GDT6555)
- return true;
- if (device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP &&
- device <= PCI_DEVICE_ID_VORTEX_GDTMAXRP)
- return true;
- if (device == PCI_DEVICE_ID_VORTEX_GDTNEWRX ||
- device == PCI_DEVICE_ID_VORTEX_GDTNEWRX2)
- return true;
- return false;
-}
-
-static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out);
-static int gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-static void gdth_pci_remove_one(struct pci_dev *pdev);
-static void gdth_remove_one(gdth_ha_str *ha);
-
-/* Vortex only makes RAID controllers.
- * We do not really want to specify all 550 ids here, so wildcard match.
- */
-static const struct pci_device_id gdthtable[] = {
- { PCI_VDEVICE(VORTEX, PCI_ANY_ID) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC) },
- { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SRC_XSCALE) },
- { } /* terminate list */
-};
-MODULE_DEVICE_TABLE(pci, gdthtable);
-
-static struct pci_driver gdth_pci_driver = {
- .name = "gdth",
- .id_table = gdthtable,
- .probe = gdth_pci_init_one,
- .remove = gdth_pci_remove_one,
-};
-
-static void gdth_pci_remove_one(struct pci_dev *pdev)
-{
- gdth_ha_str *ha = pci_get_drvdata(pdev);
-
- list_del(&ha->list);
- gdth_remove_one(ha);
-
- pci_disable_device(pdev);
-}
-
-static int gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- u16 vendor = pdev->vendor;
- u16 device = pdev->device;
- unsigned long base0, base1, base2;
- int rc;
- gdth_pci_str gdth_pcistr;
- gdth_ha_str *ha = NULL;
-
- TRACE(("gdth_search_dev() cnt %d vendor %x device %x\n",
- gdth_ctr_count, vendor, device));
-
- memset(&gdth_pcistr, 0, sizeof(gdth_pcistr));
-
- if (vendor == PCI_VENDOR_ID_VORTEX && !gdth_search_vortex(device))
- return -ENODEV;
-
- rc = pci_enable_device(pdev);
- if (rc)
- return rc;
-
- if (gdth_ctr_count >= MAXHA)
- return -EBUSY;
-
- /* GDT PCI controller found, resources are already in pdev */
- gdth_pcistr.pdev = pdev;
- base0 = pci_resource_flags(pdev, 0);
- base1 = pci_resource_flags(pdev, 1);
- base2 = pci_resource_flags(pdev, 2);
- if (device <= PCI_DEVICE_ID_VORTEX_GDT6000B || /* GDT6000/B */
- device >= PCI_DEVICE_ID_VORTEX_GDT6x17RP) { /* MPR */
- if (!(base0 & IORESOURCE_MEM))
- return -ENODEV;
- gdth_pcistr.dpmem = pci_resource_start(pdev, 0);
- } else { /* GDT6110, GDT6120, .. */
- if (!(base0 & IORESOURCE_MEM) ||
- !(base2 & IORESOURCE_MEM) ||
- !(base1 & IORESOURCE_IO))
- return -ENODEV;
- gdth_pcistr.dpmem = pci_resource_start(pdev, 2);
- gdth_pcistr.io = pci_resource_start(pdev, 1);
- }
- TRACE2(("Controller found at %d/%d, irq %d, dpmem 0x%lx\n",
- gdth_pcistr.pdev->bus->number,
- PCI_SLOT(gdth_pcistr.pdev->devfn),
- gdth_pcistr.irq,
- gdth_pcistr.dpmem));
-
- rc = gdth_pci_probe_one(&gdth_pcistr, &ha);
- if (rc)
- return rc;
-
- return 0;
-}
-
-static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
- gdth_ha_str *ha)
-{
- register gdt6_dpram_str __iomem *dp6_ptr;
- register gdt6c_dpram_str __iomem *dp6c_ptr;
- register gdt6m_dpram_str __iomem *dp6m_ptr;
- u32 retries;
- u8 prot_ver;
- u16 command;
- int i, found = FALSE;
-
- TRACE(("gdth_init_pci()\n"));
-
- if (pdev->vendor == PCI_VENDOR_ID_INTEL)
- ha->oem_id = OEM_ID_INTEL;
- else
- ha->oem_id = OEM_ID_ICP;
- ha->brd_phys = (pdev->bus->number << 8) | (pdev->devfn & 0xf8);
- ha->stype = (u32)pdev->device;
- ha->irq = pdev->irq;
- ha->pdev = pdev;
-
- if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6000B) { /* GDT6000/B */
- TRACE2(("init_pci() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- /* check and reset interface area */
- dp6_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6_ptr->u);
- if (readl(&dp6_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_old() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
- ha->brd = ioremap(i, sizeof(gdt6_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6_ptr->u);
- if (readl(&dp6_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6_ptr->u, 0, sizeof(dp6_ptr->u));
- if (readl(&dp6_ptr->u) != 0) {
- printk("GDT-PCI: Initialization error (DPMEM write error)\n");
- iounmap(ha->brd);
- return 0;
- }
-
- /* disable board interrupts, deinit services */
- writeb(0xff, &dp6_ptr->io.irqdel);
- writeb(0x00, &dp6_ptr->io.irqen);
- writeb(0x00, &dp6_ptr->u.ic.S_Status);
- writeb(0x00, &dp6_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp6_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6_ptr->u.ic.S_Status);
- writeb(0xff, &dp6_ptr->io.irqdel);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCI;
- ha->ic_all_size = sizeof(dp6_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6_ptr->u.ic.S_Cmd_Indx);
- writeb(0, &dp6_ptr->io.event);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6_ptr->u.ic.S_Status);
- writeb(0xff, &dp6_ptr->io.irqdel);
-
- ha->dma64_support = 0;
-
- } else if (ha->pdev->device <= PCI_DEVICE_ID_VORTEX_GDT6555) { /* GDT6110, ... */
- ha->plx = (gdt6c_plx_regs *)pcistr->io;
- TRACE2(("init_pci_new() dpmem %lx irq %d\n",
- pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6c_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- iounmap(ha->brd);
- return 0;
- }
- /* check and reset interface area */
- dp6c_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6c_ptr->u);
- if (readl(&dp6c_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_plx() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_2, i);
- ha->brd = ioremap(i, sizeof(gdt6c_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6c_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6c_ptr->u);
- if (readl(&dp6c_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6c_ptr->u, 0, sizeof(dp6c_ptr->u));
- if (readl(&dp6c_ptr->u) != 0) {
- printk("GDT-PCI: Initialization error (DPMEM write error)\n");
- iounmap(ha->brd);
- return 0;
- }
-
- /* disable board interrupts, deinit services */
- outb(0x00,PTR2USHORT(&ha->plx->control1));
- outb(0xff,PTR2USHORT(&ha->plx->edoor_reg));
-
- writeb(0x00, &dp6c_ptr->u.ic.S_Status);
- writeb(0x00, &dp6c_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6c_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6c_ptr->u.ic.S_Cmd_Indx);
-
- outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
-
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6c_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6c_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6c_ptr->u.ic.Status);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCINEW;
- ha->ic_all_size = sizeof(dp6c_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6c_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6c_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6c_ptr->u.ic.S_Cmd_Indx);
-
- outb(1,PTR2USHORT(&ha->plx->ldoor_reg));
-
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6c_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6c_ptr->u.ic.S_Status);
-
- ha->dma64_support = 0;
-
- } else { /* MPR */
- TRACE2(("init_pci_mpr() dpmem %lx irq %d\n",pcistr->dpmem,ha->irq));
- ha->brd = ioremap(pcistr->dpmem, sizeof(gdt6m_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
-
- /* manipulate config. space to enable DPMEM, start RP controller */
- pci_read_config_word(pdev, PCI_COMMAND, &command);
- command |= 6;
- pci_write_config_word(pdev, PCI_COMMAND, command);
- gdth_delay(1);
-
- dp6m_ptr = ha->brd;
-
- /* Ensure that it is safe to access the non HW portions of DPMEM.
- * Aditional check needed for Xscale based RAID controllers */
- while( ((int)readb(&dp6m_ptr->i960r.sema0_reg) ) & 3 )
- gdth_delay(1);
-
- /* check and reset interface area */
- writel(DPMEM_MAGIC, &dp6m_ptr->u);
- if (readl(&dp6m_ptr->u) != DPMEM_MAGIC) {
- printk("GDT-PCI: Cannot access DPMEM at 0x%lx (shadowed?)\n",
- pcistr->dpmem);
- found = FALSE;
- for (i = 0xC8000; i < 0xE8000; i += 0x4000) {
- iounmap(ha->brd);
- ha->brd = ioremap(i, sizeof(u16));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- if (readw(ha->brd) != 0xffff) {
- TRACE2(("init_pci_mpr() address 0x%x busy\n", i));
- continue;
- }
- iounmap(ha->brd);
- pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, i);
- ha->brd = ioremap(i, sizeof(gdt6m_dpram_str));
- if (ha->brd == NULL) {
- printk("GDT-PCI: Initialization error (DPMEM remap error)\n");
- return 0;
- }
- dp6m_ptr = ha->brd;
- writel(DPMEM_MAGIC, &dp6m_ptr->u);
- if (readl(&dp6m_ptr->u) == DPMEM_MAGIC) {
- printk("GDT-PCI: Use free address at 0x%x\n", i);
- found = TRUE;
- break;
- }
- }
- if (!found) {
- printk("GDT-PCI: No free address found!\n");
- iounmap(ha->brd);
- return 0;
- }
- }
- memset_io(&dp6m_ptr->u, 0, sizeof(dp6m_ptr->u));
-
- /* disable board interrupts, deinit services */
- writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) | 4,
- &dp6m_ptr->i960r.edoor_en_reg);
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0x00, &dp6m_ptr->u.ic.S_Status);
- writeb(0x00, &dp6m_ptr->u.ic.Cmd_Index);
-
- writel(pcistr->dpmem, &dp6m_ptr->u.ic.S_Info[0]);
- writeb(0xff, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xff) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)readl(&dp6m_ptr->u.ic.S_Info[0]);
- writeb(0, &dp6m_ptr->u.ic.S_Status);
- if (prot_ver != PROTOCOL_VERSION) {
- printk("GDT-PCI: Illegal protocol version\n");
- iounmap(ha->brd);
- return 0;
- }
-
- ha->type = GDT_PCIMPR;
- ha->ic_all_size = sizeof(dp6m_ptr->u);
-
- /* special command to controller BIOS */
- writel(0x00, &dp6m_ptr->u.ic.S_Info[0]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[1]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[2]);
- writel(0x00, &dp6m_ptr->u.ic.S_Info[3]);
- writeb(0xfe, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfe) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- writeb(0, &dp6m_ptr->u.ic.S_Status);
-
- /* read FW version to detect 64-bit DMA support */
- writeb(0xfd, &dp6m_ptr->u.ic.S_Cmd_Indx);
- writeb(1, &dp6m_ptr->i960r.ldoor_reg);
- retries = INIT_RETRIES;
- gdth_delay(20);
- while (readb(&dp6m_ptr->u.ic.S_Status) != 0xfd) {
- if (--retries == 0) {
- printk("GDT-PCI: Initialization error (DEINIT failed)\n");
- iounmap(ha->brd);
- return 0;
- }
- gdth_delay(1);
- }
- prot_ver = (u8)(readl(&dp6m_ptr->u.ic.S_Info[0]) >> 16);
- writeb(0, &dp6m_ptr->u.ic.S_Status);
- if (prot_ver < 0x2b) /* FW < x.43: no 64-bit DMA support */
- ha->dma64_support = 0;
- else
- ha->dma64_support = 1;
- }
-
- return 1;
-}
-
-/* controller protocol functions */
-
-static void gdth_enable_int(gdth_ha_str *ha)
-{
- unsigned long flags;
- gdt6_dpram_str __iomem *dp6_ptr;
- gdt6m_dpram_str __iomem *dp6m_ptr;
-
- TRACE(("gdth_enable_int() hanum %d\n",ha->hanum));
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- writeb(1, &dp6_ptr->io.irqdel);
- writeb(0, &dp6_ptr->u.ic.Cmd_Index);
- writeb(1, &dp6_ptr->io.irqen);
- } else if (ha->type == GDT_PCINEW) {
- outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
- outb(0x03, PTR2USHORT(&ha->plx->control1));
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(readb(&dp6m_ptr->i960r.edoor_en_reg) & ~4,
- &dp6m_ptr->i960r.edoor_en_reg);
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-/* return IStatus if the interrupt was from this card, else 0 */
-static u8 gdth_get_status(gdth_ha_str *ha)
-{
- u8 IStatus = 0;
-
- TRACE(("gdth_get_status() irq %d ctr_count %d\n", ha->irq, gdth_ctr_count));
-
- if (ha->type == GDT_PCI)
- IStatus =
- readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Cmd_Index);
- else if (ha->type == GDT_PCINEW)
- IStatus = inb(PTR2USHORT(&ha->plx->edoor_reg));
- else if (ha->type == GDT_PCIMPR)
- IStatus =
- readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.edoor_reg);
-
- return IStatus;
-}
-
-static int gdth_test_busy(gdth_ha_str *ha)
-{
- register int gdtsema0 = 0;
-
- TRACE(("gdth_test_busy() hanum %d\n", ha->hanum));
-
- if (ha->type == GDT_PCI)
- gdtsema0 = (int)readb(&((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- else if (ha->type == GDT_PCINEW)
- gdtsema0 = (int)inb(PTR2USHORT(&ha->plx->sema0_reg));
- else if (ha->type == GDT_PCIMPR)
- gdtsema0 =
- (int)readb(&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
-
- return (gdtsema0 & 1);
-}
-
-
-static int gdth_get_cmd_index(gdth_ha_str *ha)
-{
- int i;
-
- TRACE(("gdth_get_cmd_index() hanum %d\n", ha->hanum));
-
- for (i=0; i<GDTH_MAXCMDS; ++i) {
- if (ha->cmd_tab[i].cmnd == UNUSED_CMND) {
- ha->cmd_tab[i].cmnd = ha->pccb->RequestBuffer;
- ha->cmd_tab[i].service = ha->pccb->Service;
- ha->pccb->CommandIndex = (u32)i+2;
- return (i+2);
- }
- }
- return 0;
-}
-
-
-static void gdth_set_sema0(gdth_ha_str *ha)
-{
- TRACE(("gdth_set_sema0() hanum %d\n", ha->hanum));
-
- if (ha->type == GDT_PCI) {
- writeb(1, &((gdt6_dpram_str __iomem *)ha->brd)->u.ic.Sema0);
- } else if (ha->type == GDT_PCINEW) {
- outb(1, PTR2USHORT(&ha->plx->sema0_reg));
- } else if (ha->type == GDT_PCIMPR) {
- writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.sema0_reg);
- }
-}
-
-
-static void gdth_copy_command(gdth_ha_str *ha)
-{
- register gdth_cmd_str *cmd_ptr;
- register gdt6m_dpram_str __iomem *dp6m_ptr;
- register gdt6c_dpram_str __iomem *dp6c_ptr;
- gdt6_dpram_str __iomem *dp6_ptr;
- u16 cp_count,dp_offset,cmd_no;
-
- TRACE(("gdth_copy_command() hanum %d\n", ha->hanum));
-
- cp_count = ha->cmd_len;
- dp_offset= ha->cmd_offs_dpmem;
- cmd_no = ha->cmd_cnt;
- cmd_ptr = ha->pccb;
-
- ++ha->cmd_cnt;
-
- /* round cp_count up to dword alignment */
- if (cp_count & 3)
- cp_count += (4 - (cp_count & 3));
-
- ha->cmd_offs_dpmem += cp_count;
-
- /* set offset and service, copy command to DPMEM */
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- } else if (ha->type == GDT_PCINEW) {
- dp6c_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6c_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6c_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6c_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- writew(dp_offset + DPMEM_COMMAND_OFFSET,
- &dp6m_ptr->u.ic.comm_queue[cmd_no].offset);
- writew((u16)cmd_ptr->Service,
- &dp6m_ptr->u.ic.comm_queue[cmd_no].serv_id);
- memcpy_toio(&dp6m_ptr->u.ic.gdt_dpr_cmd[dp_offset],cmd_ptr,cp_count);
- }
-}
-
-
-static void gdth_release_event(gdth_ha_str *ha)
-{
- TRACE(("gdth_release_event() hanum %d\n", ha->hanum));
-
-#ifdef GDTH_STATISTICS
- {
- u32 i,j;
- for (i=0,j=0; j<GDTH_MAXCMDS; ++j) {
- if (ha->cmd_tab[j].cmnd != UNUSED_CMND)
- ++i;
- }
- if (max_index < i) {
- max_index = i;
- TRACE3(("GDT: max_index = %d\n",(u16)i));
- }
- }
-#endif
-
- if (ha->pccb->OpCode == GDT_INIT)
- ha->pccb->Service |= 0x80;
-
- if (ha->type == GDT_PCI) {
- writeb(0, &((gdt6_dpram_str __iomem *)ha->brd)->io.event);
- } else if (ha->type == GDT_PCINEW) {
- outb(1, PTR2USHORT(&ha->plx->ldoor_reg));
- } else if (ha->type == GDT_PCIMPR) {
- writeb(1, &((gdt6m_dpram_str __iomem *)ha->brd)->i960r.ldoor_reg);
- }
-}
-
-static int gdth_wait(gdth_ha_str *ha, int index, u32 time)
-{
- int answer_found = FALSE;
- int wait_index = 0;
-
- TRACE(("gdth_wait() hanum %d index %d time %d\n", ha->hanum, index, time));
-
- if (index == 0)
- return 1; /* no wait required */
-
- do {
- __gdth_interrupt(ha, true, &wait_index);
- if (wait_index == index) {
- answer_found = TRUE;
- break;
- }
- gdth_delay(1);
- } while (--time);
-
- while (gdth_test_busy(ha))
- gdth_delay(0);
-
- return (answer_found);
-}
-
-
-static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
- u32 p1, u64 p2, u64 p3)
-{
- register gdth_cmd_str *cmd_ptr;
- int retries,index;
-
- TRACE2(("gdth_internal_cmd() service %d opcode %d\n",service,opcode));
-
- cmd_ptr = ha->pccb;
- memset((char*)cmd_ptr,0,sizeof(gdth_cmd_str));
-
- /* make command */
- for (retries = INIT_RETRIES;;) {
- cmd_ptr->Service = service;
- cmd_ptr->RequestBuffer = INTERNAL_CMND;
- if (!(index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- gdth_set_sema0(ha);
- cmd_ptr->OpCode = opcode;
- cmd_ptr->BoardNode = LOCALBOARD;
- if (service == CACHESERVICE) {
- if (opcode == GDT_IOCTL) {
- cmd_ptr->u.ioctl.subfunc = p1;
- cmd_ptr->u.ioctl.channel = (u32)p2;
- cmd_ptr->u.ioctl.param_size = (u16)p3;
- cmd_ptr->u.ioctl.p_param = ha->scratch_phys;
- } else {
- if (ha->cache_feat & GDT_64BIT) {
- cmd_ptr->u.cache64.DeviceNo = (u16)p1;
- cmd_ptr->u.cache64.BlockNo = p2;
- } else {
- cmd_ptr->u.cache.DeviceNo = (u16)p1;
- cmd_ptr->u.cache.BlockNo = (u32)p2;
- }
- }
- } else if (service == SCSIRAWSERVICE) {
- if (ha->raw_feat & GDT_64BIT) {
- cmd_ptr->u.raw64.direction = p1;
- cmd_ptr->u.raw64.bus = (u8)p2;
- cmd_ptr->u.raw64.target = (u8)p3;
- cmd_ptr->u.raw64.lun = (u8)(p3 >> 8);
- } else {
- cmd_ptr->u.raw.direction = p1;
- cmd_ptr->u.raw.bus = (u8)p2;
- cmd_ptr->u.raw.target = (u8)p3;
- cmd_ptr->u.raw.lun = (u8)(p3 >> 8);
- }
- } else if (service == SCREENSERVICE) {
- if (opcode == GDT_REALTIME) {
- *(u32 *)&cmd_ptr->u.screen.su.data[0] = p1;
- *(u32 *)&cmd_ptr->u.screen.su.data[4] = (u32)p2;
- *(u32 *)&cmd_ptr->u.screen.su.data[8] = (u32)p3;
- }
- }
- ha->cmd_len = sizeof(gdth_cmd_str);
- ha->cmd_offs_dpmem = 0;
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- gdth_delay(20);
- if (!gdth_wait(ha, index, INIT_TIMEOUT)) {
- printk("GDT: Initialization error (timeout service %d)\n",service);
- return 0;
- }
- if (ha->status != S_BSY || --retries == 0)
- break;
- gdth_delay(1);
- }
-
- return (ha->status != S_OK ? 0:1);
-}
-
-
-/* search for devices */
-
-static int gdth_search_drives(gdth_ha_str *ha)
-{
- u16 cdev_cnt, i;
- int ok;
- u32 bus_no, drv_cnt, drv_no, j;
- gdth_getch_str *chn;
- gdth_drlist_str *drl;
- gdth_iochan_str *ioc;
- gdth_raw_iochan_str *iocr;
- gdth_arcdl_str *alst;
- gdth_alist_str *alst2;
- gdth_oem_str_ioctl *oemstr;
-
- TRACE(("gdth_search_drives() hanum %d\n", ha->hanum));
- ok = 0;
-
- /* initialize controller services, at first: screen service */
- ha->screen_feat = 0;
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_X_INIT_SCR, 0, 0, 0);
- if (ok)
- ha->screen_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, SCREENSERVICE, GDT_INIT, 0, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error screen service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): SCREENSERVICE initialized\n"));
-
- /* unfreeze all IOs */
- gdth_internal_cmd(ha, CACHESERVICE, GDT_UNFREEZE_IO, 0, 0, 0);
-
- /* initialize cache service */
- ha->cache_feat = 0;
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INIT_HOST, LINUX_OS,
- 0, 0);
- if (ok)
- ha->cache_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, CACHESERVICE, GDT_INIT, LINUX_OS, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error cache service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): CACHESERVICE initialized\n"));
- cdev_cnt = (u16)ha->info;
- ha->fw_vers = ha->service;
-
- /* detect number of buses - try new IOCTL */
- iocr = (gdth_raw_iochan_str *)ha->pscratch;
- iocr->hdr.version = 0xffffffff;
- iocr->hdr.list_entries = MAXBUS;
- iocr->hdr.first_chan = 0;
- iocr->hdr.last_chan = MAXBUS-1;
- iocr->hdr.list_offset = GDTOFFSOF(gdth_raw_iochan_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_RAW_DESC,
- INVALID_CHANNEL,sizeof(gdth_raw_iochan_str))) {
- TRACE2(("IOCHAN_RAW_DESC supported!\n"));
- ha->bus_cnt = iocr->hdr.chan_count;
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- if (iocr->list[bus_no].proc_id < MAXID)
- ha->bus_id[bus_no] = iocr->list[bus_no].proc_id;
- else
- ha->bus_id[bus_no] = 0xff;
- }
- } else {
- /* old method */
- chn = (gdth_getch_str *)ha->pscratch;
- for (bus_no = 0; bus_no < MAXBUS; ++bus_no) {
- chn->channel_no = bus_no;
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_CHAN_CNT | L_CTRL_PATTERN,
- IO_CHANNEL | INVALID_CHANNEL,
- sizeof(gdth_getch_str))) {
- if (bus_no == 0) {
- printk("GDT-HA %d: Error detecting channel count (0x%x)\n",
- ha->hanum, ha->status);
- return 0;
- }
- break;
- }
- if (chn->siop_id < MAXID)
- ha->bus_id[bus_no] = chn->siop_id;
- else
- ha->bus_id[bus_no] = 0xff;
- }
- ha->bus_cnt = (u8)bus_no;
- }
- TRACE2(("gdth_search_drives() %d channels\n",ha->bus_cnt));
-
- /* read cache configuration */
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_INFO,
- INVALID_CHANNEL,sizeof(gdth_cinfo_str))) {
- printk("GDT-HA %d: Initialization error cache service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- ha->cpar = ((gdth_cinfo_str *)ha->pscratch)->cpar;
- TRACE2(("gdth_search_drives() cinfo: vs %x sta %d str %d dw %d b %d\n",
- ha->cpar.version,ha->cpar.state,ha->cpar.strategy,
- ha->cpar.write_back,ha->cpar.block_size));
-
- /* read board info and features */
- ha->more_proc = FALSE;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_INFO,
- INVALID_CHANNEL,sizeof(gdth_binfo_str))) {
- memcpy(&ha->binfo, (gdth_binfo_str *)ha->pscratch,
- sizeof(gdth_binfo_str));
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, BOARD_FEATURES,
- INVALID_CHANNEL,sizeof(gdth_bfeat_str))) {
- TRACE2(("BOARD_INFO/BOARD_FEATURES supported\n"));
- ha->bfeat = *(gdth_bfeat_str *)ha->pscratch;
- ha->more_proc = TRUE;
- }
- } else {
- TRACE2(("BOARD_INFO requires firmware >= 1.10/2.08\n"));
- strcpy(ha->binfo.type_string, gdth_ctr_name(ha));
- }
- TRACE2(("Controller name: %s\n",ha->binfo.type_string));
-
- /* read more information */
- if (ha->more_proc) {
- /* physical drives, channel addresses */
- ioc = (gdth_iochan_str *)ha->pscratch;
- ioc->hdr.version = 0xffffffff;
- ioc->hdr.list_entries = MAXBUS;
- ioc->hdr.first_chan = 0;
- ioc->hdr.last_chan = MAXBUS-1;
- ioc->hdr.list_offset = GDTOFFSOF(gdth_iochan_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, IOCHAN_DESC,
- INVALID_CHANNEL,sizeof(gdth_iochan_str))) {
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- ha->raw[bus_no].address = ioc->list[bus_no].address;
- ha->raw[bus_no].local_no = ioc->list[bus_no].local_no;
- }
- } else {
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- ha->raw[bus_no].address = IO_CHANNEL;
- ha->raw[bus_no].local_no = bus_no;
- }
- }
- for (bus_no = 0; bus_no < ha->bus_cnt; ++bus_no) {
- chn = (gdth_getch_str *)ha->pscratch;
- chn->channel_no = ha->raw[bus_no].local_no;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_CHAN_CNT | L_CTRL_PATTERN,
- ha->raw[bus_no].address | INVALID_CHANNEL,
- sizeof(gdth_getch_str))) {
- ha->raw[bus_no].pdev_cnt = chn->drive_cnt;
- TRACE2(("Channel %d: %d phys. drives\n",
- bus_no,chn->drive_cnt));
- }
- if (ha->raw[bus_no].pdev_cnt > 0) {
- drl = (gdth_drlist_str *)ha->pscratch;
- drl->sc_no = ha->raw[bus_no].local_no;
- drl->sc_cnt = ha->raw[bus_no].pdev_cnt;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- SCSI_DR_LIST | L_CTRL_PATTERN,
- ha->raw[bus_no].address | INVALID_CHANNEL,
- sizeof(gdth_drlist_str))) {
- for (j = 0; j < ha->raw[bus_no].pdev_cnt; ++j)
- ha->raw[bus_no].id_list[j] = drl->sc_list[j];
- } else {
- ha->raw[bus_no].pdev_cnt = 0;
- }
- }
- }
-
- /* logical drives */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_CNT,
- INVALID_CHANNEL,sizeof(u32))) {
- drv_cnt = *(u32 *)ha->pscratch;
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL, CACHE_DRV_LIST,
- INVALID_CHANNEL,drv_cnt * sizeof(u32))) {
- for (j = 0; j < drv_cnt; ++j) {
- drv_no = ((u32 *)ha->pscratch)[j];
- if (drv_no < MAX_LDRIVES) {
- ha->hdr[drv_no].is_logdrv = TRUE;
- TRACE2(("Drive %d is log. drive\n",drv_no));
- }
- }
- }
- alst = (gdth_arcdl_str *)ha->pscratch;
- alst->entries_avail = MAX_LDRIVES;
- alst->first_entry = 0;
- alst->list_offset = GDTOFFSOF(gdth_arcdl_str, list[0]);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- ARRAY_DRV_LIST2 | LA_CTRL_PATTERN,
- INVALID_CHANNEL, sizeof(gdth_arcdl_str) +
- (alst->entries_avail-1) * sizeof(gdth_alist_str))) {
- for (j = 0; j < alst->entries_init; ++j) {
- ha->hdr[j].is_arraydrv = alst->list[j].is_arrayd;
- ha->hdr[j].is_master = alst->list[j].is_master;
- ha->hdr[j].is_parity = alst->list[j].is_parity;
- ha->hdr[j].is_hotfix = alst->list[j].is_hotfix;
- ha->hdr[j].master_no = alst->list[j].cd_handle;
- }
- } else if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- ARRAY_DRV_LIST | LA_CTRL_PATTERN,
- 0, 35 * sizeof(gdth_alist_str))) {
- for (j = 0; j < 35; ++j) {
- alst2 = &((gdth_alist_str *)ha->pscratch)[j];
- ha->hdr[j].is_arraydrv = alst2->is_arrayd;
- ha->hdr[j].is_master = alst2->is_master;
- ha->hdr[j].is_parity = alst2->is_parity;
- ha->hdr[j].is_hotfix = alst2->is_hotfix;
- ha->hdr[j].master_no = alst2->cd_handle;
- }
- }
- }
- }
-
- /* initialize raw service */
- ha->raw_feat = 0;
- if (!force_dma32) {
- ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_X_INIT_RAW, 0, 0, 0);
- if (ok)
- ha->raw_feat = GDT_64BIT;
- }
- if (force_dma32 || (!ok && ha->status == (u16)S_NOFUNC))
- ok = gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_INIT, 0, 0, 0);
- if (!ok) {
- printk("GDT-HA %d: Initialization error raw service (code %d)\n",
- ha->hanum, ha->status);
- return 0;
- }
- TRACE2(("gdth_search_drives(): RAWSERVICE initialized\n"));
-
- /* set/get features raw service (scatter/gather) */
- if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_SET_FEAT, SCATTER_GATHER,
- 0, 0)) {
- TRACE2(("gdth_search_drives(): set features RAWSERVICE OK\n"));
- if (gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_GET_FEAT, 0, 0, 0)) {
- TRACE2(("gdth_search_dr(): get feat RAWSERVICE %d\n",
- ha->info));
- ha->raw_feat |= (u16)ha->info;
- }
- }
-
- /* set/get features cache service (equal to raw service) */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_SET_FEAT, 0,
- SCATTER_GATHER,0)) {
- TRACE2(("gdth_search_drives(): set features CACHESERVICE OK\n"));
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_GET_FEAT, 0, 0, 0)) {
- TRACE2(("gdth_search_dr(): get feat CACHESERV. %d\n",
- ha->info));
- ha->cache_feat |= (u16)ha->info;
- }
- }
-
- /* reserve drives for raw service */
- if (reserve_mode != 0) {
- gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE_ALL,
- reserve_mode == 1 ? 1 : 3, 0, 0);
- TRACE2(("gdth_search_drives(): RESERVE_ALL code %d\n",
- ha->status));
- }
- for (i = 0; i < MAX_RES_ARGS; i += 4) {
- if (reserve_list[i] == ha->hanum && reserve_list[i+1] < ha->bus_cnt &&
- reserve_list[i+2] < ha->tid_cnt && reserve_list[i+3] < MAXLUN) {
- TRACE2(("gdth_search_drives(): reserve ha %d bus %d id %d lun %d\n",
- reserve_list[i], reserve_list[i+1],
- reserve_list[i+2], reserve_list[i+3]));
- if (!gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESERVE, 0,
- reserve_list[i+1], reserve_list[i+2] |
- (reserve_list[i+3] << 8))) {
- printk("GDT-HA %d: Error raw service (RESERVE, code %d)\n",
- ha->hanum, ha->status);
- }
- }
- }
-
- /* Determine OEM string using IOCTL */
- oemstr = (gdth_oem_str_ioctl *)ha->pscratch;
- oemstr->params.ctl_version = 0x01;
- oemstr->params.buffer_size = sizeof(oemstr->text);
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_IOCTL,
- CACHE_READ_OEM_STRING_RECORD,INVALID_CHANNEL,
- sizeof(gdth_oem_str_ioctl))) {
- TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD OK\n"));
- printk("GDT-HA %d: Vendor: %s Name: %s\n",
- ha->hanum, oemstr->text.oem_company_name, ha->binfo.type_string);
- /* Save the Host Drive inquiry data */
- strlcpy(ha->oem_name,oemstr->text.scsi_host_drive_inquiry_vendor_id,
- sizeof(ha->oem_name));
- } else {
- /* Old method, based on PCI ID */
- TRACE2(("gdth_search_drives(): CACHE_READ_OEM_STRING_RECORD failed\n"));
- printk("GDT-HA %d: Name: %s\n",
- ha->hanum, ha->binfo.type_string);
- if (ha->oem_id == OEM_ID_INTEL)
- strlcpy(ha->oem_name,"Intel ", sizeof(ha->oem_name));
- else
- strlcpy(ha->oem_name,"ICP ", sizeof(ha->oem_name));
- }
-
- /* scanning for host drives */
- for (i = 0; i < cdev_cnt; ++i)
- gdth_analyse_hdrive(ha, i);
-
- TRACE(("gdth_search_drives() OK\n"));
- return 1;
-}
-
-static int gdth_analyse_hdrive(gdth_ha_str *ha, u16 hdrive)
-{
- u32 drv_cyls;
- int drv_hds, drv_secs;
-
- TRACE(("gdth_analyse_hdrive() hanum %d drive %d\n", ha->hanum, hdrive));
- if (hdrive >= MAX_HDRIVES)
- return 0;
-
- if (!gdth_internal_cmd(ha, CACHESERVICE, GDT_INFO, hdrive, 0, 0))
- return 0;
- ha->hdr[hdrive].present = TRUE;
- ha->hdr[hdrive].size = ha->info;
-
- /* evaluate mapping (sectors per head, heads per cylinder) */
- ha->hdr[hdrive].size &= ~SECS32;
- if (ha->info2 == 0) {
- gdth_eval_mapping(ha->hdr[hdrive].size,&drv_cyls,&drv_hds,&drv_secs);
- } else {
- drv_hds = ha->info2 & 0xff;
- drv_secs = (ha->info2 >> 8) & 0xff;
- drv_cyls = (u32)ha->hdr[hdrive].size / drv_hds / drv_secs;
- }
- ha->hdr[hdrive].heads = (u8)drv_hds;
- ha->hdr[hdrive].secs = (u8)drv_secs;
- /* round size */
- ha->hdr[hdrive].size = drv_cyls * drv_hds * drv_secs;
-
- if (ha->cache_feat & GDT_64BIT) {
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_X_INFO, hdrive, 0, 0)
- && ha->info2 != 0) {
- ha->hdr[hdrive].size = ((u64)ha->info2 << 32) | ha->info;
- }
- }
- TRACE2(("gdth_search_dr() cdr. %d size %d hds %d scs %d\n",
- hdrive,ha->hdr[hdrive].size,drv_hds,drv_secs));
-
- /* get information about the device */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_DEVTYPE, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d devtype %d\n",
- hdrive,ha->info));
- ha->hdr[hdrive].devtype = (u16)ha->info;
- }
-
- /* cluster info */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_CLUST_INFO, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d cluster info %d\n",
- hdrive,ha->info));
- if (!shared_access)
- ha->hdr[hdrive].cluster_type = (u8)ha->info;
- }
-
- /* R/W attributes */
- if (gdth_internal_cmd(ha, CACHESERVICE, GDT_RW_ATTRIBS, hdrive, 0, 0)) {
- TRACE2(("gdth_search_dr() cache drive %d r/w attrib. %d\n",
- hdrive,ha->info));
- ha->hdr[hdrive].rw_attribs = (u8)ha->info;
- }
-
- return 1;
-}
-
-
-/* command queueing/sending functions */
-
-static void gdth_putq(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 priority)
-{
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- register struct scsi_cmnd *pscp;
- register struct scsi_cmnd *nscp;
- unsigned long flags;
-
- TRACE(("gdth_putq() priority %d\n",priority));
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- if (!cmndinfo->internal_command)
- cmndinfo->priority = priority;
-
- if (ha->req_first==NULL) {
- ha->req_first = scp; /* queue was empty */
- scp->SCp.ptr = NULL;
- } else { /* queue not empty */
- pscp = ha->req_first;
- nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- /* priority: 0-highest,..,0xff-lowest */
- while (nscp && gdth_cmnd_priv(nscp)->priority <= priority) {
- pscp = nscp;
- nscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- }
- pscp->SCp.ptr = (char *)scp;
- scp->SCp.ptr = (char *)nscp;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
-#ifdef GDTH_STATISTICS
- flags = 0;
- for (nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
- ++flags;
- if (max_rq < flags) {
- max_rq = flags;
- TRACE3(("GDT: max_rq = %d\n",(u16)max_rq));
- }
-#endif
-}
-
-static void gdth_next(gdth_ha_str *ha)
-{
- register struct scsi_cmnd *pscp;
- register struct scsi_cmnd *nscp;
- u8 b, t, l, firsttime;
- u8 this_cmd, next_cmd;
- unsigned long flags = 0;
- int cmd_index;
-
- TRACE(("gdth_next() hanum %d\n", ha->hanum));
- if (!gdth_polling)
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- ha->cmd_cnt = ha->cmd_offs_dpmem = 0;
- this_cmd = firsttime = TRUE;
- next_cmd = gdth_polling ? FALSE:TRUE;
- cmd_index = 0;
-
- for (nscp = pscp = ha->req_first; nscp; nscp = (struct scsi_cmnd *)nscp->SCp.ptr) {
- struct gdth_cmndinfo *nscp_cmndinfo = gdth_cmnd_priv(nscp);
- if (nscp != pscp && nscp != (struct scsi_cmnd *)pscp->SCp.ptr)
- pscp = (struct scsi_cmnd *)pscp->SCp.ptr;
- if (!nscp_cmndinfo->internal_command) {
- b = nscp->device->channel;
- t = nscp->device->id;
- l = nscp->device->lun;
- if (nscp_cmndinfo->priority >= DEFAULT_PRI) {
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha,b)].lock) ||
- (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock))
- continue;
- }
- } else
- b = t = l = 0;
-
- if (firsttime) {
- if (gdth_test_busy(ha)) { /* controller busy ? */
- TRACE(("gdth_next() controller %d busy !\n", ha->hanum));
- if (!gdth_polling) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return;
- }
- while (gdth_test_busy(ha))
- gdth_delay(1);
- }
- firsttime = FALSE;
- }
-
- if (!nscp_cmndinfo->internal_command) {
- if (nscp_cmndinfo->phase == -1) {
- nscp_cmndinfo->phase = CACHESERVICE; /* default: cache svc. */
- if (nscp->cmnd[0] == TEST_UNIT_READY) {
- TRACE2(("TEST_UNIT_READY Bus %d Id %d LUN %d\n",
- b, t, l));
- /* TEST_UNIT_READY -> set scan mode */
- if ((ha->scan_mode & 0x0f) == 0) {
- if (b == 0 && t == 0 && l == 0) {
- ha->scan_mode |= 1;
- TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
- }
- } else if ((ha->scan_mode & 0x0f) == 1) {
- if (b == 0 && ((t == 0 && l == 1) ||
- (t == 1 && l == 0))) {
- nscp_cmndinfo->OpCode = GDT_SCAN_START;
- nscp_cmndinfo->phase = ((ha->scan_mode & 0x10 ? 1:0) << 8)
- | SCSIRAWSERVICE;
- ha->scan_mode = 0x12;
- TRACE2(("Scan mode: 0x%x (SCAN_START)\n",
- ha->scan_mode));
- } else {
- ha->scan_mode &= 0x10;
- TRACE2(("Scan mode: 0x%x\n", ha->scan_mode));
- }
- } else if (ha->scan_mode == 0x12) {
- if (b == ha->bus_cnt && t == ha->tid_cnt-1) {
- nscp_cmndinfo->phase = SCSIRAWSERVICE;
- nscp_cmndinfo->OpCode = GDT_SCAN_END;
- ha->scan_mode &= 0x10;
- TRACE2(("Scan mode: 0x%x (SCAN_END)\n",
- ha->scan_mode));
- }
- }
- }
- if (b == ha->virt_bus && nscp->cmnd[0] != INQUIRY &&
- nscp->cmnd[0] != READ_CAPACITY && nscp->cmnd[0] != MODE_SENSE &&
- (ha->hdr[t].cluster_type & CLUSTER_DRIVE)) {
- /* always GDT_CLUST_INFO! */
- nscp_cmndinfo->OpCode = GDT_CLUST_INFO;
- }
- }
- }
-
- if (nscp_cmndinfo->OpCode != -1) {
- if ((nscp_cmndinfo->phase & 0xff) == CACHESERVICE) {
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else if ((nscp_cmndinfo->phase & 0xff) == SCSIRAWSERVICE) {
- if (!(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else {
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = NOT_READY;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- }
- } else if (gdth_cmnd_priv(nscp)->internal_command) {
- if (!(cmd_index=gdth_special_cmd(ha, nscp)))
- this_cmd = FALSE;
- next_cmd = FALSE;
- } else if (b != ha->virt_bus) {
- if (ha->raw[BUS_L2P(ha,b)].io_cnt[t] >= GDTH_MAX_RAW ||
- !(cmd_index=gdth_fill_raw_cmd(ha, nscp, BUS_L2P(ha, b))))
- this_cmd = FALSE;
- else
- ha->raw[BUS_L2P(ha,b)].io_cnt[t]++;
- } else if (t >= MAX_HDRIVES || !ha->hdr[t].present || l != 0) {
- TRACE2(("Command 0x%x to bus %d id %d lun %d -> IGNORE\n",
- nscp->cmnd[0], b, t, l));
- nscp->result = DID_BAD_TARGET << 16;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else {
- switch (nscp->cmnd[0]) {
- case TEST_UNIT_READY:
- case INQUIRY:
- case REQUEST_SENSE:
- case READ_CAPACITY:
- case VERIFY:
- case START_STOP:
- case MODE_SENSE:
- case SERVICE_ACTION_IN_16:
- TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- if (ha->hdr[t].media_changed && nscp->cmnd[0] != INQUIRY) {
- /* return UNIT_ATTENTION */
- TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
- nscp->cmnd[0], t));
- ha->hdr[t].media_changed = FALSE;
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = UNIT_ATTENTION;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else if (gdth_internal_cache_cmd(ha, nscp))
- gdth_scsi_done(nscp);
- break;
-
- case ALLOW_MEDIUM_REMOVAL:
- TRACE(("cache cmd %x/%x/%x/%x/%x/%x\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- if ( (nscp->cmnd[4]&1) && !(ha->hdr[t].devtype&1) ) {
- TRACE(("Prevent r. nonremov. drive->do nothing\n"));
- nscp->result = DID_OK << 16;
- nscp->sense_buffer[0] = 0;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else {
- nscp->cmnd[3] = (ha->hdr[t].devtype&1) ? 1:0;
- TRACE(("Prevent/allow r. %d rem. drive %d\n",
- nscp->cmnd[4],nscp->cmnd[3]));
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- }
- break;
-
- case RESERVE:
- case RELEASE:
- TRACE2(("cache cmd %s\n",nscp->cmnd[0] == RESERVE ?
- "RESERVE" : "RELEASE"));
- if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- break;
-
- case READ_6:
- case WRITE_6:
- case READ_10:
- case WRITE_10:
- case READ_16:
- case WRITE_16:
- if (ha->hdr[t].media_changed) {
- /* return UNIT_ATTENTION */
- TRACE2(("cmd 0x%x target %d: UNIT_ATTENTION\n",
- nscp->cmnd[0], t));
- ha->hdr[t].media_changed = FALSE;
- memset((char*)nscp->sense_buffer,0,16);
- nscp->sense_buffer[0] = 0x70;
- nscp->sense_buffer[2] = UNIT_ATTENTION;
- nscp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- } else if (!(cmd_index=gdth_fill_cache_cmd(ha, nscp, t)))
- this_cmd = FALSE;
- break;
-
- default:
- TRACE2(("cache cmd %x/%x/%x/%x/%x/%x unknown\n",nscp->cmnd[0],
- nscp->cmnd[1],nscp->cmnd[2],nscp->cmnd[3],
- nscp->cmnd[4],nscp->cmnd[5]));
- printk("GDT-HA %d: Unknown SCSI command 0x%x to cache service !\n",
- ha->hanum, nscp->cmnd[0]);
- nscp->result = DID_ABORT << 16;
- if (!nscp_cmndinfo->wait_for_completion)
- nscp_cmndinfo->wait_for_completion++;
- else
- gdth_scsi_done(nscp);
- break;
- }
- }
-
- if (!this_cmd)
- break;
- if (nscp == ha->req_first)
- ha->req_first = pscp = (struct scsi_cmnd *)nscp->SCp.ptr;
- else
- pscp->SCp.ptr = nscp->SCp.ptr;
- if (!next_cmd)
- break;
- }
-
- if (ha->cmd_cnt > 0) {
- gdth_release_event(ha);
- }
-
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- if (gdth_polling && ha->cmd_cnt > 0) {
- if (!gdth_wait(ha, cmd_index, POLL_TIMEOUT))
- printk("GDT-HA %d: Command %d timed out !\n",
- ha->hanum, cmd_index);
- }
-}
-
-/*
- * gdth_copy_internal_data() - copy data from an internal buffer into a
- * scsi_cmnd's scatter-gather buffers, mapping pages with kmap_atomic()
- * as needed.
- */
-static void gdth_copy_internal_data(gdth_ha_str *ha, struct scsi_cmnd *scp,
- char *buffer, u16 count)
-{
- u16 cpcount,i, max_sg = scsi_sg_count(scp);
- u16 cpsum,cpnow;
- struct scatterlist *sl;
- char *address;
-
- cpcount = min_t(u16, count, scsi_bufflen(scp));
-
- if (cpcount) {
- cpsum=0;
- scsi_for_each_sg(scp, sl, max_sg, i) {
- unsigned long flags;
- cpnow = (u16)sl->length;
- TRACE(("copy_internal() now %d sum %d count %d %d\n",
- cpnow, cpsum, cpcount, scsi_bufflen(scp)));
- if (cpsum+cpnow > cpcount)
- cpnow = cpcount - cpsum;
- cpsum += cpnow;
- if (!sg_page(sl)) {
- printk("GDT-HA %d: invalid sc/gt element in gdth_copy_internal_data()\n",
- ha->hanum);
- return;
- }
- local_irq_save(flags);
- address = kmap_atomic(sg_page(sl)) + sl->offset;
- memcpy(address, buffer, cpnow);
- flush_dcache_page(sg_page(sl));
- kunmap_atomic(address);
- local_irq_restore(flags);
- if (cpsum == cpcount)
- break;
- buffer += cpnow;
- }
- } else if (count) {
- printk("GDT-HA %d: SCSI command with no buffers but data transfer expected!\n",
- ha->hanum);
- WARN_ON(1);
- }
-}
-
-static int gdth_internal_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
-{
- u8 t;
- gdth_inq_data inq;
- gdth_rdcap_data rdc;
- gdth_sense_data sd;
- gdth_modep_data mpd;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-
- t = scp->device->id;
- TRACE(("gdth_internal_cache_cmd() cmd 0x%x hdrive %d\n",
- scp->cmnd[0],t));
-
- scp->result = DID_OK << 16;
- scp->sense_buffer[0] = 0;
-
- switch (scp->cmnd[0]) {
- case TEST_UNIT_READY:
- case VERIFY:
- case START_STOP:
- TRACE2(("Test/Verify/Start hdrive %d\n",t));
- break;
-
- case INQUIRY:
- TRACE2(("Inquiry hdrive %d devtype %d\n",
- t,ha->hdr[t].devtype));
- inq.type_qual = (ha->hdr[t].devtype&4) ? TYPE_ROM:TYPE_DISK;
- /* here you can set all disks to removable if you want to do
- a flush using the ALLOW_MEDIUM_REMOVAL command */
- inq.modif_rmb = 0x00;
- if ((ha->hdr[t].devtype & 1) ||
- (ha->hdr[t].cluster_type & CLUSTER_DRIVE))
- inq.modif_rmb = 0x80;
- inq.version = 2;
- inq.resp_aenc = 2;
- inq.add_length= 32;
- strcpy(inq.vendor,ha->oem_name);
- snprintf(inq.product, sizeof(inq.product), "Host Drive #%02d",t);
- strcpy(inq.revision," ");
- gdth_copy_internal_data(ha, scp, (char*)&inq, sizeof(gdth_inq_data));
- break;
-
- case REQUEST_SENSE:
- TRACE2(("Request sense hdrive %d\n",t));
- sd.errorcode = 0x70;
- sd.segno = 0x00;
- sd.key = NO_SENSE;
- sd.info = 0;
- sd.add_length= 0;
- gdth_copy_internal_data(ha, scp, (char*)&sd, sizeof(gdth_sense_data));
- break;
-
- case MODE_SENSE:
- TRACE2(("Mode sense hdrive %d\n",t));
- memset((char*)&mpd,0,sizeof(gdth_modep_data));
- mpd.hd.data_length = sizeof(gdth_modep_data);
- mpd.hd.dev_par = (ha->hdr[t].devtype&2) ? 0x80:0;
- mpd.hd.bd_length = sizeof(mpd.bd);
- mpd.bd.block_length[0] = (SECTOR_SIZE & 0x00ff0000) >> 16;
- mpd.bd.block_length[1] = (SECTOR_SIZE & 0x0000ff00) >> 8;
- mpd.bd.block_length[2] = (SECTOR_SIZE & 0x000000ff);
- gdth_copy_internal_data(ha, scp, (char*)&mpd, sizeof(gdth_modep_data));
- break;
-
- case READ_CAPACITY:
- TRACE2(("Read capacity hdrive %d\n",t));
- if (ha->hdr[t].size > (u64)0xffffffff)
- rdc.last_block_no = 0xffffffff;
- else
- rdc.last_block_no = cpu_to_be32(ha->hdr[t].size-1);
- rdc.block_length = cpu_to_be32(SECTOR_SIZE);
- gdth_copy_internal_data(ha, scp, (char*)&rdc, sizeof(gdth_rdcap_data));
- break;
-
- case SERVICE_ACTION_IN_16:
- if ((scp->cmnd[1] & 0x1f) == SAI_READ_CAPACITY_16 &&
- (ha->cache_feat & GDT_64BIT)) {
- gdth_rdcap16_data rdc16;
-
- TRACE2(("Read capacity (16) hdrive %d\n",t));
- rdc16.last_block_no = cpu_to_be64(ha->hdr[t].size-1);
- rdc16.block_length = cpu_to_be32(SECTOR_SIZE);
- gdth_copy_internal_data(ha, scp, (char*)&rdc16,
- sizeof(gdth_rdcap16_data));
- } else {
- scp->result = DID_ABORT << 16;
- }
- break;
-
- default:
- TRACE2(("Internal cache cmd 0x%x unknown\n",scp->cmnd[0]));
- break;
- }
-
- if (!cmndinfo->wait_for_completion)
- cmndinfo->wait_for_completion++;
- else
- return 1;
-
- return 0;
-}
-
-static int gdth_fill_cache_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp,
- u16 hdrive)
-{
- register gdth_cmd_str *cmdp;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- u32 cnt, blockcnt;
- u64 no, blockno;
- int i, cmd_index, read_write, sgcnt, mode64;
-
- cmdp = ha->pccb;
- TRACE(("gdth_fill_cache_cmd() cmd 0x%x cmdsize %d hdrive %d\n",
- scp->cmnd[0],scp->cmd_len,hdrive));
-
- mode64 = (ha->cache_feat & GDT_64BIT) ? TRUE : FALSE;
- /* test for READ_16/WRITE_16 if !mode64? Not required: these should
- not occur, because READ_CAPACITY_16 already returns an error in
- that case */
-
- cmdp->Service = CACHESERVICE;
- cmdp->RequestBuffer = scp;
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- /* fill command */
- read_write = 0;
- if (cmndinfo->OpCode != -1)
- cmdp->OpCode = cmndinfo->OpCode; /* special cache cmd. */
- else if (scp->cmnd[0] == RESERVE)
- cmdp->OpCode = GDT_RESERVE_DRV;
- else if (scp->cmnd[0] == RELEASE)
- cmdp->OpCode = GDT_RELEASE_DRV;
- else if (scp->cmnd[0] == ALLOW_MEDIUM_REMOVAL) {
- if (scp->cmnd[4] & 1) /* prevent ? */
- cmdp->OpCode = GDT_MOUNT;
- else if (scp->cmnd[3] & 1) /* removable drive ? */
- cmdp->OpCode = GDT_UNMOUNT;
- else
- cmdp->OpCode = GDT_FLUSH;
- } else if (scp->cmnd[0] == WRITE_6 || scp->cmnd[0] == WRITE_10 ||
- scp->cmnd[0] == WRITE_12 || scp->cmnd[0] == WRITE_16
- ) {
- read_write = 1;
- if (gdth_write_through || ((ha->hdr[hdrive].rw_attribs & 1) &&
- (ha->cache_feat & GDT_WR_THROUGH)))
- cmdp->OpCode = GDT_WRITE_THR;
- else
- cmdp->OpCode = GDT_WRITE;
- } else {
- read_write = 2;
- cmdp->OpCode = GDT_READ;
- }
-
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.cache64.DeviceNo = hdrive;
- cmdp->u.cache64.BlockNo = 1;
- cmdp->u.cache64.sg_canz = 0;
- } else {
- cmdp->u.cache.DeviceNo = hdrive;
- cmdp->u.cache.BlockNo = 1;
- cmdp->u.cache.sg_canz = 0;
- }
-
- if (read_write) {
- if (scp->cmd_len == 16) {
- memcpy(&no, &scp->cmnd[2], sizeof(u64));
- blockno = be64_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[10], sizeof(u32));
- blockcnt = be32_to_cpu(cnt);
- } else if (scp->cmd_len == 10) {
- memcpy(&no, &scp->cmnd[2], sizeof(u32));
- blockno = be32_to_cpu(no);
- memcpy(&cnt, &scp->cmnd[7], sizeof(u16));
- blockcnt = be16_to_cpu(cnt);
- } else {
- memcpy(&no, &scp->cmnd[0], sizeof(u32));
- blockno = be32_to_cpu(no) & 0x001fffffUL;
- blockcnt= scp->cmnd[4]==0 ? 0x100 : scp->cmnd[4];
- }
- if (mode64) {
- cmdp->u.cache64.BlockNo = blockno;
- cmdp->u.cache64.BlockCnt = blockcnt;
- } else {
- cmdp->u.cache.BlockNo = (u32)blockno;
- cmdp->u.cache.BlockCnt = blockcnt;
- }
-
- if (scsi_bufflen(scp)) {
- cmndinfo->dma_dir = (read_write == 1 ?
- DMA_TO_DEVICE : DMA_FROM_DEVICE);
- sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
- scsi_sg_count(scp), cmndinfo->dma_dir);
- if (mode64) {
- struct scatterlist *sl;
-
- cmdp->u.cache64.DestAddr= (u64)-1;
- cmdp->u.cache64.sg_canz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.cache64.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.cache64.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- } else {
- struct scatterlist *sl;
-
- cmdp->u.cache.DestAddr= 0xffffffff;
- cmdp->u.cache.sg_canz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.cache.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.cache.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- }
-
-#ifdef GDTH_STATISTICS
- if (max_sg < (u32)sgcnt) {
- max_sg = (u32)sgcnt;
- TRACE3(("GDT: max_sg = %d\n",max_sg));
- }
-#endif
-
- }
- }
- /* evaluate command size, check space */
- if (mode64) {
- TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.cache64.DestAddr,cmdp->u.cache64.sg_canz,
- cmdp->u.cache64.sg_lst[0].sg_ptr,
- cmdp->u.cache64.sg_lst[0].sg_len));
- TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
- cmdp->OpCode,cmdp->u.cache64.BlockNo,cmdp->u.cache64.BlockCnt));
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) +
- (u16)cmdp->u.cache64.sg_canz * sizeof(gdth_sg64_str);
- } else {
- TRACE(("cache cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.cache.DestAddr,cmdp->u.cache.sg_canz,
- cmdp->u.cache.sg_lst[0].sg_ptr,
- cmdp->u.cache.sg_lst[0].sg_len));
- TRACE(("cache cmd: cmd %d blockno. %d, blockcnt %d\n",
- cmdp->OpCode,cmdp->u.cache.BlockNo,cmdp->u.cache.BlockCnt));
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) +
- (u16)cmdp->u.cache.sg_canz * sizeof(gdth_sg_str);
- }
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_fill_cache() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-static int gdth_fill_raw_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp, u8 b)
-{
- register gdth_cmd_str *cmdp;
- u16 i;
- dma_addr_t sense_paddr;
- int cmd_index, sgcnt, mode64;
- u8 t,l;
- struct gdth_cmndinfo *cmndinfo;
-
- t = scp->device->id;
- l = scp->device->lun;
- cmdp = ha->pccb;
- TRACE(("gdth_fill_raw_cmd() cmd 0x%x bus %d ID %d LUN %d\n",
- scp->cmnd[0],b,t,l));
-
- mode64 = (ha->raw_feat & GDT_64BIT) ? TRUE : FALSE;
-
- cmdp->Service = SCSIRAWSERVICE;
- cmdp->RequestBuffer = scp;
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- cmndinfo = gdth_cmnd_priv(scp);
- /* fill command */
- if (cmndinfo->OpCode != -1) {
- cmdp->OpCode = cmndinfo->OpCode; /* special raw cmd. */
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.raw64.direction = (cmndinfo->phase >> 8);
- TRACE2(("special raw cmd 0x%x param 0x%x\n",
- cmdp->OpCode, cmdp->u.raw64.direction));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst);
- } else {
- cmdp->u.raw.direction = (cmndinfo->phase >> 8);
- TRACE2(("special raw cmd 0x%x param 0x%x\n",
- cmdp->OpCode, cmdp->u.raw.direction));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst);
- }
-
- } else {
- sense_paddr = dma_map_single(&ha->pdev->dev, scp->sense_buffer, 16,
- DMA_FROM_DEVICE);
-
- cmndinfo->sense_paddr = sense_paddr;
- cmdp->OpCode = GDT_WRITE; /* always */
- cmdp->BoardNode = LOCALBOARD;
- if (mode64) {
- cmdp->u.raw64.reserved = 0;
- cmdp->u.raw64.mdisc_time = 0;
- cmdp->u.raw64.mcon_time = 0;
- cmdp->u.raw64.clen = scp->cmd_len;
- cmdp->u.raw64.target = t;
- cmdp->u.raw64.lun = l;
- cmdp->u.raw64.bus = b;
- cmdp->u.raw64.priority = 0;
- cmdp->u.raw64.sdlen = scsi_bufflen(scp);
- cmdp->u.raw64.sense_len = 16;
- cmdp->u.raw64.sense_data = sense_paddr;
- cmdp->u.raw64.direction =
- gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
- memcpy(cmdp->u.raw64.cmd,scp->cmnd,16);
- cmdp->u.raw64.sg_ranz = 0;
- } else {
- cmdp->u.raw.reserved = 0;
- cmdp->u.raw.mdisc_time = 0;
- cmdp->u.raw.mcon_time = 0;
- cmdp->u.raw.clen = scp->cmd_len;
- cmdp->u.raw.target = t;
- cmdp->u.raw.lun = l;
- cmdp->u.raw.bus = b;
- cmdp->u.raw.priority = 0;
- cmdp->u.raw.link_p = 0;
- cmdp->u.raw.sdlen = scsi_bufflen(scp);
- cmdp->u.raw.sense_len = 16;
- cmdp->u.raw.sense_data = sense_paddr;
- cmdp->u.raw.direction =
- gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN;
- memcpy(cmdp->u.raw.cmd,scp->cmnd,12);
- cmdp->u.raw.sg_ranz = 0;
- }
-
- if (scsi_bufflen(scp)) {
- cmndinfo->dma_dir = DMA_BIDIRECTIONAL;
- sgcnt = dma_map_sg(&ha->pdev->dev, scsi_sglist(scp),
- scsi_sg_count(scp), cmndinfo->dma_dir);
- if (mode64) {
- struct scatterlist *sl;
-
- cmdp->u.raw64.sdata = (u64)-1;
- cmdp->u.raw64.sg_ranz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.raw64.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.raw64.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- } else {
- struct scatterlist *sl;
-
- cmdp->u.raw.sdata = 0xffffffff;
- cmdp->u.raw.sg_ranz = sgcnt;
- scsi_for_each_sg(scp, sl, sgcnt, i) {
- cmdp->u.raw.sg_lst[i].sg_ptr = sg_dma_address(sl);
- cmdp->u.raw.sg_lst[i].sg_len = sg_dma_len(sl);
- }
- }
-
-#ifdef GDTH_STATISTICS
- if (max_sg < sgcnt) {
- max_sg = sgcnt;
- TRACE3(("GDT: max_sg = %d\n",sgcnt));
- }
-#endif
-
- }
- if (mode64) {
- TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.raw64.sdata,cmdp->u.raw64.sg_ranz,
- cmdp->u.raw64.sg_lst[0].sg_ptr,
- cmdp->u.raw64.sg_lst[0].sg_len));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) +
- (u16)cmdp->u.raw64.sg_ranz * sizeof(gdth_sg64_str);
- } else {
- TRACE(("raw cmd: addr. %x sganz %x sgptr0 %x sglen0 %x\n",
- cmdp->u.raw.sdata,cmdp->u.raw.sg_ranz,
- cmdp->u.raw.sg_lst[0].sg_ptr,
- cmdp->u.raw.sg_lst[0].sg_len));
- /* evaluate command size */
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) +
- (u16)cmdp->u.raw.sg_ranz * sizeof(gdth_sg_str);
- }
- }
- /* check space */
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_fill_raw() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-static int gdth_special_cmd(gdth_ha_str *ha, struct scsi_cmnd *scp)
-{
- register gdth_cmd_str *cmdp;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- int cmd_index;
-
- cmdp= ha->pccb;
- TRACE2(("gdth_special_cmd(): "));
-
- *cmdp = *cmndinfo->internal_cmd_str;
- cmdp->RequestBuffer = scp;
-
- /* search free command index */
- if (!(cmd_index=gdth_get_cmd_index(ha))) {
- TRACE(("GDT: No free command index found\n"));
- return 0;
- }
-
- /* if it's the first command, set command semaphore */
- if (ha->cmd_cnt == 0)
- gdth_set_sema0(ha);
-
- /* evaluate command size, check space */
- if (cmdp->OpCode == GDT_IOCTL) {
- TRACE2(("IOCTL\n"));
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.ioctl.p_param) + sizeof(u64);
- } else if (cmdp->Service == CACHESERVICE) {
- TRACE2(("cache command %d\n",cmdp->OpCode));
- if (ha->cache_feat & GDT_64BIT)
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.cache64.sg_lst) + sizeof(gdth_sg64_str);
- else
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.cache.sg_lst) + sizeof(gdth_sg_str);
- } else if (cmdp->Service == SCSIRAWSERVICE) {
- TRACE2(("raw command %d\n",cmdp->OpCode));
- if (ha->raw_feat & GDT_64BIT)
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.raw64.sg_lst) + sizeof(gdth_sg64_str);
- else
- ha->cmd_len =
- GDTOFFSOF(gdth_cmd_str,u.raw.sg_lst) + sizeof(gdth_sg_str);
- }
-
- if (ha->cmd_len & 3)
- ha->cmd_len += (4 - (ha->cmd_len & 3));
-
- if (ha->cmd_cnt > 0) {
- if ((ha->cmd_offs_dpmem + ha->cmd_len + DPMEM_COMMAND_OFFSET) >
- ha->ic_all_size) {
- TRACE2(("gdth_special_cmd() DPMEM overflow\n"));
- ha->cmd_tab[cmd_index-2].cmnd = UNUSED_CMND;
- return 0;
- }
- }
-
- /* copy command */
- gdth_copy_command(ha);
- return cmd_index;
-}
-
-
-/* Controller event handling functions */
-static gdth_evt_str *gdth_store_event(gdth_ha_str *ha, u16 source,
- u16 idx, gdth_evt_data *evt)
-{
- gdth_evt_str *e;
-
- /* no GDTH_LOCK_HA() ! */
- TRACE2(("gdth_store_event() source %d idx %d\n", source, idx));
- if (source == 0) /* no source -> no event */
- return NULL;
-
- if (ebuffer[elastidx].event_source == source &&
- ebuffer[elastidx].event_idx == idx &&
- ((evt->size != 0 && ebuffer[elastidx].event_data.size != 0 &&
- !memcmp((char *)&ebuffer[elastidx].event_data.eu,
- (char *)&evt->eu, evt->size)) ||
- (evt->size == 0 && ebuffer[elastidx].event_data.size == 0 &&
- !strcmp((char *)&ebuffer[elastidx].event_data.event_string,
- (char *)&evt->event_string)))) {
- e = &ebuffer[elastidx];
- e->last_stamp = (u32)ktime_get_real_seconds();
- ++e->same_count;
- } else {
- if (ebuffer[elastidx].event_source != 0) { /* entry not free ? */
- ++elastidx;
- if (elastidx == MAX_EVENTS)
- elastidx = 0;
- if (elastidx == eoldidx) { /* reached mark ? */
- ++eoldidx;
- if (eoldidx == MAX_EVENTS)
- eoldidx = 0;
- }
- }
- e = &ebuffer[elastidx];
- e->event_source = source;
- e->event_idx = idx;
- e->first_stamp = e->last_stamp = (u32)ktime_get_real_seconds();
- e->same_count = 1;
- e->event_data = *evt;
- e->application = 0;
- }
- return e;
-}
-
-static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
-{
- gdth_evt_str *e;
- int eindex;
- unsigned long flags;
-
- TRACE2(("gdth_read_event() handle %d\n", handle));
- spin_lock_irqsave(&ha->smp_lock, flags);
- if (handle == -1)
- eindex = eoldidx;
- else
- eindex = handle;
- estr->event_source = 0;
-
- if (eindex < 0 || eindex >= MAX_EVENTS) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return eindex;
- }
- e = &ebuffer[eindex];
- if (e->event_source != 0) {
- if (eindex != elastidx) {
- if (++eindex == MAX_EVENTS)
- eindex = 0;
- } else {
- eindex = -1;
- }
- memcpy(estr, e, sizeof(gdth_evt_str));
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return eindex;
-}
-
-static void gdth_readapp_event(gdth_ha_str *ha,
- u8 application, gdth_evt_str *estr)
-{
- gdth_evt_str *e;
- int eindex;
- unsigned long flags;
- u8 found = FALSE;
-
- TRACE2(("gdth_readapp_event() app. %d\n", application));
- spin_lock_irqsave(&ha->smp_lock, flags);
- eindex = eoldidx;
- for (;;) {
- e = &ebuffer[eindex];
- if (e->event_source == 0)
- break;
- if ((e->application & application) == 0) {
- e->application |= application;
- found = TRUE;
- break;
- }
- if (eindex == elastidx)
- break;
- if (++eindex == MAX_EVENTS)
- eindex = 0;
- }
- if (found)
- memcpy(estr, e, sizeof(gdth_evt_str));
- else
- estr->event_source = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_clear_events(void)
-{
- TRACE(("gdth_clear_events()"));
-
- eoldidx = elastidx = 0;
- ebuffer[0].event_source = 0;
-}
-
-
-/* SCSI interface functions */
-
-static irqreturn_t __gdth_interrupt(gdth_ha_str *ha,
- int gdth_from_wait, int* pIndex)
-{
- gdt6m_dpram_str __iomem *dp6m_ptr = NULL;
- gdt6_dpram_str __iomem *dp6_ptr;
- struct scsi_cmnd *scp;
- int rval, i;
- u8 IStatus;
- u16 Service;
- unsigned long flags = 0;
-
- TRACE(("gdth_interrupt() IRQ %d\n", ha->irq));
-
- /* if polling and not from gdth_wait() -> return */
- if (gdth_polling) {
- if (!gdth_from_wait) {
- return IRQ_HANDLED;
- }
- }
-
- if (!gdth_polling)
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- /* search controller */
- IStatus = gdth_get_status(ha);
- if (IStatus == 0) {
- /* spurious interrupt */
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
-#ifdef GDTH_STATISTICS
- ++act_ints;
-#endif
-
- if (ha->type == GDT_PCI) {
- dp6_ptr = ha->brd;
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = readw(&dp6_ptr->u.ic.Status);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
- ha->info = readl(&dp6_ptr->u.ic.Info[0]);
- ha->service = readw(&dp6_ptr->u.ic.Service);
- ha->info2 = readl(&dp6_ptr->u.ic.Info[1]);
-
- writeb(0xff, &dp6_ptr->io.irqdel); /* acknowledge interrupt */
- writeb(0, &dp6_ptr->u.ic.Cmd_Index);/* reset command index */
- writeb(0, &dp6_ptr->io.Sema1); /* reset status semaphore */
- } else if (ha->type == GDT_PCINEW) {
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = inw(PTR2USHORT(&ha->plx->status));
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else
- ha->status = S_OK;
- ha->info = inl(PTR2USHORT(&ha->plx->info[0]));
- ha->service = inw(PTR2USHORT(&ha->plx->service));
- ha->info2 = inl(PTR2USHORT(&ha->plx->info[1]));
-
- outb(0xff, PTR2USHORT(&ha->plx->edoor_reg));
- outb(0x00, PTR2USHORT(&ha->plx->sema1_reg));
- } else if (ha->type == GDT_PCIMPR) {
- dp6m_ptr = ha->brd;
- if (IStatus & 0x80) { /* error flag */
- IStatus &= ~0x80;
- ha->status = readw(&dp6m_ptr->i960r.status);
- TRACE2(("gdth_interrupt() error %d/%d\n",IStatus,ha->status));
- } else /* no error */
- ha->status = S_OK;
-
- ha->info = readl(&dp6m_ptr->i960r.info[0]);
- ha->service = readw(&dp6m_ptr->i960r.service);
- ha->info2 = readl(&dp6m_ptr->i960r.info[1]);
-
- /* event string */
- if (IStatus == ASYNCINDEX) {
- if (ha->service != SCREENSERVICE &&
- (ha->fw_vers & 0xff) >= 0x1a) {
- ha->dvr.severity = readb
- (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.severity);
- for (i = 0; i < 256; ++i) {
- ha->dvr.event_string[i] = readb
- (&((gdt6m_dpram_str __iomem *)ha->brd)->i960r.evt_str[i]);
- if (ha->dvr.event_string[i] == 0)
- break;
- }
- }
- }
- writeb(0xff, &dp6m_ptr->i960r.edoor_reg);
- writeb(0, &dp6m_ptr->i960r.sema1_reg);
- } else {
- TRACE2(("gdth_interrupt() unknown controller type\n"));
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
- TRACE(("gdth_interrupt() index %d stat %d info %d\n",
- IStatus,ha->status,ha->info));
-
- if (gdth_from_wait) {
- *pIndex = (int)IStatus;
- }
-
- if (IStatus == ASYNCINDEX) {
- TRACE2(("gdth_interrupt() async. event\n"));
- gdth_async_event(ha);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_next(ha);
- return IRQ_HANDLED;
- }
-
- if (IStatus == SPEZINDEX) {
- TRACE2(("Service unknown or not initialized !\n"));
- ha->dvr.size = sizeof(ha->dvr.eu.driver);
- ha->dvr.eu.driver.ionode = ha->hanum;
- gdth_store_event(ha, ES_DRIVER, 4, &ha->dvr);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
- scp = ha->cmd_tab[IStatus-2].cmnd;
- Service = ha->cmd_tab[IStatus-2].service;
- ha->cmd_tab[IStatus-2].cmnd = UNUSED_CMND;
- if (scp == UNUSED_CMND) {
- TRACE2(("gdth_interrupt() index to unused command (%d)\n",IStatus));
- ha->dvr.size = sizeof(ha->dvr.eu.driver);
- ha->dvr.eu.driver.ionode = ha->hanum;
- ha->dvr.eu.driver.index = IStatus;
- gdth_store_event(ha, ES_DRIVER, 1, &ha->dvr);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
- if (scp == INTERNAL_CMND) {
- TRACE(("gdth_interrupt() answer to internal command\n"));
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return IRQ_HANDLED;
- }
-
- TRACE(("gdth_interrupt() sync. status\n"));
- rval = gdth_sync_event(ha,Service,IStatus,scp);
- if (!gdth_polling)
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- if (rval == 2) {
- gdth_putq(ha, scp, gdth_cmnd_priv(scp)->priority);
- } else if (rval == 1) {
- gdth_scsi_done(scp);
- }
-
- gdth_next(ha);
- return IRQ_HANDLED;
-}
-
-static irqreturn_t gdth_interrupt(int irq, void *dev_id)
-{
- gdth_ha_str *ha = dev_id;
-
- return __gdth_interrupt(ha, false, NULL);
-}
-
-static int gdth_sync_event(gdth_ha_str *ha, int service, u8 index,
- struct scsi_cmnd *scp)
-{
- gdth_msg_str *msg;
- gdth_cmd_str *cmdp;
- u8 b, t;
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
-
- cmdp = ha->pccb;
- TRACE(("gdth_sync_event() serv %d status %d\n",
- service,ha->status));
-
- if (service == SCREENSERVICE) {
- msg = ha->pmsg;
- TRACE(("len: %d, answer: %d, ext: %d, alen: %d\n",
- msg->msg_len,msg->msg_answer,msg->msg_ext,msg->msg_alen));
- if (msg->msg_len > MSGLEN+1)
- msg->msg_len = MSGLEN+1;
- if (msg->msg_len)
- if (!(msg->msg_answer && msg->msg_ext)) {
- msg->msg_text[msg->msg_len] = '\0';
- printk("%s",msg->msg_text);
- }
-
- if (msg->msg_ext && !msg->msg_answer) {
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- gdth_get_cmd_index(ha);
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_READ;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- return 0;
- }
-
- if (msg->msg_answer && msg->msg_alen) {
- /* default answers (getchar() not possible) */
- if (msg->msg_alen == 1) {
- msg->msg_alen = 0;
- msg->msg_len = 1;
- msg->msg_text[0] = 0;
- } else {
- msg->msg_alen -= 2;
- msg->msg_len = 2;
- msg->msg_text[0] = 1;
- msg->msg_text[1] = 0;
- }
- msg->msg_ext = 0;
- msg->msg_answer = 0;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- gdth_get_cmd_index(ha);
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_WRITE;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= msg->msg_handle;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- gdth_release_event(ha);
- return 0;
- }
- printk("\n");
-
- } else {
- b = scp->device->channel;
- t = scp->device->id;
- if (cmndinfo->OpCode == -1 && b != ha->virt_bus) {
- ha->raw[BUS_L2P(ha,b)].io_cnt[t]--;
- }
- /* cache or raw service */
- if (ha->status == S_BSY) {
- TRACE2(("Controller busy -> retry !\n"));
- if (cmndinfo->OpCode == GDT_MOUNT)
- cmndinfo->OpCode = GDT_CLUST_INFO;
- /* retry */
- return 2;
- }
- if (scsi_bufflen(scp))
- dma_unmap_sg(&ha->pdev->dev, scsi_sglist(scp), scsi_sg_count(scp),
- cmndinfo->dma_dir);
-
- if (cmndinfo->sense_paddr)
- dma_unmap_page(&ha->pdev->dev, cmndinfo->sense_paddr, 16,
- DMA_FROM_DEVICE);
-
- if (ha->status == S_OK) {
- cmndinfo->status = S_OK;
- cmndinfo->info = ha->info;
- if (cmndinfo->OpCode != -1) {
- TRACE2(("gdth_sync_event(): special cmd 0x%x OK\n",
- cmndinfo->OpCode));
- /* special commands GDT_CLUST_INFO/GDT_MOUNT ? */
- if (cmndinfo->OpCode == GDT_CLUST_INFO) {
- ha->hdr[t].cluster_type = (u8)ha->info;
- if (!(ha->hdr[t].cluster_type &
- CLUSTER_MOUNTED)) {
- /* NOT MOUNTED -> MOUNT */
- cmndinfo->OpCode = GDT_MOUNT;
- if (ha->hdr[t].cluster_type &
- CLUSTER_RESERVED) {
- /* cluster drive RESERVED (on the other node) */
- cmndinfo->phase = -2; /* reservation conflict */
- }
- } else {
- cmndinfo->OpCode = -1;
- }
- } else {
- if (cmndinfo->OpCode == GDT_MOUNT) {
- ha->hdr[t].cluster_type |= CLUSTER_MOUNTED;
- ha->hdr[t].media_changed = TRUE;
- } else if (cmndinfo->OpCode == GDT_UNMOUNT) {
- ha->hdr[t].cluster_type &= ~CLUSTER_MOUNTED;
- ha->hdr[t].media_changed = TRUE;
- }
- cmndinfo->OpCode = -1;
- }
- /* retry */
- cmndinfo->priority = HIGH_PRI;
- return 2;
- } else {
- /* RESERVE/RELEASE ? */
- if (scp->cmnd[0] == RESERVE) {
- ha->hdr[t].cluster_type |= CLUSTER_RESERVED;
- } else if (scp->cmnd[0] == RELEASE) {
- ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
- }
- scp->result = DID_OK << 16;
- scp->sense_buffer[0] = 0;
- }
- } else {
- cmndinfo->status = ha->status;
- cmndinfo->info = ha->info;
-
- if (cmndinfo->OpCode != -1) {
- TRACE2(("gdth_sync_event(): special cmd 0x%x error 0x%x\n",
- cmndinfo->OpCode, ha->status));
- if (cmndinfo->OpCode == GDT_SCAN_START ||
- cmndinfo->OpCode == GDT_SCAN_END) {
- cmndinfo->OpCode = -1;
- /* retry */
- cmndinfo->priority = HIGH_PRI;
- return 2;
- }
- memset((char*)scp->sense_buffer,0,16);
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = NOT_READY;
- scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- } else if (service == CACHESERVICE) {
- if (ha->status == S_CACHE_UNKNOWN &&
- (ha->hdr[t].cluster_type &
- CLUSTER_RESERVE_STATE) == CLUSTER_RESERVE_STATE) {
- /* bus reset -> force GDT_CLUST_INFO */
- ha->hdr[t].cluster_type &= ~CLUSTER_RESERVED;
- }
- memset((char*)scp->sense_buffer,0,16);
- if (ha->status == (u16)S_CACHE_RESERV) {
- scp->result = (DID_OK << 16) | (RESERVATION_CONFLICT << 1);
- } else {
- scp->sense_buffer[0] = 0x70;
- scp->sense_buffer[2] = NOT_READY;
- scp->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
- }
- if (!cmndinfo->internal_command) {
- ha->dvr.size = sizeof(ha->dvr.eu.sync);
- ha->dvr.eu.sync.ionode = ha->hanum;
- ha->dvr.eu.sync.service = service;
- ha->dvr.eu.sync.status = ha->status;
- ha->dvr.eu.sync.info = ha->info;
- ha->dvr.eu.sync.hostdrive = t;
- if (ha->status >= 0x8000)
- gdth_store_event(ha, ES_SYNC, 0, &ha->dvr);
- else
- gdth_store_event(ha, ES_SYNC, service, &ha->dvr);
- }
- } else {
- /* sense buffer filled from controller firmware (DMA) */
- if (ha->status != S_RAW_SCSI || ha->info >= 0x100) {
- scp->result = DID_BAD_TARGET << 16;
- } else {
- scp->result = (DID_OK << 16) | ha->info;
- }
- }
- }
- if (!cmndinfo->wait_for_completion)
- cmndinfo->wait_for_completion++;
- else
- return 1;
- }
-
- return 0;
-}
-
-static char *async_cache_tab[] = {
-/* 0*/ "\011\000\002\002\002\004\002\006\004"
- "GDT HA %u, service %u, async. status %u/%lu unknown",
-/* 1*/ "\011\000\002\002\002\004\002\006\004"
- "GDT HA %u, service %u, async. status %u/%lu unknown",
-/* 2*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu not ready",
-/* 3*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
-/* 4*/ "\005\000\002\006\004"
- "GDT HA %u, mirror update on Host Drive %lu failed",
-/* 5*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu failed",
-/* 6*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu: REASSIGN not successful and/or data error on reassigned blocks. Drive may crash in the future and should be replaced",
-/* 7*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu write protected",
-/* 8*/ "\005\000\002\006\004"
- "GDT HA %u, media changed in Host Drive %lu",
-/* 9*/ "\005\000\002\006\004"
- "GDT HA %u, Host Drive %lu is offline",
-/*10*/ "\005\000\002\006\004"
- "GDT HA %u, media change of Mirror Drive %lu",
-/*11*/ "\005\000\002\006\004"
- "GDT HA %u, Mirror Drive %lu is write protected",
-/*12*/ "\005\000\002\006\004"
- "GDT HA %u, general error on Host Drive %lu. Please check the devices of this drive!",
-/*13*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: Cache Drive %u failed",
-/*14*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: FAIL state entered",
-/*15*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: error",
-/*16*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: failed drive replaced by Cache Drive %u",
-/*17*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity build failed",
-/*18*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild failed",
-/*19*/ "\005\000\002\010\002"
- "GDT HA %u, Test of Hot Fix %u failed",
-/*20*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive build finished successfully",
-/*21*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild finished successfully",
-/*22*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Array Drive %u: Hot Fix %u activated",
-/*23*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u: processing of i/o aborted due to serious drive error",
-/*24*/ "\005\000\002\010\002"
- "GDT HA %u, mirror update on Cache Drive %u completed",
-/*25*/ "\005\000\002\010\002"
- "GDT HA %u, mirror update on Cache Drive %lu failed",
-/*26*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild started",
-/*27*/ "\005\000\002\012\001"
- "GDT HA %u, Fault bus %u: SHELF OK detected",
-/*28*/ "\005\000\002\012\001"
- "GDT HA %u, Fault bus %u: SHELF not OK detected",
-/*29*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug started",
-/*30*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: new disk detected",
-/*31*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: old disk detected",
-/*32*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: plugging an active disk is invalid",
-/*33*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: invalid device detected",
-/*34*/ "\011\000\002\012\001\013\001\006\004"
- "GDT HA %u, Fault bus %u, ID %u: insufficient disk capacity (%lu MB required)",
-/*35*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: disk write protected",
-/*36*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: disk not available",
-/*37*/ "\007\000\002\012\001\006\004"
- "GDT HA %u, Fault bus %u: swap detected (%lu)",
-/*38*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug finished successfully",
-/*39*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted due to user Hot Plug",
-/*40*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug aborted",
-/*41*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, Fault bus %u, ID %u: Auto Hot Plug for Hot Fix started",
-/*42*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive build started",
-/*43*/ "\003\000\002"
- "GDT HA %u, DRAM parity error detected",
-/*44*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: update started",
-/*45*/ "\007\000\002\006\002\010\002"
- "GDT HA %u, Mirror Drive %u: Hot Fix %u activated",
-/*46*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: no matching Pool Hot Fix Drive available",
-/*47*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: Pool Hot Fix Drive available",
-/*48*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: no matching Pool Hot Fix Drive available",
-/*49*/ "\005\000\002\006\002"
- "GDT HA %u, Mirror Drive %u: Pool Hot Fix Drive available",
-/*50*/ "\007\000\002\012\001\013\001"
- "GDT HA %u, SCSI bus %u, ID %u: IGNORE_WIDE_RESIDUE message received",
-/*51*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand started",
-/*52*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand finished successfully",
-/*53*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand failed",
-/*54*/ "\003\000\002"
- "GDT HA %u, CPU temperature critical",
-/*55*/ "\003\000\002"
- "GDT HA %u, CPU temperature OK",
-/*56*/ "\005\000\002\006\004"
- "GDT HA %u, Host drive %lu created",
-/*57*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand restarted",
-/*58*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: expand stopped",
-/*59*/ "\005\000\002\010\002"
- "GDT HA %u, Mirror Drive %u: drive build quited",
-/*60*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity build quited",
-/*61*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: drive rebuild quited",
-/*62*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify started",
-/*63*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify done",
-/*64*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify failed",
-/*65*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity error detected",
-/*66*/ "\005\000\002\006\002"
- "GDT HA %u, Array Drive %u: parity verify quited",
-/*67*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u reserved",
-/*68*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u mounted and released",
-/*69*/ "\005\000\002\006\002"
- "GDT HA %u, Host Drive %u released",
-/*70*/ "\003\000\002"
- "GDT HA %u, DRAM error detected and corrected with ECC",
-/*71*/ "\003\000\002"
- "GDT HA %u, Uncorrectable DRAM error detected with ECC",
-/*72*/ "\011\000\002\012\001\013\001\014\001"
- "GDT HA %u, SCSI bus %u, ID %u, LUN %u: reassigning block",
-/*73*/ "\005\000\002\006\002"
- "GDT HA %u, Host drive %u resetted locally",
-/*74*/ "\005\000\002\006\002"
- "GDT HA %u, Host drive %u resetted remotely",
-/*75*/ "\003\000\002"
- "GDT HA %u, async. status 75 unknown",
-};
-
-
-static int gdth_async_event(gdth_ha_str *ha)
-{
- gdth_cmd_str *cmdp;
-
- cmdp= ha->pccb;
- TRACE2(("gdth_async_event() ha %d serv %d\n",
- ha->hanum, ha->service));
-
- if (ha->service == SCREENSERVICE) {
- if (ha->status == MSG_REQUEST) {
- while (gdth_test_busy(ha))
- gdth_delay(0);
- cmdp->Service = SCREENSERVICE;
- cmdp->RequestBuffer = SCREEN_CMND;
- gdth_set_sema0(ha);
- cmdp->OpCode = GDT_READ;
- cmdp->BoardNode = LOCALBOARD;
- cmdp->u.screen.reserved = 0;
- cmdp->u.screen.su.msg.msg_handle= MSG_INV_HANDLE;
- cmdp->u.screen.su.msg.msg_addr = ha->msg_phys;
- ha->cmd_offs_dpmem = 0;
- ha->cmd_len = GDTOFFSOF(gdth_cmd_str,u.screen.su.msg.msg_addr)
- + sizeof(u64);
- ha->cmd_cnt = 0;
- gdth_copy_command(ha);
- printk("[PCI %d/%d] ",(u16)(ha->brd_phys>>8),
- (u16)((ha->brd_phys>>3)&0x1f));
- gdth_release_event(ha);
- }
-
- } else {
- if (ha->type == GDT_PCIMPR &&
- (ha->fw_vers & 0xff) >= 0x1a) {
- ha->dvr.size = 0;
- ha->dvr.eu.async.ionode = ha->hanum;
- ha->dvr.eu.async.status = ha->status;
- /* severity and event_string already set! */
- } else {
- ha->dvr.size = sizeof(ha->dvr.eu.async);
- ha->dvr.eu.async.ionode = ha->hanum;
- ha->dvr.eu.async.service = ha->service;
- ha->dvr.eu.async.status = ha->status;
- ha->dvr.eu.async.info = ha->info;
- *(u32 *)ha->dvr.eu.async.scsi_coord = ha->info2;
- }
- gdth_store_event( ha, ES_ASYNC, ha->service, &ha->dvr );
- gdth_log_event( &ha->dvr, NULL );
-
- /* new host drive from expand? */
- if (ha->service == CACHESERVICE && ha->status == 56) {
- TRACE2(("gdth_async_event(): new host drive %d created\n",
- (u16)ha->info));
- /* gdth_analyse_hdrive(hanum, (u16)ha->info); */
- }
- }
- return 1;
-}
-
-static void gdth_log_event(gdth_evt_data *dvr, char *buffer)
-{
- gdth_stackframe stack;
- char *f = NULL;
- int i,j;
-
- TRACE2(("gdth_log_event()\n"));
- if (dvr->size == 0) {
- if (buffer == NULL) {
- printk("Adapter %d: %s\n",dvr->eu.async.ionode,dvr->event_string);
- } else {
- sprintf(buffer,"Adapter %d: %s\n",
- dvr->eu.async.ionode,dvr->event_string);
- }
- } else if (dvr->eu.async.service == CACHESERVICE &&
- INDEX_OK(dvr->eu.async.status, async_cache_tab)) {
- TRACE2(("GDT: Async. event cache service, event no.: %d\n",
- dvr->eu.async.status));
-
- f = async_cache_tab[dvr->eu.async.status];
-
- /* i: parameter to push, j: stack element to fill */
- for (j=0,i=1; i < f[0]; i+=2) {
- switch (f[i+1]) {
- case 4:
- stack.b[j++] = *(u32*)&dvr->eu.stream[(int)f[i]];
- break;
- case 2:
- stack.b[j++] = *(u16*)&dvr->eu.stream[(int)f[i]];
- break;
- case 1:
- stack.b[j++] = *(u8*)&dvr->eu.stream[(int)f[i]];
- break;
- default:
- break;
- }
- }
-
- if (buffer == NULL) {
- printk(&f[(int)f[0]],stack);
- printk("\n");
- } else {
- sprintf(buffer,&f[(int)f[0]],stack);
- }
-
- } else {
- if (buffer == NULL) {
- printk("GDT HA %u, Unknown async. event service %d event no. %d\n",
- dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
- } else {
- sprintf(buffer,"GDT HA %u, Unknown async. event service %d event no. %d",
- dvr->eu.async.ionode,dvr->eu.async.service,dvr->eu.async.status);
- }
- }
-}
-
-#ifdef GDTH_STATISTICS
-static u8 gdth_timer_running;
-
-static void gdth_timeout(struct timer_list *unused)
-{
- u32 i;
- struct scsi_cmnd *nscp;
- gdth_ha_str *ha;
- unsigned long flags;
-
- if(unlikely(list_empty(&gdth_instances))) {
- gdth_timer_running = 0;
- return;
- }
-
- ha = list_first_entry(&gdth_instances, gdth_ha_str, list);
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (act_stats=0,i=0; i<GDTH_MAXCMDS; ++i)
- if (ha->cmd_tab[i].cmnd != UNUSED_CMND)
- ++act_stats;
-
- for (act_rq=0,
- nscp=ha->req_first; nscp; nscp=(struct scsi_cmnd*)nscp->SCp.ptr)
- ++act_rq;
-
- TRACE2(("gdth_to(): ints %d, ios %d, act_stats %d, act_rq %d\n",
- act_ints, act_ios, act_stats, act_rq));
- act_ints = act_ios = 0;
-
- gdth_timer.expires = jiffies + 30 * HZ;
- add_timer(&gdth_timer);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
-
-static void gdth_timer_init(void)
-{
- if (gdth_timer_running)
- return;
- gdth_timer_running = 1;
- TRACE2(("gdth_detect(): Initializing timer !\n"));
- gdth_timer.expires = jiffies + HZ;
- add_timer(&gdth_timer);
-}
-#else
-static inline void gdth_timer_init(void)
-{
-}
-#endif
-
-
-static const char *gdth_ctr_name(gdth_ha_str *ha)
-{
- TRACE2(("gdth_ctr_name()\n"));
-
- if (ha->type == GDT_PCI) {
- switch (ha->pdev->device) {
- case PCI_DEVICE_ID_VORTEX_GDT60x0:
- return("GDT6000/6020/6050");
- case PCI_DEVICE_ID_VORTEX_GDT6000B:
- return("GDT6000B/6010");
- }
- }
- /* new controllers (GDT_PCINEW, GDT_PCIMPR, ..) use board_info IOCTL! */
-
- return("");
-}
-
-static const char *gdth_info(struct Scsi_Host *shp)
-{
- gdth_ha_str *ha = shost_priv(shp);
-
- TRACE2(("gdth_info()\n"));
- return ((const char *)ha->binfo.type_string);
-}
-
-static enum blk_eh_timer_return gdth_timed_out(struct scsi_cmnd *scp)
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- struct gdth_cmndinfo *cmndinfo = gdth_cmnd_priv(scp);
- u8 b, t;
- unsigned long flags;
- enum blk_eh_timer_return retval = BLK_EH_DONE;
-
- TRACE(("%s() cmd 0x%x\n", scp->cmnd[0], __func__));
- b = scp->device->channel;
- t = scp->device->id;
-
-    /*
-     * We don't really honor the command timeout; instead we allow up to
-     * six times the actual timeout. Reset the timer unless this is
-     * already the sixth timeout for this command.
-     */
- if (++cmndinfo->timeout_count < 6)
- retval = BLK_EH_RESET_TIMER;
-
- /* Reset the timeout if it is locked IO */
- spin_lock_irqsave(&ha->smp_lock, flags);
- if ((b != ha->virt_bus && ha->raw[BUS_L2P(ha, b)].lock) ||
- (b == ha->virt_bus && t < MAX_HDRIVES && ha->hdr[t].lock)) {
- TRACE2(("%s(): locked IO, reset timeout\n", __func__));
- retval = BLK_EH_RESET_TIMER;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- return retval;
-}
-
-
-static int gdth_eh_bus_reset(struct scsi_cmnd *scp)
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- int i;
- unsigned long flags;
- struct scsi_cmnd *cmnd;
- u8 b;
-
- TRACE2(("gdth_eh_bus_reset()\n"));
-
- b = scp->device->channel;
-
- /* clear command tab */
- spin_lock_irqsave(&ha->smp_lock, flags);
- for (i = 0; i < GDTH_MAXCMDS; ++i) {
- cmnd = ha->cmd_tab[i].cmnd;
- if (!SPECIAL_SCP(cmnd) && cmnd->device->channel == b)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- if (b == ha->virt_bus) {
- /* host drives */
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- gdth_polling = TRUE;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- if (gdth_internal_cmd(ha, CACHESERVICE,
- GDT_CLUST_RESET, i, 0, 0))
- ha->hdr[i].cluster_type &= ~CLUSTER_RESERVED;
- gdth_polling = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
- }
- } else {
- /* raw devices */
- spin_lock_irqsave(&ha->smp_lock, flags);
- for (i = 0; i < MAXID; ++i)
- ha->raw[BUS_L2P(ha,b)].io_cnt[i] = 0;
- gdth_polling = TRUE;
- while (gdth_test_busy(ha))
- gdth_delay(0);
- gdth_internal_cmd(ha, SCSIRAWSERVICE, GDT_RESET_BUS,
- BUS_L2P(ha,b), 0, 0);
- gdth_polling = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
- return SUCCESS;
-}
-
-static int gdth_bios_param(struct scsi_device *sdev,struct block_device *bdev,sector_t cap,int *ip)
-{
- u8 b, t;
- gdth_ha_str *ha = shost_priv(sdev->host);
- struct scsi_device *sd;
- unsigned capacity;
-
- sd = sdev;
- capacity = cap;
- b = sd->channel;
- t = sd->id;
- TRACE2(("gdth_bios_param() ha %d bus %d target %d\n", ha->hanum, b, t));
-
- if (b != ha->virt_bus || ha->hdr[t].heads == 0) {
- /* raw device or host drive without mapping information */
- TRACE2(("Evaluate mapping\n"));
- gdth_eval_mapping(capacity,&ip[2],&ip[0],&ip[1]);
- } else {
- ip[0] = ha->hdr[t].heads;
- ip[1] = ha->hdr[t].secs;
- ip[2] = capacity / ip[0] / ip[1];
- }
-
- TRACE2(("gdth_bios_param(): %d heads, %d secs, %d cyls\n",
- ip[0],ip[1],ip[2]));
- return 0;
-}
-
-
-static int gdth_queuecommand_lck(struct scsi_cmnd *scp,
- void (*done)(struct scsi_cmnd *))
-{
- gdth_ha_str *ha = shost_priv(scp->device->host);
- struct gdth_cmndinfo *cmndinfo;
-
- TRACE(("gdth_queuecommand() cmd 0x%x\n", scp->cmnd[0]));
-
- cmndinfo = gdth_get_cmndinfo(ha);
- BUG_ON(!cmndinfo);
-
- scp->scsi_done = done;
- cmndinfo->timeout_count = 0;
- cmndinfo->priority = DEFAULT_PRI;
-
- return __gdth_queuecommand(ha, scp, cmndinfo);
-}
-
-static DEF_SCSI_QCMD(gdth_queuecommand)
-
-static int __gdth_queuecommand(gdth_ha_str *ha, struct scsi_cmnd *scp,
- struct gdth_cmndinfo *cmndinfo)
-{
- scp->host_scribble = (unsigned char *)cmndinfo;
- cmndinfo->wait_for_completion = 1;
- cmndinfo->phase = -1;
- cmndinfo->OpCode = -1;
-
-#ifdef GDTH_STATISTICS
- ++act_ios;
-#endif
-
- gdth_putq(ha, scp, cmndinfo->priority);
- gdth_next(ha);
- return 0;
-}
-
-
-static int gdth_open(struct inode *inode, struct file *filep)
-{
- gdth_ha_str *ha;
-
- mutex_lock(&gdth_mutex);
- list_for_each_entry(ha, &gdth_instances, list) {
- if (!ha->sdev)
- ha->sdev = scsi_get_host_dev(ha->shost);
- }
- mutex_unlock(&gdth_mutex);
-
- TRACE(("gdth_open()\n"));
- return 0;
-}
-
-static int gdth_close(struct inode *inode, struct file *filep)
-{
- TRACE(("gdth_close()\n"));
- return 0;
-}
-
-static int ioc_event(void __user *arg)
-{
- gdth_ioctl_event evt;
- gdth_ha_str *ha;
- unsigned long flags;
-
- if (copy_from_user(&evt, arg, sizeof(gdth_ioctl_event)))
- return -EFAULT;
- ha = gdth_find_ha(evt.ionode);
- if (!ha)
- return -EFAULT;
-
- if (evt.erase == 0xff) {
- if (evt.event.event_source == ES_TEST)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.test);
- else if (evt.event.event_source == ES_DRIVER)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.driver);
- else if (evt.event.event_source == ES_SYNC)
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.sync);
- else
- evt.event.event_data.size=sizeof(evt.event.event_data.eu.async);
- spin_lock_irqsave(&ha->smp_lock, flags);
- gdth_store_event(ha, evt.event.event_source, evt.event.event_idx,
- &evt.event.event_data);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- } else if (evt.erase == 0xfe) {
- gdth_clear_events();
- } else if (evt.erase == 0) {
- evt.handle = gdth_read_event(ha, evt.handle, &evt.event);
- } else {
- gdth_readapp_event(ha, evt.erase, &evt.event);
- }
- if (copy_to_user(arg, &evt, sizeof(gdth_ioctl_event)))
- return -EFAULT;
- return 0;
-}
-
-static int ioc_lockdrv(void __user *arg)
-{
- gdth_ioctl_lockdrv ldrv;
- u8 i, j;
- unsigned long flags;
- gdth_ha_str *ha;
-
- if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
- return -EFAULT;
- ha = gdth_find_ha(ldrv.ionode);
- if (!ha)
- return -EFAULT;
-
- for (i = 0; i < ldrv.drive_cnt && i < MAX_HDRIVES; ++i) {
- j = ldrv.drives[i];
- if (j >= MAX_HDRIVES || !ha->hdr[j].present)
- continue;
- if (ldrv.lock) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[j].lock = 1;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_wait_completion(ha, ha->bus_cnt, j);
- } else {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[j].lock = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- gdth_next(ha);
- }
- }
- return 0;
-}
-
-static int ioc_resetdrv(void __user *arg, char *cmnd)
-{
- gdth_ioctl_reset res;
- gdth_cmd_str cmd;
- gdth_ha_str *ha;
- int rval;
-
- if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
- res.number >= MAX_HDRIVES)
- return -EFAULT;
- ha = gdth_find_ha(res.ionode);
- if (!ha)
- return -EFAULT;
-
- if (!ha->hdr[res.number].present)
- return 0;
- memset(&cmd, 0, sizeof(gdth_cmd_str));
- cmd.Service = CACHESERVICE;
- cmd.OpCode = GDT_CLUST_RESET;
- if (ha->cache_feat & GDT_64BIT)
- cmd.u.cache64.DeviceNo = res.number;
- else
- cmd.u.cache.DeviceNo = res.number;
-
- rval = __gdth_execute(ha->sdev, &cmd, cmnd, 30, NULL);
- if (rval < 0)
- return rval;
- res.status = rval;
-
- if (copy_to_user(arg, &res, sizeof(gdth_ioctl_reset)))
- return -EFAULT;
- return 0;
-}
-
-static void gdth_ioc_cacheservice(gdth_ha_str *ha, gdth_ioctl_general *gen,
- u64 paddr)
-{
- if (ha->cache_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- gen->command.u.cache64.BlockCnt = gen->command.u.cache.BlockCnt;
- gen->command.u.cache64.BlockNo = gen->command.u.cache.BlockNo;
- gen->command.u.cache64.DeviceNo = gen->command.u.cache.DeviceNo;
-
- if (ha->cache_feat & SCATTER_GATHER) {
- gen->command.u.cache64.DestAddr = (u64)-1;
- gen->command.u.cache64.sg_canz = 1;
- gen->command.u.cache64.sg_lst[0].sg_ptr = paddr;
- gen->command.u.cache64.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.cache64.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.cache64.DestAddr = paddr;
- gen->command.u.cache64.sg_canz = 0;
- }
- } else {
- if (ha->cache_feat & SCATTER_GATHER) {
- gen->command.u.cache.DestAddr = 0xffffffff;
- gen->command.u.cache.sg_canz = 1;
- gen->command.u.cache.sg_lst[0].sg_ptr = (u32)paddr;
- gen->command.u.cache.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.cache.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.cache.DestAddr = paddr;
- gen->command.u.cache.sg_canz = 0;
- }
- }
-}
-
-static void gdth_ioc_scsiraw(gdth_ha_str *ha, gdth_ioctl_general *gen,
- u64 paddr)
-{
- if (ha->raw_feat & GDT_64BIT) {
- /* copy elements from 32-bit IOCTL structure */
- char cmd[16];
-
- gen->command.u.raw64.sense_len = gen->command.u.raw.sense_len;
- gen->command.u.raw64.bus = gen->command.u.raw.bus;
- gen->command.u.raw64.lun = gen->command.u.raw.lun;
- gen->command.u.raw64.target = gen->command.u.raw.target;
- memcpy(cmd, gen->command.u.raw.cmd, 16);
- memcpy(gen->command.u.raw64.cmd, cmd, 16);
- gen->command.u.raw64.clen = gen->command.u.raw.clen;
- gen->command.u.raw64.sdlen = gen->command.u.raw.sdlen;
- gen->command.u.raw64.direction = gen->command.u.raw.direction;
-
- /* addresses */
- if (ha->raw_feat & SCATTER_GATHER) {
- gen->command.u.raw64.sdata = (u64)-1;
- gen->command.u.raw64.sg_ranz = 1;
- gen->command.u.raw64.sg_lst[0].sg_ptr = paddr;
- gen->command.u.raw64.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.raw64.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.raw64.sdata = paddr;
- gen->command.u.raw64.sg_ranz = 0;
- }
-
- gen->command.u.raw64.sense_data = paddr + gen->data_len;
- } else {
- if (ha->raw_feat & SCATTER_GATHER) {
- gen->command.u.raw.sdata = 0xffffffff;
- gen->command.u.raw.sg_ranz = 1;
- gen->command.u.raw.sg_lst[0].sg_ptr = (u32)paddr;
- gen->command.u.raw.sg_lst[0].sg_len = gen->data_len;
- gen->command.u.raw.sg_lst[1].sg_len = 0;
- } else {
- gen->command.u.raw.sdata = paddr;
- gen->command.u.raw.sg_ranz = 0;
- }
-
- gen->command.u.raw.sense_data = (u32)paddr + gen->data_len;
- }
-}
-
-static int ioc_general(void __user *arg, char *cmnd)
-{
- gdth_ioctl_general gen;
- gdth_ha_str *ha;
- char *buf = NULL;
- dma_addr_t paddr;
- int rval;
-
- if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
- return -EFAULT;
- ha = gdth_find_ha(gen.ionode);
- if (!ha)
- return -EFAULT;
-
- if (gen.data_len > INT_MAX)
- return -EINVAL;
- if (gen.sense_len > INT_MAX)
- return -EINVAL;
- if (gen.data_len + gen.sense_len > INT_MAX)
- return -EINVAL;
-
- if (gen.data_len + gen.sense_len > 0) {
- buf = dma_alloc_coherent(&ha->pdev->dev,
- gen.data_len + gen.sense_len, &paddr,
- GFP_KERNEL);
- if (!buf)
- return -EFAULT;
-
- rval = -EFAULT;
- if (copy_from_user(buf, arg + sizeof(gdth_ioctl_general),
- gen.data_len + gen.sense_len))
- goto out_free_buf;
-
- if (gen.command.OpCode == GDT_IOCTL)
- gen.command.u.ioctl.p_param = paddr;
- else if (gen.command.Service == CACHESERVICE)
- gdth_ioc_cacheservice(ha, &gen, paddr);
- else if (gen.command.Service == SCSIRAWSERVICE)
- gdth_ioc_scsiraw(ha, &gen, paddr);
- else
- goto out_free_buf;
- }
-
- rval = __gdth_execute(ha->sdev, &gen.command, cmnd, gen.timeout,
- &gen.info);
- if (rval < 0)
- goto out_free_buf;
- gen.status = rval;
-
- rval = -EFAULT;
- if (copy_to_user(arg + sizeof(gdth_ioctl_general), buf,
- gen.data_len + gen.sense_len))
- goto out_free_buf;
- if (copy_to_user(arg, &gen,
- sizeof(gdth_ioctl_general) - sizeof(gdth_cmd_str)))
- goto out_free_buf;
-
- rval = 0;
-out_free_buf:
- if (buf)
- dma_free_coherent(&ha->pdev->dev, gen.data_len + gen.sense_len,
- buf, paddr);
- return rval;
-}
-
-static int ioc_hdrlist(void __user *arg, char *cmnd)
-{
- gdth_ioctl_rescan *rsc;
- gdth_cmd_str *cmd;
- gdth_ha_str *ha;
- u8 i;
- int rc = -ENOMEM;
- u32 cluster_type = 0;
-
- rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!rsc || !cmd)
- goto free_fail;
-
- if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
- (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
- rc = -EFAULT;
- goto free_fail;
- }
- memset(cmd, 0, sizeof(gdth_cmd_str));
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (!ha->hdr[i].present) {
- rsc->hdr_list[i].bus = 0xff;
- continue;
- }
- rsc->hdr_list[i].bus = ha->virt_bus;
- rsc->hdr_list[i].target = i;
- rsc->hdr_list[i].lun = 0;
- rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
- if (ha->hdr[i].cluster_type & CLUSTER_DRIVE) {
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_CLUST_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
- if (__gdth_execute(ha->sdev, cmd, cmnd, 30, &cluster_type) == S_OK)
- rsc->hdr_list[i].cluster_type = cluster_type;
- }
- }
-
- if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
- rc = -EFAULT;
- else
- rc = 0;
-
-free_fail:
- kfree(rsc);
- kfree(cmd);
- return rc;
-}
-
-static int ioc_rescan(void __user *arg, char *cmnd)
-{
- gdth_ioctl_rescan *rsc;
- gdth_cmd_str *cmd;
- u16 i, status, hdr_cnt;
- u32 info;
- int cyls, hds, secs;
- int rc = -ENOMEM;
- unsigned long flags;
- gdth_ha_str *ha;
-
- rsc = kmalloc(sizeof(*rsc), GFP_KERNEL);
- cmd = kmalloc(sizeof(*cmd), GFP_KERNEL);
- if (!cmd || !rsc)
- goto free_fail;
-
- if (copy_from_user(rsc, arg, sizeof(gdth_ioctl_rescan)) ||
- (NULL == (ha = gdth_find_ha(rsc->ionode)))) {
- rc = -EFAULT;
- goto free_fail;
- }
- memset(cmd, 0, sizeof(gdth_cmd_str));
-
- if (rsc->flag == 0) {
- /* old method: re-init. cache service */
- cmd->Service = CACHESERVICE;
- if (ha->cache_feat & GDT_64BIT) {
- cmd->OpCode = GDT_X_INIT_HOST;
- cmd->u.cache64.DeviceNo = LINUX_OS;
- } else {
- cmd->OpCode = GDT_INIT;
- cmd->u.cache.DeviceNo = LINUX_OS;
- }
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
- i = 0;
- hdr_cnt = (status == S_OK ? (u16)info : 0);
- } else {
- i = rsc->hdr_no;
- hdr_cnt = i + 1;
- }
-
- for (; i < hdr_cnt && i < MAX_HDRIVES; ++i) {
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- rsc->hdr_list[i].bus = ha->virt_bus;
- rsc->hdr_list[i].target = i;
- rsc->hdr_list[i].lun = 0;
- if (status != S_OK) {
- ha->hdr[i].present = FALSE;
- } else {
- ha->hdr[i].present = TRUE;
- ha->hdr[i].size = info;
- /* evaluate mapping */
- ha->hdr[i].size &= ~SECS32;
- gdth_eval_mapping(ha->hdr[i].size,&cyls,&hds,&secs);
- ha->hdr[i].heads = hds;
- ha->hdr[i].secs = secs;
- /* round size */
- ha->hdr[i].size = cyls * hds * secs;
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- if (status != S_OK)
- continue;
-
- /* extended info, if GDT_64BIT, for drives > 2 TB */
- /* but we need ha->info2, not yet stored in scp->SCp */
-
- /* devtype, cluster info, R/W attribs */
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_DEVTYPE;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].devtype = (status == S_OK ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_CLUST_INFO;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].cluster_type =
- ((status == S_OK && !shared_access) ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- rsc->hdr_list[i].cluster_type = ha->hdr[i].cluster_type;
-
- cmd->Service = CACHESERVICE;
- cmd->OpCode = GDT_RW_ATTRIBS;
- if (ha->cache_feat & GDT_64BIT)
- cmd->u.cache64.DeviceNo = i;
- else
- cmd->u.cache.DeviceNo = i;
-
- status = __gdth_execute(ha->sdev, cmd, cmnd, 30, &info);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->hdr[i].rw_attribs = (status == S_OK ? (u16)info : 0);
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- }
-
- if (copy_to_user(arg, rsc, sizeof(gdth_ioctl_rescan)))
- rc = -EFAULT;
- else
- rc = 0;
-
-free_fail:
- kfree(rsc);
- kfree(cmd);
- return rc;
-}
-
-static int gdth_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
-{
- gdth_ha_str *ha;
- struct scsi_cmnd *scp;
- unsigned long flags;
- char cmnd[MAX_COMMAND_SIZE];
- void __user *argp = (void __user *)arg;
-
- memset(cmnd, 0xff, 12);
-
- TRACE(("gdth_ioctl() cmd 0x%x\n", cmd));
-
- switch (cmd) {
- case GDTIOCTL_CTRCNT:
- {
- int cnt = gdth_ctr_count;
- if (put_user(cnt, (int __user *)argp))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_DRVERS:
- {
- int ver = (GDTH_VERSION<<8) | GDTH_SUBVERSION;
- if (put_user(ver, (int __user *)argp))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_OSVERS:
- {
- gdth_ioctl_osvers osv;
-
- osv.version = (u8)(LINUX_VERSION_CODE >> 16);
- osv.subversion = (u8)(LINUX_VERSION_CODE >> 8);
- osv.revision = (u16)(LINUX_VERSION_CODE & 0xff);
- if (copy_to_user(argp, &osv, sizeof(gdth_ioctl_osvers)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_CTRTYPE:
- {
- gdth_ioctl_ctrtype ctrt;
-
- if (copy_from_user(&ctrt, argp, sizeof(gdth_ioctl_ctrtype)) ||
- (NULL == (ha = gdth_find_ha(ctrt.ionode))))
- return -EFAULT;
-
- if (ha->type != GDT_PCIMPR) {
- ctrt.type = (u8)((ha->stype<<4) + 6);
- } else {
- ctrt.type = (ha->oem_id == OEM_ID_INTEL ? 0xfd : 0xfe);
- if (ha->stype >= 0x300)
- ctrt.ext_type = 0x6000 | ha->pdev->subsystem_device;
- else
- ctrt.ext_type = 0x6000 | ha->stype;
- }
- ctrt.device_id = ha->pdev->device;
- ctrt.sub_device_id = ha->pdev->subsystem_device;
- ctrt.info = ha->brd_phys;
- ctrt.oem_id = ha->oem_id;
- if (copy_to_user(argp, &ctrt, sizeof(gdth_ioctl_ctrtype)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_GENERAL:
- return ioc_general(argp, cmnd);
-
- case GDTIOCTL_EVENT:
- return ioc_event(argp);
-
- case GDTIOCTL_LOCKDRV:
- return ioc_lockdrv(argp);
-
- case GDTIOCTL_LOCKCHN:
- {
- gdth_ioctl_lockchn lchn;
- u8 i, j;
-
- if (copy_from_user(&lchn, argp, sizeof(gdth_ioctl_lockchn)) ||
- (NULL == (ha = gdth_find_ha(lchn.ionode))))
- return -EFAULT;
-
- i = lchn.channel;
- if (i < ha->bus_cnt) {
- if (lchn.lock) {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->raw[i].lock = 1;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
- gdth_wait_completion(ha, i, j);
- } else {
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->raw[i].lock = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- for (j = 0; j < ha->tid_cnt; ++j)
- gdth_next(ha);
- }
- }
- break;
- }
-
- case GDTIOCTL_RESCAN:
- return ioc_rescan(argp, cmnd);
-
- case GDTIOCTL_HDRLIST:
- return ioc_hdrlist(argp, cmnd);
-
- case GDTIOCTL_RESET_BUS:
- {
- gdth_ioctl_reset res;
- int rval;
-
- if (copy_from_user(&res, argp, sizeof(gdth_ioctl_reset)) ||
- (NULL == (ha = gdth_find_ha(res.ionode))))
- return -EFAULT;
-
- scp = kzalloc(sizeof(*scp), GFP_KERNEL);
- if (!scp)
- return -ENOMEM;
- scp->device = ha->sdev;
- scp->cmd_len = 12;
- scp->device->channel = res.number;
- rval = gdth_eh_bus_reset(scp);
- res.status = (rval == SUCCESS ? S_OK : S_GENERR);
- kfree(scp);
-
- if (copy_to_user(argp, &res, sizeof(gdth_ioctl_reset)))
- return -EFAULT;
- break;
- }
-
- case GDTIOCTL_RESET_DRV:
- return ioc_resetdrv(argp, cmnd);
-
- default:
- break;
- }
- return 0;
-}
-
-static long gdth_unlocked_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- int ret;
-
- mutex_lock(&gdth_mutex);
- ret = gdth_ioctl(file, cmd, arg);
- mutex_unlock(&gdth_mutex);
-
- return ret;
-}
-
-/* flush routine */
-static void gdth_flush(gdth_ha_str *ha)
-{
- int i;
- gdth_cmd_str gdtcmd;
- char cmnd[MAX_COMMAND_SIZE];
- memset(cmnd, 0xff, MAX_COMMAND_SIZE);
-
- TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- gdtcmd.BoardNode = LOCALBOARD;
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_FLUSH;
- if (ha->cache_feat & GDT_64BIT) {
- gdtcmd.u.cache64.DeviceNo = i;
- gdtcmd.u.cache64.BlockNo = 1;
- gdtcmd.u.cache64.sg_canz = 0;
- } else {
- gdtcmd.u.cache.DeviceNo = i;
- gdtcmd.u.cache.BlockNo = 1;
- gdtcmd.u.cache.sg_canz = 0;
- }
- TRACE2(("gdth_flush(): flush ha %d drive %d\n", ha->hanum, i));
-
- gdth_execute(ha->shost, &gdtcmd, cmnd, 30, NULL);
- }
- }
-}
-
-/* configure lun */
-static int gdth_slave_configure(struct scsi_device *sdev)
-{
- sdev->skip_ms_page_3f = 1;
- sdev->skip_ms_page_8 = 1;
- return 0;
-}
-
-static struct scsi_host_template gdth_template = {
- .name = "GDT SCSI Disk Array Controller",
- .info = gdth_info,
- .queuecommand = gdth_queuecommand,
- .eh_bus_reset_handler = gdth_eh_bus_reset,
- .slave_configure = gdth_slave_configure,
- .bios_param = gdth_bios_param,
- .show_info = gdth_show_info,
- .write_info = gdth_set_info,
- .eh_timed_out = gdth_timed_out,
- .proc_name = "gdth",
- .can_queue = GDTH_MAXCMDS,
- .this_id = -1,
- .sg_tablesize = GDTH_MAXSG,
- .cmd_per_lun = GDTH_MAXC_P_L,
- .unchecked_isa_dma = 1,
- .no_write_same = 1,
-};
-
-static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
-{
- struct Scsi_Host *shp;
- gdth_ha_str *ha;
- dma_addr_t scratch_dma_handle = 0;
- int error, i;
- struct pci_dev *pdev = pcistr->pdev;
-
- *ha_out = NULL;
-
- shp = scsi_host_alloc(&gdth_template, sizeof(gdth_ha_str));
- if (!shp)
- return -ENOMEM;
- ha = shost_priv(shp);
-
- error = -ENODEV;
- if (!gdth_init_pci(pdev, pcistr, ha))
- goto out_host_put;
-
- /* controller found and initialized */
- printk("Configuring GDT-PCI HA at %d/%d IRQ %u\n",
- pdev->bus->number,
- PCI_SLOT(pdev->devfn),
- ha->irq);
-
- error = request_irq(ha->irq, gdth_interrupt,
- IRQF_SHARED, "gdth", ha);
- if (error) {
- printk("GDT-PCI: Unable to allocate IRQ\n");
- goto out_host_put;
- }
-
- shp->unchecked_isa_dma = 0;
- shp->irq = ha->irq;
- shp->dma_channel = 0xff;
-
- ha->hanum = gdth_ctr_count++;
- ha->shost = shp;
-
- ha->pccb = &ha->cmdext;
- ha->ccb_phys = 0L;
-
- error = -ENOMEM;
-
- ha->pscratch = dma_alloc_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- &scratch_dma_handle, GFP_KERNEL);
- if (!ha->pscratch)
- goto out_free_irq;
- ha->scratch_phys = scratch_dma_handle;
-
- ha->pmsg = dma_alloc_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- &scratch_dma_handle, GFP_KERNEL);
- if (!ha->pmsg)
- goto out_free_pscratch;
- ha->msg_phys = scratch_dma_handle;
-
- ha->scratch_busy = FALSE;
- ha->req_first = NULL;
- ha->tid_cnt = pdev->device >= 0x200 ? MAXID : MAX_HDRIVES;
- if (max_ids > 0 && max_ids < ha->tid_cnt)
- ha->tid_cnt = max_ids;
- for (i = 0; i < GDTH_MAXCMDS; ++i)
- ha->cmd_tab[i].cmnd = UNUSED_CMND;
- ha->scan_mode = rescan ? 0x10 : 0;
-
- error = -ENODEV;
- if (!gdth_search_drives(ha)) {
- printk("GDT-PCI %d: Error during device scan\n", ha->hanum);
- goto out_free_pmsg;
- }
-
- if (hdr_channel < 0 || hdr_channel > ha->bus_cnt)
- hdr_channel = ha->bus_cnt;
- ha->virt_bus = hdr_channel;
-
- /* 64-bit DMA only supported from FW >= x.43 */
- if (!(ha->cache_feat & ha->raw_feat & ha->screen_feat & GDT_64BIT) ||
- !ha->dma64_support) {
- if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "GDT-PCI %d: "
- "Unable to set 32-bit DMA\n", ha->hanum);
- goto out_free_pmsg;
- }
- } else {
- shp->max_cmd_len = 16;
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- printk("GDT-PCI %d: 64-bit DMA enabled\n", ha->hanum);
- } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- printk(KERN_WARNING "GDT-PCI %d: "
- "Unable to set 64/32-bit DMA\n", ha->hanum);
- goto out_free_pmsg;
- }
- }
-
- shp->max_id = ha->tid_cnt;
- shp->max_lun = MAXLUN;
- shp->max_channel = ha->bus_cnt;
-
- spin_lock_init(&ha->smp_lock);
- gdth_enable_int(ha);
-
- error = scsi_add_host(shp, &pdev->dev);
- if (error)
- goto out_free_pmsg;
- list_add_tail(&ha->list, &gdth_instances);
-
- pci_set_drvdata(ha->pdev, ha);
- gdth_timer_init();
-
- scsi_scan_host(shp);
-
- *ha_out = ha;
-
- return 0;
-
- out_free_pmsg:
- dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- out_free_pscratch:
- dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- out_free_irq:
- free_irq(ha->irq, ha);
- gdth_ctr_count--;
- out_host_put:
- scsi_host_put(shp);
- return error;
-}
-
-static void gdth_remove_one(gdth_ha_str *ha)
-{
- struct Scsi_Host *shp = ha->shost;
-
- TRACE2(("gdth_remove_one()\n"));
-
- scsi_remove_host(shp);
-
- gdth_flush(ha);
-
- if (ha->sdev) {
- scsi_free_host_dev(ha->sdev);
- ha->sdev = NULL;
- }
-
- if (shp->irq)
- free_irq(shp->irq,ha);
-
- if (ha->pscratch)
- dma_free_coherent(&ha->pdev->dev, GDTH_SCRATCH,
- ha->pscratch, ha->scratch_phys);
- if (ha->pmsg)
- dma_free_coherent(&ha->pdev->dev, sizeof(gdth_msg_str),
- ha->pmsg, ha->msg_phys);
- if (ha->ccb_phys)
- dma_unmap_single(&ha->pdev->dev, ha->ccb_phys,
- sizeof(gdth_cmd_str), DMA_BIDIRECTIONAL);
-
- scsi_host_put(shp);
-}
-
-static int gdth_halt(struct notifier_block *nb, unsigned long event, void *buf)
-{
- gdth_ha_str *ha;
-
- TRACE2(("gdth_halt() event %d\n", (int)event));
- if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
- return NOTIFY_DONE;
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_flush(ha);
-
- return NOTIFY_OK;
-}
-
-static struct notifier_block gdth_notifier = {
- gdth_halt, NULL, 0
-};
-
-static int __init gdth_init(void)
-{
- if (disable) {
- printk("GDT-HA: Controller driver disabled from"
- " command line !\n");
- return 0;
- }
-
- printk("GDT-HA: Storage RAID Controller Driver. Version: %s\n",
- GDTH_VERSION_STR);
-
- /* initializations */
- gdth_polling = TRUE;
- gdth_clear_events();
- timer_setup(&gdth_timer, gdth_timeout, 0);
-
- /* scanning for PCI controllers */
- if (pci_register_driver(&gdth_pci_driver)) {
- gdth_ha_str *ha;
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_remove_one(ha);
- return -ENODEV;
- }
-
- TRACE2(("gdth_detect() %d controller detected\n", gdth_ctr_count));
-
- major = register_chrdev(0,"gdth", &gdth_fops);
- register_reboot_notifier(&gdth_notifier);
- gdth_polling = FALSE;
- return 0;
-}
-
-static void __exit gdth_exit(void)
-{
- gdth_ha_str *ha;
-
- unregister_chrdev(major, "gdth");
- unregister_reboot_notifier(&gdth_notifier);
-
-#ifdef GDTH_STATISTICS
- del_timer_sync(&gdth_timer);
-#endif
-
- pci_unregister_driver(&gdth_pci_driver);
-
- list_for_each_entry(ha, &gdth_instances, list)
- gdth_remove_one(ha);
-}
-
-module_init(gdth_init);
-module_exit(gdth_exit);
-
-#ifndef MODULE
-static void __init internal_setup(char *str,int *ints)
-{
- int i;
- char *cur_str, *argv;
-
- TRACE2(("internal_setup() str %s ints[0] %d\n",
- str ? str:"NULL", ints ? ints[0]:0));
-
- /* analyse string */
- argv = str;
- while (argv && (cur_str = strchr(argv, ':'))) {
- int val = 0, c = *++cur_str;
-
- if (c == 'n' || c == 'N')
- val = 0;
- else if (c == 'y' || c == 'Y')
- val = 1;
- else
- val = (int)simple_strtoul(cur_str, NULL, 0);
-
- if (!strncmp(argv, "disable:", 8))
- disable = val;
- else if (!strncmp(argv, "reserve_mode:", 13))
- reserve_mode = val;
- else if (!strncmp(argv, "reverse_scan:", 13))
- reverse_scan = val;
- else if (!strncmp(argv, "hdr_channel:", 12))
- hdr_channel = val;
- else if (!strncmp(argv, "max_ids:", 8))
- max_ids = val;
- else if (!strncmp(argv, "rescan:", 7))
- rescan = val;
- else if (!strncmp(argv, "shared_access:", 14))
- shared_access = val;
- else if (!strncmp(argv, "reserve_list:", 13)) {
- reserve_list[0] = val;
- for (i = 1; i < MAX_RES_ARGS; i++) {
- cur_str = strchr(cur_str, ',');
- if (!cur_str)
- break;
- if (!isdigit((int)*++cur_str)) {
- --cur_str;
- break;
- }
- reserve_list[i] =
- (int)simple_strtoul(cur_str, NULL, 0);
- }
- if (!cur_str)
- break;
- argv = ++cur_str;
- continue;
- }
-
- if ((argv = strchr(argv, ',')))
- ++argv;
- }
-}
-
-static int __init option_setup(char *str)
-{
- int ints[MAXHA];
- char *cur = str;
- int i = 1;
-
- TRACE2(("option_setup() str %s\n", str ? str:"NULL"));
-
- while (cur && isdigit(*cur) && i < MAXHA) {
- ints[i++] = simple_strtoul(cur, NULL, 0);
- if ((cur = strchr(cur, ',')) != NULL) cur++;
- }
-
- ints[0] = i - 1;
- internal_setup(cur, ints);
- return 1;
-}
-
-__setup("gdth=", option_setup);
-#endif
diff --git a/drivers/scsi/gdth.h b/drivers/scsi/gdth.h
deleted file mode 100644
index 5a13d406d40e..000000000000
--- a/drivers/scsi/gdth.h
+++ /dev/null
@@ -1,981 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_H
-#define _GDTH_H
-
-/*
- * Header file for the GDT Disk Array/Storage RAID controllers driver for Linux
- *
- * gdth.h Copyright (C) 1995-06 ICP vortex, Achim Leubner
- *              See gdth.c for further information and
- * below for supported controller types
- *
- * <achim_leubner@adaptec.com>
- *
- * $Id: gdth.h,v 1.58 2006/01/11 16:14:09 achim Exp $
- */
-
-#include <linux/types.h>
-
-#ifndef TRUE
-#define TRUE 1
-#endif
-#ifndef FALSE
-#define FALSE 0
-#endif
-
-/* defines, macros */
-
-/* driver version */
-#define GDTH_VERSION_STR "3.05"
-#define GDTH_VERSION 3
-#define GDTH_SUBVERSION 5
-
-/* protocol version */
-#define PROTOCOL_VERSION 1
-
-/* OEM IDs */
-#define OEM_ID_ICP 0x941c
-#define OEM_ID_INTEL 0x8000
-
-/* controller classes */
-#define GDT_PCI 0x03 /* PCI controller */
-#define GDT_PCINEW 0x04 /* new PCI controller */
-#define GDT_PCIMPR 0x05 /* PCI MPR controller */
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT60x0
-/* GDT_PCI */
-#define PCI_DEVICE_ID_VORTEX_GDT60x0 0 /* GDT6000/6020/6050 */
-#define PCI_DEVICE_ID_VORTEX_GDT6000B 1 /* GDT6000B/6010 */
-/* GDT_PCINEW */
-#define PCI_DEVICE_ID_VORTEX_GDT6x10 2 /* GDT6110/6510 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x20 3 /* GDT6120/6520 */
-#define PCI_DEVICE_ID_VORTEX_GDT6530 4 /* GDT6530 */
-#define PCI_DEVICE_ID_VORTEX_GDT6550 5 /* GDT6550 */
-/* GDT_PCINEW, wide/ultra SCSI controllers */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17 6 /* GDT6117/6517 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27 7 /* GDT6127/6527 */
-#define PCI_DEVICE_ID_VORTEX_GDT6537 8 /* GDT6537 */
-#define PCI_DEVICE_ID_VORTEX_GDT6557 9 /* GDT6557/6557-ECC */
-/* GDT_PCINEW, wide SCSI controllers */
-#define PCI_DEVICE_ID_VORTEX_GDT6x15 10 /* GDT6115/6515 */
-#define PCI_DEVICE_ID_VORTEX_GDT6x25 11 /* GDT6125/6525 */
-#define PCI_DEVICE_ID_VORTEX_GDT6535 12 /* GDT6535 */
-#define PCI_DEVICE_ID_VORTEX_GDT6555 13 /* GDT6555/6555-ECC */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RP
-/* GDT_MPR, RP series, wide/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17RP 0x100 /* GDT6117RP/GDT6517RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27RP 0x101 /* GDT6127RP/GDT6527RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6537RP 0x102 /* GDT6537RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6557RP 0x103 /* GDT6557RP */
-/* GDT_MPR, RP series, narrow/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x11RP 0x104 /* GDT6111RP/GDT6511RP */
-#define PCI_DEVICE_ID_VORTEX_GDT6x21RP 0x105 /* GDT6121RP/GDT6521RP */
-#endif
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x17RD
-/* GDT_MPR, RD series, wide/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x17RD 0x110 /* GDT6117RD/GDT6517RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x27RD 0x111 /* GDT6127RD/GDT6527RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6537RD 0x112 /* GDT6537RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6557RD 0x113 /* GDT6557RD */
-/* GDT_MPR, RD series, narrow/ultra SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x11RD 0x114 /* GDT6111RD/GDT6511RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x21RD 0x115 /* GDT6121RD/GDT6521RD */
-/* GDT_MPR, RD series, wide/ultra2 SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT6x18RD 0x118 /* GDT6118RD/GDT6518RD/
- GDT6618RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x28RD 0x119 /* GDT6128RD/GDT6528RD/
- GDT6628RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x38RD 0x11A /* GDT6538RD/GDT6638RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x58RD 0x11B /* GDT6558RD/GDT6658RD */
-/* GDT_MPR, RN series (64-bit PCI), wide/ultra2 SCSI */
-#define PCI_DEVICE_ID_VORTEX_GDT7x18RN 0x168 /* GDT7118RN/GDT7518RN/
- GDT7618RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x28RN 0x169 /* GDT7128RN/GDT7528RN/
- GDT7628RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x38RN 0x16A /* GDT7538RN/GDT7638RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x58RN 0x16B /* GDT7558RN/GDT7658RN */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDT6x19RD
-/* GDT_MPR, RD series, Fibre Channel */
-#define PCI_DEVICE_ID_VORTEX_GDT6x19RD 0x210 /* GDT6519RD/GDT6619RD */
-#define PCI_DEVICE_ID_VORTEX_GDT6x29RD 0x211 /* GDT6529RD/GDT6629RD */
-/* GDT_MPR, RN series (64-bit PCI), Fibre Channel */
-#define PCI_DEVICE_ID_VORTEX_GDT7x19RN 0x260 /* GDT7519RN/GDT7619RN */
-#define PCI_DEVICE_ID_VORTEX_GDT7x29RN 0x261 /* GDT7529RN/GDT7629RN */
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTMAXRP
-/* GDT_MPR, last device ID */
-#define PCI_DEVICE_ID_VORTEX_GDTMAXRP 0x2ff
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX
-/* new GDT Rx Controller */
-#define PCI_DEVICE_ID_VORTEX_GDTNEWRX 0x300
-#endif
-
-#ifndef PCI_DEVICE_ID_VORTEX_GDTNEWRX2
-/* new(2) GDT Rx Controller */
-#define PCI_DEVICE_ID_VORTEX_GDTNEWRX2 0x301
-#endif
-
-#ifndef PCI_DEVICE_ID_INTEL_SRC
-/* Intel Storage RAID Controller */
-#define PCI_DEVICE_ID_INTEL_SRC 0x600
-#endif
-
-#ifndef PCI_DEVICE_ID_INTEL_SRC_XSCALE
-/* Intel Storage RAID Controller */
-#define PCI_DEVICE_ID_INTEL_SRC_XSCALE 0x601
-#endif
-
-/* limits */
-#define GDTH_SCRATCH PAGE_SIZE /* 4KB scratch buffer */
-#define GDTH_MAXCMDS 120
-#define GDTH_MAXC_P_L 16 /* max. cmds per lun */
-#define GDTH_MAX_RAW 2 /* max. cmds per raw device */
-#define MAXOFFSETS 128
-#define MAXHA 16
-#define MAXID 127
-#define MAXLUN 8
-#define MAXBUS 6
-#define MAX_EVENTS 100 /* event buffer count */
-#define MAX_RES_ARGS 40 /* device reservation,
- must be a multiple of 4 */
-#define MAXCYLS 1024
-#define HEADS 64
-#define SECS 32 /* mapping 64*32 */
-#define MEDHEADS 127
-#define MEDSECS 63 /* mapping 127*63 */
-#define BIGHEADS 255
-#define BIGSECS 63 /* mapping 255*63 */
-
-/* special command ptr. */
-#define UNUSED_CMND ((struct scsi_cmnd *)-1)
-#define INTERNAL_CMND ((struct scsi_cmnd *)-2)
-#define SCREEN_CMND ((struct scsi_cmnd *)-3)
-#define SPECIAL_SCP(p) (p==UNUSED_CMND || p==INTERNAL_CMND || p==SCREEN_CMND)
-
-/* controller services */
-#define SCSIRAWSERVICE 3
-#define CACHESERVICE 9
-#define SCREENSERVICE 11
-
-/* screenservice defines */
-#define MSG_INV_HANDLE -1 /* special message handle */
-#define MSGLEN 16 /* size of message text */
-#define MSG_SIZE 34 /* size of message structure */
-#define MSG_REQUEST 0 /* async. event: message */
-
-/* DPMEM constants */
-#define DPMEM_MAGIC 0xC0FFEE11
-#define IC_HEADER_BYTES 48
-#define IC_QUEUE_BYTES 4
-#define DPMEM_COMMAND_OFFSET IC_HEADER_BYTES+IC_QUEUE_BYTES*MAXOFFSETS
-
-/* cluster_type constants */
-#define CLUSTER_DRIVE 1
-#define CLUSTER_MOUNTED 2
-#define CLUSTER_RESERVED 4
-#define CLUSTER_RESERVE_STATE (CLUSTER_DRIVE|CLUSTER_MOUNTED|CLUSTER_RESERVED)
-
-/* commands for all services, cache service */
-#define GDT_INIT 0 /* service initialization */
-#define GDT_READ 1 /* read command */
-#define GDT_WRITE 2 /* write command */
-#define GDT_INFO 3 /* information about devices */
-#define GDT_FLUSH 4 /* flush dirty cache buffers */
-#define GDT_IOCTL 5 /* ioctl command */
-#define GDT_DEVTYPE 9 /* additional information */
-#define GDT_MOUNT 10 /* mount cache device */
-#define GDT_UNMOUNT 11 /* unmount cache device */
-#define GDT_SET_FEAT 12 /* set feat. (scatter/gather) */
-#define GDT_GET_FEAT 13 /* get features */
-#define GDT_WRITE_THR 16 /* write through */
-#define GDT_READ_THR 17 /* read through */
-#define GDT_EXT_INFO 18 /* extended info */
-#define GDT_RESET 19 /* controller reset */
-#define GDT_RESERVE_DRV 20 /* reserve host drive */
-#define GDT_RELEASE_DRV 21 /* release host drive */
-#define GDT_CLUST_INFO 22 /* cluster info */
-#define GDT_RW_ATTRIBS 23 /* R/W attribs (write thru,..)*/
-#define GDT_CLUST_RESET 24 /* releases the cluster drives*/
-#define GDT_FREEZE_IO 25 /* freezes all IOs */
-#define GDT_UNFREEZE_IO 26 /* unfreezes all IOs */
-#define GDT_X_INIT_HOST 29 /* ext. init: 64 bit support */
-#define GDT_X_INFO 30 /* ext. info for drives>2TB */
-
-/* raw service commands */
-#define GDT_RESERVE 14 /* reserve dev. to raw serv. */
-#define GDT_RELEASE 15 /* release device */
-#define GDT_RESERVE_ALL 16 /* reserve all devices */
-#define GDT_RELEASE_ALL 17 /* release all devices */
-#define GDT_RESET_BUS 18 /* reset bus */
-#define GDT_SCAN_START 19 /* start device scan */
-#define GDT_SCAN_END 20 /* stop device scan */
-#define GDT_X_INIT_RAW 21 /* ext. init: 64 bit support */
-
-/* screen service commands */
-#define GDT_REALTIME 3 /* realtime clock to screens. */
-#define GDT_X_INIT_SCR 4 /* ext. init: 64 bit support */
-
-/* IOCTL command defines */
-#define SCSI_DR_INFO 0x00 /* SCSI drive info */
-#define SCSI_CHAN_CNT 0x05 /* SCSI channel count */
-#define SCSI_DR_LIST 0x06 /* SCSI drive list */
-#define SCSI_DEF_CNT 0x15 /* grown/primary defects */
-#define DSK_STATISTICS 0x4b /* SCSI disk statistics */
-#define IOCHAN_DESC 0x5d /* description of IO channel */
-#define IOCHAN_RAW_DESC 0x5e /* description of raw IO chn. */
-#define L_CTRL_PATTERN 0x20000000L /* SCSI IOCTL mask */
-#define ARRAY_INFO 0x12 /* array drive info */
-#define ARRAY_DRV_LIST 0x0f /* array drive list */
-#define ARRAY_DRV_LIST2 0x34 /* array drive list (new) */
-#define LA_CTRL_PATTERN 0x10000000L /* array IOCTL mask */
-#define CACHE_DRV_CNT 0x01 /* cache drive count */
-#define CACHE_DRV_LIST 0x02 /* cache drive list */
-#define CACHE_INFO 0x04 /* cache info */
-#define CACHE_CONFIG 0x05 /* cache configuration */
-#define CACHE_DRV_INFO 0x07 /* cache drive info */
-#define BOARD_FEATURES 0x15 /* controller features */
-#define BOARD_INFO 0x28 /* controller info */
-#define SET_PERF_MODES 0x82 /* set mode (coalescing,..) */
-#define GET_PERF_MODES 0x83 /* get mode */
-#define CACHE_READ_OEM_STRING_RECORD 0x84 /* read OEM string record */
-#define HOST_GET 0x10001L /* get host drive list */
-#define IO_CHANNEL 0x00020000L /* default IO channel */
-#define INVALID_CHANNEL 0x0000ffffL /* invalid channel */
-
-/* service errors */
-#define S_OK 1 /* no error */
-#define S_GENERR 6 /* general error */
-#define S_BSY 7 /* controller busy */
-#define S_CACHE_UNKNOWN 12 /* cache serv.: drive unknown */
-#define S_RAW_SCSI 12 /* raw serv.: target error */
-#define S_RAW_ILL 0xff /* raw serv.: illegal */
-#define S_NOFUNC -2 /* unknown function */
-#define S_CACHE_RESERV -24 /* cache: reserv. conflict */
-
-/* timeout values */
-#define INIT_RETRIES 100000 /* 100000 * 1ms = 100s */
-#define INIT_TIMEOUT 100000 /* 100000 * 1ms = 100s */
-#define POLL_TIMEOUT 10000 /* 10000 * 1ms = 10s */
-
-/* priorities */
-#define DEFAULT_PRI 0x20
-#define IOCTL_PRI 0x10
-#define HIGH_PRI 0x08
-
-/* data directions */
-#define GDTH_DATA_IN 0x01000000L /* data from target */
-#define GDTH_DATA_OUT 0x00000000L /* data to target */
-
-/* other defines */
-#define LINUX_OS 8 /* used for cache optim. */
-#define SECS32 0x1f /* round capacity */
-#define BIOS_ID_OFFS 0x10 /* offset contr-ID in ISABIOS */
-#define LOCALBOARD 0 /* board node always 0 */
-#define ASYNCINDEX 0 /* cmd index async. event */
-#define SPEZINDEX 1 /* cmd index unknown service */
-#define COALINDEX (GDTH_MAXCMDS + 2)
-
-/* features */
-#define SCATTER_GATHER 1 /* s/g feature */
-#define GDT_WR_THROUGH 0x100 /* WRITE_THROUGH supported */
-#define GDT_64BIT 0x200 /* 64bit / drv>2TB support */
-
-#include "gdth_ioctl.h"
-
-/* screenservice message */
-typedef struct {
- u32 msg_handle; /* message handle */
- u32 msg_len; /* size of message */
- u32 msg_alen; /* answer length */
- u8 msg_answer; /* answer flag */
- u8 msg_ext; /* more messages */
- u8 msg_reserved[2];
- char msg_text[MSGLEN+2]; /* the message text */
-} __attribute__((packed)) gdth_msg_str;
-
-
-/* IOCTL data structures */
-
-/* Status coalescing buffer for returning multiple requests per interrupt */
-typedef struct {
- u32 status;
- u32 ext_status;
- u32 info0;
- u32 info1;
-} __attribute__((packed)) gdth_coal_status;
-
-/* performance mode data structure */
-typedef struct {
- u32 version; /* The version of this IOCTL structure. */
- u32 st_mode; /* 0=dis., 1=st_buf_addr1 valid, 2=both */
- u32 st_buff_addr1; /* physical address of status buffer 1 */
- u32 st_buff_u_addr1; /* reserved for 64 bit addressing */
- u32 st_buff_indx1; /* reserved command idx. for this buffer */
-    u32     st_buff_addr2;          /* physical address of status buffer 2 */
- u32 st_buff_u_addr2; /* reserved for 64 bit addressing */
- u32 st_buff_indx2; /* reserved command idx. for this buffer */
- u32 st_buff_size; /* size of each buffer in bytes */
- u32 cmd_mode; /* 0 = mode disabled, 1 = cmd_buff_addr1 */
- u32 cmd_buff_addr1; /* physical address of cmd buffer 1 */
- u32 cmd_buff_u_addr1; /* reserved for 64 bit addressing */
- u32 cmd_buff_indx1; /* cmd buf addr1 unique identifier */
-    u32     cmd_buff_addr2;         /* physical address of cmd buffer 2 */
-    u32     cmd_buff_u_addr2;       /* reserved for 64 bit addressing */
-    u32     cmd_buff_indx2;         /* cmd buf addr2 unique identifier */
- u32 cmd_buff_size; /* size of each cmd buffer in bytes */
- u32 reserved1;
- u32 reserved2;
-} __attribute__((packed)) gdth_perf_modes;
-
-/* SCSI drive info */
-typedef struct {
- u8 vendor[8]; /* vendor string */
- u8 product[16]; /* product string */
- u8 revision[4]; /* revision */
- u32 sy_rate; /* current rate for sync. tr. */
- u32 sy_max_rate; /* max. rate for sync. tr. */
- u32 no_ldrive; /* belongs to this log. drv.*/
- u32 blkcnt; /* number of blocks */
- u16 blksize; /* size of block in bytes */
- u8 available; /* flag: access is available */
- u8 init; /* medium is initialized */
- u8 devtype; /* SCSI devicetype */
- u8 rm_medium; /* medium is removable */
- u8 wp_medium; /* medium is write protected */
- u8 ansi; /* SCSI I/II or III? */
- u8 protocol; /* same as ansi */
- u8 sync; /* flag: sync. transfer enab. */
- u8 disc; /* flag: disconnect enabled */
-    u8      queueing;                   /* flag: command queuing enab. */
- u8 cached; /* flag: caching enabled */
- u8 target_id; /* target ID of device */
- u8 lun; /* LUN id of device */
- u8 orphan; /* flag: drive fragment */
- u32 last_error; /* sense key or drive state */
- u32 last_result; /* result of last command */
- u32 check_errors; /* err. in last surface check */
- u8 percent; /* progress for surface check */
- u8 last_check; /* IOCTRL operation */
- u8 res[2];
- u32 flags; /* from 1.19/2.19: raw reserv.*/
- u8 multi_bus; /* multi bus dev? (fibre ch.) */
- u8 mb_status; /* status: available? */
- u8 res2[2];
- u8 mb_alt_status; /* status on second bus */
- u8 mb_alt_bid; /* number of second bus */
- u8 mb_alt_tid; /* target id on second bus */
- u8 res3;
- u8 fc_flag; /* from 1.22/2.22: info valid?*/
- u8 res4;
- u16 fc_frame_size; /* frame size (bytes) */
- char wwn[8]; /* world wide name */
-} __attribute__((packed)) gdth_diskinfo_str;
-
-/* get SCSI channel count */
-typedef struct {
- u32 channel_no; /* number of channel */
- u32 drive_cnt; /* drive count */
- u8 siop_id; /* SCSI processor ID */
- u8 siop_state; /* SCSI processor state */
-} __attribute__((packed)) gdth_getch_str;
-
-/* get SCSI drive numbers */
-typedef struct {
- u32 sc_no; /* SCSI channel */
- u32 sc_cnt; /* sc_list[] elements */
- u32 sc_list[MAXID]; /* minor device numbers */
-} __attribute__((packed)) gdth_drlist_str;
-
-/* get grown/primary defect count */
-typedef struct {
- u8 sddc_type; /* 0x08: grown, 0x10: prim. */
- u8 sddc_format; /* list entry format */
- u8 sddc_len; /* list entry length */
- u8 sddc_res;
- u32 sddc_cnt; /* entry count */
-} __attribute__((packed)) gdth_defcnt_str;
-
-/* disk statistics */
-typedef struct {
- u32 bid; /* SCSI channel */
- u32 first; /* first SCSI disk */
- u32 entries; /* number of elements */
- u32 count; /* (R) number of init. el. */
- u32 mon_time; /* time stamp */
- struct {
- u8 tid; /* target ID */
- u8 lun; /* LUN */
- u8 res[2];
- u32 blk_size; /* block size in bytes */
- u32 rd_count; /* bytes read */
- u32 wr_count; /* bytes written */
- u32 rd_blk_count; /* blocks read */
- u32 wr_blk_count; /* blocks written */
- u32 retries; /* retries */
- u32 reassigns; /* reassigns */
- } __attribute__((packed)) list[1];
-} __attribute__((packed)) gdth_dskstat_str;
-
-/* IO channel header */
-typedef struct {
- u32 version; /* version (-1UL: newest) */
- u8 list_entries; /* list entry count */
- u8 first_chan; /* first channel number */
- u8 last_chan; /* last channel number */
- u8 chan_count; /* (R) channel count */
- u32 list_offset; /* offset of list[0] */
-} __attribute__((packed)) gdth_iochan_header;
-
-/* get IO channel description */
-typedef struct {
- gdth_iochan_header hdr;
- struct {
- u32 address; /* channel address */
- u8 type; /* type (SCSI, FCAL) */
- u8 local_no; /* local number */
- u16 features; /* channel features */
- } __attribute__((packed)) list[MAXBUS];
-} __attribute__((packed)) gdth_iochan_str;
-
-/* get raw IO channel description */
-typedef struct {
- gdth_iochan_header hdr;
- struct {
- u8 proc_id; /* processor id */
- u8 proc_defect; /* defect ? */
- u8 reserved[2];
- } __attribute__((packed)) list[MAXBUS];
-} __attribute__((packed)) gdth_raw_iochan_str;
-
-/* array drive component */
-typedef struct {
- u32 al_controller; /* controller ID */
- u8 al_cache_drive; /* cache drive number */
- u8 al_status; /* cache drive state */
- u8 al_res[2];
-} __attribute__((packed)) gdth_arraycomp_str;
-
-/* array drive information */
-typedef struct {
- u8 ai_type; /* array type (RAID0,4,5) */
- u8 ai_cache_drive_cnt; /* active cachedrives */
- u8 ai_state; /* array drive state */
- u8 ai_master_cd; /* master cachedrive */
- u32 ai_master_controller; /* ID of master controller */
- u32 ai_size; /* user capacity [sectors] */
- u32 ai_striping_size; /* striping size [sectors] */
- u32 ai_secsize; /* sector size [bytes] */
- u32 ai_err_info; /* failed cache drive */
- u8 ai_name[8]; /* name of the array drive */
- u8 ai_controller_cnt; /* number of controllers */
- u8 ai_removable; /* flag: removable */
- u8 ai_write_protected; /* flag: write protected */
- u8 ai_devtype; /* type: always direct access */
- gdth_arraycomp_str ai_drives[35]; /* drive components: */
- u8 ai_drive_entries; /* number of drive components */
- u8 ai_protected; /* protection flag */
- u8 ai_verify_state; /* state of a parity verify */
- u8 ai_ext_state; /* extended array drive state */
- u8 ai_expand_state; /* array expand state (>=2.18)*/
- u8 ai_reserved[3];
-} __attribute__((packed)) gdth_arrayinf_str;
-
-/* get array drive list */
-typedef struct {
- u32 controller_no; /* controller no. */
- u8 cd_handle; /* master cachedrive */
- u8 is_arrayd; /* Flag: is array drive? */
- u8 is_master; /* Flag: is array master? */
- u8 is_parity; /* Flag: is parity drive? */
- u8 is_hotfix; /* Flag: is hotfix drive? */
- u8 res[3];
-} __attribute__((packed)) gdth_alist_str;
-
-typedef struct {
- u32 entries_avail; /* allocated entries */
- u32 entries_init; /* returned entries */
- u32 first_entry; /* first entry number */
- u32 list_offset; /* offset of following list */
- gdth_alist_str list[1]; /* list */
-} __attribute__((packed)) gdth_arcdl_str;
-
-/* cache info/config IOCTL */
-typedef struct {
- u32 version; /* firmware version */
- u16 state; /* cache state (on/off) */
- u16 strategy; /* cache strategy */
- u16 write_back; /* write back state (on/off) */
- u16 block_size; /* cache block size */
-} __attribute__((packed)) gdth_cpar_str;
-
-typedef struct {
- u32 csize; /* cache size */
- u32 read_cnt; /* read/write counter */
- u32 write_cnt;
- u32 tr_hits; /* hits */
- u32 sec_hits;
- u32 sec_miss; /* misses */
-} __attribute__((packed)) gdth_cstat_str;
-
-typedef struct {
- gdth_cpar_str cpar;
- gdth_cstat_str cstat;
-} __attribute__((packed)) gdth_cinfo_str;
-
-/* cache drive info */
-typedef struct {
- u8 cd_name[8]; /* cache drive name */
- u32 cd_devtype; /* SCSI devicetype */
- u32 cd_ldcnt; /* number of log. drives */
- u32 cd_last_error; /* last error */
- u8 cd_initialized; /* drive is initialized */
- u8 cd_removable; /* media is removable */
- u8 cd_write_protected; /* write protected */
- u8 cd_flags; /* Pool Hot Fix? */
- u32 ld_blkcnt; /* number of blocks */
- u32 ld_blksize; /* blocksize */
- u32 ld_dcnt; /* number of disks */
- u32 ld_slave; /* log. drive index */
- u32 ld_dtype; /* type of logical drive */
- u32 ld_last_error; /* last error */
- u8 ld_name[8]; /* log. drive name */
- u8 ld_error; /* error */
-} __attribute__((packed)) gdth_cdrinfo_str;
-
-/* OEM string */
-typedef struct {
- u32 ctl_version;
- u32 file_major_version;
- u32 file_minor_version;
- u32 buffer_size;
- u32 cpy_count;
- u32 ext_error;
- u32 oem_id;
- u32 board_id;
-} __attribute__((packed)) gdth_oem_str_params;
-
-typedef struct {
- u8 product_0_1_name[16];
- u8 product_4_5_name[16];
- u8 product_cluster_name[16];
- u8 product_reserved[16];
- u8 scsi_cluster_target_vendor_id[16];
- u8 cluster_raid_fw_name[16];
- u8 oem_brand_name[16];
- u8 oem_raid_type[16];
- u8 bios_type[13];
- u8 bios_title[50];
- u8 oem_company_name[37];
- u32 pci_id_1;
- u32 pci_id_2;
- u8 validation_status[80];
- u8 reserved_1[4];
- u8 scsi_host_drive_inquiry_vendor_id[16];
- u8 library_file_template[16];
- u8 reserved_2[16];
- u8 tool_name_1[32];
- u8 tool_name_2[32];
- u8 tool_name_3[32];
- u8 oem_contact_1[84];
- u8 oem_contact_2[84];
- u8 oem_contact_3[84];
-} __attribute__((packed)) gdth_oem_str;
-
-typedef struct {
- gdth_oem_str_params params;
- gdth_oem_str text;
-} __attribute__((packed)) gdth_oem_str_ioctl;
-
-/* board features */
-typedef struct {
- u8 chaining; /* Chaining supported */
- u8 striping; /* Striping (RAID-0) supp. */
- u8 mirroring; /* Mirroring (RAID-1) supp. */
- u8 raid; /* RAID-4/5/10 supported */
-} __attribute__((packed)) gdth_bfeat_str;
-
-/* board info IOCTL */
-typedef struct {
- u32 ser_no; /* serial no. */
- u8 oem_id[2]; /* OEM ID */
- u16 ep_flags; /* eprom flags */
- u32 proc_id; /* processor ID */
- u32 memsize; /* memory size (bytes) */
- u8 mem_banks; /* memory banks */
- u8 chan_type; /* channel type */
- u8 chan_count; /* channel count */
- u8 rdongle_pres; /* dongle present? */
- u32 epr_fw_ver; /* (eprom) firmware version */
- u32 upd_fw_ver; /* (update) firmware version */
- u32 upd_revision; /* update revision */
- char type_string[16]; /* controller name */
- char raid_string[16]; /* RAID firmware name */
- u8 update_pres; /* update present? */
- u8 xor_pres; /* XOR engine present? */
- u8 prom_type; /* ROM type (eprom/flash) */
- u8 prom_count; /* number of ROM devices */
- u32 dup_pres; /* duplexing module present? */
- u32 chan_pres; /* number of expansion chn. */
- u32 mem_pres; /* memory expansion inst. ? */
- u8 ft_bus_system; /* fault bus supported? */
- u8 subtype_valid; /* board_subtype valid? */
- u8 board_subtype; /* subtype/hardware level */
- u8 ramparity_pres; /* RAM parity check hardware? */
-} __attribute__((packed)) gdth_binfo_str;
-
-/* get host drive info */
-typedef struct {
- char name[8]; /* host drive name */
- u32 size; /* size (sectors) */
- u8 host_drive; /* host drive number */
- u8 log_drive; /* log. drive (master) */
- u8 reserved;
- u8 rw_attribs; /* r/w attribs */
- u32 start_sec; /* start sector */
-} __attribute__((packed)) gdth_hentry_str;
-
-typedef struct {
- u32 entries; /* entry count */
- u32 offset; /* offset of entries */
- u8 secs_p_head; /* sectors/head */
- u8 heads_p_cyl; /* heads/cylinder */
- u8 reserved;
- u8 clust_drvtype; /* cluster drive type */
- u32 location; /* controller number */
- gdth_hentry_str entry[MAX_HDRIVES]; /* entries */
-} __attribute__((packed)) gdth_hget_str;
-
-
-/* DPRAM structures */
-
-/* interface area ISA/PCI */
-typedef struct {
- u8 S_Cmd_Indx; /* special command */
- u8 volatile S_Status; /* status special command */
- u16 reserved1;
- u32 S_Info[4]; /* add. info special command */
- u8 volatile Sema0; /* command semaphore */
- u8 reserved2[3];
- u8 Cmd_Index; /* command number */
- u8 reserved3[3];
- u16 volatile Status; /* command status */
- u16 Service; /* service(for async.events) */
- u32 Info[2]; /* additional info */
- struct {
- u16 offset; /* command offs. in the DPRAM*/
- u16 serv_id; /* service */
- } __attribute__((packed)) comm_queue[MAXOFFSETS]; /* command queue */
- u32 bios_reserved[2];
- u8 gdt_dpr_cmd[1]; /* commands */
-} __attribute__((packed)) gdt_dpr_if;
-
-/* SRAM structure PCI controllers */
-typedef struct {
- u32 magic; /* controller ID from BIOS */
- u16 need_deinit; /* switch betw. BIOS/driver */
- u8 switch_support; /* see need_deinit */
- u8 padding[9];
- u8 os_used[16]; /* OS code per service */
- u8 unused[28];
- u8 fw_magic; /* contr. ID from firmware */
-} __attribute__((packed)) gdt_pci_sram;
-
-/* DPRAM ISA controllers */
-typedef struct {
- union {
- struct {
- u8 bios_used[0x3c00-32]; /* 15KB - 32Bytes BIOS */
- u16 need_deinit; /* switch betw. BIOS/driver */
- u8 switch_support; /* see need_deinit */
- u8 padding[9];
- u8 os_used[16]; /* OS code per service */
- } __attribute__((packed)) dp_sram;
- u8 bios_area[0x4000]; /* 16KB reserved for BIOS */
- } bu;
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x3000]; /* 12KB for interface */
- } u;
- struct {
- u8 memlock; /* write protection DPRAM */
- u8 event; /* release event */
- u8 irqen; /* board interrupts enable */
- u8 irqdel; /* acknowledge board int. */
- u8 volatile Sema1; /* status semaphore */
- u8 rq; /* IRQ/DRQ configuration */
- } __attribute__((packed)) io;
-} __attribute__((packed)) gdt2_dpram_str;
-
-/* DPRAM PCI controllers */
-typedef struct {
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0xff0-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
- struct {
- u8 unused0[1];
- u8 volatile Sema1; /* command semaphore */
- u8 unused1[3];
- u8 irqen; /* board interrupts enable */
- u8 unused2[2];
- u8 event; /* release event */
- u8 unused3[3];
- u8 irqdel; /* acknowledge board int. */
- u8 unused4[3];
- } __attribute__((packed)) io;
-} __attribute__((packed)) gdt6_dpram_str;
-
-/* PLX register structure (new PCI controllers) */
-typedef struct {
- u8 cfg_reg; /* DPRAM cfg.(2:below 1MB,0:anywhere)*/
- u8 unused1[0x3f];
- u8 volatile sema0_reg; /* command semaphore */
- u8 volatile sema1_reg; /* status semaphore */
- u8 unused2[2];
- u16 volatile status; /* command status */
- u16 service; /* service */
- u32 info[2]; /* additional info */
- u8 unused3[0x10];
- u8 ldoor_reg; /* PCI to local doorbell */
- u8 unused4[3];
- u8 volatile edoor_reg; /* local to PCI doorbell */
- u8 unused5[3];
- u8 control0; /* control0 register(unused) */
- u8 control1; /* board interrupts enable */
- u8 unused6[0x16];
-} __attribute__((packed)) gdt6c_plx_regs;
-
-/* DPRAM new PCI controllers */
-typedef struct {
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x4000-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
-} __attribute__((packed)) gdt6c_dpram_str;
-
-/* i960 register structure (PCI MPR controllers) */
-typedef struct {
- u8 unused1[16];
- u8 volatile sema0_reg; /* command semaphore */
- u8 unused2;
- u8 volatile sema1_reg; /* status semaphore */
- u8 unused3;
- u16 volatile status; /* command status */
- u16 service; /* service */
- u32 info[2]; /* additional info */
- u8 ldoor_reg; /* PCI to local doorbell */
- u8 unused4[11];
- u8 volatile edoor_reg; /* local to PCI doorbell */
- u8 unused5[7];
- u8 edoor_en_reg; /* board interrupts enable */
- u8 unused6[27];
- u32 unused7[939];
- u32 severity;
- char evt_str[256]; /* event string */
-} __attribute__((packed)) gdt6m_i960_regs;
-
-/* DPRAM PCI MPR controllers */
-typedef struct {
- gdt6m_i960_regs i960r; /* 4KB i960 registers */
- union {
- gdt_dpr_if ic; /* interface area */
- u8 if_area[0x3000-sizeof(gdt_pci_sram)];
- } u;
- gdt_pci_sram gdt6sr; /* SRAM structure */
-} __attribute__((packed)) gdt6m_dpram_str;
-
-
-/* PCI resources */
-typedef struct {
- struct pci_dev *pdev;
- unsigned long dpmem; /* DPRAM address */
- unsigned long io; /* IO address */
-} gdth_pci_str;
-
-
-/* controller information structure */
-typedef struct {
- struct Scsi_Host *shost;
- struct list_head list;
- u16 hanum;
- u16 oem_id; /* OEM */
- u16 type; /* controller class */
- u32 stype; /* subtype (PCI: device ID) */
- u16 fw_vers; /* firmware version */
- u16 cache_feat; /* feat. cache serv. (s/g,..)*/
- u16 raw_feat; /* feat. raw service (s/g,..)*/
- u16 screen_feat; /* feat. raw service (s/g,..)*/
- void __iomem *brd; /* DPRAM address */
- u32 brd_phys; /* slot number/BIOS address */
- gdt6c_plx_regs *plx; /* PLX regs (new PCI contr.) */
- gdth_cmd_str cmdext;
- gdth_cmd_str *pccb; /* address command structure */
- u32 ccb_phys; /* phys. address */
-#ifdef INT_COAL
- gdth_coal_status *coal_stat; /* buffer for coalescing int.*/
- u64 coal_stat_phys; /* phys. address */
-#endif
- char *pscratch; /* scratch (DMA) buffer */
- u64 scratch_phys; /* phys. address */
- u8 scratch_busy; /* in use? */
- u8 dma64_support; /* 64-bit DMA supported? */
- gdth_msg_str *pmsg; /* message buffer */
- u64 msg_phys; /* phys. address */
- u8 scan_mode; /* current scan mode */
- u8 irq; /* IRQ */
- u8 drq; /* DRQ (ISA controllers) */
- u16 status; /* command status */
- u16 service; /* service/firmware ver./.. */
- u32 info;
- u32 info2; /* additional info */
- struct scsi_cmnd *req_first; /* top of request queue */
- struct {
- u8 present; /* Flag: host drive present? */
- u8 is_logdrv; /* Flag: log. drive (master)? */
- u8 is_arraydrv; /* Flag: array drive? */
- u8 is_master; /* Flag: array drive master? */
- u8 is_parity; /* Flag: parity drive? */
- u8 is_hotfix; /* Flag: hotfix drive? */
- u8 master_no; /* number of master drive */
- u8 lock; /* drive locked? (hot plug) */
- u8 heads; /* mapping */
- u8 secs;
- u16 devtype; /* further information */
- u64 size; /* capacity */
- u8 ldr_no; /* log. drive no. */
- u8 rw_attribs; /* r/w attributes */
- u8 cluster_type; /* cluster properties */
- u8 media_changed; /* Flag:MOUNT/UNMOUNT occurred */
- u32 start_sec; /* start sector */
- } hdr[MAX_LDRIVES]; /* host drives */
- struct {
- u8 lock; /* channel locked? (hot plug) */
- u8 pdev_cnt; /* physical device count */
- u8 local_no; /* local channel number */
- u8 io_cnt[MAXID]; /* current IO count */
- u32 address; /* channel address */
- u32 id_list[MAXID]; /* IDs of the phys. devices */
- } raw[MAXBUS]; /* SCSI channels */
- struct {
- struct scsi_cmnd *cmnd; /* pending request */
- u16 service; /* service */
- } cmd_tab[GDTH_MAXCMDS]; /* table of pend. requests */
- struct gdth_cmndinfo { /* per-command private info */
- int index;
- int internal_command; /* don't call scsi_done */
- gdth_cmd_str *internal_cmd_str; /* crier for internal messages*/
- dma_addr_t sense_paddr; /* sense dma-addr */
- u8 priority;
- int timeout_count; /* # of timeout calls */
- volatile int wait_for_completion;
- u16 status;
- u32 info;
- enum dma_data_direction dma_dir;
- int phase; /* ???? */
- int OpCode;
- } cmndinfo[GDTH_MAXCMDS]; /* index==0 is free */
- u8 bus_cnt; /* SCSI bus count */
- u8 tid_cnt; /* Target ID count */
- u8 bus_id[MAXBUS]; /* IOP IDs */
- u8 virt_bus; /* number of virtual bus */
- u8 more_proc; /* more /proc info supported */
- u16 cmd_cnt; /* command count in DPRAM */
- u16 cmd_len; /* length of actual command */
- u16 cmd_offs_dpmem; /* actual offset in DPRAM */
- u16 ic_all_size; /* sizeof DPRAM interf. area */
- gdth_cpar_str cpar; /* controller cache par. */
- gdth_bfeat_str bfeat; /* controller features */
- gdth_binfo_str binfo; /* controller info */
- gdth_evt_data dvr; /* event structure */
- spinlock_t smp_lock;
- struct pci_dev *pdev;
- char oem_name[8];
-#ifdef GDTH_DMA_STATISTICS
- unsigned long dma32_cnt, dma64_cnt; /* statistics: DMA buffer */
-#endif
- struct scsi_device *sdev;
-} gdth_ha_str;
-
-static inline struct gdth_cmndinfo *gdth_cmnd_priv(struct scsi_cmnd* cmd)
-{
- return (struct gdth_cmndinfo *)cmd->host_scribble;
-}
-
-/* INQUIRY data format */
-typedef struct {
- u8 type_qual;
- u8 modif_rmb;
- u8 version;
- u8 resp_aenc;
- u8 add_length;
- u8 reserved1;
- u8 reserved2;
- u8 misc;
- u8 vendor[8];
- u8 product[16];
- u8 revision[4];
-} __attribute__((packed)) gdth_inq_data;
-
-/* READ_CAPACITY data format */
-typedef struct {
- u32 last_block_no;
- u32 block_length;
-} __attribute__((packed)) gdth_rdcap_data;
-
-/* READ_CAPACITY (16) data format */
-typedef struct {
- u64 last_block_no;
- u32 block_length;
-} __attribute__((packed)) gdth_rdcap16_data;
-
-/* REQUEST_SENSE data format */
-typedef struct {
- u8 errorcode;
- u8 segno;
- u8 key;
- u32 info;
- u8 add_length;
- u32 cmd_info;
- u8 adsc;
- u8 adsq;
- u8 fruc;
- u8 key_spec[3];
-} __attribute__((packed)) gdth_sense_data;
-
-/* MODE_SENSE data format */
-typedef struct {
- struct {
- u8 data_length;
- u8 med_type;
- u8 dev_par;
- u8 bd_length;
- } __attribute__((packed)) hd;
- struct {
- u8 dens_code;
- u8 block_count[3];
- u8 reserved;
- u8 block_length[3];
- } __attribute__((packed)) bd;
-} __attribute__((packed)) gdth_modep_data;
-
-/* stack frame */
-typedef struct {
- unsigned long b[10]; /* 32/64 bit compiler ! */
-} __attribute__((packed)) gdth_stackframe;
-
-
-/* function prototyping */
-
-int gdth_show_info(struct seq_file *, struct Scsi_Host *);
-int gdth_set_info(struct Scsi_Host *, char *, int);
-
-#endif
diff --git a/drivers/scsi/gdth_ioctl.h b/drivers/scsi/gdth_ioctl.h
deleted file mode 100644
index ee4c9bf1022a..000000000000
--- a/drivers/scsi/gdth_ioctl.h
+++ /dev/null
@@ -1,251 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_IOCTL_H
-#define _GDTH_IOCTL_H
-
-/* gdth_ioctl.h
- * $Id: gdth_ioctl.h,v 1.14 2004/02/19 15:43:15 achim Exp $
- */
-
-/* IOCTLs */
-#define GDTIOCTL_MASK ('J'<<8)
-#define GDTIOCTL_GENERAL (GDTIOCTL_MASK | 0) /* general IOCTL */
-#define GDTIOCTL_DRVERS (GDTIOCTL_MASK | 1) /* get driver version */
-#define GDTIOCTL_CTRTYPE (GDTIOCTL_MASK | 2) /* get controller type */
-#define GDTIOCTL_OSVERS (GDTIOCTL_MASK | 3) /* get OS version */
-#define GDTIOCTL_HDRLIST (GDTIOCTL_MASK | 4) /* get host drive list */
-#define GDTIOCTL_CTRCNT (GDTIOCTL_MASK | 5) /* get controller count */
-#define GDTIOCTL_LOCKDRV (GDTIOCTL_MASK | 6) /* lock host drive */
-#define GDTIOCTL_LOCKCHN (GDTIOCTL_MASK | 7) /* lock channel */
-#define GDTIOCTL_EVENT (GDTIOCTL_MASK | 8) /* read controller events */
-#define GDTIOCTL_SCSI (GDTIOCTL_MASK | 9) /* SCSI command */
-#define GDTIOCTL_RESET_BUS (GDTIOCTL_MASK |10) /* reset SCSI bus */
-#define GDTIOCTL_RESCAN (GDTIOCTL_MASK |11) /* rescan host drives */
-#define GDTIOCTL_RESET_DRV (GDTIOCTL_MASK |12) /* reset (remote) drv. res. */
-
-#define GDTIOCTL_MAGIC 0xaffe0004
-#define EVENT_SIZE 294
-#define GDTH_MAXSG 32 /* max. s/g elements */
-
-#define MAX_LDRIVES 255 /* max. log. drive count */
-#define MAX_HDRIVES MAX_LDRIVES /* max. host drive count */
-
-/* scatter/gather element */
-typedef struct {
- u32 sg_ptr; /* address */
- u32 sg_len; /* length */
-} __attribute__((packed)) gdth_sg_str;
-
-/* scatter/gather element - 64bit addresses */
-typedef struct {
- u64 sg_ptr; /* address */
- u32 sg_len; /* length */
-} __attribute__((packed)) gdth_sg64_str;
-
-/* command structure */
-typedef struct {
- u32 BoardNode; /* board node (always 0) */
- u32 CommandIndex; /* command number */
- u16 OpCode; /* the command (READ,..) */
- union {
- struct {
- u16 DeviceNo; /* number of cache drive */
- u32 BlockNo; /* block number */
- u32 BlockCnt; /* block count */
- u32 DestAddr; /* dest. addr. (if s/g: -1) */
- u32 sg_canz; /* s/g element count */
- gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) cache; /* cache service cmd. str. */
- struct {
- u16 DeviceNo; /* number of cache drive */
- u64 BlockNo; /* block number */
- u32 BlockCnt; /* block count */
- u64 DestAddr; /* dest. addr. (if s/g: -1) */
- u32 sg_canz; /* s/g element count */
- gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) cache64; /* cache service cmd. str. */
- struct {
- u16 param_size; /* size of p_param buffer */
- u32 subfunc; /* IOCTL function */
- u32 channel; /* device */
- u64 p_param; /* buffer */
- } __attribute__((packed)) ioctl; /* IOCTL command structure */
- struct {
- u16 reserved;
- union {
- struct {
- u32 msg_handle; /* message handle */
- u64 msg_addr; /* message buffer address */
- } __attribute__((packed)) msg;
- u8 data[12]; /* buffer for rtc data, ... */
- } su;
- } __attribute__((packed)) screen; /* screen service cmd. str. */
- struct {
- u16 reserved;
- u32 direction; /* data direction */
- u32 mdisc_time; /* disc. time (0: no timeout)*/
- u32 mcon_time; /* connect time(0: no to.) */
- u32 sdata; /* dest. addr. (if s/g: -1) */
- u32 sdlen; /* data length (bytes) */
- u32 clen; /* SCSI cmd. length(6,10,12) */
- u8 cmd[12]; /* SCSI command */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 bus; /* SCSI bus number */
- u8 priority; /* only 0 used */
- u32 sense_len; /* sense data length */
- u32 sense_data; /* sense data addr. */
- u32 link_p; /* linked cmds (not supp.) */
- u32 sg_ranz; /* s/g element count */
- gdth_sg_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) raw; /* raw service cmd. struct. */
- struct {
- u16 reserved;
- u32 direction; /* data direction */
- u32 mdisc_time; /* disc. time (0: no timeout)*/
- u32 mcon_time; /* connect time(0: no to.) */
- u64 sdata; /* dest. addr. (if s/g: -1) */
- u32 sdlen; /* data length (bytes) */
- u32 clen; /* SCSI cmd. length(6,..,16) */
- u8 cmd[16]; /* SCSI command */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 bus; /* SCSI bus number */
- u8 priority; /* only 0 used */
- u32 sense_len; /* sense data length */
- u64 sense_data; /* sense data addr. */
- u32 sg_ranz; /* s/g element count */
- gdth_sg64_str sg_lst[GDTH_MAXSG]; /* s/g list */
- } __attribute__((packed)) raw64; /* raw service cmd. struct. */
- } u;
- /* additional variables */
- u8 Service; /* controller service */
- u8 reserved;
- u16 Status; /* command result */
- u32 Info; /* additional information */
- void *RequestBuffer; /* request buffer */
-} __attribute__((packed)) gdth_cmd_str;
-
-/* controller event structure */
-#define ES_ASYNC 1
-#define ES_DRIVER 2
-#define ES_TEST 3
-#define ES_SYNC 4
-typedef struct {
- u16 size; /* size of structure */
- union {
- char stream[16];
- struct {
- u16 ionode;
- u16 service;
- u32 index;
- } __attribute__((packed)) driver;
- struct {
- u16 ionode;
- u16 service;
- u16 status;
- u32 info;
- u8 scsi_coord[3];
- } __attribute__((packed)) async;
- struct {
- u16 ionode;
- u16 service;
- u16 status;
- u32 info;
- u16 hostdrive;
- u8 scsi_coord[3];
- u8 sense_key;
- } __attribute__((packed)) sync;
- struct {
- u32 l1, l2, l3, l4;
- } __attribute__((packed)) test;
- } eu;
- u32 severity;
- u8 event_string[256];
-} __attribute__((packed)) gdth_evt_data;
-
-typedef struct {
- u32 first_stamp;
- u32 last_stamp;
- u16 same_count;
- u16 event_source;
- u16 event_idx;
- u8 application;
- u8 reserved;
- gdth_evt_data event_data;
-} __attribute__((packed)) gdth_evt_str;
-
-/* GDTIOCTL_GENERAL */
-typedef struct {
- u16 ionode; /* controller number */
- u16 timeout; /* timeout */
- u32 info; /* error info */
- u16 status; /* status */
- unsigned long data_len; /* data buffer size */
- unsigned long sense_len; /* sense buffer size */
- gdth_cmd_str command; /* command */
-} gdth_ioctl_general;
-
-/* GDTIOCTL_LOCKDRV */
-typedef struct {
- u16 ionode; /* controller number */
- u8 lock; /* lock/unlock */
- u8 drive_cnt; /* drive count */
- u16 drives[MAX_HDRIVES]; /* drives */
-} gdth_ioctl_lockdrv;
-
-/* GDTIOCTL_LOCKCHN */
-typedef struct {
- u16 ionode; /* controller number */
- u8 lock; /* lock/unlock */
- u8 channel; /* channel */
-} gdth_ioctl_lockchn;
-
-/* GDTIOCTL_OSVERS */
-typedef struct {
- u8 version; /* OS version */
- u8 subversion; /* OS subversion */
- u16 revision; /* revision */
-} gdth_ioctl_osvers;
-
-/* GDTIOCTL_CTRTYPE */
-typedef struct {
- u16 ionode; /* controller number */
- u8 type; /* controller type */
- u16 info; /* slot etc. */
- u16 oem_id; /* OEM ID */
- u16 bios_ver; /* not used */
- u16 access; /* not used */
- u16 ext_type; /* extended type */
- u16 device_id; /* device ID */
- u16 sub_device_id; /* sub device ID */
-} gdth_ioctl_ctrtype;
-
-/* GDTIOCTL_EVENT */
-typedef struct {
- u16 ionode;
- int erase; /* erase event? */
- int handle; /* event handle */
- gdth_evt_str event;
-} gdth_ioctl_event;
-
-/* GDTIOCTL_RESCAN/GDTIOCTL_HDRLIST */
-typedef struct {
- u16 ionode; /* controller number */
- u8 flag; /* add/remove */
- u16 hdr_no; /* drive no. */
- struct {
- u8 bus; /* SCSI bus */
- u8 target; /* target ID */
- u8 lun; /* LUN */
- u8 cluster_type; /* cluster properties */
- } hdr_list[MAX_HDRIVES]; /* index is host drive number */
-} gdth_ioctl_rescan;
-
-/* GDTIOCTL_RESET_BUS/GDTIOCTL_RESET_DRV */
-typedef struct {
- u16 ionode; /* controller number */
- u16 number; /* bus/host drive number */
- u16 status; /* status */
-} gdth_ioctl_reset;
-
-#endif
diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
deleted file mode 100644
index c764312f9ba0..000000000000
--- a/drivers/scsi/gdth_proc.c
+++ /dev/null
@@ -1,586 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/* gdth_proc.c
- * $Id: gdth_proc.c,v 1.43 2006/01/11 16:15:00 achim Exp $
- */
-
-#include <linux/completion.h>
-#include <linux/slab.h>
-
-int gdth_set_info(struct Scsi_Host *host, char *buffer, int length)
-{
- gdth_ha_str *ha = shost_priv(host);
- int ret_val = -EINVAL;
-
- TRACE2(("gdth_set_info() ha %d\n",ha->hanum,));
-
- if (length >= 4) {
- if (strncmp(buffer,"gdth",4) == 0) {
- buffer += 5;
- length -= 5;
- ret_val = gdth_set_asc_info(host, buffer, length, ha);
- }
- }
-
- return ret_val;
-}
-
-static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
- int length, gdth_ha_str *ha)
-{
- int orig_length, drive, wb_mode;
- int i, found;
- gdth_cmd_str gdtcmd;
- gdth_cpar_str *pcpar;
-
- char cmnd[MAX_COMMAND_SIZE];
- memset(cmnd, 0xff, 12);
- memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
-
- TRACE2(("gdth_set_asc_info() ha %d\n",ha->hanum));
- orig_length = length + 5;
- drive = -1;
- wb_mode = 0;
- found = FALSE;
-
- if (length >= 5 && strncmp(buffer,"flush",5)==0) {
- buffer += 6;
- length -= 6;
- if (length && *buffer>='0' && *buffer<='9') {
- drive = (int)(*buffer-'0');
- ++buffer; --length;
- if (length && *buffer>='0' && *buffer<='9') {
- drive = drive*10 + (int)(*buffer-'0');
- ++buffer; --length;
- }
- printk("GDT: Flushing host drive %d .. ",drive);
- } else {
- printk("GDT: Flushing all host drives .. ");
- }
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (ha->hdr[i].present) {
- if (drive != -1 && i != drive)
- continue;
- found = TRUE;
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_FLUSH;
- if (ha->cache_feat & GDT_64BIT) {
- gdtcmd.u.cache64.DeviceNo = i;
- gdtcmd.u.cache64.BlockNo = 1;
- } else {
- gdtcmd.u.cache.DeviceNo = i;
- gdtcmd.u.cache.BlockNo = 1;
- }
-
- gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
- }
- }
- if (!found)
- printk("\nNo host drive found !\n");
- else
- printk("Done.\n");
- return(orig_length);
- }
-
- if (length >= 7 && strncmp(buffer,"wbp_off",7)==0) {
- buffer += 8;
- length -= 8;
- printk("GDT: Disabling write back permanently .. ");
- wb_mode = 1;
- } else if (length >= 6 && strncmp(buffer,"wbp_on",6)==0) {
- buffer += 7;
- length -= 7;
- printk("GDT: Enabling write back permanently .. ");
- wb_mode = 2;
- } else if (length >= 6 && strncmp(buffer,"wb_off",6)==0) {
- buffer += 7;
- length -= 7;
- printk("GDT: Disabling write back commands .. ");
- if (ha->cache_feat & GDT_WR_THROUGH) {
- gdth_write_through = TRUE;
- printk("Done.\n");
- } else {
- printk("Not supported !\n");
- }
- return(orig_length);
- } else if (length >= 5 && strncmp(buffer,"wb_on",5)==0) {
- buffer += 6;
- length -= 6;
- printk("GDT: Enabling write back commands .. ");
- gdth_write_through = FALSE;
- printk("Done.\n");
- return(orig_length);
- }
-
- if (wb_mode) {
- unsigned long flags;
-
- BUILD_BUG_ON(sizeof(gdth_cpar_str) > GDTH_SCRATCH);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- if (ha->scratch_busy) {
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- return -EBUSY;
- }
- ha->scratch_busy = TRUE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- pcpar = (gdth_cpar_str *)ha->pscratch;
- memcpy( pcpar, &ha->cpar, sizeof(gdth_cpar_str) );
- gdtcmd.Service = CACHESERVICE;
- gdtcmd.OpCode = GDT_IOCTL;
- gdtcmd.u.ioctl.p_param = ha->scratch_phys;
- gdtcmd.u.ioctl.param_size = sizeof(gdth_cpar_str);
- gdtcmd.u.ioctl.subfunc = CACHE_CONFIG;
- gdtcmd.u.ioctl.channel = INVALID_CHANNEL;
- pcpar->write_back = wb_mode==1 ? 0:1;
-
- gdth_execute(host, &gdtcmd, cmnd, 30, NULL);
-
- spin_lock_irqsave(&ha->smp_lock, flags);
- ha->scratch_busy = FALSE;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-
- printk("Done.\n");
- return(orig_length);
- }
-
- printk("GDT: Unknown command: %s Length: %d\n",buffer,length);
- return(-EINVAL);
-}
-
-int gdth_show_info(struct seq_file *m, struct Scsi_Host *host)
-{
- gdth_ha_str *ha = shost_priv(host);
- int hlen;
- int id, i, j, k, sec, flag;
- int no_mdrv = 0, drv_no, is_mirr;
- u32 cnt;
- dma_addr_t paddr;
- int rc = -ENOMEM;
-
- gdth_cmd_str *gdtcmd;
- gdth_evt_str *estr;
- char hrec[277];
-
- char *buf;
- gdth_dskstat_str *pds;
- gdth_diskinfo_str *pdi;
- gdth_arrayinf_str *pai;
- gdth_defcnt_str *pdef;
- gdth_cdrinfo_str *pcdi;
- gdth_hget_str *phg;
- char cmnd[MAX_COMMAND_SIZE];
-
- gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
- estr = kmalloc(sizeof(*estr), GFP_KERNEL);
- if (!gdtcmd || !estr)
- goto free_fail;
-
- memset(cmnd, 0xff, 12);
- memset(gdtcmd, 0, sizeof(gdth_cmd_str));
-
- TRACE2(("gdth_get_info() ha %d\n",ha->hanum));
-
-
- /* request is i.e. "cat /proc/scsi/gdth/0" */
- /* format: %-15s\t%-10s\t%-15s\t%s */
- /* driver parameters */
- seq_puts(m, "Driver Parameters:\n");
- if (reserve_list[0] == 0xff)
- strcpy(hrec, "--");
- else {
- hlen = sprintf(hrec, "%d", reserve_list[0]);
- for (i = 1; i < MAX_RES_ARGS; i++) {
- if (reserve_list[i] == 0xff)
- break;
- hlen += scnprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
- }
- }
- seq_printf(m,
- " reserve_mode: \t%d \treserve_list: \t%s\n",
- reserve_mode, hrec);
- seq_printf(m,
- " max_ids: \t%-3d \thdr_channel: \t%d\n",
- max_ids, hdr_channel);
-
- /* controller information */
- seq_puts(m, "\nDisk Array Controller Information:\n");
- seq_printf(m,
- " Number: \t%d \tName: \t%s\n",
- ha->hanum, ha->binfo.type_string);
-
- seq_printf(m,
- " Driver Ver.: \t%-10s\tFirmware Ver.: \t",
- GDTH_VERSION_STR);
- if (ha->more_proc)
- seq_printf(m, "%d.%02d.%02d-%c%03X\n",
- (u8)(ha->binfo.upd_fw_ver>>24),
- (u8)(ha->binfo.upd_fw_ver>>16),
- (u8)(ha->binfo.upd_fw_ver),
- ha->bfeat.raid ? 'R':'N',
- ha->binfo.upd_revision);
- else
- seq_printf(m, "%d.%02d\n", (u8)(ha->cpar.version>>8),
- (u8)(ha->cpar.version));
-
- if (ha->more_proc)
- /* more information: 1. about controller */
- seq_printf(m,
- " Serial No.: \t0x%8X\tCache RAM size:\t%d KB\n",
- ha->binfo.ser_no, ha->binfo.memsize / 1024);
-
- if (ha->more_proc) {
- size_t size = max_t(size_t, GDTH_SCRATCH, sizeof(gdth_hget_str));
-
- /* more information: 2. about physical devices */
- seq_puts(m, "\nPhysical Devices:");
- flag = FALSE;
-
- buf = dma_alloc_coherent(&ha->pdev->dev, size, &paddr, GFP_KERNEL);
- if (!buf)
- goto stop_output;
- for (i = 0; i < ha->bus_cnt; ++i) {
- /* 2.a statistics (and retries/reassigns) */
- TRACE2(("pdr_statistics() chn %d\n",i));
- pds = (gdth_dskstat_str *)(buf + GDTH_SCRATCH/4);
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr + GDTH_SCRATCH/4;
- gdtcmd->u.ioctl.param_size = 3*GDTH_SCRATCH/4;
- gdtcmd->u.ioctl.subfunc = DSK_STATISTICS | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = ha->raw[i].address | INVALID_CHANNEL;
- pds->bid = ha->raw[i].local_no;
- pds->first = 0;
- pds->entries = ha->raw[i].pdev_cnt;
- cnt = (3*GDTH_SCRATCH/4 - 5 * sizeof(u32)) /
- sizeof(pds->list[0]);
- if (pds->entries > cnt)
- pds->entries = cnt;
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
- pds->count = 0;
-
- /* other IOCTLs must fit into area GDTH_SCRATCH/4 */
- for (j = 0; j < ha->raw[i].pdev_cnt; ++j) {
- /* 2.b drive info */
- TRACE2(("scsi_drv_info() chn %d dev %d\n",
- i, ha->raw[i].id_list[j]));
- pdi = (gdth_diskinfo_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_diskinfo_str);
- gdtcmd->u.ioctl.subfunc = SCSI_DR_INFO | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel =
- ha->raw[i].address | ha->raw[i].id_list[j];
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- strncpy(hrec,pdi->vendor,8);
- strncpy(hrec+8,pdi->product,16);
- strncpy(hrec+24,pdi->revision,4);
- hrec[28] = 0;
- seq_printf(m,
- "\n Chn/ID/LUN: \t%c/%02d/%d \tName: \t%s\n",
- 'A'+i,pdi->target_id,pdi->lun,hrec);
- flag = TRUE;
- pdi->no_ldrive &= 0xffff;
- if (pdi->no_ldrive == 0xffff)
- strcpy(hrec,"--");
- else
- sprintf(hrec,"%d",pdi->no_ldrive);
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tTo Log. Drive: \t%s\n",
- pdi->blkcnt/(1024*1024/pdi->blksize),
- hrec);
- } else {
- pdi->devtype = 0xff;
- }
-
- if (pdi->devtype == 0) {
- /* search retries/reassigns */
- for (k = 0; k < pds->count; ++k) {
- if (pds->list[k].tid == pdi->target_id &&
- pds->list[k].lun == pdi->lun) {
- seq_printf(m,
- " Retries: \t%-6d \tReassigns: \t%d\n",
- pds->list[k].retries,
- pds->list[k].reassigns);
- break;
- }
- }
- /* 2.c grown defects */
- TRACE2(("scsi_drv_defcnt() chn %d dev %d\n",
- i, ha->raw[i].id_list[j]));
- pdef = (gdth_defcnt_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_defcnt_str);
- gdtcmd->u.ioctl.subfunc = SCSI_DEF_CNT | L_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel =
- ha->raw[i].address | ha->raw[i].id_list[j];
- pdef->sddc_type = 0x08;
-
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- seq_printf(m,
- " Grown Defects:\t%d\n",
- pdef->sddc_cnt);
- }
- }
- }
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 3. about logical drives */
- seq_puts(m, "\nLogical Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!ha->hdr[i].is_logdrv)
- continue;
- drv_no = i;
- j = k = 0;
- is_mirr = FALSE;
- do {
- /* 3.a log. drive info */
- TRACE2(("cache_drv_info() drive no %d\n",drv_no));
- pcdi = (gdth_cdrinfo_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_cdrinfo_str);
- gdtcmd->u.ioctl.subfunc = CACHE_DRV_INFO;
- gdtcmd->u.ioctl.channel = drv_no;
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) != S_OK)
- break;
- pcdi->ld_dtype >>= 16;
- j++;
- if (pcdi->ld_dtype > 2) {
- strcpy(hrec, "missing");
- } else if (pcdi->ld_error & 1) {
- strcpy(hrec, "fault");
- } else if (pcdi->ld_error & 2) {
- strcpy(hrec, "invalid");
- k++; j--;
- } else {
- strcpy(hrec, "ok");
- }
-
- if (drv_no == i) {
- seq_printf(m,
- "\n Number: \t%-2d \tStatus: \t%s\n",
- drv_no, hrec);
- flag = TRUE;
- no_mdrv = pcdi->cd_ldcnt;
- if (no_mdrv > 1 || pcdi->ld_slave != -1) {
- is_mirr = TRUE;
- strcpy(hrec, "RAID-1");
- } else if (pcdi->ld_dtype == 0) {
- strcpy(hrec, "Disk");
- } else if (pcdi->ld_dtype == 1) {
- strcpy(hrec, "RAID-0");
- } else if (pcdi->ld_dtype == 2) {
- strcpy(hrec, "Chain");
- } else {
- strcpy(hrec, "???");
- }
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tType: \t%s\n",
- pcdi->ld_blkcnt/(1024*1024/pcdi->ld_blksize),
- hrec);
- } else {
- seq_printf(m,
- " Slave Number: \t%-2d \tStatus: \t%s\n",
- drv_no & 0x7fff, hrec);
- }
- drv_no = pcdi->ld_slave;
- } while (drv_no != -1);
-
- if (is_mirr)
- seq_printf(m,
- " Missing Drv.: \t%-2d \tInvalid Drv.: \t%d\n",
- no_mdrv - j - k, k);
-
- if (!ha->hdr[i].is_arraydrv)
- strcpy(hrec, "--");
- else
- sprintf(hrec, "%d", ha->hdr[i].master_no);
- seq_printf(m,
- " To Array Drv.:\t%s\n", hrec);
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 4. about array drives */
- seq_puts(m, "\nArray Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!(ha->hdr[i].is_arraydrv && ha->hdr[i].is_master))
- continue;
- /* 4.a array drive info */
- TRACE2(("array_info() drive no %d\n",i));
- pai = (gdth_arrayinf_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_arrayinf_str);
- gdtcmd->u.ioctl.subfunc = ARRAY_INFO | LA_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = i;
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- if (pai->ai_state == 0)
- strcpy(hrec, "idle");
- else if (pai->ai_state == 2)
- strcpy(hrec, "build");
- else if (pai->ai_state == 4)
- strcpy(hrec, "ready");
- else if (pai->ai_state == 6)
- strcpy(hrec, "fail");
- else if (pai->ai_state == 8 || pai->ai_state == 10)
- strcpy(hrec, "rebuild");
- else
- strcpy(hrec, "error");
- if (pai->ai_ext_state & 0x10)
- strcat(hrec, "/expand");
- else if (pai->ai_ext_state & 0x1)
- strcat(hrec, "/patch");
- seq_printf(m,
- "\n Number: \t%-2d \tStatus: \t%s\n",
- i,hrec);
- flag = TRUE;
-
- if (pai->ai_type == 0)
- strcpy(hrec, "RAID-0");
- else if (pai->ai_type == 4)
- strcpy(hrec, "RAID-4");
- else if (pai->ai_type == 5)
- strcpy(hrec, "RAID-5");
- else
- strcpy(hrec, "RAID-10");
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tType: \t%s\n",
- pai->ai_size/(1024*1024/pai->ai_secsize),
- hrec);
- }
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
-
- /* 5. about host drives */
- seq_puts(m, "\nHost Drives:");
- flag = FALSE;
-
- for (i = 0; i < MAX_LDRIVES; ++i) {
- if (!ha->hdr[i].is_logdrv ||
- (ha->hdr[i].is_arraydrv && !ha->hdr[i].is_master))
- continue;
- /* 5.a get host drive list */
- TRACE2(("host_get() drv_no %d\n",i));
- phg = (gdth_hget_str *)buf;
- gdtcmd->Service = CACHESERVICE;
- gdtcmd->OpCode = GDT_IOCTL;
- gdtcmd->u.ioctl.p_param = paddr;
- gdtcmd->u.ioctl.param_size = sizeof(gdth_hget_str);
- gdtcmd->u.ioctl.subfunc = HOST_GET | LA_CTRL_PATTERN;
- gdtcmd->u.ioctl.channel = i;
- phg->entries = MAX_HDRIVES;
- phg->offset = GDTOFFSOF(gdth_hget_str, entry[0]);
- if (gdth_execute(host, gdtcmd, cmnd, 30, NULL) == S_OK) {
- ha->hdr[i].ldr_no = i;
- ha->hdr[i].rw_attribs = 0;
- ha->hdr[i].start_sec = 0;
- } else {
- for (j = 0; j < phg->entries; ++j) {
- k = phg->entry[j].host_drive;
- if (k >= MAX_LDRIVES)
- continue;
- ha->hdr[k].ldr_no = phg->entry[j].log_drive;
- ha->hdr[k].rw_attribs = phg->entry[j].rw_attribs;
- ha->hdr[k].start_sec = phg->entry[j].start_sec;
- }
- }
- }
- dma_free_coherent(&ha->pdev->dev, size, buf, paddr);
-
- for (i = 0; i < MAX_HDRIVES; ++i) {
- if (!(ha->hdr[i].present))
- continue;
-
- seq_printf(m,
- "\n Number: \t%-2d \tArr/Log. Drive:\t%d\n",
- i, ha->hdr[i].ldr_no);
- flag = TRUE;
-
- seq_printf(m,
- " Capacity [MB]:\t%-6d \tStart Sector: \t%d\n",
- (u32)(ha->hdr[i].size/2048), ha->hdr[i].start_sec);
- }
-
- if (!flag)
- seq_puts(m, "\n --\n");
- }
-
- /* controller events */
- seq_puts(m, "\nController Events:\n");
-
- for (id = -1;;) {
- id = gdth_read_event(ha, id, estr);
- if (estr->event_source == 0)
- break;
- if (estr->event_data.eu.driver.ionode == ha->hanum &&
- estr->event_source == ES_ASYNC) {
- gdth_log_event(&estr->event_data, hrec);
-
- /*
- * Elapsed seconds subtraction with unsigned operands is
- * safe from wrap around in year 2106. Executes as:
- * operand a + (2's complement operand b) + 1
- */
-
- sec = (int)((u32)ktime_get_real_seconds() - estr->first_stamp);
- if (sec < 0) sec = 0;
- seq_printf(m," date- %02d:%02d:%02d\t%s\n",
- sec/3600, sec%3600/60, sec%60, hrec);
- }
- if (id == -1)
- break;
- }
-stop_output:
- rc = 0;
-free_fail:
- kfree(gdtcmd);
- kfree(estr);
- return rc;
-}
-
-static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id)
-{
- unsigned long flags;
- int i;
- struct scsi_cmnd *scp;
- struct gdth_cmndinfo *cmndinfo;
- u8 b, t;
-
- spin_lock_irqsave(&ha->smp_lock, flags);
-
- for (i = 0; i < GDTH_MAXCMDS; ++i) {
- scp = ha->cmd_tab[i].cmnd;
- cmndinfo = gdth_cmnd_priv(scp);
-
- b = scp->device->channel;
- t = scp->device->id;
- if (!SPECIAL_SCP(scp) && t == (u8)id &&
- b == (u8)busnum) {
- cmndinfo->wait_for_completion = 0;
- spin_unlock_irqrestore(&ha->smp_lock, flags);
- while (!cmndinfo->wait_for_completion)
- barrier();
- spin_lock_irqsave(&ha->smp_lock, flags);
- }
- }
- spin_unlock_irqrestore(&ha->smp_lock, flags);
-}
diff --git a/drivers/scsi/gdth_proc.h b/drivers/scsi/gdth_proc.h
deleted file mode 100644
index 4cc5377cb92e..000000000000
--- a/drivers/scsi/gdth_proc.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _GDTH_PROC_H
-#define _GDTH_PROC_H
-
-/* gdth_proc.h
- * $Id: gdth_proc.h,v 1.16 2004/01/14 13:09:01 achim Exp $
- */
-
-int gdth_execute(struct Scsi_Host *shost, gdth_cmd_str *gdtcmd, char *cmnd,
- int timeout, u32 *info);
-
-static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
- int length, gdth_ha_str *ha);
-
-static void gdth_wait_completion(gdth_ha_str *ha, int busnum, int id);
-
-#endif
-
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index b8148b1733f8..4ba3a8eadb77 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -18,3 +18,9 @@ config SCSI_HISI_SAS_PCI
depends on ACPI
help
This driver supports HiSilicon's SAS HBA based on PCI device
+
+config SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE
+ bool "HiSilicon SAS debugging default enable"
+ depends on SCSI_HISI_SAS
+ help
+	  Set Y to enable debugfs support for SCSI_HISI_SAS by default.
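
Editorial note on how this new Kconfig bool takes effect: the hisi_sas_main.c hunk later in this patch folds it into compile-time defaults via IS_ENABLED(). A condensed sketch of that mechanism, restating the later hunk rather than adding anything to the patch itself:

#include <linux/kconfig.h>
#include <linux/module.h>
#include <linux/types.h>

#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
#define DEBUGFS_ENABLE_DEFAULT "enabled"
bool hisi_sas_debugfs_enable = true;		/* debugfs on by default */
u32 hisi_sas_debugfs_dump_count = 50;		/* allow more dumps when enabled */
#else
#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;			/* debugfs off by default */
u32 hisi_sas_debugfs_dump_count = 1;
#endif

/* Adjacent string literals concatenate, so the help text tracks the default. */
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_enable,
		 "Enable driver debugfs (default " DEBUGFS_ENABLE_DEFAULT ")");
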
diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h
index e821dd32dd28..2401a9575215 100644
--- a/drivers/scsi/hisi_sas/hisi_sas.h
+++ b/drivers/scsi/hisi_sas/hisi_sas.h
@@ -44,6 +44,7 @@
#define HISI_SAS_IOST_ITCT_CACHE_NUM 64
#define HISI_SAS_IOST_ITCT_CACHE_DW_SZ 10
+#define HISI_SAS_FIFO_DATA_DW_SIZE 32
#define HISI_SAS_STATUS_BUF_SZ (sizeof(struct hisi_sas_status_buffer))
#define HISI_SAS_COMMAND_TABLE_SZ (sizeof(union hisi_sas_command_table))
@@ -154,6 +155,16 @@ enum hisi_sas_phy_event {
HISI_PHYES_NUM,
};
+struct hisi_sas_debugfs_fifo {
+ u32 signal_sel;
+ u32 dump_msk;
+ u32 dump_mode;
+ u32 trigger;
+ u32 trigger_msk;
+ u32 trigger_mode;
+ u32 rd_data[HISI_SAS_FIFO_DATA_DW_SIZE];
+};
+
struct hisi_sas_phy {
struct work_struct works[HISI_PHYES_NUM];
struct hisi_hba *hisi_hba;
@@ -175,6 +186,9 @@ struct hisi_sas_phy {
enum sas_linkrate maximum_linkrate;
int enable;
atomic_t down_cnt;
+
+ /* Trace FIFO */
+ struct hisi_sas_debugfs_fifo fifo;
};
struct hisi_sas_port {
@@ -474,6 +488,7 @@ struct hisi_hba {
struct dentry *debugfs_dir;
struct dentry *debugfs_dump_dentry;
struct dentry *debugfs_bist_dentry;
+ struct dentry *debugfs_fifo_dentry;
};
/* Generic HW DMA host memory structures */
@@ -637,7 +652,8 @@ extern void hisi_sas_scan_start(struct Scsi_Host *shost);
extern int hisi_sas_host_reset(struct Scsi_Host *shost, int reset_type);
extern void hisi_sas_phy_enable(struct hisi_hba *hisi_hba, int phy_no,
int enable);
-extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy);
+extern void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
+ gfp_t gfp_flags);
extern void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba,
struct sas_task *task,
struct hisi_sas_slot *slot);
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index cf0bfac920a8..a979edfd9a78 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -445,21 +445,19 @@ static int hisi_sas_task_prep(struct sas_task *task,
}
}
- if (scmd && hisi_hba->shost->nr_hw_queues) {
+ if (scmd) {
unsigned int dq_index;
u32 blk_tag;
blk_tag = blk_mq_unique_tag(scmd->request);
dq_index = blk_mq_unique_tag_to_hwq(blk_tag);
*dq_pointer = dq = &hisi_hba->dq[dq_index];
- } else if (hisi_hba->shost->nr_hw_queues) {
+ } else {
struct Scsi_Host *shost = hisi_hba->shost;
struct blk_mq_queue_map *qmap = &shost->tag_set.map[HCTX_TYPE_DEFAULT];
int queue = qmap->mq_map[raw_smp_processor_id()];
*dq_pointer = dq = &hisi_hba->dq[queue];
- } else {
- *dq_pointer = dq = sas_dev->dq;
}
port = to_hisi_sas_port(sas_port);
@@ -612,11 +610,11 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags,
return rc;
}
-static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
+static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no,
+ gfp_t gfp_flags)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha;
if (!phy->phy_attached)
return;
@@ -627,8 +625,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
return;
}
- sas_ha = &hisi_hba->sha;
- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
struct sas_phy *sphy = sas_phy->phy;
@@ -656,7 +653,7 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
}
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- sas_ha->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}
static struct hisi_sas_device *hisi_sas_alloc_dev(struct domain_device *device)
@@ -862,7 +859,7 @@ static void hisi_sas_phyup_work(struct work_struct *work)
if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
- hisi_sas_bytes_dmaed(hisi_hba, phy_no);
+ hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);
}
static void hisi_sas_linkreset_work(struct work_struct *work)
@@ -1411,7 +1408,6 @@ static void hisi_sas_refresh_port_id(struct hisi_hba *hisi_hba)
static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
{
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct asd_sas_port *_sas_port = NULL;
int phy_no;
@@ -1432,11 +1428,12 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
_sas_port = sas_port;
if (dev_is_expander(dev->dev_type))
- sas_ha->notify_port_event(sas_phy,
- PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD,
+ GFP_KERNEL);
}
} else {
- hisi_sas_phy_down(hisi_hba, phy_no, 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, 0, GFP_KERNEL);
}
}
}
@@ -1790,7 +1787,7 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
/* report PHY down if timed out */
if (!ret)
- hisi_sas_phy_down(hisi_hba, sas_phy->id, 0);
+ hisi_sas_phy_down(hisi_hba, sas_phy->id, 0, GFP_KERNEL);
} else if (sas_dev->dev_status != HISI_SAS_DEV_INIT) {
/*
* If in init state, we rely on caller to wait for link to be
@@ -2190,16 +2187,16 @@ static void hisi_sas_phy_disconnected(struct hisi_sas_phy *phy)
spin_unlock_irqrestore(&phy->lock, flags);
}
-void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
+void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy,
+ gfp_t gfp_flags)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
if (rdy) {
/* Phy down but ready */
- hisi_sas_bytes_dmaed(hisi_hba, phy_no);
+ hisi_sas_bytes_dmaed(hisi_hba, phy_no, gfp_flags);
hisi_sas_port_notify_formed(sas_phy);
} else {
struct hisi_sas_port *port = phy->port;
@@ -2210,7 +2207,7 @@ void hisi_sas_phy_down(struct hisi_hba *hisi_hba, int phy_no, int rdy)
return;
}
/* Phy down and not ready */
- sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
sas_phy_disconnected(sas_phy);
if (port) {
@@ -2725,12 +2722,21 @@ int hisi_sas_remove(struct platform_device *pdev)
}
EXPORT_SYMBOL_GPL(hisi_sas_remove);
+#if IS_ENABLED(CONFIG_SCSI_HISI_SAS_DEBUGFS_DEFAULT_ENABLE)
+#define DEBUGFS_ENABLE_DEFAULT "enabled"
+bool hisi_sas_debugfs_enable = true;
+u32 hisi_sas_debugfs_dump_count = 50;
+#else
+#define DEBUGFS_ENABLE_DEFAULT "disabled"
bool hisi_sas_debugfs_enable;
+u32 hisi_sas_debugfs_dump_count = 1;
+#endif
+
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_enable);
module_param_named(debugfs_enable, hisi_sas_debugfs_enable, bool, 0444);
-MODULE_PARM_DESC(hisi_sas_debugfs_enable, "Enable driver debugfs (default disabled)");
+MODULE_PARM_DESC(hisi_sas_debugfs_enable,
+ "Enable driver debugfs (default "DEBUGFS_ENABLE_DEFAULT")");
-u32 hisi_sas_debugfs_dump_count = 1;
EXPORT_SYMBOL_GPL(hisi_sas_debugfs_dump_count);
module_param_named(debugfs_dump_count, hisi_sas_debugfs_dump_count, uint, 0444);
MODULE_PARM_DESC(hisi_sas_debugfs_dump_count, "Number of debugfs dumps to allow");
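
The hunks above (and the v1/v2/v3 _hw.c hunks that follow) all apply one pattern: each libsas notification now carries an explicit gfp_t — GFP_ATOMIC from hard-IRQ handlers, GFP_KERNEL from process context (workqueue, rescan and reset paths) — forwarded unchanged to the sas_notify_phy_event()/sas_notify_port_event() helpers introduced by this series. A minimal sketch of that calling convention; the example_* functions are illustrative stand-ins, not code from the patch:

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <scsi/libsas.h>

/* Forward the caller's allocation context to the libsas notifiers. */
static void example_phy_down(struct asd_sas_phy *sas_phy, int rdy,
			     gfp_t gfp_flags)
{
	if (rdy)
		/* PHY down but ready: report the port event */
		sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
	else
		/* PHY down and not ready: report loss of signal */
		sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, gfp_flags);
}

/* Hard-IRQ context: sleeping is not allowed, so pass GFP_ATOMIC. */
static irqreturn_t example_int_phy_down(int irq, void *p)
{
	struct asd_sas_phy *sas_phy = p;

	example_phy_down(sas_phy, 0, GFP_ATOMIC);
	return IRQ_HANDLED;
}

/* Process context (workqueue/rescan): sleeping is fine, so pass GFP_KERNEL. */
static void example_rescan_work(struct asd_sas_phy *sas_phy)
{
	example_phy_down(sas_phy, 0, GFP_KERNEL);
}
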
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 45e866cb9164..7451377c4cb6 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1408,7 +1408,6 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
struct hisi_sas_phy *phy = p;
struct hisi_hba *hisi_hba = phy->hisi_hba;
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sha = &hisi_hba->sha;
struct device *dev = hisi_hba->dev;
int phy_no = sas_phy->id;
u32 irq_value;
@@ -1424,7 +1423,8 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
}
if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
end:
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT2,
@@ -1453,7 +1453,8 @@ static irqreturn_t int_abnormal_v1_hw(int irq, void *p)
u32 phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
hisi_sas_phy_down(hisi_hba, phy_no,
- (phy_state & 1 << phy_no) ? 1 : 0);
+ (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
}
if (irq_value & CHL_INT0_ID_TIMEOUT_MSK)
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
index 9adfdefef9ca..46f60fc2a069 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c
@@ -2734,7 +2734,8 @@ static int phy_down_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
- hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
@@ -2818,14 +2819,14 @@ static void phy_bcast_v2_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -3626,18 +3627,6 @@ static const struct hisi_sas_hw hisi_sas_v2_hw = {
static int hisi_sas_v2_probe(struct platform_device *pdev)
{
- /*
- * Check if we should defer the probe before we probe the
- * upper layer, as it's hard to defer later on.
- */
- int ret = platform_get_irq(pdev, 0);
-
- if (ret < 0) {
- if (ret != -EPROBE_DEFER)
- dev_err(&pdev->dev, "cannot obtain irq\n");
- return ret;
- }
-
return hisi_sas_probe(pdev, &hisi_sas_v2_hw);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 7c12804b4e1d..4580e081e489 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -303,6 +303,19 @@
#define ERR_CNT_INVLD_DW (PORT_BASE + 0x390)
#define ERR_CNT_CODE_ERR (PORT_BASE + 0x394)
#define ERR_CNT_DISP_ERR (PORT_BASE + 0x398)
+#define DFX_FIFO_CTRL (PORT_BASE + 0x3a0)
+#define DFX_FIFO_CTRL_TRIGGER_MODE_OFF 0
+#define DFX_FIFO_CTRL_TRIGGER_MODE_MSK (0x7 << DFX_FIFO_CTRL_TRIGGER_MODE_OFF)
+#define DFX_FIFO_CTRL_DUMP_MODE_OFF 3
+#define DFX_FIFO_CTRL_DUMP_MODE_MSK (0x7 << DFX_FIFO_CTRL_DUMP_MODE_OFF)
+#define DFX_FIFO_CTRL_SIGNAL_SEL_OFF 6
+#define DFX_FIFO_CTRL_SIGNAL_SEL_MSK (0xF << DFX_FIFO_CTRL_SIGNAL_SEL_OFF)
+#define DFX_FIFO_CTRL_DUMP_DISABLE_OFF 10
+#define DFX_FIFO_CTRL_DUMP_DISABLE_MSK (0x1 << DFX_FIFO_CTRL_DUMP_DISABLE_OFF)
+#define DFX_FIFO_TRIGGER (PORT_BASE + 0x3a4)
+#define DFX_FIFO_TRIGGER_MSK (PORT_BASE + 0x3a8)
+#define DFX_FIFO_DUMP_MSK (PORT_BASE + 0x3aC)
+#define DFX_FIFO_RD_DATA (PORT_BASE + 0x3b0)
#define DEFAULT_ITCT_HW 2048 /* reset value, not reprogrammed */
#if (HISI_SAS_MAX_DEVICES > DEFAULT_ITCT_HW)
@@ -517,11 +530,6 @@ static int prot_mask;
module_param(prot_mask, int, 0);
MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=0x0 ");
-static bool auto_affine_msi_experimental;
-module_param(auto_affine_msi_experimental, bool, 0444);
-MODULE_PARM_DESC(auto_affine_msi_experimental, "Enable auto-affinity of MSI IRQs as experimental:\n"
- "default is off");
-
static void debugfs_work_handler_v3_hw(struct work_struct *work);
static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off)
@@ -1580,7 +1588,8 @@ static irqreturn_t phy_down_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
phy_state = hisi_sas_read32(hisi_hba, PHY_STATE);
dev_info(dev, "phydown: phy%d phy_state=0x%x\n", phy_no, phy_state);
- hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0);
+ hisi_sas_phy_down(hisi_hba, phy_no, (phy_state & 1 << phy_no) ? 1 : 0,
+ GFP_ATOMIC);
sl_ctrl = hisi_sas_phy_read32(hisi_hba, phy_no, SL_CONTROL);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_CONTROL,
@@ -1600,14 +1609,14 @@ static irqreturn_t phy_bcast_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
{
struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha = &hisi_hba->sha;
u32 bcast_status;
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 1);
bcast_status = hisi_sas_phy_read32(hisi_hba, phy_no, RX_PRIMS_STATUS);
if ((bcast_status & RX_BCAST_CHG_MSK) &&
!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags))
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
hisi_sas_phy_write32(hisi_hba, phy_no, CHL_INT0,
CHL_INT0_SL_RX_BCST_ACK_MSK);
hisi_sas_phy_write32(hisi_hba, phy_no, SL_RX_BCAST_CHK_MSK, 0);
@@ -4157,6 +4166,243 @@ static const struct file_operations debugfs_phy_down_cnt_v3_hw_fops = {
.owner = THIS_MODULE,
};
+enum fifo_dump_mode_v3_hw {
+ FIFO_DUMP_FORVER = (1U << 0),
+ FIFO_DUMP_AFTER_TRIGGER = (1U << 1),
+ FIFO_DUMP_UNTILL_TRIGGER = (1U << 2),
+};
+
+enum fifo_trigger_mode_v3_hw {
+ FIFO_TRIGGER_EDGE = (1U << 0),
+ FIFO_TRIGGER_SAME_LEVEL = (1U << 1),
+ FIFO_TRIGGER_DIFF_LEVEL = (1U << 2),
+};
+
+static int debugfs_is_fifo_config_valid_v3_hw(struct hisi_sas_phy *phy)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+
+ if (phy->fifo.signal_sel > 0xf) {
+ dev_info(hisi_hba->dev, "Invalid signal select: %u\n",
+ phy->fifo.signal_sel);
+ return -EINVAL;
+ }
+
+ switch (phy->fifo.dump_mode) {
+ case FIFO_DUMP_FORVER:
+ case FIFO_DUMP_AFTER_TRIGGER:
+ case FIFO_DUMP_UNTILL_TRIGGER:
+ break;
+ default:
+ dev_info(hisi_hba->dev, "Invalid dump mode: %u\n",
+ phy->fifo.dump_mode);
+ return -EINVAL;
+ }
+
+ /* when FIFO_DUMP_FORVER, no need to check trigger_mode */
+ if (phy->fifo.dump_mode == FIFO_DUMP_FORVER)
+ return 0;
+
+ switch (phy->fifo.trigger_mode) {
+ case FIFO_TRIGGER_EDGE:
+ case FIFO_TRIGGER_SAME_LEVEL:
+ case FIFO_TRIGGER_DIFF_LEVEL:
+ break;
+ default:
+ dev_info(hisi_hba->dev, "Invalid trigger mode: %u\n",
+ phy->fifo.trigger_mode);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int debugfs_update_fifo_config_v3_hw(struct hisi_sas_phy *phy)
+{
+ u32 trigger_mode = phy->fifo.trigger_mode;
+ u32 signal_sel = phy->fifo.signal_sel;
+ u32 dump_mode = phy->fifo.dump_mode;
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ int phy_no = phy->sas_phy.id;
+ u32 reg_val;
+ int res;
+
+ /* Check the validity of trace FIFO configuration */
+ res = debugfs_is_fifo_config_valid_v3_hw(phy);
+ if (res)
+ return res;
+
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ /* Disable trace FIFO before update configuration */
+ reg_val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+
+ /* Update trace FIFO configuration */
+ reg_val &= ~(DFX_FIFO_CTRL_DUMP_MODE_MSK |
+ DFX_FIFO_CTRL_SIGNAL_SEL_MSK |
+ DFX_FIFO_CTRL_TRIGGER_MODE_MSK);
+
+ reg_val |= ((trigger_mode << DFX_FIFO_CTRL_TRIGGER_MODE_OFF) |
+ (dump_mode << DFX_FIFO_CTRL_DUMP_MODE_OFF) |
+ (signal_sel << DFX_FIFO_CTRL_SIGNAL_SEL_OFF));
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK,
+ phy->fifo.dump_msk);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER,
+ phy->fifo.trigger);
+
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK,
+ phy->fifo.trigger_msk);
+
+ /* Enable trace FIFO after updated configuration */
+ reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ reg_val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, reg_val);
+
+ return 0;
+}
+
+static ssize_t debugfs_fifo_update_cfg_v3_hw_write(struct file *filp,
+ const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct hisi_sas_phy *phy = filp->private_data;
+ bool update;
+ int val;
+
+ val = kstrtobool_from_user(buf, count, &update);
+ if (val)
+ return val;
+
+ if (update != 1)
+ return -EINVAL;
+
+ val = debugfs_update_fifo_config_v3_hw(phy);
+ if (val)
+ return val;
+
+ return count;
+}
+
+static const struct file_operations debugfs_fifo_update_cfg_v3_hw_fops = {
+ .open = simple_open,
+ .write = debugfs_fifo_update_cfg_v3_hw_write,
+ .owner = THIS_MODULE,
+};
+
+static void debugfs_read_fifo_data_v3_hw(struct hisi_sas_phy *phy)
+{
+ struct hisi_hba *hisi_hba = phy->hisi_hba;
+ u32 *buf = phy->fifo.rd_data;
+ int phy_no = phy->sas_phy.id;
+ u32 val;
+ int i;
+
+ memset(buf, 0, sizeof(phy->fifo.rd_data));
+
+ /* Disable trace FIFO before reading data */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val |= DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);
+
+ for (i = 0; i < HISI_SAS_FIFO_DATA_DW_SIZE; i++) {
+ val = hisi_sas_phy_read32(hisi_hba, phy_no,
+ DFX_FIFO_RD_DATA);
+ buf[i] = val;
+ }
+
+ /* Enable trace FIFO after reading data */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= ~DFX_FIFO_CTRL_DUMP_DISABLE_MSK;
+ hisi_sas_phy_write32(hisi_hba, phy_no, DFX_FIFO_CTRL, val);
+}
+
+static int debugfs_fifo_data_v3_hw_show(struct seq_file *s, void *p)
+{
+ struct hisi_sas_phy *phy = s->private;
+
+ debugfs_read_fifo_data_v3_hw(phy);
+
+ debugfs_show_row_32_v3_hw(s, 0, HISI_SAS_FIFO_DATA_DW_SIZE * 4,
+ phy->fifo.rd_data);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(debugfs_fifo_data_v3_hw);
+
+static void debugfs_fifo_init_v3_hw(struct hisi_hba *hisi_hba)
+{
+ int phy_no;
+
+ hisi_hba->debugfs_fifo_dentry =
+ debugfs_create_dir("fifo", hisi_hba->debugfs_dir);
+
+ for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+ struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];
+ struct dentry *port_dentry;
+ char name[256];
+ u32 val;
+
+ /* get default configuration for trace FIFO */
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_DUMP_MODE_MSK;
+ val >>= DFX_FIFO_CTRL_DUMP_MODE_OFF;
+ phy->fifo.dump_mode = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_TRIGGER_MODE_MSK;
+ val >>= DFX_FIFO_CTRL_TRIGGER_MODE_OFF;
+ phy->fifo.trigger_mode = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_CTRL);
+ val &= DFX_FIFO_CTRL_SIGNAL_SEL_MSK;
+ val >>= DFX_FIFO_CTRL_SIGNAL_SEL_OFF;
+ phy->fifo.signal_sel = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_DUMP_MSK);
+ phy->fifo.dump_msk = val;
+
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER);
+ phy->fifo.trigger = val;
+ val = hisi_sas_phy_read32(hisi_hba, phy_no, DFX_FIFO_TRIGGER_MSK);
+ phy->fifo.trigger_msk = val;
+
+ snprintf(name, 256, "%d", phy_no);
+ port_dentry = debugfs_create_dir(name,
+ hisi_hba->debugfs_fifo_dentry);
+
+ debugfs_create_file("update_config", 0200, port_dentry, phy,
+ &debugfs_fifo_update_cfg_v3_hw_fops);
+
+ debugfs_create_file("signal_sel", 0600, port_dentry,
+ &phy->fifo.signal_sel,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("dump_msk", 0600, port_dentry,
+ &phy->fifo.dump_msk,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("dump_mode", 0600, port_dentry,
+ &phy->fifo.dump_mode,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger_mode", 0600, port_dentry,
+ &phy->fifo.trigger_mode,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger", 0600, port_dentry,
+ &phy->fifo.trigger,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("trigger_msk", 0600, port_dentry,
+ &phy->fifo.trigger_msk,
+ &debugfs_v3_hw_fops);
+
+ debugfs_create_file("fifo_data", 0400, port_dentry, phy,
+ &debugfs_fifo_data_v3_hw_fops);
+ }
+}
+
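For context only (not part of the patch): the layout created above is fifo/<phy_no>/ under the HBA's debugfs directory, with per-phy signal_sel, dump_msk, dump_mode, trigger_mode, trigger, trigger_msk, update_config and fifo_data nodes. A minimal userspace sketch of driving that interface follows; the debugfs mount point and the HBA directory name are assumptions, and the attribute values are purely illustrative.

/*
 * Hypothetical userspace sketch, not part of this patch.  It assumes
 * debugfs is mounted at /sys/kernel/debug and guesses the HBA directory
 * name; only the fifo/<phy_no>/ layout and the attribute names come from
 * debugfs_fifo_init_v3_hw() above.
 */
#include <stdio.h>
#include <stdlib.h>

static void write_attr(const char *dir, const char *attr, const char *val)
{
	char path[256];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, attr);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fprintf(f, "%s\n", val);
	fclose(f);
}

int main(void)
{
	/* Assumed location; adjust to the real debugfs path of the HBA */
	const char *dir = "/sys/kernel/debug/hisi_sas/0000:74:02.0/fifo/0";

	write_attr(dir, "signal_sel", "3");	/* illustrative value */
	write_attr(dir, "dump_mode", "1");	/* FIFO_DUMP_FORVER */
	write_attr(dir, "update_config", "1");	/* commit config to hardware */

	/* Captured data can then be read back from the "fifo_data" node */
	return 0;
}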
static void debugfs_work_handler_v3_hw(struct work_struct *work)
{
struct hisi_hba *hisi_hba =
@@ -4392,6 +4638,7 @@ static void debugfs_init_v3_hw(struct hisi_hba *hisi_hba)
debugfs_create_dir("dump", hisi_hba->debugfs_dir);
debugfs_phy_down_cnt_init_v3_hw(hisi_hba);
+ debugfs_fifo_init_v3_hw(hisi_hba);
for (i = 0; i < hisi_sas_debugfs_dump_count; i++) {
if (debugfs_alloc_v3_hw(hisi_hba, i)) {
@@ -4576,6 +4823,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
del_timer(&hisi_hba->timer);
sas_unregister_ha(sha);
+ flush_workqueue(hisi_hba->wq);
sas_remove_host(sha->core.shost);
hisi_sas_v3_destroy_irqs(pdev, hisi_hba);
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index f4d3747cfa0b..337d3aa91945 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -2396,7 +2396,6 @@ static int handle_ioaccel_mode2_error(struct ctlr_info *h,
break;
case IOACCEL2_STATUS_SR_UNDERRUN:
cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
ioaccel2_resid = get_unaligned_le32(
&c2->error_data.resid_cnt[0]);
scsi_set_resid(cmd, ioaccel2_resid);
@@ -2597,8 +2596,7 @@ static void complete_scsi_command(struct CommandList *cp)
(c2->sg[0].chain_indicator == IOACCEL2_CHAIN))
hpsa_unmap_ioaccel2_sg_chain_block(h, c2);
- cmd->result = (DID_OK << 16); /* host byte */
- cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
+ cmd->result = (DID_OK << 16); /* host byte */
/* SCSI command has already been cleaned up in SML */
if (dev->was_removed) {
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 65f168c41d23..755313b766b9 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -40,6 +40,12 @@ static unsigned int disc_threads = IBMVFC_MAX_DISC_THREADS;
static unsigned int ibmvfc_debug = IBMVFC_DEBUG;
static unsigned int log_level = IBMVFC_DEFAULT_LOG_LEVEL;
static unsigned int cls3_error = IBMVFC_CLS3_ERROR;
+static unsigned int mq_enabled = IBMVFC_MQ;
+static unsigned int nr_scsi_hw_queues = IBMVFC_SCSI_HW_QUEUES;
+static unsigned int nr_scsi_channels = IBMVFC_SCSI_CHANNELS;
+static unsigned int mig_channels_only = IBMVFC_MIG_NO_SUB_TO_CRQ;
+static unsigned int mig_no_less_channels = IBMVFC_MIG_NO_N_TO_M;
+
static LIST_HEAD(ibmvfc_head);
static DEFINE_SPINLOCK(ibmvfc_driver_lock);
static struct scsi_transport_template *ibmvfc_transport_template;
@@ -49,6 +55,22 @@ MODULE_AUTHOR("Brian King <brking@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVFC_DRIVER_VERSION);
+module_param_named(mq, mq_enabled, uint, S_IRUGO);
+MODULE_PARM_DESC(mq, "Enable multiqueue support. "
+ "[Default=" __stringify(IBMVFC_MQ) "]");
+module_param_named(scsi_host_queues, nr_scsi_hw_queues, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_host_queues, "Number of SCSI Host submission queues. "
+ "[Default=" __stringify(IBMVFC_SCSI_HW_QUEUES) "]");
+module_param_named(scsi_hw_channels, nr_scsi_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(scsi_hw_channels, "Number of hw scsi channels to request. "
+ "[Default=" __stringify(IBMVFC_SCSI_CHANNELS) "]");
+module_param_named(mig_channels_only, mig_channels_only, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_channels_only, "Prevent migration to non-channelized system. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_SUB_TO_CRQ) "]");
+module_param_named(mig_no_less_channels, mig_no_less_channels, uint, S_IRUGO);
+MODULE_PARM_DESC(mig_no_less_channels, "Prevent migration to system with less channels. "
+ "[Default=" __stringify(IBMVFC_MIG_NO_N_TO_M) "]");
+
module_param_named(init_timeout, init_timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds. "
"[Default=" __stringify(IBMVFC_INIT_TIMEOUT) "]");
@@ -138,6 +160,20 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *);
static const char *unknown_error = "unknown error";
+static long h_reg_sub_crq(unsigned long unit_address, unsigned long ioba,
+ unsigned long length, unsigned long *cookie,
+ unsigned long *irq)
+{
+ unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ long rc;
+
+ rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, ioba, length);
+ *cookie = retbuf[0];
+ *irq = retbuf[1];
+
+ return rc;
+}
+
static int ibmvfc_check_caps(struct ibmvfc_host *vhost, unsigned long cap_flags)
{
u64 host_caps = be64_to_cpu(vhost->login_buf->resp.capabilities);
@@ -176,8 +212,9 @@ static void ibmvfc_trc_start(struct ibmvfc_event *evt)
struct ibmvfc_mad_common *mad = &evt->iu.mad_common;
struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
- entry = &vhost->trace[vhost->trace_index++];
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
@@ -211,8 +248,10 @@ static void ibmvfc_trc_end(struct ibmvfc_event *evt)
struct ibmvfc_mad_common *mad = &evt->xfer_iu->mad_common;
struct ibmvfc_fcp_cmd_iu *iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
struct ibmvfc_fcp_rsp *rsp = ibmvfc_get_fcp_rsp(vhost, vfc_cmd);
- struct ibmvfc_trace_entry *entry = &vhost->trace[vhost->trace_index++];
+ struct ibmvfc_trace_entry *entry;
+ int index = atomic_inc_return(&vhost->trace_index) & IBMVFC_TRACE_INDEX_MASK;
+ entry = &vhost->trace[index];
entry->evt = evt;
entry->time = jiffies;
entry->fmt = evt->crq.format;
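The change above replaces the unlocked vhost->trace_index++ with an atomic increment masked into the trace ring, so trace starts and ends issued from multiple queues can no longer race on the index. A minimal stand-alone sketch of the idiom, assuming IBMVFC_TRACE_INDEX_MASK is the number of trace entries minus one and that the entry count is a power of two:

/*
 * Illustration only (not driver code) of the masked atomic ring index
 * used by ibmvfc_trc_start()/ibmvfc_trc_end() above.
 */
#include <stdatomic.h>

#define NUM_TRACE_ENTRIES	256		/* must be a power of two */
#define TRACE_INDEX_MASK	(NUM_TRACE_ENTRIES - 1)

struct trace_entry {
	unsigned long time;
};

static struct trace_entry trace[NUM_TRACE_ENTRIES];
static atomic_uint trace_index;

static struct trace_entry *next_trace_slot(void)
{
	/*
	 * Each caller gets a unique counter value; masking wraps it into
	 * the ring without taking any lock.
	 */
	unsigned int idx = atomic_fetch_add(&trace_index, 1) & TRACE_INDEX_MASK;

	return &trace[idx];
}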
@@ -660,7 +699,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost)
}
if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
- memset(vhost->async_crq.msgs, 0, PAGE_SIZE);
+ memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE);
vhost->async_crq.cur = 0;
list_for_each_entry(tgt, &vhost->targets, queue)
@@ -687,6 +726,15 @@ static int ibmvfc_send_crq(struct ibmvfc_host *vhost, u64 word1, u64 word2)
return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}
+static int ibmvfc_send_sub_crq(struct ibmvfc_host *vhost, u64 cookie, u64 word1,
+ u64 word2, u64 word3, u64 word4)
+{
+ struct vio_dev *vdev = to_vio_dev(vhost->dev);
+
+ return plpar_hcall_norets(H_SEND_SUB_CRQ, vdev->unit_address, cookie,
+ word1, word2, word3, word4);
+}
+
/**
* ibmvfc_send_crq_init - Send a CRQ init message
* @vhost: ibmvfc host struct
@@ -714,6 +762,105 @@ static int ibmvfc_send_crq_init_complete(struct ibmvfc_host *vhost)
}
/**
+ * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
+ * @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ * @size: pool size
+ *
+ * Returns zero on success.
+ **/
+static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue,
+ unsigned int size)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ if (!size)
+ return 0;
+
+ pool->size = size;
+ pool->events = kcalloc(size, sizeof(*pool->events), GFP_KERNEL);
+ if (!pool->events)
+ return -ENOMEM;
+
+ pool->iu_storage = dma_alloc_coherent(vhost->dev,
+ size * sizeof(*pool->iu_storage),
+ &pool->iu_token, 0);
+
+ if (!pool->iu_storage) {
+ kfree(pool->events);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&queue->sent);
+ INIT_LIST_HEAD(&queue->free);
+ spin_lock_init(&queue->l_lock);
+
+ for (i = 0; i < size; ++i) {
+ struct ibmvfc_event *evt = &pool->events[i];
+
+ atomic_set(&evt->free, 1);
+ evt->crq.valid = 0x80;
+ evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
+ evt->xfer_iu = pool->iu_storage + i;
+ evt->vhost = vhost;
+ evt->queue = queue;
+ evt->ext_list = NULL;
+ list_add_tail(&evt->queue_list, &queue->free);
+ }
+
+ LEAVE;
+ return 0;
+}
+
+/**
+ * ibmvfc_free_event_pool - Frees memory of the event pool of a host
+ * @vhost: ibmvfc host who owns the event pool
+ * @queue: ibmvfc queue struct
+ *
+ **/
+static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ int i;
+ struct ibmvfc_event_pool *pool = &queue->evt_pool;
+
+ ENTER;
+ for (i = 0; i < pool->size; ++i) {
+ list_del(&pool->events[i].queue_list);
+ BUG_ON(atomic_read(&pool->events[i].free) != 1);
+ if (pool->events[i].ext_list)
+ dma_pool_free(vhost->sg_pool,
+ pool->events[i].ext_list,
+ pool->events[i].ext_list_token);
+ }
+
+ kfree(pool->events);
+ dma_free_coherent(vhost->dev,
+ pool->size * sizeof(*pool->iu_storage),
+ pool->iu_storage, pool->iu_token);
+ LEAVE;
+}
+
+/**
+ * ibmvfc_free_queue - Deallocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue struct
+ *
+ * Unmaps DMA and deallocates the page used for messages
+ **/
+static void ibmvfc_free_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue)
+{
+ struct device *dev = vhost->dev;
+
+ dma_unmap_single(dev, queue->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+
+ ibmvfc_free_event_pool(vhost, queue);
+}
+
+/**
* ibmvfc_release_crq_queue - Deallocates data and unregisters CRQ
* @vhost: ibmvfc host struct
*
@@ -724,7 +871,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
{
long rc = 0;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ibmvfc_dbg(vhost, "Releasing CRQ\n");
free_irq(vdev->irq, vhost);
@@ -737,8 +884,8 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
- dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
- free_page((unsigned long)crq->msgs);
+
+ ibmvfc_free_queue(vhost, crq);
}
/**
@@ -778,7 +925,9 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
int rc = 0;
unsigned long flags;
struct vio_dev *vdev = to_vio_dev(vhost->dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *scrq;
+ int i;
/* Close the CRQ */
do {
@@ -788,13 +937,26 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
vhost->state = IBMVFC_NO_CRQ;
vhost->logged_in = 0;
+ vhost->do_enquiry = 1;
+ vhost->using_channels = 0;
/* Clean out the queue */
- memset(crq->msgs, 0, PAGE_SIZE);
+ memset(crq->msgs.crq, 0, PAGE_SIZE);
crq->cur = 0;
+ if (vhost->scsi_scrqs.scrqs) {
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ scrq = &vhost->scsi_scrqs.scrqs[i];
+ spin_lock(scrq->q_lock);
+ memset(scrq->msgs.scrq, 0, PAGE_SIZE);
+ scrq->cur = 0;
+ spin_unlock(scrq->q_lock);
+ }
+ }
+
/* And re-open it again */
rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
@@ -804,6 +966,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
dev_warn(vhost->dev, "Partner adapter not ready\n");
else if (rc != 0)
dev_warn(vhost->dev, "Couldn't register crq (rc=%d)\n", rc);
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
return rc;
@@ -835,12 +998,17 @@ static int ibmvfc_valid_event(struct ibmvfc_event_pool *pool,
**/
static void ibmvfc_free_event(struct ibmvfc_event *evt)
{
- struct ibmvfc_host *vhost = evt->vhost;
- struct ibmvfc_event_pool *pool = &vhost->pool;
+ struct ibmvfc_event_pool *pool = &evt->queue->evt_pool;
+ unsigned long flags;
BUG_ON(!ibmvfc_valid_event(pool, evt));
BUG_ON(atomic_inc_return(&evt->free) != 1);
- list_add_tail(&evt->queue, &vhost->free);
+
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->free);
+ if (evt->eh_comp)
+ complete(evt->eh_comp);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
}
/**
@@ -859,13 +1027,28 @@ static void ibmvfc_scsi_eh_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
/**
+ * ibmvfc_complete_purge - Complete failed command list
+ * @purge_list: list head of failed commands
+ *
+ * This function runs completions on commands that were failed as a result
+ * of a host reset or platform migration.
+ **/
+static void ibmvfc_complete_purge(struct list_head *purge_list)
+{
+ struct ibmvfc_event *evt, *pos;
+
+ list_for_each_entry_safe(evt, pos, purge_list, queue_list) {
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
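Note on usage: non-SCSI completions now re-take host_lock via ibmvfc_locked_done(), so failed events must be spliced onto a local list and completed only after host_lock is dropped, exactly as ibmvfc_do_work() does later in this patch. A hypothetical fragment (driver context assumed, helper name invented) showing that ordering:

/* Hypothetical helper, for illustration only */
static void example_fail_all_and_complete(struct ibmvfc_host *vhost)
{
	unsigned long flags;
	LIST_HEAD(purge);			/* local list, no lock needed */

	spin_lock_irqsave(vhost->host->host_lock, flags);
	ibmvfc_purge_requests(vhost, DID_ERROR);
	list_splice_init(&vhost->purge, &purge);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* evt->done() may re-acquire host_lock, so run it unlocked */
	ibmvfc_complete_purge(&purge);
}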
+/**
* ibmvfc_fail_request - Fail request with specified error code
* @evt: ibmvfc event struct
* @error_code: error code to fail request with
@@ -881,10 +1064,7 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
} else
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_DRIVER_FAILED);
- list_del(&evt->queue);
del_timer(&evt->timer);
- ibmvfc_trc_end(evt);
- evt->done(evt);
}
/**
@@ -898,10 +1078,30 @@ static void ibmvfc_fail_request(struct ibmvfc_event *evt, int error_code)
static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
{
struct ibmvfc_event *evt, *pos;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int hwqs = 0;
+ int i;
+
+ if (vhost->using_channels)
+ hwqs = vhost->scsi_scrqs.active_queues;
ibmvfc_dbg(vhost, "Purging all requests\n");
- list_for_each_entry_safe(evt, pos, &vhost->sent, queue)
+ spin_lock_irqsave(&vhost->crq.l_lock, flags);
+ list_for_each_entry_safe(evt, pos, &vhost->crq.sent, queue_list)
ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&vhost->crq.sent, &vhost->purge);
+ spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
+
+ for (i = 0; i < hwqs; i++) {
+ spin_lock_irqsave(queues[i].q_lock, flags);
+ spin_lock(&queues[i].l_lock);
+ list_for_each_entry_safe(evt, pos, &queues[i].sent, queue_list)
+ ibmvfc_fail_request(evt, error_code);
+ list_splice_init(&queues[i].sent, &vhost->purge);
+ spin_unlock(&queues[i].l_lock);
+ spin_unlock_irqrestore(queues[i].q_lock, flags);
+ }
}
/**
@@ -1238,6 +1438,7 @@ static void ibmvfc_gather_partition_info(struct ibmvfc_host *vhost)
static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login *login_info = &vhost->login_info;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct device_node *of_node = vhost->dev->of_node;
const char *location;
@@ -1256,8 +1457,13 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
login_info->max_cmds = cpu_to_be32(max_requests + IBMVFC_NUM_INTERNAL_REQ);
login_info->capabilities = cpu_to_be64(IBMVFC_CAN_MIGRATE | IBMVFC_CAN_SEND_VF_WWPN);
+
+ if (vhost->mq_enabled || vhost->using_channels)
+ login_info->capabilities |= cpu_to_be64(IBMVFC_CAN_USE_CHANNELS);
+
login_info->async.va = cpu_to_be64(vhost->async_crq.msg_token);
- login_info->async.len = cpu_to_be32(vhost->async_crq.size * sizeof(*vhost->async_crq.msgs));
+ login_info->async.len = cpu_to_be32(async_crq->size *
+ sizeof(*async_crq->msgs.async));
strncpy(login_info->partition_name, vhost->partition_name, IBMVFC_MAX_NAME);
strncpy(login_info->device_name,
dev_name(&vhost->host->shost_gendev), IBMVFC_MAX_NAME);
@@ -1268,88 +1474,40 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
}
/**
- * ibmvfc_init_event_pool - Allocates and initializes the event pool for a host
- * @vhost: ibmvfc host who owns the event pool
- *
- * Returns zero on success.
- **/
-static int ibmvfc_init_event_pool(struct ibmvfc_host *vhost)
-{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- pool->size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
- pool->events = kcalloc(pool->size, sizeof(*pool->events), GFP_KERNEL);
- if (!pool->events)
- return -ENOMEM;
-
- pool->iu_storage = dma_alloc_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- &pool->iu_token, 0);
-
- if (!pool->iu_storage) {
- kfree(pool->events);
- return -ENOMEM;
- }
-
- for (i = 0; i < pool->size; ++i) {
- struct ibmvfc_event *evt = &pool->events[i];
- atomic_set(&evt->free, 1);
- evt->crq.valid = 0x80;
- evt->crq.ioba = cpu_to_be64(pool->iu_token + (sizeof(*evt->xfer_iu) * i));
- evt->xfer_iu = pool->iu_storage + i;
- evt->vhost = vhost;
- evt->ext_list = NULL;
- list_add_tail(&evt->queue, &vhost->free);
- }
-
- LEAVE;
- return 0;
-}
-
-/**
- * ibmvfc_free_event_pool - Frees memory of the event pool of a host
- * @vhost: ibmvfc host who owns the event pool
+ * ibmvfc_get_event - Gets the next free event in pool
+ * @queue: ibmvfc queue struct
*
+ * Returns a free event from the pool.
**/
-static void ibmvfc_free_event_pool(struct ibmvfc_host *vhost)
+static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_queue *queue)
{
- int i;
- struct ibmvfc_event_pool *pool = &vhost->pool;
-
- ENTER;
- for (i = 0; i < pool->size; ++i) {
- list_del(&pool->events[i].queue);
- BUG_ON(atomic_read(&pool->events[i].free) != 1);
- if (pool->events[i].ext_list)
- dma_pool_free(vhost->sg_pool,
- pool->events[i].ext_list,
- pool->events[i].ext_list_token);
- }
+ struct ibmvfc_event *evt;
+ unsigned long flags;
- kfree(pool->events);
- dma_free_coherent(vhost->dev,
- pool->size * sizeof(*pool->iu_storage),
- pool->iu_storage, pool->iu_token);
- LEAVE;
+ spin_lock_irqsave(&queue->l_lock, flags);
+ BUG_ON(list_empty(&queue->free));
+ evt = list_entry(queue->free.next, struct ibmvfc_event, queue_list);
+ atomic_set(&evt->free, 0);
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&queue->l_lock, flags);
+ return evt;
}
/**
- * ibmvfc_get_event - Gets the next free event in pool
- * @vhost: ibmvfc host struct
+ * ibmvfc_locked_done - Calls evt completion with host_lock held
+ * @evt: ibmvfc evt to complete
*
- * Returns a free event from the pool.
+ * All non-scsi command completion callbacks have the expectation that the
+ * host_lock is held. This callback is used by ibmvfc_init_event to wrap a
+ * MAD evt with the host_lock.
**/
-static struct ibmvfc_event *ibmvfc_get_event(struct ibmvfc_host *vhost)
+static void ibmvfc_locked_done(struct ibmvfc_event *evt)
{
- struct ibmvfc_event *evt;
+ unsigned long flags;
- BUG_ON(list_empty(&vhost->free));
- evt = list_entry(vhost->free.next, struct ibmvfc_event, queue);
- atomic_set(&evt->free, 0);
- list_del(&evt->queue);
- return evt;
+ spin_lock_irqsave(evt->vhost->host->host_lock, flags);
+ evt->_done(evt);
+ spin_unlock_irqrestore(evt->vhost->host->host_lock, flags);
}
/**
@@ -1364,9 +1522,15 @@ static void ibmvfc_init_event(struct ibmvfc_event *evt,
{
evt->cmnd = NULL;
evt->sync_iu = NULL;
- evt->crq.format = format;
- evt->done = done;
evt->eh_comp = NULL;
+ evt->crq.format = format;
+ if (format == IBMVFC_CMD_FORMAT)
+ evt->done = done;
+ else {
+ evt->_done = done;
+ evt->done = ibmvfc_locked_done;
+ }
+ evt->hwq = 0;
}
/**
@@ -1482,6 +1646,7 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
struct ibmvfc_host *vhost, unsigned long timeout)
{
__be64 *crq_as_u64 = (__be64 *) &evt->crq;
+ unsigned long flags;
int rc;
/* Copy the IU into the transfer area */
@@ -1493,7 +1658,6 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
else
BUG();
- list_add_tail(&evt->queue, &vhost->sent);
timer_setup(&evt->timer, ibmvfc_timeout, 0);
if (timeout) {
@@ -1501,11 +1665,24 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
add_timer(&evt->timer);
}
+ spin_lock_irqsave(&evt->queue->l_lock, flags);
+ list_add_tail(&evt->queue_list, &evt->queue->sent);
+
mb();
- if ((rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
- be64_to_cpu(crq_as_u64[1])))) {
- list_del(&evt->queue);
+ if (evt->queue->fmt == IBMVFC_SUB_CRQ_FMT)
+ rc = ibmvfc_send_sub_crq(vhost,
+ evt->queue->vios_cookie,
+ be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]),
+ 0, 0);
+ else
+ rc = ibmvfc_send_crq(vhost, be64_to_cpu(crq_as_u64[0]),
+ be64_to_cpu(crq_as_u64[1]));
+
+ if (rc) {
+ list_del(&evt->queue_list);
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
del_timer(&evt->timer);
/* If send_crq returns H_CLOSED, return SCSI_MLQUEUE_HOST_BUSY.
@@ -1530,8 +1707,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt,
evt->xfer_iu->mad_common.status = cpu_to_be16(IBMVFC_MAD_CRQ_ERROR);
evt->done(evt);
- } else
+ } else {
+ spin_unlock_irqrestore(&evt->queue->l_lock, flags);
ibmvfc_trc_start(evt);
+ }
return 0;
}
@@ -1579,7 +1758,9 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
struct ibmvfc_target *tgt;
+ unsigned long flags;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
list_for_each_entry(tgt, &vhost->targets, queue) {
if (rport == tgt->rport) {
ibmvfc_del_tgt(tgt);
@@ -1588,6 +1769,7 @@ static void ibmvfc_relogin(struct scsi_device *sdev)
}
ibmvfc_reinit_host(vhost);
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
}
/**
@@ -1639,9 +1821,6 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
cmnd->scsi_done(cmnd);
}
- if (evt->eh_comp)
- complete(evt->eh_comp);
-
ibmvfc_free_event(evt);
}
@@ -1710,28 +1889,35 @@ static struct ibmvfc_cmd *ibmvfc_init_vfc_cmd(struct ibmvfc_event *evt, struct s
* Returns:
* 0 on success / other on failure
**/
-static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
- void (*done) (struct scsi_cmnd *))
+static int ibmvfc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *cmnd)
{
- struct ibmvfc_host *vhost = shost_priv(cmnd->device->host);
+ struct ibmvfc_host *vhost = shost_priv(shost);
struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
struct ibmvfc_cmd *vfc_cmd;
struct ibmvfc_fcp_cmd_iu *iu;
struct ibmvfc_event *evt;
+ u32 tag_and_hwq = blk_mq_unique_tag(cmnd->request);
+ u16 hwq = blk_mq_unique_tag_to_hwq(tag_and_hwq);
+ u16 scsi_channel;
int rc;
if (unlikely((rc = fc_remote_port_chkready(rport))) ||
unlikely((rc = ibmvfc_host_chkready(vhost)))) {
cmnd->result = rc;
- done(cmnd);
+ cmnd->scsi_done(cmnd);
return 0;
}
cmnd->result = (DID_OK << 16);
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels) {
+ scsi_channel = hwq % vhost->scsi_scrqs.active_queues;
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[scsi_channel]);
+ evt->hwq = hwq % vhost->scsi_scrqs.active_queues;
+ } else
+ evt = ibmvfc_get_event(&vhost->crq);
+
ibmvfc_init_event(evt, ibmvfc_scsi_done, IBMVFC_CMD_FORMAT);
evt->cmnd = cmnd;
- cmnd->scsi_done = done;
vfc_cmd = ibmvfc_init_vfc_cmd(evt, cmnd->device);
iu = ibmvfc_get_fcp_iu(vhost, vfc_cmd);
@@ -1758,12 +1944,10 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd,
"Failed to map DMA buffer for command. rc=%d\n", rc);
cmnd->result = DID_ERROR << 16;
- done(cmnd);
+ cmnd->scsi_done(cmnd);
return 0;
}
-static DEF_SCSI_QCMD(ibmvfc_queuecommand)
-
/**
* ibmvfc_sync_completion - Signal that a synchronous command has completed
* @evt: ibmvfc event struct
@@ -1817,7 +2001,7 @@ static int ibmvfc_bsg_timeout(struct bsg_job *job)
}
vhost->aborting_passthru = 1;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_bsg_timeout_done, IBMVFC_MAD_FORMAT);
tmf = &evt->iu.tmf;
@@ -1875,7 +2059,7 @@ static int ibmvfc_bsg_plogi(struct ibmvfc_host *vhost, unsigned int port_id)
if (unlikely((rc = ibmvfc_host_chkready(vhost))))
goto unlock_out;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
plogi = &evt->iu.plogi;
memset(plogi, 0, sizeof(*plogi));
@@ -1993,7 +2177,7 @@ static int ibmvfc_bsg_request(struct bsg_job *job)
goto out;
}
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
mad = &evt->iu.passthru;
@@ -2077,7 +2261,11 @@ static int ibmvfc_reset_device(struct scsi_device *sdev, int type, char *desc)
spin_lock_irqsave(vhost->host->host_lock, flags);
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ if (vhost->using_channels)
+ evt = ibmvfc_get_event(&vhost->scsi_scrqs.scrqs[0]);
+ else
+ evt = ibmvfc_get_event(&vhost->crq);
+
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = ibmvfc_init_vfc_cmd(evt, sdev);
iu = ibmvfc_get_fcp_iu(vhost, tmf);
@@ -2195,28 +2383,28 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
ENTER;
do {
wait = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(&vhost->crq.l_lock, flags);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (match(evt, device)) {
evt->eh_comp = &comp;
wait++;
}
}
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
if (wait) {
timeout = wait_for_completion_timeout(&comp, timeout);
if (!timeout) {
wait = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(&vhost->crq.l_lock, flags);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (match(evt, device)) {
evt->eh_comp = NULL;
wait++;
}
}
- spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ spin_unlock_irqrestore(&vhost->crq.l_lock, flags);
if (wait)
dev_err(vhost->dev, "Timed out waiting for aborted commands\n");
LEAVE;
@@ -2229,23 +2417,123 @@ static int ibmvfc_wait_for_ops(struct ibmvfc_host *vhost, void *device,
return SUCCESS;
}
-/**
- * ibmvfc_cancel_all - Cancel all outstanding commands to the device
- * @sdev: scsi device to cancel commands
- * @type: type of error recovery being performed
- *
- * This sends a cancel to the VIOS for the specified device. This does
- * NOT send any abort to the actual device. That must be done separately.
- *
- * Returns:
- * 0 on success / other on failure
- **/
-static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+static struct ibmvfc_event *ibmvfc_init_tmf(struct ibmvfc_queue *queue,
+ struct scsi_device *sdev,
+ int type)
{
struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct scsi_target *starget = scsi_target(sdev);
struct fc_rport *rport = starget_to_rport(starget);
+ struct ibmvfc_event *evt;
struct ibmvfc_tmf *tmf;
+
+ evt = ibmvfc_get_event(queue);
+ ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
+
+ tmf = &evt->iu.tmf;
+ memset(tmf, 0, sizeof(*tmf));
+ if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
+ tmf->common.version = cpu_to_be32(2);
+ tmf->target_wwpn = cpu_to_be64(rport->port_name);
+ } else {
+ tmf->common.version = cpu_to_be32(1);
+ }
+ tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
+ tmf->common.length = cpu_to_be16(sizeof(*tmf));
+ tmf->scsi_id = cpu_to_be64(rport->port_id);
+ int_to_scsilun(sdev->lun, &tmf->lun);
+ if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
+ type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
+ if (vhost->state == IBMVFC_ACTIVE)
+ tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
+ else
+ tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
+ tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
+ tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
+
+ init_completion(&evt->comp);
+
+ return evt;
+}
+
+static int ibmvfc_cancel_all_mq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+ struct ibmvfc_event *evt, *found_evt, *temp;
+ struct ibmvfc_queue *queues = vhost->scsi_scrqs.scrqs;
+ unsigned long flags;
+ int num_hwq, i;
+ int fail = 0;
+ LIST_HEAD(cancelq);
+ u16 status;
+
+ ENTER;
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ num_hwq = vhost->scsi_scrqs.active_queues;
+ for (i = 0; i < num_hwq; i++) {
+ spin_lock(queues[i].q_lock);
+ spin_lock(&queues[i].l_lock);
+ found_evt = NULL;
+ list_for_each_entry(evt, &queues[i].sent, queue_list) {
+ if (evt->cmnd && evt->cmnd->device == sdev) {
+ found_evt = evt;
+ break;
+ }
+ }
+ spin_unlock(&queues[i].l_lock);
+
+ if (found_evt && vhost->logged_in) {
+ evt = ibmvfc_init_tmf(&queues[i], sdev, type);
+ evt->sync_iu = &queues[i].cancel_rsp;
+ ibmvfc_send_event(evt, vhost, default_timeout);
+ list_add_tail(&evt->cancel, &cancelq);
+ }
+
+ spin_unlock(queues[i].q_lock);
+ }
+ spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ if (list_empty(&cancelq)) {
+ if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
+ sdev_printk(KERN_INFO, sdev, "No events found to cancel\n");
+ return 0;
+ }
+
+ sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n");
+
+ list_for_each_entry_safe(evt, temp, &cancelq, cancel) {
+ wait_for_completion(&evt->comp);
+ status = be16_to_cpu(evt->queue->cancel_rsp.mad_common.status);
+ list_del(&evt->cancel);
+ ibmvfc_free_event(evt);
+
+ if (status != IBMVFC_MAD_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status);
+ switch (status) {
+ case IBMVFC_MAD_DRIVER_FAILED:
+ case IBMVFC_MAD_CRQ_ERROR:
+ /* Host adapter is most likely going through reset; return success so
+ * the caller will wait for the command being cancelled to get returned
+ */
+ break;
+ default:
+ fail = 1;
+ break;
+ }
+ }
+ }
+
+ if (fail)
+ return -EIO;
+
+ sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n");
+ LEAVE;
+ return 0;
+}
+
+static int ibmvfc_cancel_all_sq(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
struct ibmvfc_event *evt, *found_evt;
union ibmvfc_iu rsp;
int rsp_rc = -EBUSY;
@@ -2253,14 +2541,16 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
u16 status;
ENTER;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2270,32 +2560,8 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
if (vhost->logged_in) {
- evt = ibmvfc_get_event(vhost);
- ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_MAD_FORMAT);
-
- tmf = &evt->iu.tmf;
- memset(tmf, 0, sizeof(*tmf));
- if (ibmvfc_check_caps(vhost, IBMVFC_HANDLE_VF_WWPN)) {
- tmf->common.version = cpu_to_be32(2);
- tmf->target_wwpn = cpu_to_be64(rport->port_name);
- } else {
- tmf->common.version = cpu_to_be32(1);
- }
- tmf->common.opcode = cpu_to_be32(IBMVFC_TMF_MAD);
- tmf->common.length = cpu_to_be16(sizeof(*tmf));
- tmf->scsi_id = cpu_to_be64(rport->port_id);
- int_to_scsilun(sdev->lun, &tmf->lun);
- if (!ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPRESS_ABTS))
- type &= ~IBMVFC_TMF_SUPPRESS_ABTS;
- if (vhost->state == IBMVFC_ACTIVE)
- tmf->flags = cpu_to_be32((type | IBMVFC_TMF_LUA_VALID));
- else
- tmf->flags = cpu_to_be32(((type & IBMVFC_TMF_SUPPRESS_ABTS) | IBMVFC_TMF_LUA_VALID));
- tmf->cancel_key = cpu_to_be32((unsigned long)sdev->hostdata);
- tmf->my_cancel_key = cpu_to_be32((unsigned long)starget->hostdata);
-
+ evt = ibmvfc_init_tmf(&vhost->crq, sdev, type);
evt->sync_iu = &rsp;
- init_completion(&evt->comp);
rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout);
}
@@ -2335,6 +2601,27 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
}
/**
+ * ibmvfc_cancel_all - Cancel all outstanding commands to the device
+ * @sdev: scsi device to cancel commands
+ * @type: type of error recovery being performed
+ *
+ * This sends a cancel to the VIOS for the specified device. This does
+ * NOT send any abort to the actual device. That must be done separately.
+ *
+ * Returns:
+ * 0 on success / other on failure
+ **/
+static int ibmvfc_cancel_all(struct scsi_device *sdev, int type)
+{
+ struct ibmvfc_host *vhost = shost_priv(sdev->host);
+
+ if (vhost->mq_enabled && vhost->using_channels)
+ return ibmvfc_cancel_all_mq(sdev, type);
+ else
+ return ibmvfc_cancel_all_sq(sdev, type);
+}
+
+/**
* ibmvfc_match_key - Match function for specified cancel key
* @evt: ibmvfc event struct
* @key: cancel key to match
@@ -2390,14 +2677,16 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
unsigned long flags, timeout = IBMVFC_ABORT_TIMEOUT;
int rsp_code = 0;
- spin_lock_irqsave(vhost->host->host_lock, flags);
found_evt = NULL;
- list_for_each_entry(evt, &vhost->sent, queue) {
+ spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(&vhost->crq.l_lock);
+ list_for_each_entry(evt, &vhost->crq.sent, queue_list) {
if (evt->cmnd && evt->cmnd->device == sdev) {
found_evt = evt;
break;
}
}
+ spin_unlock(&vhost->crq.l_lock);
if (!found_evt) {
if (vhost->log_level > IBMVFC_DEFAULT_LOG_LEVEL)
@@ -2407,7 +2696,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev)
}
if (vhost->state == IBMVFC_ACTIVE) {
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_sync_completion, IBMVFC_CMD_FORMAT);
tmf = ibmvfc_init_vfc_cmd(evt, sdev);
iu = ibmvfc_get_fcp_iu(vhost, tmf);
@@ -2839,7 +3128,8 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
* @vhost: ibmvfc host struct
*
**/
-static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
+static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
{
long rc;
struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
@@ -2898,7 +3188,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
* things we send. Make sure this response is to something we
* actually sent
*/
- if (unlikely(!ibmvfc_valid_event(&vhost->pool, evt))) {
+ if (unlikely(!ibmvfc_valid_event(&vhost->crq.evt_pool, evt))) {
dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
crq->ioba);
return;
@@ -2910,10 +3200,9 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
return;
}
- del_timer(&evt->timer);
- list_del(&evt->queue);
- ibmvfc_trc_end(evt);
- evt->done(evt);
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
}
/**
@@ -3132,6 +3421,37 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
return strlen(buf);
}
+static ssize_t ibmvfc_show_scsi_channels(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", vhost->client_scsi_channels);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return len;
+}
+
+static ssize_t ibmvfc_store_scsi_channels(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ibmvfc_host *vhost = shost_priv(shost);
+ unsigned long flags = 0;
+ unsigned int channels;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+ channels = simple_strtoul(buf, NULL, 10);
+ vhost->client_scsi_channels = min(channels, nr_scsi_hw_queues);
+ ibmvfc_hard_reset_host(vhost);
+ spin_unlock_irqrestore(shost->host_lock, flags);
+ return strlen(buf);
+}
+
static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
@@ -3140,6 +3460,8 @@ static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
ibmvfc_show_log_level, ibmvfc_store_log_level);
+static DEVICE_ATTR(nr_scsi_channels, S_IRUGO | S_IWUSR,
+ ibmvfc_show_scsi_channels, ibmvfc_store_scsi_channels);
#ifdef CONFIG_SCSI_IBMVFC_TRACE
/**
@@ -3196,6 +3518,7 @@ static struct device_attribute *ibmvfc_attrs[] = {
&dev_attr_npiv_version,
&dev_attr_capabilities,
&dev_attr_log_level,
+ &dev_attr_nr_scsi_channels,
NULL
};
@@ -3221,6 +3544,7 @@ static struct scsi_host_template driver_template = {
.max_sectors = IBMVFC_MAX_SECTORS,
.shost_attrs = ibmvfc_attrs,
.track_queue_depth = 1,
+ .host_tagset = 1,
};
/**
@@ -3232,10 +3556,10 @@ static struct scsi_host_template driver_template = {
**/
static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_crq = &vhost->async_crq;
+ struct ibmvfc_queue *async_crq = &vhost->async_crq;
struct ibmvfc_async_crq *crq;
- crq = &async_crq->msgs[async_crq->cur];
+ crq = &async_crq->msgs.async[async_crq->cur];
if (crq->valid & 0x80) {
if (++async_crq->cur == async_crq->size)
async_crq->cur = 0;
@@ -3255,10 +3579,10 @@ static struct ibmvfc_async_crq *ibmvfc_next_async_crq(struct ibmvfc_host *vhost)
**/
static struct ibmvfc_crq *ibmvfc_next_crq(struct ibmvfc_host *vhost)
{
- struct ibmvfc_crq_queue *queue = &vhost->crq;
+ struct ibmvfc_queue *queue = &vhost->crq;
struct ibmvfc_crq *crq;
- crq = &queue->msgs[queue->cur];
+ crq = &queue->msgs.crq[queue->cur];
if (crq->valid & 0x80) {
if (++queue->cur == queue->size)
queue->cur = 0;
@@ -3302,10 +3626,13 @@ static void ibmvfc_tasklet(void *data)
struct vio_dev *vdev = to_vio_dev(vhost->dev);
struct ibmvfc_crq *crq;
struct ibmvfc_async_crq *async;
+ struct ibmvfc_event *evt, *temp;
unsigned long flags;
int done = 0;
+ LIST_HEAD(evt_doneq);
spin_lock_irqsave(vhost->host->host_lock, flags);
+ spin_lock(vhost->crq.q_lock);
while (!done) {
/* Pull all the valid messages off the async CRQ */
while ((async = ibmvfc_next_async_crq(vhost)) != NULL) {
@@ -3316,7 +3643,7 @@ static void ibmvfc_tasklet(void *data)
/* Pull all the valid messages off the CRQ */
while ((crq = ibmvfc_next_crq(vhost)) != NULL) {
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
}
@@ -3329,14 +3656,138 @@ static void ibmvfc_tasklet(void *data)
wmb();
} else if ((crq = ibmvfc_next_crq(vhost)) != NULL) {
vio_disable_interrupts(vdev);
- ibmvfc_handle_crq(crq, vhost);
+ ibmvfc_handle_crq(crq, vhost, &evt_doneq);
crq->valid = 0;
wmb();
} else
done = 1;
}
+ spin_unlock(vhost->crq.q_lock);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ del_timer(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+static int ibmvfc_toggle_scrq_irq(struct ibmvfc_queue *scrq, int enable)
+{
+ struct device *dev = scrq->vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ unsigned long rc;
+ int irq_action = H_ENABLE_VIO_INTERRUPT;
+
+ if (!enable)
+ irq_action = H_DISABLE_VIO_INTERRUPT;
+
+ rc = plpar_hcall_norets(H_VIOCTL, vdev->unit_address, irq_action,
+ scrq->hw_irq, 0, 0);
+
+ if (rc)
+ dev_err(dev, "Couldn't %s sub-crq[%lu] irq. rc=%ld\n",
+ enable ? "enable" : "disable", scrq->hwq_id, rc);
+
+ return rc;
+}
+
+static void ibmvfc_handle_scrq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost,
+ struct list_head *evt_doneq)
+{
+ struct ibmvfc_event *evt = (struct ibmvfc_event *)be64_to_cpu(crq->ioba);
+
+ switch (crq->valid) {
+ case IBMVFC_CRQ_CMD_RSP:
+ break;
+ case IBMVFC_CRQ_XPORT_EVENT:
+ return;
+ default:
+ dev_err(vhost->dev, "Got and invalid message type 0x%02x\n", crq->valid);
+ return;
+ }
+
+ /* The only kind of payload CRQs we should get are responses to
+ * things we send. Make sure this response is to something we
+ * actually sent
+ */
+ if (unlikely(!ibmvfc_valid_event(&evt->queue->evt_pool, evt))) {
+ dev_err(vhost->dev, "Returned correlation_token 0x%08llx is invalid!\n",
+ crq->ioba);
+ return;
+ }
+
+ if (unlikely(atomic_read(&evt->free))) {
+ dev_err(vhost->dev, "Received duplicate correlation_token 0x%08llx!\n",
+ crq->ioba);
+ return;
+ }
+
+ spin_lock(&evt->queue->l_lock);
+ list_move_tail(&evt->queue_list, evt_doneq);
+ spin_unlock(&evt->queue->l_lock);
+}
+
+static struct ibmvfc_crq *ibmvfc_next_scrq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+
+ crq = &scrq->msgs.scrq[scrq->cur].crq;
+ if (crq->valid & 0x80) {
+ if (++scrq->cur == scrq->size)
+ scrq->cur = 0;
+ rmb();
+ } else
+ crq = NULL;
+
+ return crq;
+}
+
+static void ibmvfc_drain_sub_crq(struct ibmvfc_queue *scrq)
+{
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_event *evt, *temp;
+ unsigned long flags;
+ int done = 0;
+ LIST_HEAD(evt_doneq);
+
+ spin_lock_irqsave(scrq->q_lock, flags);
+ while (!done) {
+ while ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ }
+
+ ibmvfc_toggle_scrq_irq(scrq, 1);
+ if ((crq = ibmvfc_next_scrq(scrq)) != NULL) {
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_handle_scrq(crq, scrq->vhost, &evt_doneq);
+ crq->valid = 0;
+ wmb();
+ } else
+ done = 1;
+ }
+ spin_unlock_irqrestore(scrq->q_lock, flags);
+
+ list_for_each_entry_safe(evt, temp, &evt_doneq, queue_list) {
+ del_timer(&evt->timer);
+ list_del(&evt->queue_list);
+ ibmvfc_trc_end(evt);
+ evt->done(evt);
+ }
+}
+
+static irqreturn_t ibmvfc_interrupt_scsi(int irq, void *scrq_instance)
+{
+ struct ibmvfc_queue *scrq = (struct ibmvfc_queue *)scrq_instance;
+
+ ibmvfc_toggle_scrq_irq(scrq, 0);
+ ibmvfc_drain_sub_crq(scrq);
+
+ return IRQ_HANDLED;
}
/**
@@ -3491,7 +3942,7 @@ static void ibmvfc_tgt_send_prli(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_prli_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
@@ -3598,7 +4049,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *tgt)
kref_get(&tgt->kref);
tgt->logo_rcvd = 0;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
ibmvfc_init_event(evt, ibmvfc_tgt_plogi_done, IBMVFC_MAD_FORMAT);
@@ -3673,7 +4124,7 @@ static struct ibmvfc_event *__ibmvfc_tgt_get_implicit_logout_evt(struct ibmvfc_t
struct ibmvfc_event *evt;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
mad = &evt->iu.implicit_logout;
@@ -3838,7 +4289,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
@@ -4004,7 +4455,7 @@ static void ibmvfc_adisc_timeout(struct timer_list *t)
vhost->abort_threads++;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_cancel_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
@@ -4054,7 +4505,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
ibmvfc_init_event(evt, ibmvfc_tgt_adisc_done, IBMVFC_MAD_FORMAT);
evt->tgt = tgt;
@@ -4157,7 +4608,7 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
return;
kref_get(&tgt->kref);
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
vhost->discovery_threads++;
evt->tgt = tgt;
ibmvfc_init_event(evt, ibmvfc_tgt_query_target_done, IBMVFC_MAD_FORMAT);
@@ -4324,7 +4775,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
{
struct ibmvfc_discover_targets *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_discover_targets_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.discover_targets;
@@ -4344,6 +4795,148 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
}
+static void ibmvfc_channel_setup_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_setup *setup = vhost->channel_setup_buf;
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ u32 mad_status = be16_to_cpu(evt->xfer_iu->channel_setup.common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+ int flags, active_queues, i;
+
+ ibmvfc_free_event(evt);
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Setup succeeded\n");
+ flags = be32_to_cpu(setup->flags);
+ vhost->do_enquiry = 0;
+ active_queues = be32_to_cpu(setup->num_scsi_subq_channels);
+ scrqs->active_queues = active_queues;
+
+ if (flags & IBMVFC_CHANNELS_CANCELED) {
+ ibmvfc_dbg(vhost, "Channels Canceled\n");
+ vhost->using_channels = 0;
+ } else {
+ if (active_queues)
+ vhost->using_channels = 1;
+ for (i = 0; i < active_queues; i++)
+ scrqs->scrqs[i].vios_cookie =
+ be64_to_cpu(setup->channel_handles[i]);
+
+ ibmvfc_dbg(vhost, "Using %u channels\n",
+ vhost->scsi_scrqs.active_queues);
+ }
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Setup failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Setup response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ return;
+ }
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+}
+
+static void ibmvfc_channel_setup(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_setup_mad *mad;
+ struct ibmvfc_channel_setup *setup_buf = vhost->channel_setup_buf;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+ struct ibmvfc_scsi_channels *scrqs = &vhost->scsi_scrqs;
+ unsigned int num_channels =
+ min(vhost->client_scsi_channels, vhost->max_vios_scsi_channels);
+ int i;
+
+ memset(setup_buf, 0, sizeof(*setup_buf));
+ if (num_channels == 0)
+ setup_buf->flags = cpu_to_be32(IBMVFC_CANCEL_CHANNELS);
+ else {
+ setup_buf->num_scsi_subq_channels = cpu_to_be32(num_channels);
+ for (i = 0; i < num_channels; i++)
+ setup_buf->channel_handles[i] = cpu_to_be64(scrqs->scrqs[i].cookie);
+ }
+
+ ibmvfc_init_event(evt, ibmvfc_channel_setup_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_setup;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_SETUP);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+ mad->buffer.va = cpu_to_be64(vhost->channel_setup_dma);
+ mad->buffer.len = cpu_to_be32(sizeof(*vhost->channel_setup_buf));
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Sent channel setup\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+}
+
+static void ibmvfc_channel_enquiry_done(struct ibmvfc_event *evt)
+{
+ struct ibmvfc_host *vhost = evt->vhost;
+ struct ibmvfc_channel_enquiry *rsp = &evt->xfer_iu->channel_enquiry;
+ u32 mad_status = be16_to_cpu(rsp->common.status);
+ int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+ switch (mad_status) {
+ case IBMVFC_MAD_SUCCESS:
+ ibmvfc_dbg(vhost, "Channel Enquiry succeeded\n");
+ vhost->max_vios_scsi_channels = be32_to_cpu(rsp->num_scsi_subq_channels);
+ ibmvfc_free_event(evt);
+ break;
+ case IBMVFC_MAD_FAILED:
+ level += ibmvfc_retry_host_init(vhost);
+ ibmvfc_log(vhost, level, "Channel Enquiry failed\n");
+ fallthrough;
+ case IBMVFC_MAD_DRIVER_FAILED:
+ ibmvfc_free_event(evt);
+ return;
+ default:
+ dev_err(vhost->dev, "Invalid Channel Enquiry response: 0x%x\n",
+ mad_status);
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+ ibmvfc_free_event(evt);
+ return;
+ }
+
+ ibmvfc_channel_setup(vhost);
+}
+
+static void ibmvfc_channel_enquiry(struct ibmvfc_host *vhost)
+{
+ struct ibmvfc_channel_enquiry *mad;
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
+
+ ibmvfc_init_event(evt, ibmvfc_channel_enquiry_done, IBMVFC_MAD_FORMAT);
+ mad = &evt->iu.channel_enquiry;
+ memset(mad, 0, sizeof(*mad));
+ mad->common.version = cpu_to_be32(1);
+ mad->common.opcode = cpu_to_be32(IBMVFC_CHANNEL_ENQUIRY);
+ mad->common.length = cpu_to_be16(sizeof(*mad));
+
+ if (mig_channels_only)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT);
+ if (mig_no_less_channels)
+ mad->flags |= cpu_to_be32(IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT);
+
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);
+
+ if (!ibmvfc_send_event(evt, vhost, default_timeout))
+ ibmvfc_dbg(vhost, "Send channel enquiry\n");
+ else
+ ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
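For orientation (not part of the patch): once NPIV login completes, the channelization handshake driven by the two MADs above proceeds roughly as sketched below; the function names are the ones added in this patch.

/*
 *   ibmvfc_npiv_login_done()
 *     -> ibmvfc_channel_enquiry()      ask the VIOS how many sub-CRQ
 *                                       channels it supports
 *   ibmvfc_channel_enquiry_done()
 *     -> ibmvfc_channel_setup()        offer min(client_scsi_channels,
 *                                       max_vios_scsi_channels) handles
 *                                       registered via h_reg_sub_crq()
 *   ibmvfc_channel_setup_done()
 *     -> record active_queues and per-queue vios_cookie, set
 *        using_channels, then resume discovery via
 *        IBMVFC_HOST_ACTION_QUERY
 */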
/**
* ibmvfc_npiv_login_done - Completion handler for NPIV Login
* @evt: ibmvfc event struct
@@ -4425,8 +5018,14 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
vhost->host->can_queue = be32_to_cpu(rsp->max_cmds) - IBMVFC_NUM_INTERNAL_REQ;
vhost->host->max_sectors = npiv_max_sectors;
- ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
- wake_up(&vhost->work_wait_q);
+
+ if (ibmvfc_check_caps(vhost, IBMVFC_CAN_SUPPORT_CHANNELS) && vhost->do_enquiry) {
+ ibmvfc_channel_enquiry(vhost);
+ } else {
+ vhost->do_enquiry = 0;
+ ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+ wake_up(&vhost->work_wait_q);
+ }
}
/**
@@ -4437,7 +5036,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
{
struct ibmvfc_npiv_login_mad *mad;
- struct ibmvfc_event *evt = ibmvfc_get_event(vhost);
+ struct ibmvfc_event *evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_gather_partition_info(vhost);
ibmvfc_set_login_info(vhost);
@@ -4474,7 +5073,7 @@ static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
switch (mad_status) {
case IBMVFC_MAD_SUCCESS:
- if (list_empty(&vhost->sent) &&
+ if (list_empty(&vhost->crq.sent) &&
vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
ibmvfc_init_host(vhost);
return;
@@ -4502,7 +5101,7 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
struct ibmvfc_npiv_logout_mad *mad;
struct ibmvfc_event *evt;
- evt = ibmvfc_get_event(vhost);
+ evt = ibmvfc_get_event(&vhost->crq);
ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
mad = &evt->iu.npiv_logout;
@@ -4712,6 +5311,7 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
struct ibmvfc_target *tgt;
unsigned long flags;
struct fc_rport *rport;
+ LIST_HEAD(purge);
int rc;
ibmvfc_log_ae(vhost, vhost->events_to_log);
@@ -4724,7 +5324,9 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
case IBMVFC_HOST_ACTION_RESET:
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reset_crq(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc == H_CLOSED)
@@ -4737,7 +5339,9 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
break;
case IBMVFC_HOST_ACTION_REENABLE:
vhost->action = IBMVFC_HOST_ACTION_TGT_DEL;
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
+ ibmvfc_complete_purge(&purge);
rc = ibmvfc_reenable_crq_queue(vhost);
spin_lock_irqsave(vhost->host->host_lock, flags);
if (rc || (rc = ibmvfc_send_crq_init(vhost))) {
@@ -4898,6 +5502,69 @@ static int ibmvfc_work(void *data)
}
/**
+ * ibmvfc_alloc_queue - Allocate queue
+ * @vhost: ibmvfc host struct
+ * @queue: ibmvfc queue to allocate
+ * @fmt: queue format to allocate
+ *
+ * Returns:
+ * 0 on success / non-zero on failure
+ **/
+static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost,
+ struct ibmvfc_queue *queue,
+ enum ibmvfc_msg_fmt fmt)
+{
+ struct device *dev = vhost->dev;
+ size_t fmt_size;
+ unsigned int pool_size = 0;
+
+ ENTER;
+ spin_lock_init(&queue->_lock);
+ queue->q_lock = &queue->_lock;
+
+ switch (fmt) {
+ case IBMVFC_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.crq);
+ pool_size = max_requests + IBMVFC_NUM_INTERNAL_REQ;
+ break;
+ case IBMVFC_ASYNC_FMT:
+ fmt_size = sizeof(*queue->msgs.async);
+ break;
+ case IBMVFC_SUB_CRQ_FMT:
+ fmt_size = sizeof(*queue->msgs.scrq);
+ /* We need one extra event for Cancel Commands */
+ pool_size = max_requests + 1;
+ break;
+ default:
+ dev_warn(dev, "Unknown command/response queue message format: %d\n", fmt);
+ return -EINVAL;
+ }
+
+ if (ibmvfc_init_event_pool(vhost, queue, pool_size)) {
+ dev_err(dev, "Couldn't initialize event pool.\n");
+ return -ENOMEM;
+ }
+
+ queue->msgs.handle = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!queue->msgs.handle)
+ return -ENOMEM;
+
+ queue->msg_token = dma_map_single(dev, queue->msgs.handle, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ if (dma_mapping_error(dev, queue->msg_token)) {
+ free_page((unsigned long)queue->msgs.handle);
+ queue->msgs.handle = NULL;
+ return -ENOMEM;
+ }
+
+ queue->cur = 0;
+ queue->fmt = fmt;
+ queue->size = PAGE_SIZE / fmt_size;
+ return 0;
+}
+
+/**
* ibmvfc_init_crq - Initializes and registers CRQ with hypervisor
* @vhost: ibmvfc host struct
*
@@ -4912,21 +5579,12 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
int rc, retrc = -ENOMEM;
struct device *dev = vhost->dev;
struct vio_dev *vdev = to_vio_dev(dev);
- struct ibmvfc_crq_queue *crq = &vhost->crq;
+ struct ibmvfc_queue *crq = &vhost->crq;
ENTER;
- crq->msgs = (struct ibmvfc_crq *)get_zeroed_page(GFP_KERNEL);
-
- if (!crq->msgs)
+ if (ibmvfc_alloc_queue(vhost, crq, IBMVFC_CRQ_FMT))
return -ENOMEM;
- crq->size = PAGE_SIZE / sizeof(*crq->msgs);
- crq->msg_token = dma_map_single(dev, crq->msgs,
- PAGE_SIZE, DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, crq->msg_token))
- goto map_failed;
-
retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
crq->msg_token, PAGE_SIZE);
@@ -4955,7 +5613,6 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
goto req_irq_failed;
}
- crq->cur = 0;
LEAVE;
return retrc;
@@ -4965,12 +5622,136 @@ req_irq_failed:
rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
reg_crq_failed:
- dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
-map_failed:
- free_page((unsigned long)crq->msgs);
+ ibmvfc_free_queue(vhost, crq);
return retrc;
}
+static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost,
+ int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ int rc = -ENOMEM;
+
+ ENTER;
+
+ if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT))
+ return -ENOMEM;
+
+ rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE,
+ &scrq->cookie, &scrq->hw_irq);
+
+ if (rc) {
+ dev_warn(dev, "Error registering sub-crq: %d\n", rc);
+ if (rc == H_PARAMETER)
+ dev_warn_once(dev, "Firmware may not support MQ\n");
+ goto reg_failed;
+ }
+
+ scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
+
+ if (!scrq->irq) {
+ rc = -EINVAL;
+ dev_err(dev, "Error mapping sub-crq[%d] irq\n", index);
+ goto irq_failed;
+ }
+
+ snprintf(scrq->name, sizeof(scrq->name), "ibmvfc-%x-scsi%d",
+ vdev->unit_address, index);
+ rc = request_irq(scrq->irq, ibmvfc_interrupt_scsi, 0, scrq->name, scrq);
+
+ if (rc) {
+ dev_err(dev, "Couldn't register sub-crq[%d] irq\n", index);
+ irq_dispose_mapping(scrq->irq);
+ goto irq_failed;
+ }
+
+ scrq->hwq_id = index;
+ scrq->vhost = vhost;
+
+ LEAVE;
+ return 0;
+
+irq_failed:
+ do {
+ plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+reg_failed:
+ ibmvfc_free_queue(vhost, scrq);
+ LEAVE;
+ return rc;
+}
+
+static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index)
+{
+ struct device *dev = vhost->dev;
+ struct vio_dev *vdev = to_vio_dev(dev);
+ struct ibmvfc_queue *scrq = &vhost->scsi_scrqs.scrqs[index];
+ long rc;
+
+ ENTER;
+
+ free_irq(scrq->irq, scrq);
+ irq_dispose_mapping(scrq->irq);
+
+ do {
+ rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address,
+ scrq->cookie);
+ } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
+
+ if (rc)
+ dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc);
+
+ ibmvfc_free_queue(vhost, scrq);
+ LEAVE;
+}
+
+static int ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i, j;
+
+ ENTER;
+
+ vhost->scsi_scrqs.scrqs = kcalloc(nr_scsi_hw_queues,
+ sizeof(*vhost->scsi_scrqs.scrqs),
+ GFP_KERNEL);
+ if (!vhost->scsi_scrqs.scrqs)
+ return -1;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++) {
+ if (ibmvfc_register_scsi_channel(vhost, i)) {
+ for (j = i; j > 0; j--)
+ ibmvfc_deregister_scsi_channel(vhost, j - 1);
+ kfree(vhost->scsi_scrqs.scrqs);
+ vhost->scsi_scrqs.scrqs = NULL;
+ vhost->scsi_scrqs.active_queues = 0;
+ LEAVE;
+ return -1;
+ }
+ }
+
+ LEAVE;
+ return 0;
+}
+
+static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost)
+{
+ int i;
+
+ ENTER;
+ if (!vhost->scsi_scrqs.scrqs)
+ return;
+
+ for (i = 0; i < nr_scsi_hw_queues; i++)
+ ibmvfc_deregister_scsi_channel(vhost, i);
+
+ kfree(vhost->scsi_scrqs.scrqs);
+ vhost->scsi_scrqs.scrqs = NULL;
+ vhost->scsi_scrqs.active_queues = 0;
+ LEAVE;
+}
+
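The expected lifecycle of the sub-CRQ channels, as wired up by the probe/remove hunks further down, is roughly the sketch below (hypothetical function name; a sub-CRQ allocation failure is deliberately non-fatal).

static void example_mq_setup_teardown(struct ibmvfc_host *vhost)
{
	if (vhost->mq_enabled && ibmvfc_init_sub_crqs(vhost))
		dev_warn(vhost->dev,
			 "Failed to allocate Sub-CRQs, using a single queue\n");

	/* ... normal operation ... */

	/* Teardown order: sub-CRQs first, then the base CRQ. */
	ibmvfc_release_sub_crqs(vhost);
	ibmvfc_release_crq_queue(vhost);
}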
/**
* ibmvfc_free_mem - Free memory for vhost
* @vhost: ibmvfc host struct
@@ -4980,7 +5761,7 @@ map_failed:
**/
static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
ENTER;
mempool_destroy(vhost->tgt_pool);
@@ -4990,9 +5771,7 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
dma_free_coherent(vhost->dev, sizeof(*vhost->login_buf),
vhost->login_buf, vhost->login_buf_dma);
dma_pool_destroy(vhost->sg_pool);
- dma_unmap_single(vhost->dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
LEAVE;
}
@@ -5005,26 +5784,15 @@ static void ibmvfc_free_mem(struct ibmvfc_host *vhost)
**/
static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
{
- struct ibmvfc_async_crq_queue *async_q = &vhost->async_crq;
+ struct ibmvfc_queue *async_q = &vhost->async_crq;
struct device *dev = vhost->dev;
ENTER;
- async_q->msgs = (struct ibmvfc_async_crq *)get_zeroed_page(GFP_KERNEL);
- if (!async_q->msgs) {
- dev_err(dev, "Couldn't allocate async queue.\n");
+ if (ibmvfc_alloc_queue(vhost, async_q, IBMVFC_ASYNC_FMT)) {
+ dev_err(dev, "Couldn't allocate/map async queue.\n");
goto nomem;
}
- async_q->size = PAGE_SIZE / sizeof(struct ibmvfc_async_crq);
- async_q->msg_token = dma_map_single(dev, async_q->msgs,
- async_q->size * sizeof(*async_q->msgs),
- DMA_BIDIRECTIONAL);
-
- if (dma_mapping_error(dev, async_q->msg_token)) {
- dev_err(dev, "Failed to map async queue\n");
- goto free_async_crq;
- }
-
vhost->sg_pool = dma_pool_create(IBMVFC_NAME, dev,
SG_ALL * sizeof(struct srp_direct_buf),
sizeof(struct srp_direct_buf), 0);
@@ -5053,6 +5821,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
vhost->trace = kcalloc(IBMVFC_NUM_TRACE_ENTRIES,
sizeof(struct ibmvfc_trace_entry), GFP_KERNEL);
+ atomic_set(&vhost->trace_index, -1);
if (!vhost->trace)
goto free_disc_buffer;
@@ -5065,9 +5834,20 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
goto free_trace;
}
+ vhost->channel_setup_buf = dma_alloc_coherent(dev, sizeof(*vhost->channel_setup_buf),
+ &vhost->channel_setup_dma,
+ GFP_KERNEL);
+
+ if (!vhost->channel_setup_buf) {
+ dev_err(dev, "Couldn't allocate Channel Setup buffer\n");
+ goto free_tgt_pool;
+ }
+
LEAVE;
return 0;
+free_tgt_pool:
+ mempool_destroy(vhost->tgt_pool);
free_trace:
kfree(vhost->trace);
free_disc_buffer:
@@ -5079,10 +5859,7 @@ free_login_buffer:
free_sg_pool:
dma_pool_destroy(vhost->sg_pool);
unmap_async_crq:
- dma_unmap_single(dev, async_q->msg_token,
- async_q->size * sizeof(*async_q->msgs), DMA_BIDIRECTIONAL);
-free_async_crq:
- free_page((unsigned long)async_q->msgs);
+ ibmvfc_free_queue(vhost, async_q);
nomem:
LEAVE;
return -ENOMEM;
@@ -5154,6 +5931,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
struct Scsi_Host *shost;
struct device *dev = &vdev->dev;
int rc = -ENOMEM;
+ unsigned int max_scsi_queues = IBMVFC_MAX_SCSI_QUEUES;
ENTER;
shost = scsi_host_alloc(&driver_template, sizeof(*vhost));
@@ -5169,17 +5947,23 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
shost->max_sectors = IBMVFC_MAX_SECTORS;
shost->max_cmd_len = IBMVFC_MAX_CDB_LEN;
shost->unique_id = shost->host_no;
+ shost->nr_hw_queues = mq_enabled ? min(max_scsi_queues, nr_scsi_hw_queues) : 1;
vhost = shost_priv(shost);
- INIT_LIST_HEAD(&vhost->sent);
- INIT_LIST_HEAD(&vhost->free);
INIT_LIST_HEAD(&vhost->targets);
+ INIT_LIST_HEAD(&vhost->purge);
sprintf(vhost->name, IBMVFC_NAME);
vhost->host = shost;
vhost->dev = dev;
vhost->partition_number = -1;
vhost->log_level = log_level;
vhost->task_set = 1;
+
+ vhost->mq_enabled = mq_enabled;
+ vhost->client_scsi_channels = min(shost->nr_hw_queues, nr_scsi_channels);
+ vhost->using_channels = 0;
+ vhost->do_enquiry = 1;
+
strcpy(vhost->partition_name, "UNKNOWN");
init_waitqueue_head(&vhost->work_wait_q);
init_waitqueue_head(&vhost->init_wait_q);
@@ -5204,13 +5988,8 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto kill_kthread;
}
- if ((rc = ibmvfc_init_event_pool(vhost))) {
- dev_err(dev, "Couldn't initialize event pool. rc=%d\n", rc);
- goto release_crq;
- }
-
if ((rc = scsi_add_host(shost, dev)))
- goto release_event_pool;
+ goto release_crq;
fc_host_dev_loss_tmo(shost) = IBMVFC_DEV_LOSS_TMO;
@@ -5220,6 +5999,12 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
goto remove_shost;
}
+ if (vhost->mq_enabled) {
+ rc = ibmvfc_init_sub_crqs(vhost);
+ if (rc)
+ dev_warn(dev, "Failed to allocate Sub-CRQs. rc=%d\n", rc);
+ }
+
if (shost_to_fc_host(shost)->rqst_q)
blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1);
dev_set_drvdata(dev, vhost);
@@ -5233,8 +6018,6 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
remove_shost:
scsi_remove_host(shost);
-release_event_pool:
- ibmvfc_free_event_pool(vhost);
release_crq:
ibmvfc_release_crq_queue(vhost);
kill_kthread:
@@ -5258,6 +6041,7 @@ out:
static int ibmvfc_remove(struct vio_dev *vdev)
{
struct ibmvfc_host *vhost = dev_get_drvdata(&vdev->dev);
+ LIST_HEAD(purge);
unsigned long flags;
ENTER;
@@ -5268,15 +6052,17 @@ static int ibmvfc_remove(struct vio_dev *vdev)
spin_unlock_irqrestore(vhost->host->host_lock, flags);
ibmvfc_wait_while_resetting(vhost);
- ibmvfc_release_crq_queue(vhost);
kthread_stop(vhost->work_thread);
fc_remove_host(vhost->host);
scsi_remove_host(vhost->host);
spin_lock_irqsave(vhost->host->host_lock, flags);
ibmvfc_purge_requests(vhost, DID_ERROR);
+ list_splice_init(&vhost->purge, &purge);
spin_unlock_irqrestore(vhost->host->host_lock, flags);
- ibmvfc_free_event_pool(vhost);
+ ibmvfc_complete_purge(&purge);
+ ibmvfc_release_sub_crqs(vhost);
+ ibmvfc_release_crq_queue(vhost);
ibmvfc_free_mem(vhost);
spin_lock(&ibmvfc_driver_lock);
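Several hunks above (the ibmvfc_do_work reset/reenable cases and ibmvfc_remove) repeat the same purge handling; a condensed sketch of the pattern, with a hypothetical function name:

static void example_drain_purge_list(struct ibmvfc_host *vhost)
{
	LIST_HEAD(purge);
	unsigned long flags;

	/* Detach the purge list while holding the host lock... */
	spin_lock_irqsave(vhost->host->host_lock, flags);
	list_splice_init(&vhost->purge, &purge);
	spin_unlock_irqrestore(vhost->host->host_lock, flags);

	/* ...then complete the purged events with the lock dropped. */
	ibmvfc_complete_purge(&purge);
}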
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index 9d58cfd774d3..19dcec3ae9ba 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.h
+++ b/drivers/scsi/ibmvscsi/ibmvfc.h
@@ -41,6 +41,12 @@
#define IBMVFC_DEFAULT_LOG_LEVEL 2
#define IBMVFC_MAX_CDB_LEN 16
#define IBMVFC_CLS3_ERROR 0
+#define IBMVFC_MQ 1
+#define IBMVFC_SCSI_CHANNELS 8
+#define IBMVFC_MAX_SCSI_QUEUES 16
+#define IBMVFC_SCSI_HW_QUEUES 8
+#define IBMVFC_MIG_NO_SUB_TO_CRQ 0
+#define IBMVFC_MIG_NO_N_TO_M 0
/*
* Ensure we have resources for ERP and initialization:
@@ -645,11 +651,10 @@ struct ibmvfc_crq {
volatile __be64 ioba;
} __packed __aligned(8);
-struct ibmvfc_crq_queue {
- struct ibmvfc_crq *msgs;
- int size, cur;
- dma_addr_t msg_token;
-};
+struct ibmvfc_sub_crq {
+ struct ibmvfc_crq crq;
+ __be64 reserved[2];
+} __packed __aligned(8);
enum ibmvfc_ae_link_state {
IBMVFC_AE_LS_LINK_UP = 0x01,
@@ -678,12 +683,6 @@ struct ibmvfc_async_crq {
__be64 reserved;
} __packed __aligned(8);
-struct ibmvfc_async_crq_queue {
- struct ibmvfc_async_crq *msgs;
- int size, cur;
- dma_addr_t msg_token;
-};
-
union ibmvfc_iu {
struct ibmvfc_mad_common mad_common;
struct ibmvfc_npiv_login_mad npiv_login;
@@ -738,13 +737,16 @@ struct ibmvfc_target {
/* a unit of work for the hosting partition */
struct ibmvfc_event {
- struct list_head queue;
+ struct list_head queue_list;
+ struct list_head cancel;
struct ibmvfc_host *vhost;
+ struct ibmvfc_queue *queue;
struct ibmvfc_target *tgt;
struct scsi_cmnd *cmnd;
atomic_t free;
union ibmvfc_iu *xfer_iu;
- void (*done) (struct ibmvfc_event *);
+ void (*done)(struct ibmvfc_event *evt);
+ void (*_done)(struct ibmvfc_event *evt);
struct ibmvfc_crq crq;
union ibmvfc_iu iu;
union ibmvfc_iu *sync_iu;
@@ -753,6 +755,7 @@ struct ibmvfc_event {
struct completion comp;
struct completion *eh_comp;
struct timer_list timer;
+ u16 hwq;
};
/* a pool of event structs for use */
@@ -763,6 +766,49 @@ struct ibmvfc_event_pool {
dma_addr_t iu_token;
};
+enum ibmvfc_msg_fmt {
+ IBMVFC_CRQ_FMT = 0,
+ IBMVFC_ASYNC_FMT,
+ IBMVFC_SUB_CRQ_FMT,
+};
+
+union ibmvfc_msgs {
+ void *handle;
+ struct ibmvfc_crq *crq;
+ struct ibmvfc_async_crq *async;
+ struct ibmvfc_sub_crq *scrq;
+};
+
+struct ibmvfc_queue {
+ union ibmvfc_msgs msgs;
+ dma_addr_t msg_token;
+ enum ibmvfc_msg_fmt fmt;
+ int size, cur;
+ spinlock_t _lock;
+ spinlock_t *q_lock;
+
+ struct ibmvfc_event_pool evt_pool;
+ struct list_head sent;
+ struct list_head free;
+ spinlock_t l_lock;
+
+ union ibmvfc_iu cancel_rsp;
+
+ /* Sub-CRQ fields */
+ struct ibmvfc_host *vhost;
+ unsigned long cookie;
+ unsigned long vios_cookie;
+ unsigned long hw_irq;
+ unsigned long irq;
+ unsigned long hwq_id;
+ char name[32];
+};
+
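Illustrative only: a consumer of a CRQ-format queue would walk the ring through the msgs union using the size/cur cursor above. The valid-flag test below is an assumption, since the full ibmvfc_crq layout is not shown in this hunk.

static struct ibmvfc_crq *example_next_crq_entry(struct ibmvfc_queue *q)
{
	struct ibmvfc_crq *crq = &q->msgs.crq[q->cur];

	if (!(crq->valid & 0x80))	/* assumed "entry valid" marker */
		return NULL;

	if (++q->cur == q->size)	/* wrap the ring cursor */
		q->cur = 0;

	return crq;
}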
+struct ibmvfc_scsi_channels {
+ struct ibmvfc_queue *scrqs;
+ unsigned int active_queues;
+};
+
enum ibmvfc_host_action {
IBMVFC_HOST_ACTION_NONE = 0,
IBMVFC_HOST_ACTION_RESET,
@@ -797,26 +843,29 @@ struct ibmvfc_host {
enum ibmvfc_host_action action;
#define IBMVFC_NUM_TRACE_INDEX_BITS 8
#define IBMVFC_NUM_TRACE_ENTRIES (1 << IBMVFC_NUM_TRACE_INDEX_BITS)
+#define IBMVFC_TRACE_INDEX_MASK (IBMVFC_NUM_TRACE_ENTRIES - 1)
#define IBMVFC_TRACE_SIZE (sizeof(struct ibmvfc_trace_entry) * IBMVFC_NUM_TRACE_ENTRIES)
struct ibmvfc_trace_entry *trace;
- u32 trace_index:IBMVFC_NUM_TRACE_INDEX_BITS;
+ atomic_t trace_index;
int num_targets;
struct list_head targets;
- struct list_head sent;
- struct list_head free;
+ struct list_head purge;
struct device *dev;
- struct ibmvfc_event_pool pool;
struct dma_pool *sg_pool;
mempool_t *tgt_pool;
- struct ibmvfc_crq_queue crq;
- struct ibmvfc_async_crq_queue async_crq;
+ struct ibmvfc_queue crq;
+ struct ibmvfc_queue async_crq;
+ struct ibmvfc_scsi_channels scsi_scrqs;
struct ibmvfc_npiv_login login_info;
union ibmvfc_npiv_login_data *login_buf;
dma_addr_t login_buf_dma;
+ struct ibmvfc_channel_setup *channel_setup_buf;
+ dma_addr_t channel_setup_dma;
int disc_buf_sz;
int log_level;
struct ibmvfc_discover_targets_entry *disc_buf;
struct mutex passthru_mutex;
+ int max_vios_scsi_channels;
int task_set;
int init_retries;
int discovery_threads;
@@ -826,6 +875,10 @@ struct ibmvfc_host {
int delay_init;
int scan_complete;
int logged_in;
+ int mq_enabled;
+ int using_channels;
+ int do_enquiry;
+ int client_scsi_channels;
int aborting_passthru;
int events_to_log;
#define IBMVFC_AE_LINKUP 0x0001
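The trace_index conversion from a lock-protected bitfield to an atomic plus IBMVFC_TRACE_INDEX_MASK suggests producers now claim trace slots lock-free, roughly as sketched below (an assumption for illustration; the producer side is not part of this hunk):

static inline struct ibmvfc_trace_entry *
example_next_trace_slot(struct ibmvfc_host *vhost)
{
	/* Post-increment wraps naturally; the mask folds it into the
	 * power-of-two trace array. */
	unsigned int i = atomic_inc_return(&vhost->trace_index) &
			 IBMVFC_TRACE_INDEX_MASK;

	return &vhost->trace[i];
}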
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index ca16ef45d8dc..814acc57069d 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -1315,15 +1315,15 @@ static int initio_state_1(struct initio_host * host)
}
if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
active_tc->flags |= TCF_WDTR_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo); /* Extended msg length */
- outb(3, host->addr + TUL_SFifo); /* Sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* Sync request */
outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
} else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
active_tc->flags |= TCF_SYNC_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* extended msg length */
- outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
}
@@ -1409,16 +1409,16 @@ static int initio_state_3(struct initio_host * host)
case MSG_OUT: /* Message out phase */
if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
} else {
active_tc->flags |= TCF_SYNC_DONE;
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo); /* ext. msg len */
- outb(1, host->addr + TUL_SFifo); /* sync request */
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo); /* sync request */
outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
@@ -1479,7 +1479,7 @@ static int initio_state_4(struct initio_host * host)
return -1;
return 6;
} else {
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
@@ -1616,7 +1616,7 @@ static int initio_state_6(struct initio_host * host)
break;
case MSG_OUT: /* Message out phase */
- outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
+ outb(NOP, host->addr + TUL_SFifo); /* msg nop */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
if (wait_tulip(host) == -1)
return -1;
@@ -1789,9 +1789,9 @@ int initio_status_msg(struct initio_host * host)
if (host->phase == MSG_OUT) {
if (host->jsstatus0 & TSS_PAR_ERROR)
- outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo);
else
- outb(MSG_NOP, host->addr + TUL_SFifo);
+ outb(NOP, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -1802,7 +1802,7 @@ int initio_status_msg(struct initio_host * host)
return -1;
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_PARITY, host->addr + TUL_SFifo);
+ outb(MSG_PARITY_ERROR, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -1815,7 +1815,8 @@ int initio_status_msg(struct initio_host * host)
return initio_wait_done_disc(host);
}
- if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
+ if (msg == LINKED_CMD_COMPLETE ||
+ msg == LINKED_FLG_CMD_COMPLETE) {
if ((scb->tastat & 0x18) == 0x10)
return initio_msgin_accept(host);
}
@@ -1930,7 +1931,8 @@ int int_initio_resel(struct initio_host * host)
return -1;
msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
- if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
+ if (msg < SIMPLE_QUEUE_TAG || msg > ORDERED_QUEUE_TAG)
+ /* Is simple Tag */
goto no_tag;
if (initio_msgin_accept(host) == -1)
@@ -2010,7 +2012,7 @@ static int initio_msgout_abort_targ(struct initio_host * host)
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_ABORT, host->addr + TUL_SFifo);
+ outb(ABORT_TASK_SET, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
@@ -2033,7 +2035,7 @@ static int initio_msgout_abort_tag(struct initio_host * host)
if (host->phase != MSG_OUT)
return initio_bad_seq(host);
- outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
+ outb(ABORT_TASK, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
@@ -2059,15 +2061,15 @@ static int initio_msgin(struct initio_host * host)
return -1;
switch (inb(host->addr + TUL_SFifo)) {
- case MSG_DISC: /* Disconnect msg */
+ case DISCONNECT: /* Disconnect msg */
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
- case MSG_SDP:
- case MSG_RESTORE:
- case MSG_NOP:
+ case SAVE_POINTERS:
+ case RESTORE_POINTERS:
+ case NOP:
initio_msgin_accept(host);
break;
- case MSG_REJ: /* Clear ATN first */
+ case MESSAGE_REJECT: /* Clear ATN first */
outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
host->addr + TUL_SSignal);
active_tc = host->active_tc;
@@ -2076,13 +2078,13 @@ static int initio_msgin(struct initio_host * host)
host->addr + TUL_SSignal);
initio_msgin_accept(host);
break;
- case MSG_EXTEND: /* extended msg */
+ case EXTENDED_MESSAGE: /* extended msg */
initio_msgin_extend(host);
break;
- case MSG_IGNOREWIDE:
+ case IGNORE_WIDE_RESIDUE:
initio_msgin_accept(host);
break;
- case MSG_COMP:
+ case COMMAND_COMPLETE:
outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
return initio_wait_done_disc(host);
@@ -2104,7 +2106,7 @@ static int initio_msgout_reject(struct initio_host * host)
return -1;
if (host->phase == MSG_OUT) {
- outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
+ outb(MESSAGE_REJECT, host->addr + TUL_SFifo); /* Msg reject */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -2113,7 +2115,7 @@ static int initio_msgout_reject(struct initio_host * host)
static int initio_msgout_ide(struct initio_host * host)
{
- outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
+ outb(INITIATOR_ERROR, host->addr + TUL_SFifo); /* Initiator Detected Error */
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
}
@@ -2167,9 +2169,9 @@ static int initio_msgin_extend(struct initio_host * host)
initio_sync_done(host);
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(3, host->addr + TUL_SFifo);
- outb(1, host->addr + TUL_SFifo);
+ outb(EXTENDED_SDTR, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(host->msg[3], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
@@ -2199,9 +2201,9 @@ static int initio_msgin_extend(struct initio_host * host)
if (initio_msgin_accept(host) != MSG_OUT)
return host->phase;
/* WDTR msg out */
- outb(MSG_EXTEND, host->addr + TUL_SFifo);
+ outb(EXTENDED_MESSAGE, host->addr + TUL_SFifo);
outb(2, host->addr + TUL_SFifo);
- outb(3, host->addr + TUL_SFifo);
+ outb(EXTENDED_WDTR, host->addr + TUL_SFifo);
outb(host->msg[2], host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return wait_tulip(host);
@@ -2391,7 +2393,7 @@ int initio_bus_device_reset(struct initio_host * host)
}
tmp = tmp->next;
}
- outb(MSG_DEVRST, host->addr + TUL_SFifo);
+ outb(TARGET_RESET, host->addr + TUL_SFifo);
outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
return initio_wait_disc(host);
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index 219b901bdc25..9fd010cf1f8a 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -433,31 +433,6 @@ struct scsi_ctrl_blk {
#define TARGET_BUSY 0x08
#define INI_QUEUE_FULL 0x28
-/* SCSI MESSAGE */
-#define MSG_COMP 0x00
-#define MSG_EXTEND 0x01
-#define MSG_SDP 0x02
-#define MSG_RESTORE 0x03
-#define MSG_DISC 0x04
-#define MSG_IDE 0x05
-#define MSG_ABORT 0x06
-#define MSG_REJ 0x07
-#define MSG_NOP 0x08
-#define MSG_PARITY 0x09
-#define MSG_LINK_COMP 0x0A
-#define MSG_LINK_FLAG 0x0B
-#define MSG_DEVRST 0x0C
-#define MSG_ABORT_TAG 0x0D
-
-/* Queue tag msg: Simple_quque_tag, Head_of_queue_tag, Ordered_queue_tag */
-#define MSG_STAG 0x20
-#define MSG_HTAG 0x21
-#define MSG_OTAG 0x22
-
-#define MSG_IGNOREWIDE 0x23
-
-#define MSG_IDENT 0x80
-
/***********************************************************************
Target Device Control Structure
**********************************************************************/
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 2e6077c502fc..1a3c534826ba 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1045,10 +1045,10 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ha = (ips_ha_t *) SC->device->host->hostdata;
if (!ha)
- return (1);
+ goto out_error;
if (!ha->active)
- return (DID_ERROR);
+ goto out_error;
if (ips_is_passthru(SC)) {
if (ha->copp_waitlist.count == IPS_MAX_IOCTL_QUEUE) {
@@ -1124,6 +1124,11 @@ static int ips_queue_lck(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *)
ips_next(ha, IPS_INTR_IORL);
return (0);
+out_error:
+ SC->result = DID_ERROR << 16;
+ done(SC);
+
+ return (0);
}
static DEF_SCSI_QCMD(ips_queue)
diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c
index 1df45f028ea7..448a8c31ba35 100644
--- a/drivers/scsi/isci/port.c
+++ b/drivers/scsi/isci/port.c
@@ -164,7 +164,8 @@ static void isci_port_bc_change_received(struct isci_host *ihost,
"%s: isci_phy = %p, sas_phy = %p\n",
__func__, iphy, &iphy->sas_phy);
- ihost->sas_ha.notify_port_event(&iphy->sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(&iphy->sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
sci_port_bcn_enable(iport);
}
@@ -223,8 +224,8 @@ static void isci_port_link_up(struct isci_host *isci_host,
/* Notify libsas that we have an address frame, if indeed
* we've found an SSP, SMP, or STP target */
if (success)
- isci_host->sas_ha.notify_port_event(&iphy->sas_phy,
- PORTE_BYTES_DMAED);
+ sas_notify_port_event(&iphy->sas_phy,
+ PORTE_BYTES_DMAED, GFP_ATOMIC);
}
@@ -270,8 +271,8 @@ static void isci_port_link_down(struct isci_host *isci_host,
* isci_port_deformed and isci_dev_gone functions.
*/
sas_phy_disconnected(&isci_phy->sas_phy);
- isci_host->sas_ha.notify_phy_event(&isci_phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&isci_phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
dev_dbg(&isci_host->pdev->dev,
"%s: isci_port = %p - Done\n", __func__, isci_port);
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 6e0817941fa7..bee16850b236 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -2103,8 +2103,6 @@ sci_io_request_frame_handler(struct isci_request *ireq,
static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq,
u32 completion_code)
{
- enum sci_status status = SCI_SUCCESS;
-
switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -2148,7 +2146,7 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
break;
}
- return status;
+ return SCI_SUCCESS;
}
static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
diff --git a/drivers/scsi/libsas/sas_event.c b/drivers/scsi/libsas/sas_event.c
index a1852f6c042b..f703115e7a25 100644
--- a/drivers/scsi/libsas/sas_event.c
+++ b/drivers/scsi/libsas/sas_event.c
@@ -109,7 +109,8 @@ void sas_enable_revalidation(struct sas_ha_struct *ha)
sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
port_phy_el);
- ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_KERNEL);
}
mutex_unlock(&ha->disco_mutex);
}
@@ -131,15 +132,16 @@ static void sas_phy_event_worker(struct work_struct *work)
sas_free_event(ev);
}
-static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
+int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags)
{
- struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ struct asd_sas_event *ev;
int ret;
BUG_ON(event >= PORT_NUM_EVENTS);
- ev = sas_alloc_event(phy);
+ ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
return -ENOMEM;
@@ -151,16 +153,18 @@ static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
return ret;
}
+EXPORT_SYMBOL_GPL(sas_notify_port_event);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags)
{
- struct asd_sas_event *ev;
struct sas_ha_struct *ha = phy->ha;
+ struct asd_sas_event *ev;
int ret;
BUG_ON(event >= PHY_NUM_EVENTS);
- ev = sas_alloc_event(phy);
+ ev = sas_alloc_event(phy, gfp_flags);
if (!ev)
return -ENOMEM;
@@ -172,11 +176,4 @@ int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
return ret;
}
-
-int sas_init_events(struct sas_ha_struct *sas_ha)
-{
- sas_ha->notify_port_event = sas_notify_port_event;
- sas_ha->notify_phy_event = sas_notify_phy_event;
-
- return 0;
-}
+EXPORT_SYMBOL_GPL(sas_notify_phy_event);
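With sas_notify_port_event()/sas_notify_phy_event() exported and taking an explicit gfp_t, an LLDD caller now looks roughly like the sketch below (hypothetical functions, mirroring the isci conversions earlier in this diff): hard-IRQ paths pass GFP_ATOMIC, process context may pass GFP_KERNEL.

static void example_lldd_link_up(struct asd_sas_phy *sas_phy)
{
	/* Called from the interrupt path, so no sleeping allocations. */
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}

static void example_lldd_link_down(struct asd_sas_phy *sas_phy)
{
	sas_phy_disconnected(sas_phy);
	sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}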
diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c
index 21c43b18d5d5..2b0f98ca6ec3 100644
--- a/drivers/scsi/libsas/sas_init.c
+++ b/drivers/scsi/libsas/sas_init.c
@@ -123,12 +123,6 @@ int sas_register_ha(struct sas_ha_struct *sas_ha)
goto Undo_phys;
}
- error = sas_init_events(sas_ha);
- if (error) {
- pr_notice("couldn't start event thread:%d\n", error);
- goto Undo_ports;
- }
-
error = -ENOMEM;
snprintf(name, sizeof(name), "%s_event_q", dev_name(sas_ha->dev));
sas_ha->event_q = create_singlethread_workqueue(name);
@@ -410,7 +404,8 @@ void sas_resume_ha(struct sas_ha_struct *ha)
if (phy->suspended) {
dev_warn(&phy->phy->dev, "resume timeout\n");
- sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT);
+ sas_notify_phy_event(phy, PHYE_RESUME_TIMEOUT,
+ GFP_KERNEL);
}
}
@@ -590,16 +585,15 @@ sas_domain_attach_transport(struct sas_domain_function_template *dft)
}
EXPORT_SYMBOL_GPL(sas_domain_attach_transport);
-
-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy,
+ gfp_t gfp_flags)
{
struct asd_sas_event *event;
- gfp_t flags = in_interrupt() ? GFP_ATOMIC : GFP_KERNEL;
struct sas_ha_struct *sas_ha = phy->ha;
struct sas_internal *i =
to_sas_internal(sas_ha->core.shost->transportt);
- event = kmem_cache_zalloc(sas_event_cache, flags);
+ event = kmem_cache_zalloc(sas_event_cache, gfp_flags);
if (!event)
return NULL;
@@ -610,7 +604,8 @@ struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy)
if (cmpxchg(&phy->in_shutdown, 0, 1) == 0) {
pr_notice("The phy%d bursting events, shut it down.\n",
phy->id);
- sas_notify_phy_event(phy, PHYE_SHUTDOWN);
+ sas_notify_phy_event(phy, PHYE_SHUTDOWN,
+ gfp_flags);
}
} else {
/* Do not support PHY control, stop allocating events */
diff --git a/drivers/scsi/libsas/sas_internal.h b/drivers/scsi/libsas/sas_internal.h
index 1f1d01901978..d7a1fb5c10c6 100644
--- a/drivers/scsi/libsas/sas_internal.h
+++ b/drivers/scsi/libsas/sas_internal.h
@@ -48,13 +48,12 @@ int sas_show_oob_mode(enum sas_oob_mode oob_mode, char *buf);
int sas_register_phys(struct sas_ha_struct *sas_ha);
void sas_unregister_phys(struct sas_ha_struct *sas_ha);
-struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy);
+struct asd_sas_event *sas_alloc_event(struct asd_sas_phy *phy, gfp_t gfp_flags);
void sas_free_event(struct asd_sas_event *event);
int sas_register_ports(struct sas_ha_struct *sas_ha);
void sas_unregister_ports(struct sas_ha_struct *sas_ha);
-int sas_init_events(struct sas_ha_struct *sas_ha);
void sas_disable_revalidation(struct sas_ha_struct *ha);
void sas_enable_revalidation(struct sas_ha_struct *ha);
void __sas_drain_work(struct sas_ha_struct *ha);
@@ -77,7 +76,8 @@ int sas_smp_phy_control(struct domain_device *dev, int phy_id,
enum phy_func phy_func, struct sas_phy_linkrates *);
int sas_smp_get_phy_events(struct sas_phy *phy);
-int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event);
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t flags);
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port);
struct domain_device *sas_find_dev_by_rphy(struct sas_rphy *rphy);
struct domain_device *sas_ex_to_ata(struct domain_device *ex_dev, int phy_id);
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a54c8da30273..6ba5fa08c47a 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -779,6 +779,9 @@ struct lpfc_hba {
*/
#define HBA_FLOGI_ISSUED 0x100000 /* FLOGI was issued */
#define HBA_DEFER_FLOGI 0x800000 /* Defer FLOGI till read_sparm cmpl */
+#define HBA_NEEDS_CFG_PORT 0x2000000 /* SLI3 - needs a CONFIG_PORT mbox */
+#define HBA_HBEAT_INP 0x4000000 /* mbox HBEAT is in progress */
+#define HBA_HBEAT_TMO 0x8000000 /* HBEAT initiated after timeout */
uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
struct lpfc_dmabuf slim2p;
@@ -1135,7 +1138,6 @@ struct lpfc_hba {
unsigned long last_completion_time;
unsigned long skipped_hb;
struct timer_list hb_tmofunc;
- uint8_t hb_outstanding;
struct timer_list rrq_tmr;
enum hba_temp_state over_temp_state;
/*
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 4528166dee36..bdd9a29f4201 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1788,6 +1788,8 @@ lpfc_board_mode_store(struct device *dev, struct device_attribute *attr,
else if (strncmp(buf, "pci_bus_reset", sizeof("pci_bus_reset") - 1)
== 0)
status = lpfc_reset_pci_bus(phba);
+ else if (strncmp(buf, "heartbeat", sizeof("heartbeat") - 1) == 0)
+ lpfc_issue_hb_tmo(phba);
else if (strncmp(buf, "trunk", sizeof("trunk") - 1) == 0)
status = lpfc_set_trunking(phba, (char *)buf + sizeof("trunk"));
else
@@ -3441,11 +3443,8 @@ unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
-LPFC_ATTR(sli_mode, 0, 0, 3,
- "SLI mode selector:"
- " 0 - auto (SLI-3 if supported),"
- " 2 - select SLI-2 even on SLI-3 capable HBAs,"
- " 3 - select SLI-3");
+LPFC_ATTR(sli_mode, 3, 3, 3,
+ "SLI mode selector: 3 - select SLI-3");
LPFC_ATTR_R(enable_npiv, 1, 0, 1,
"Enable NPIV functionality");
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index eed6ea5e0722..b974d39d233b 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -5376,9 +5376,9 @@ lpfc_check_fwlog_support(struct lpfc_hba *phba)
ras_fwlog = &phba->ras_fwlog;
- if (ras_fwlog->ras_hwsupport == false)
+ if (!ras_fwlog->ras_hwsupport)
return -EACCES;
- else if (ras_fwlog->ras_enabled == false)
+ else if (!ras_fwlog->ras_enabled)
return -EPERM;
else
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index f78e52a18b0b..a0aad4896a45 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -255,7 +255,6 @@ void lpfc_nvmet_ctxbuf_post(struct lpfc_hba *phba,
int lpfc_nvmet_rcv_unsol_abort(struct lpfc_vport *vport,
struct fc_frame_header *fc_hdr);
void lpfc_nvmet_wqfull_process(struct lpfc_hba *phba, struct lpfc_queue *wq);
-void lpfc_sli_flush_nvme_rings(struct lpfc_hba *phba);
void lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba);
void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
uint16_t);
@@ -360,6 +359,8 @@ lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *,
void lpfc_mbox_timeout(struct timer_list *t);
void lpfc_mbox_timeout_handler(struct lpfc_hba *);
+int lpfc_issue_hb_mbox(struct lpfc_hba *phba);
+void lpfc_issue_hb_tmo(struct lpfc_hba *phba);
struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t);
struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
@@ -598,7 +599,8 @@ void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd,
void lpfc_io_ktime(struct lpfc_hba *phba, struct lpfc_io_buf *ncmd);
void lpfc_wqe_cmd_template(void);
void lpfc_nvmet_cmd_template(void);
-void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn);
+void lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ uint32_t stat, uint32_t param);
extern int lpfc_enable_nvmet_cnt;
extern unsigned long long lpfc_enable_nvmet[];
extern int lpfc_no_hba_reset_cnt;
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index ea07afcb750a..8ce13ef3cac3 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -77,6 +77,13 @@ struct lpfc_node_rrqs {
unsigned long xri_bitmap[XRI_BITMAP_ULONGS];
};
+enum lpfc_fc4_xpt_flags {
+ NLP_WAIT_FOR_UNREG = 0x1,
+ SCSI_XPT_REGD = 0x2,
+ NVME_XPT_REGD = 0x4,
+ NLP_XPT_HAS_HH = 0x8,
+};
+
struct lpfc_nodelist {
struct list_head nlp_listp;
struct lpfc_name nlp_portname;
@@ -134,15 +141,15 @@ struct lpfc_nodelist {
unsigned long *active_rrqs_xri_bitmap;
struct lpfc_scsicmd_bkt *lat_data; /* Latency data */
uint32_t fc4_prli_sent;
- uint32_t fc4_xpt_flags;
-#define NLP_WAIT_FOR_UNREG 0x1
-#define SCSI_XPT_REGD 0x2
-#define NVME_XPT_REGD 0x4
+ u32 upcall_flags;
+#define NLP_WAIT_FOR_LOGO 0x2
+ enum lpfc_fc4_xpt_flags fc4_xpt_flags;
uint32_t nvme_fb_size; /* NVME target's supported byte cnt */
#define NVME_FB_BIT_SHIFT 9 /* PRLI Rsp first burst in 512B units. */
uint32_t nlp_defer_did;
+ wait_queue_head_t *logo_waitq;
};
struct lpfc_node_rrq {
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 96c087b8b474..f0a758138ae8 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -732,7 +732,7 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
spin_unlock_irq(&phba->hbalock);
} else {
/* Because we asked f/w for NPIV it still expects us
- to call reg_vnpid atleast for the physcial host */
+ to call reg_vnpid at least for the physical host */
lpfc_printf_vlog(vport, KERN_WARNING,
LOG_ELS | LOG_VPORT,
"1817 Fabric does not support NPIV "
@@ -1428,6 +1428,9 @@ lpfc_els_abort_flogi(struct lpfc_hba *phba)
NULL);
}
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
spin_unlock_irq(&phba->hbalock);
return 0;
@@ -2815,9 +2818,9 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) cmdiocb->context1;
struct lpfc_vport *vport = ndlp->vport;
IOCB_t *irsp;
- struct lpfcMboxq *mbox;
unsigned long flags;
uint32_t skip_recovery = 0;
+ int wake_up_waiter = 0;
/* we pass cmdiocb to state machine which needs rspiocb as well */
cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -2825,6 +2828,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
irsp = &(rspiocb->iocb);
spin_lock_irq(&ndlp->lock);
ndlp->nlp_flag &= ~NLP_LOGO_SND;
+ if (ndlp->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ wake_up_waiter = 1;
+ ndlp->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ }
spin_unlock_irq(&ndlp->lock);
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD,
@@ -2884,32 +2891,14 @@ out:
lpfc_els_free_iocb(phba, cmdiocb);
lpfc_nlp_put(ndlp);
- /* If we are in pt2pt mode, we could rcv new S_ID on PLOGI */
- if ((vport->fc_flag & FC_PT2PT) &&
- !(vport->fc_flag & FC_PT2PT_PLOGI)) {
- phba->pport->fc_myDID = 0;
-
- if ((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) ||
- (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) {
- if (phba->nvmet_support)
- lpfc_nvmet_update_targetport(phba);
- else
- lpfc_nvme_update_localport(phba->pport);
- }
-
- mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (mbox) {
- lpfc_config_link(phba, mbox);
- mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
- mbox->vport = vport;
- if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) ==
- MBX_NOT_FINISHED) {
- mempool_free(mbox, phba->mbox_mem_pool);
- skip_recovery = 1;
- }
- }
- }
+ /* At this point, the LOGO processing is complete. NOTE: For a
+ * pt2pt topology, we are assuming the NPortID will only change
+ * on link up processing. For a LOGO / PLOGI initiated by the
+ * Initiator, we are assuming the NPortID is not going to change.
+ */
+ if (wake_up_waiter && ndlp->logo_waitq)
+ wake_up(ndlp->logo_waitq);
/*
* If the node is a target, the handling attempts to recover the port.
* For any other port type, the rpi is unregistered as an implicit
@@ -8141,6 +8130,9 @@ lpfc_els_timeout_handler(struct lpfc_vport *vport)
spin_unlock_irq(&phba->hbalock);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (!list_empty(&pring->txcmplq))
if (!(phba->pport->load_flag & FC_UNLOADING))
mod_timer(&vport->els_tmofunc,
@@ -8240,6 +8232,9 @@ lpfc_els_flush_cmd(struct lpfc_vport *vport)
lpfc_sli_issue_abort_iotag(phba, pring, piocb, NULL);
spin_unlock_irqrestore(&phba->hbalock, iflags);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (!list_empty(&abort_list))
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"3387 abort list for txq not empty\n");
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2b6b5fc671fe..48ca4a612f80 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -73,6 +73,16 @@ static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
static int lpfc_fcf_inuse(struct lpfc_hba *);
static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int
+lpfc_valid_xpt_node(struct lpfc_nodelist *ndlp)
+{
+ if (ndlp->nlp_fc4_type ||
+ ndlp->nlp_DID == Fabric_DID ||
+ ndlp->nlp_DID == NameServer_DID ||
+ ndlp->nlp_DID == FDMI_DID)
+ return 1;
+ return 0;
+}
/* The source of a terminate rport I/O is either a dev_loss_tmo
* event or a call to fc_remove_host. While the rport should be
* valid during these downcalls, the transport can call twice
@@ -1145,13 +1155,14 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
struct lpfc_vport *vport = pmb->vport;
LPFC_MBOXQ_t *sparam_mb;
struct lpfc_dmabuf *sparam_mp;
+ u16 status = pmb->u.mb.mbxStatus;
int rc;
- if (pmb->u.mb.mbxStatus)
- goto out;
-
mempool_free(pmb, phba->mbox_mem_pool);
+ if (status)
+ goto out;
+
/* don't perform discovery for SLI4 loopback diagnostic test */
if ((phba->sli_rev == LPFC_SLI_REV4) &&
!(phba->hba_flag & HBA_FCOE_MODE) &&
@@ -1214,12 +1225,10 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
out:
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
- "0306 CONFIG_LINK mbxStatus error x%x "
- "HBA state x%x\n",
- pmb->u.mb.mbxStatus, vport->port_state);
-sparam_out:
- mempool_free(pmb, phba->mbox_mem_pool);
+ "0306 CONFIG_LINK mbxStatus error x%x HBA state x%x\n",
+ status, vport->port_state);
+sparam_out:
lpfc_linkdown(phba);
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
@@ -4318,7 +4327,8 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* FCP and NVME Transport interface */
if ((old_state == NLP_STE_MAPPED_NODE ||
old_state == NLP_STE_UNMAPPED_NODE)) {
- if (ndlp->rport) {
+ if (ndlp->rport &&
+ lpfc_valid_xpt_node(ndlp)) {
vport->phba->nport_event_cnt++;
lpfc_unregister_remote_port(ndlp);
}
@@ -4340,10 +4350,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (new_state == NLP_STE_MAPPED_NODE ||
new_state == NLP_STE_UNMAPPED_NODE) {
- if (ndlp->nlp_fc4_type ||
- ndlp->nlp_DID == Fabric_DID ||
- ndlp->nlp_DID == NameServer_DID ||
- ndlp->nlp_DID == FDMI_DID) {
+ if (lpfc_valid_xpt_node(ndlp)) {
vport->phba->nport_event_cnt++;
/*
* Tell the fc transport about the port, if we haven't
@@ -5611,6 +5618,9 @@ lpfc_free_tx(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
}
spin_unlock_irq(&phba->hbalock);
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_ABORTED);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index ac67f420ec26..71f340dd4fbd 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -591,7 +591,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
phba->last_completion_time = jiffies;
/* Set up error attention (ERATT) polling timer */
mod_timer(&phba->eratt_poll,
@@ -1204,10 +1204,10 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
unsigned long drvr_flag;
spin_lock_irqsave(&phba->hbalock, drvr_flag);
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
- /* Check and reset heart-beat timer is necessary */
+ /* Check and reset heart-beat timer if necessary */
mempool_free(pmboxq, phba->mbox_mem_pool);
if (!(phba->pport->fc_flag & FC_OFFLINE_MODE) &&
!(phba->link_state == LPFC_HBA_ERROR) &&
@@ -1381,6 +1381,60 @@ static void lpfc_hb_mxp_handler(struct lpfc_hba *phba)
}
/**
+ * lpfc_issue_hb_mbox - Issues heart-beat mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * If a HB mbox is not already in progress, this routine will allocate

+ * a LPFC_MBOXQ_t, populate it with a MBX_HEARTBEAT (0x31) command,
+ * and issue it. The HBA_HBEAT_INP flag means the command is in progress.
+ **/
+int
+lpfc_issue_hb_mbox(struct lpfc_hba *phba)
+{
+ LPFC_MBOXQ_t *pmboxq;
+ int retval;
+
+ /* Is a Heartbeat mbox already in progress */
+ if (phba->hba_flag & HBA_HBEAT_INP)
+ return 0;
+
+ pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!pmboxq)
+ return -ENOMEM;
+
+ lpfc_heart_beat(phba, pmboxq);
+ pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
+ pmboxq->vport = phba->pport;
+ retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
+
+ if (retval != MBX_BUSY && retval != MBX_SUCCESS) {
+ mempool_free(pmboxq, phba->mbox_mem_pool);
+ return -ENXIO;
+ }
+ phba->hba_flag |= HBA_HBEAT_INP;
+
+ return 0;
+}
+
+/**
+ * lpfc_issue_hb_tmo - Signals heartbeat timer to issue mbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * When the heartbeat timer (every 5 sec) fires, the HBA_HBEAT_TMO flag
+ * forces a MBX_HEARTBEAT mbox command, regardless of the value of
+ * lpfc_enable_hba_heartbeat.
+ * If lpfc_enable_hba_heartbeat is set, the timeout routine will always
+ * try to issue a MBX_HEARTBEAT mbox command.
+ **/
+void
+lpfc_issue_hb_tmo(struct lpfc_hba *phba)
+{
+ if (phba->cfg_enable_hba_heartbeat)
+ return;
+ phba->hba_flag |= HBA_HBEAT_TMO;
+}
+
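The "Make sure HBA is alive" hunks scattered through this series all follow the same pattern; a condensed sketch with a hypothetical function name:

static void example_flush_and_check_hba(struct lpfc_hba *phba)
{
	/* Abort or flush outstanding I/O first (any of the abort paths in
	 * this series fits here). */
	lpfc_sli_flush_io_rings(phba);

	/* Make sure HBA is alive: have the heartbeat timer issue a
	 * MBX_HEARTBEAT on its next pop. */
	lpfc_issue_hb_tmo(phba);
}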
+/**
* lpfc_hb_timeout_handler - The HBA-timer timeout handler
* @phba: pointer to lpfc hba data structure.
*
@@ -1400,9 +1454,9 @@ void
lpfc_hb_timeout_handler(struct lpfc_hba *phba)
{
struct lpfc_vport **vports;
- LPFC_MBOXQ_t *pmboxq;
struct lpfc_dmabuf *buf_ptr;
- int retval, i;
+ int retval = 0;
+ int i, tmo;
struct lpfc_sli *psli = &phba->sli;
LIST_HEAD(completions);
@@ -1424,24 +1478,6 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
(phba->pport->fc_flag & FC_OFFLINE_MODE))
return;
- spin_lock_irq(&phba->pport->work_port_lock);
-
- if (time_after(phba->last_completion_time +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
- jiffies)) {
- spin_unlock_irq(&phba->pport->work_port_lock);
- if (!phba->hb_outstanding)
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- else
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
- return;
- }
- spin_unlock_irq(&phba->pport->work_port_lock);
-
if (phba->elsbuf_cnt &&
(phba->elsbuf_cnt == phba->elsbuf_prev_cnt)) {
spin_lock_irq(&phba->hbalock);
@@ -1461,37 +1497,43 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If there is no heart beat outstanding, issue a heartbeat command */
if (phba->cfg_enable_hba_heartbeat) {
- if (!phba->hb_outstanding) {
+ /* If IOs are completing, no need to issue a MBX_HEARTBEAT */
+ spin_lock_irq(&phba->pport->work_port_lock);
+ if (time_after(phba->last_completion_time +
+ msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
+ jiffies)) {
+ spin_unlock_irq(&phba->pport->work_port_lock);
+ if (phba->hba_flag & HBA_HBEAT_INP)
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ else
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ goto out;
+ }
+ spin_unlock_irq(&phba->pport->work_port_lock);
+
+ /* Check if a MBX_HEARTBEAT is already in progress */
+ if (phba->hba_flag & HBA_HBEAT_INP) {
+ /*
+ * If heart beat timeout called with HBA_HBEAT_INP set
+ * we need to give the hb mailbox cmd a chance to
+ * complete or TMO.
+ */
+ lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+ "0459 Adapter heartbeat still outstanding: "
+ "last compl time was %d ms.\n",
+ jiffies_to_msecs(jiffies
+ - phba->last_completion_time));
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ } else {
if ((!(psli->sli_flag & LPFC_SLI_MBOX_ACTIVE)) &&
(list_empty(&psli->mboxq))) {
- pmboxq = mempool_alloc(phba->mbox_mem_pool,
- GFP_KERNEL);
- if (!pmboxq) {
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 *
- LPFC_HB_MBOX_INTERVAL));
- return;
- }
- lpfc_heart_beat(phba, pmboxq);
- pmboxq->mbox_cmpl = lpfc_hb_mbox_cmpl;
- pmboxq->vport = phba->pport;
- retval = lpfc_sli_issue_mbox(phba, pmboxq,
- MBX_NOWAIT);
-
- if (retval != MBX_BUSY &&
- retval != MBX_SUCCESS) {
- mempool_free(pmboxq,
- phba->mbox_mem_pool);
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 *
- LPFC_HB_MBOX_INTERVAL));
- return;
+ retval = lpfc_issue_hb_mbox(phba);
+ if (retval) {
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ goto out;
}
phba->skipped_hb = 0;
- phba->hb_outstanding = 1;
} else if (time_before_eq(phba->last_completion_time,
phba->skipped_hb)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -1502,30 +1544,23 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
} else
phba->skipped_hb = jiffies;
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
- return;
- } else {
- /*
- * If heart beat timeout called with hb_outstanding set
- * we need to give the hb mailbox cmd a chance to
- * complete or TMO.
- */
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "0459 Adapter heartbeat still out"
- "standing:last compl time was %d ms.\n",
- jiffies_to_msecs(jiffies
- - phba->last_completion_time));
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_TIMEOUT));
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ goto out;
}
} else {
- mod_timer(&phba->hb_tmofunc,
- jiffies +
- msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
+ /* Check to see if we want to force a MBX_HEARTBEAT */
+ if (phba->hba_flag & HBA_HBEAT_TMO) {
+ retval = lpfc_issue_hb_mbox(phba);
+ if (retval)
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
+ else
+ tmo = (1000 * LPFC_HB_MBOX_TIMEOUT);
+ goto out;
+ }
+ tmo = (1000 * LPFC_HB_MBOX_INTERVAL);
}
+out:
+ mod_timer(&phba->hb_tmofunc, jiffies + msecs_to_jiffies(tmo));
}
/**
@@ -1830,9 +1865,19 @@ lpfc_sli4_port_sta_fn_reset(struct lpfc_hba *phba, int mbx_action,
/* need reset: attempt for port recovery */
if (en_rn_msg)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2887 Reset Needed: Attempting Port "
"Recovery...\n");
+
+ /* If called with LPFC_MBX_NO_WAIT, the HBA has been reset and is not
+ * functional, so we should clear the LPFC_SLI_ACTIVE flag.
+ */
+ if (mbx_action == LPFC_MBX_NO_WAIT) {
+ spin_lock_irq(&phba->hbalock);
+ phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
+ spin_unlock_irq(&phba->hbalock);
+ }
+
lpfc_offline_prep(phba, mbx_action);
lpfc_sli_flush_io_rings(phba);
lpfc_offline(phba);
@@ -2979,7 +3024,7 @@ lpfc_stop_hba_timers(struct lpfc_hba *phba)
del_timer_sync(&phba->rrq_tmr);
phba->hba_flag &= ~HBA_RRQ_ACTIVE;
}
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
switch (phba->pci_dev_grp) {
case LPFC_PCI_DEV_LP:
@@ -3592,7 +3637,11 @@ lpfc_offline(struct lpfc_hba *phba)
spin_unlock_irq(shost->host_lock);
}
lpfc_destroy_vport_work_array(phba, vports);
- __lpfc_cpuhp_remove(phba);
+ /* If OFFLINE flag is clear (i.e. unloading), cpuhp removal is handled
+ * in hba_unset
+ */
+ if (phba->pport->fc_flag & FC_OFFLINE_MODE)
+ __lpfc_cpuhp_remove(phba);
if (phba->cfg_xri_rebalancing)
lpfc_destroy_multixri_pools(phba);
@@ -6177,10 +6226,14 @@ lpfc_reset_hba(struct lpfc_hba *phba)
phba->link_state = LPFC_HBA_ERROR;
return;
}
- if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+
+ /* If not LPFC_SLI_ACTIVE, force all IO to be flushed */
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
- else
+ } else {
lpfc_offline_prep(phba, LPFC_MBX_NO_WAIT);
+ lpfc_sli_flush_io_rings(phba);
+ }
lpfc_offline(phba);
lpfc_sli_brdrestart(phba);
lpfc_online(phba);
@@ -10728,17 +10781,19 @@ lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
uint32_t intr_mode = LPFC_INTR_ERROR;
int retval;
+ /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+ retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (retval)
+ return intr_mode;
+ phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
+
if (cfg_mode == 2) {
- /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
- retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ /* Now, try to enable MSI-X interrupt mode */
+ retval = lpfc_sli_enable_msix(phba);
if (!retval) {
- /* Now, try to enable MSI-X interrupt mode */
- retval = lpfc_sli_enable_msix(phba);
- if (!retval) {
- /* Indicate initialization to MSI-X mode */
- phba->intr_type = MSIX;
- intr_mode = 2;
- }
+ /* Indicate initialization to MSI-X mode */
+ phba->intr_type = MSIX;
+ intr_mode = 2;
}
}
@@ -14122,15 +14177,32 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
int i;
int j = 0;
unsigned long rem_nsec;
+ struct lpfc_vport **vports;
+ /* Don't dump messages if we explicitly set log_verbose for the
+ * physical port or any vport.
+ */
if (phba->cfg_log_verbose)
return;
+ vports = lpfc_create_vport_work_array(phba);
+ if (vports != NULL) {
+ for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+ if (vports[i]->cfg_log_verbose) {
+ lpfc_destroy_vport_work_array(phba, vports);
+ return;
+ }
+ }
+ }
+ lpfc_destroy_vport_work_array(phba, vports);
+
if (atomic_cmpxchg(&phba->dbg_log_dmping, 0, 1) != 0)
return;
start_idx = (unsigned int)atomic_read(&phba->dbg_log_idx) % DBG_LOG_SZ;
dbg_cnt = (unsigned int)atomic_read(&phba->dbg_log_cnt);
+ if (!dbg_cnt)
+ goto out;
temp_idx = start_idx;
if (dbg_cnt >= DBG_LOG_SZ) {
dbg_cnt = DBG_LOG_SZ;
@@ -14160,6 +14232,7 @@ void lpfc_dmp_dbg(struct lpfc_hba *phba)
rem_nsec / 1000,
phba->dbg_log[temp_idx].log);
}
+out:
atomic_set(&phba->dbg_log_cnt, 0);
atomic_set(&phba->dbg_log_dmping, 0);
}
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 3414ffcb26fe..c03a7f12dd65 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -2409,7 +2409,7 @@ error:
/*
- * lpfc_sli4_dump_sfp_pagea0 - Dump sli4 read SFP Diagnostic.
+ * lpfc_sli4_dump_page_a0 - Dump sli4 read SFP Diagnostic.
* @phba: pointer to the hba structure containing.
* @mbox: pointer to lpfc mbox command to initialize.
*
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index 1ac855640fc5..135d8e8a42ba 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -250,6 +250,8 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
INIT_LIST_HEAD(&abort_list);
@@ -471,6 +473,15 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
*/
if (!(ndlp->nlp_type & NLP_FABRIC) &&
!(phba->nvmet_support)) {
+ /* Clear ndlp info, since follow up PRLI may have
+ * updated ndlp information
+ */
+ ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
+ ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
+ ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
+ ndlp->nlp_flag &= ~NLP_FIRSTBURST;
+
lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb,
ndlp, NULL);
return 1;
@@ -499,6 +510,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR);
ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR);
ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER;
ndlp->nlp_flag &= ~NLP_FIRSTBURST;
login_mbox = NULL;
@@ -1011,7 +1023,12 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ndlp->nlp_fc4_type |= NLP_FC4_NVME;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
- if (npr->prliType == PRLI_FCP_TYPE)
+
+ /* Fabric Controllers send FCP PRLI as an initiator but should
+ * not get recognized as FCP type and registered with transport.
+ */
+ if (npr->prliType == PRLI_FCP_TYPE &&
+ !(ndlp->nlp_type & NLP_FABRIC))
ndlp->nlp_fc4_type |= NLP_FC4_FCP;
}
if (rport) {
@@ -2034,6 +2051,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
* must complete PRLI.
*/
if (ndlp->nlp_type & NLP_FABRIC) {
+ ndlp->nlp_fc4_type &= ~NLP_FC4_FCP;
ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
}
@@ -2107,6 +2125,7 @@ lpfc_rcv_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!lpfc_rcv_prli_support_check(vport, ndlp, cmdiocb))
return ndlp->nlp_state;
+ lpfc_rcv_prli(vport, ndlp, cmdiocb);
lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp);
return ndlp->nlp_state;
}
diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c
index 39d147e251bf..4d819e52496a 100644
--- a/drivers/scsi/lpfc/lpfc_nvme.c
+++ b/drivers/scsi/lpfc/lpfc_nvme.c
@@ -458,7 +458,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
bf_set(wqe_xri_tag, &wqe->gen_req.wqe_com, genwqe->sli4_xritag);
/* Word 7 */
- bf_set(wqe_tmo, &wqe->gen_req.wqe_com, (vport->phba->fc_ratov-1));
+ bf_set(wqe_tmo, &wqe->gen_req.wqe_com, tmo);
bf_set(wqe_class, &wqe->gen_req.wqe_com, CLASS3);
bf_set(wqe_cmnd, &wqe->gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
bf_set(wqe_ct, &wqe->gen_req.wqe_com, SLI4_CT_RPI);
@@ -618,7 +618,7 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
ret = lpfc_nvme_gen_req(vport, bmp, pnvme_lsreq->rqstaddr,
pnvme_lsreq, gen_req_cmp, ndlp, 2,
- LPFC_NVME_LS_TIMEOUT, 0);
+ pnvme_lsreq->timeout, 0);
if (ret != WQE_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6052 NVMEx REQ: EXIT. issue ls wqe failed "
@@ -1850,6 +1850,10 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport,
spin_unlock(&lpfc_nbuf->buf_lock);
spin_unlock_irqrestore(&phba->hbalock, flags);
+
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (ret_val != WQE_SUCCESS) {
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"6137 Failed abts issue_wqe with status x%x "
@@ -2596,17 +2600,24 @@ lpfc_nvme_wait_for_io_drain(struct lpfc_hba *phba)
}
}
}
+
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
}
void
-lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
+lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn,
+ uint32_t stat, uint32_t param)
{
#if (IS_ENABLED(CONFIG_NVME_FC))
struct lpfc_io_buf *lpfc_ncmd;
struct nvmefc_fcp_req *nCmd;
- struct lpfc_nvme_fcpreq_priv *freqpriv;
+ struct lpfc_wcqe_complete wcqe;
+ struct lpfc_wcqe_complete *wcqep = &wcqe;
- if (!pwqeIn->context1) {
+ lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
+ if (!lpfc_ncmd) {
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
@@ -2616,31 +2627,29 @@ lpfc_nvme_cancel_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn)
lpfc_sli_release_iocbq(phba, pwqeIn);
return;
}
- lpfc_ncmd = (struct lpfc_io_buf *)pwqeIn->context1;
spin_lock(&lpfc_ncmd->buf_lock);
- if (!lpfc_ncmd->nvmeCmd) {
+ nCmd = lpfc_ncmd->nvmeCmd;
+ if (!nCmd) {
spin_unlock(&lpfc_ncmd->buf_lock);
lpfc_release_nvme_buf(phba, lpfc_ncmd);
return;
}
+ spin_unlock(&lpfc_ncmd->buf_lock);
- nCmd = lpfc_ncmd->nvmeCmd;
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_IOERR,
"6194 NVME Cancel xri %x\n",
lpfc_ncmd->cur_iocbq.sli4_xritag);
- nCmd->transferred_length = 0;
- nCmd->rcv_rsplen = 0;
- nCmd->status = NVME_SC_INTERNAL;
- freqpriv = nCmd->private;
- freqpriv->nvme_buf = NULL;
- lpfc_ncmd->nvmeCmd = NULL;
-
- spin_unlock(&lpfc_ncmd->buf_lock);
- nCmd->done(nCmd);
+ wcqep->word0 = 0;
+ bf_set(lpfc_wcqe_c_status, wcqep, stat);
+ wcqep->parameter = param;
+ wcqep->word3 = 0; /* xb is 0 */
/* Call release with XB=1 to queue the IO into the abort list. */
- lpfc_release_nvme_buf(phba, lpfc_ncmd);
+ if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
+ bf_set(lpfc_wcqe_c_xb, wcqep, 1);
+
+ (pwqeIn->wqe_cmpl)(phba, pwqeIn, wcqep);
#endif
}
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index a71df8788fff..bb2a4a0d1295 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1367,17 +1367,22 @@ static void
lpfc_nvmet_host_release(void *hosthandle)
{
struct lpfc_nodelist *ndlp = hosthandle;
- struct lpfc_hba *phba = NULL;
+ struct lpfc_hba *phba = ndlp->phba;
struct lpfc_nvmet_tgtport *tgtp;
- phba = ndlp->phba;
if (!phba->targetport || !phba->targetport->private)
return;
lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
- "6202 NVMET XPT releasing hosthandle x%px\n",
- hosthandle);
+ "6202 NVMET XPT releasing hosthandle x%px "
+ "DID x%x xflags x%x refcnt %d\n",
+ hosthandle, ndlp->nlp_DID, ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
+ spin_lock_irq(&ndlp->lock);
+ ndlp->fc4_xpt_flags &= ~NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+ lpfc_nlp_put(ndlp);
atomic_set(&tgtp->state, 0);
}
@@ -3644,15 +3649,33 @@ out:
void
lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
{
+ u32 ndlp_has_hh;
struct lpfc_nvmet_tgtport *tgtp;
- lpfc_printf_log(phba, KERN_INFO, LOG_NVME | LOG_NVME_ABTS,
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
"6203 Invalidating hosthandle x%px\n",
ndlp);
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
atomic_set(&tgtp->state, LPFC_NVMET_INV_HOST_ACTIVE);
+ spin_lock_irq(&ndlp->lock);
+ ndlp_has_hh = ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+
+ /* Do not invalidate any nodes that do not have a hosthandle.
+ * The host_release callback will cause a node reference
+ * count imbalance and a crash.
+ */
+ if (!ndlp_has_hh) {
+ lpfc_printf_log(phba, KERN_INFO,
+ LOG_NVME | LOG_NVME_ABTS | LOG_NVME_DISC,
+ "6204 Skip invalidate on node x%px DID x%x\n",
+ ndlp, ndlp->nlp_DID);
+ return;
+ }
+
#if (IS_ENABLED(CONFIG_NVME_TARGET_FC))
/* Need to get the nvmet_fc_target_port pointer here.*/
nvmet_fc_invalidate_host(phba->targetport, ndlp);
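The NLP_XPT_HAS_HH flag and the lpfc_nlp_get()/lpfc_nlp_put() pair in this file implement a one-reference-per-handle rule: the node takes exactly one extra reference when the nvmet transport is first handed the hosthandle, and drops it only in host_release. A minimal, generic sketch of that pattern follows; struct node and the helper names are invented for illustration and are not lpfc API.

#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct node {
	struct kref kref;
	spinlock_t lock;
	unsigned long flags;
#define NODE_HAS_HANDLE	0x1
};

static void node_release(struct kref *kref)
{
	kfree(container_of(kref, struct node, kref));
}

/* First time the handle is handed to the transport: take one reference. */
static void node_grab_handle_ref(struct node *n)
{
	spin_lock_irq(&n->lock);
	if (!(n->flags & NODE_HAS_HANDLE)) {
		n->flags |= NODE_HAS_HANDLE;
		kref_get(&n->kref);	/* one ref per outstanding handle */
	}
	spin_unlock_irq(&n->lock);
}

/* Transport's host_release callback: clear the flag, drop the reference. */
static void node_drop_handle_ref(struct node *n)
{
	spin_lock_irq(&n->lock);
	n->flags &= ~NODE_HAS_HANDLE;
	spin_unlock_irq(&n->lock);
	kref_put(&n->kref, node_release);
}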
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3b989f720937..a4d697373c71 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5479,6 +5479,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
lpfc_sli_abort_fcp_cmpl);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
if (ret_val != IOCB_SUCCESS) {
/* Indicate the IO is not being aborted by the driver. */
lpfc_cmd->waitq = NULL;
@@ -5849,6 +5852,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ u32 logit = LOG_FCP;
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
@@ -5880,8 +5884,10 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_LUN_RESET);
+ if (status != SUCCESS)
+ logit = LOG_TRACE_EVENT;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
"0713 SCSI layer issued Device Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@@ -5920,6 +5926,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
uint64_t lun_id = cmnd->device->lun;
struct lpfc_scsi_event_header scsi_event;
int status;
+ u32 logit = LOG_FCP;
+ unsigned long flags;
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
rdata = lpfc_rport_data_from_scsi_device(cmnd->device);
if (!rdata || !rdata->pnode) {
@@ -5938,10 +5947,10 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
"0722 Target Reset rport failure: rdata x%px\n", rdata);
if (pnode) {
- spin_lock_irq(&pnode->lock);
+ spin_lock_irqsave(&pnode->lock, flags);
pnode->nlp_flag &= ~NLP_NPR_ADISC;
pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
- spin_unlock_irq(&pnode->lock);
+ spin_unlock_irqrestore(&pnode->lock, flags);
}
lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
LPFC_CTX_TGT);
@@ -5959,8 +5968,42 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_send_taskmgmt(vport, cmnd, tgt_id, lun_id,
FCP_TARGET_RESET);
+ if (status != SUCCESS)
+ logit = LOG_TRACE_EVENT;
+ spin_lock_irqsave(&pnode->lock, flags);
+ if (status != SUCCESS &&
+ (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)) &&
+ !pnode->logo_waitq) {
+ pnode->logo_waitq = &waitq;
+ pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+ pnode->nlp_flag |= NLP_ISSUE_LOGO;
+ pnode->upcall_flags |= NLP_WAIT_FOR_LOGO;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ lpfc_unreg_rpi(vport, pnode);
+ wait_event_timeout(waitq,
+ (!(pnode->upcall_flags & NLP_WAIT_FOR_LOGO)),
+ msecs_to_jiffies(vport->cfg_devloss_tmo *
+ 1000));
+
+ if (pnode->upcall_flags & NLP_WAIT_FOR_LOGO) {
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ "0725 SCSI layer TGTRST failed & LOGO TMO "
+ " (%d, %llu) return x%x\n", tgt_id,
+ lun_id, status);
+ spin_lock_irqsave(&pnode->lock, flags);
+ pnode->upcall_flags &= ~NLP_WAIT_FOR_LOGO;
+ } else {
+ spin_lock_irqsave(&pnode->lock, flags);
+ }
+ pnode->logo_waitq = NULL;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ status = SUCCESS;
+ } else {
+ status = FAILED;
+ spin_unlock_irqrestore(&pnode->lock, flags);
+ }
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
"0723 SCSI layer issued Target Reset (%d, %llu) "
"return x%x\n", tgt_id, lun_id, status);
@@ -5996,6 +6039,7 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_scsi_event_header scsi_event;
int match;
int ret = SUCCESS, status, i;
+ u32 logit = LOG_FCP;
scsi_event.event_type = FC_REG_SCSI_EVENT;
scsi_event.subcategory = LPFC_EVENT_BUSRESET;
@@ -6056,8 +6100,10 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
status = lpfc_reset_flush_io_context(vport, 0, 0, LPFC_CTX_HOST);
if (status != SUCCESS)
ret = FAILED;
+ if (ret == FAILED)
+ logit = LOG_TRACE_EVENT;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, logit,
"0714 SCSI layer issued Bus Reset Data: x%x\n", ret);
return ret;
}
@@ -6086,7 +6132,7 @@ lpfc_host_reset_handler(struct scsi_cmnd *cmnd)
struct lpfc_hba *phba = vport->phba;
int rc, ret = SUCCESS;
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
"3172 SCSI layer issued Host Reset Data:\n");
lpfc_offline_prep(phba, LPFC_MBX_WAIT);
@@ -6662,6 +6708,7 @@ struct scsi_host_template lpfc_template = {
.info = lpfc_info,
.queuecommand = lpfc_queuecommand,
.eh_timed_out = fc_eh_timed_out,
+ .eh_should_retry_cmd = fc_eh_should_retry_cmd,
.eh_abort_handler = lpfc_abort_handler,
.eh_device_reset_handler = lpfc_device_reset_handler,
.eh_target_reset_handler = lpfc_target_reset_handler,
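The target-reset path above follows a common kernel idiom: an on-stack waitqueue published through the node, a flag marking the outstanding LOGO, and wait_event_timeout() bounded by the devloss window. A minimal sketch of the idiom under simplified, invented names (remote_node and WAIT_FOR_LOGO are stand-ins, not the lpfc fields):

#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>

#define WAIT_FOR_LOGO	0x1

struct remote_node {
	spinlock_t lock;
	unsigned long flags;
	wait_queue_head_t *logo_waitq;
};

static void wait_for_logo(struct remote_node *n, unsigned int tmo_ms)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq);
	unsigned long irqflags;

	spin_lock_irqsave(&n->lock, irqflags);
	n->logo_waitq = &waitq;		/* publish the on-stack waitqueue */
	n->flags |= WAIT_FOR_LOGO;
	spin_unlock_irqrestore(&n->lock, irqflags);

	/* ... kick off the LOGO / unregistration here ... */

	wait_event_timeout(waitq, !(n->flags & WAIT_FOR_LOGO),
			   msecs_to_jiffies(tmo_ms));

	spin_lock_irqsave(&n->lock, irqflags);
	n->flags &= ~WAIT_FOR_LOGO;	/* also clears the flag on timeout */
	n->logo_waitq = NULL;		/* never leave a dangling stack pointer */
	spin_unlock_irqrestore(&n->lock, irqflags);
}

/* Completion side: clear the flag and wake the sleeper, if any. */
static void logo_done(struct remote_node *n)
{
	unsigned long irqflags;

	spin_lock_irqsave(&n->lock, irqflags);
	n->flags &= ~WAIT_FOR_LOGO;
	if (n->logo_waitq)
		wake_up(n->logo_waitq);
	spin_unlock_irqrestore(&n->lock, irqflags);
}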
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 95caad764fb7..fa1a714a78f0 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1532,15 +1532,19 @@ lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
while (!list_empty(iocblist)) {
list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
- if (!piocb->iocb_cmpl) {
+ if (piocb->wqe_cmpl) {
if (piocb->iocb_flag & LPFC_IO_NVME)
- lpfc_nvme_cancel_iocb(phba, piocb);
+ lpfc_nvme_cancel_iocb(phba, piocb,
+ ulpstatus, ulpWord4);
else
lpfc_sli_release_iocbq(phba, piocb);
- } else {
+
+ } else if (piocb->iocb_cmpl) {
piocb->iocb.ulpStatus = ulpstatus;
piocb->iocb.un.ulpWord[4] = ulpWord4;
(piocb->iocb_cmpl) (phba, piocb, piocb);
+ } else {
+ lpfc_sli_release_iocbq(phba, piocb);
}
}
return;
@@ -3007,23 +3011,44 @@ lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
axchg->payload = nvmebuf->dbuf.virt;
INIT_LIST_HEAD(&axchg->list);
- if (phba->nvmet_support)
+ if (phba->nvmet_support) {
ret = lpfc_nvmet_handle_lsreq(phba, axchg);
- else
+ spin_lock_irq(&ndlp->lock);
+ if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
+ ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
+ spin_unlock_irq(&ndlp->lock);
+
+ /* This single reference holds the node valid until the
+ * nvmet transport calls host_release.
+ */
+ if (!lpfc_nlp_get(ndlp))
+ goto out_fail;
+
+ lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
+ "6206 NVMET unsol ls_req ndlp %p "
+ "DID x%x xflags x%x refcnt %d\n",
+ ndlp, ndlp->nlp_DID,
+ ndlp->fc4_xpt_flags,
+ kref_read(&ndlp->kref));
+ } else {
+ spin_unlock_irq(&ndlp->lock);
+ }
+ } else {
ret = lpfc_nvme_handle_lsreq(phba, axchg);
+ }
/* if zero, LS was successfully handled. If non-zero, LS not handled */
if (!ret)
return;
+out_fail:
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
"NVMe%s handler failed %d\n",
did, sid, oxid,
(phba->nvmet_support) ? "T" : "I", ret);
-out_fail:
-
/* recycle receive buffer */
lpfc_in_buf_free(phba, &nvmebuf->dbuf);
@@ -4221,6 +4246,8 @@ lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
spin_unlock_irq(&phba->hbalock);
}
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
@@ -4359,6 +4386,8 @@ lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
if (lpfc_readl(phba->HSregaddr, &status))
return 1;
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
/*
* Check status register every 100ms for 5 retries, then every
* 500ms for 5, then every 2.5 sec for 5, then reset board and
@@ -4687,6 +4716,7 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
/* perform board reset */
phba->fc_eventTag = 0;
phba->link_events = 0;
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
if (phba->pport) {
phba->pport->fc_myDID = 0;
phba->pport->fc_prevDID = 0;
@@ -5020,6 +5050,8 @@ lpfc_sli_chipset_init(struct lpfc_hba *phba)
return -EIO;
}
+ phba->hba_flag |= HBA_NEEDS_CFG_PORT;
+
/* Clear all interrupt enable conditions */
writel(0, phba->HCregaddr);
readl(phba->HCregaddr); /* flush */
@@ -5316,45 +5348,18 @@ int
lpfc_sli_hba_setup(struct lpfc_hba *phba)
{
uint32_t rc;
- int mode = 3, i;
+ int i;
int longs;
- switch (phba->cfg_sli_mode) {
- case 2:
- if (phba->cfg_enable_npiv) {
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "1824 NPIV enabled: Override sli_mode "
- "parameter (%d) to auto (0).\n",
- phba->cfg_sli_mode);
- break;
- }
- mode = 2;
- break;
- case 0:
- case 3:
- break;
- default:
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "1819 Unrecognized sli_mode parameter: %d.\n",
- phba->cfg_sli_mode);
-
- break;
+ /* Enable ISR already does config_port because of config_msi mbx */
+ if (phba->hba_flag & HBA_NEEDS_CFG_PORT) {
+ rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+ if (rc)
+ return -EIO;
+ phba->hba_flag &= ~HBA_NEEDS_CFG_PORT;
}
phba->fcp_embed_io = 0; /* SLI4 FC support only */
- rc = lpfc_sli_config_port(phba, mode);
-
- if (rc && phba->cfg_sli_mode == 3)
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
- "1820 Unable to select SLI-3. "
- "Not supported by adapter.\n");
- if (rc && mode != 2)
- rc = lpfc_sli_config_port(phba, 2);
- else if (rc && mode == 2)
- rc = lpfc_sli_config_port(phba, 3);
- if (rc)
- goto lpfc_sli_hba_setup_error;
-
/* Enable PCIe device Advanced Error Reporting (AER) if configured */
if (phba->cfg_aer_support == 1 && !(phba->hba_flag & HBA_AER_ENABLED)) {
rc = pci_enable_pcie_error_reporting(phba->pcidev);
@@ -7486,7 +7491,7 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
return;
if (bf_get(lpfc_sliport_status_dip, &reg_data))
- lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
"2904 Firmware Dump Image Present"
" on Adapter");
}
@@ -8041,7 +8046,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
- phba->hb_outstanding = 0;
+ phba->hba_flag &= ~(HBA_HBEAT_INP | HBA_HBEAT_TMO);
phba->last_completion_time = jiffies;
/* start eq_delay heartbeat */
@@ -8291,8 +8296,10 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
struct lpfc_sli *psli = &phba->sli;
- /* If the mailbox completed, process the completion and return */
- if (lpfc_sli4_process_missed_mbox_completions(phba))
+ /* If the mailbox completed, process the completion */
+ lpfc_sli4_process_missed_mbox_completions(phba);
+
+ if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
return;
if (pmbox != NULL)
@@ -8333,8 +8340,6 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
psli->sli_flag &= ~LPFC_SLI_ACTIVE;
spin_unlock_irq(&phba->hbalock);
- lpfc_sli_abort_fcp_rings(phba);
-
lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
"0345 Resetting board due to mailbox timeout\n");
@@ -11215,6 +11220,9 @@ lpfc_sli_host_down(struct lpfc_vport *vport)
}
spin_unlock_irqrestore(&phba->hbalock, flags);
+ /* Make sure HBA is alive */
+ lpfc_issue_hb_tmo(phba);
+
/* Cancel all the IOCBs from the completions list */
lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
IOERR_SLI_DOWN);
@@ -11805,7 +11813,7 @@ lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
struct lpfc_io_buf *lpfc_cmd;
int rc = 1;
- if (iocbq->vport != vport)
+ if (!iocbq || iocbq->vport != vport)
return rc;
if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
@@ -13026,7 +13034,21 @@ lpfc_sli_sp_intr_handler(int irq, void *dev_id)
spin_unlock_irqrestore(
&phba->pport->work_port_lock,
iflag);
- lpfc_mbox_cmpl_put(phba, pmb);
+
+ /* Do NOT queue MBX_HEARTBEAT to the worker
+ * thread for processing.
+ */
+ if (pmbox->mbxCommand == MBX_HEARTBEAT) {
+ /* Process mbox now */
+ phba->sli.mbox_active = NULL;
+ phba->sli.sli_flag &=
+ ~LPFC_SLI_MBOX_ACTIVE;
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ } else {
+ /* Queue to worker thread to process */
+ lpfc_mbox_cmpl_put(phba, pmb);
+ }
}
} else
spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -13622,7 +13644,26 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
- /* There is mailbox completion work to do */
+ /* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
+ if (pmbox->mbxCommand == MBX_HEARTBEAT) {
+ spin_lock_irqsave(&phba->hbalock, iflags);
+ /* Release the mailbox command posting token */
+ phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+ phba->sli.mbox_active = NULL;
+ if (bf_get(lpfc_trailer_consumed, mcqe))
+ lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+ spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+ /* Post the next mbox command, if there is one */
+ lpfc_sli4_post_async_mbox(phba);
+
+ /* Process cmpl now */
+ if (pmb->mbox_cmpl)
+ pmb->mbox_cmpl(phba, pmb);
+ return false;
+ }
+
+ /* There is mailbox completion work to queue to the worker thread */
spin_lock_irqsave(&phba->hbalock, iflags);
__lpfc_mbox_cmpl_put(phba, pmb);
phba->work_ha |= HA_MBATT;
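Both interrupt handlers above special-case MBX_HEARTBEAT so its completion never waits behind a busy worker thread; the rationale inferred from the surrounding changes is to keep the heartbeat timely. The general shape of that fast path, with invented names rather than lpfc structures:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct mbox_cmd {
	bool is_heartbeat;
	void (*cmpl)(struct mbox_cmd *cmd);
	struct work_struct work;
};

static void mbox_cmpl_work(struct work_struct *work)
{
	struct mbox_cmd *cmd = container_of(work, struct mbox_cmd, work);

	cmd->cmpl(cmd);			/* deferred, process context */
}

/* Interrupt path: run heartbeat completions immediately, defer the rest. */
static void mbox_irq_complete(struct mbox_cmd *cmd)
{
	if (cmd->is_heartbeat) {
		cmd->cmpl(cmd);		/* complete directly in the ISR */
	} else {
		INIT_WORK(&cmd->work, mbox_cmpl_work);
		schedule_work(&cmd->work);
	}
}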
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 234dca60995b..fade044c8f15 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "12.8.0.6"
+#define LPFC_DRIVER_VERSION "12.8.0.7"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index a99fdfba7d27..ccf7b6cd0bd8 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -478,7 +478,7 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
rc = VPORT_OK;
out:
- lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
"1825 Vport Created.\n");
lpfc_host_attrib_init(lpfc_shost_from_vport(vport));
error_out:
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 43edf83fdb62..9e989776609b 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -326,7 +326,6 @@ static void mac53c94_interrupt(int irq, void *dev_id)
}
cmd->SCp.Status = readb(&regs->fifo);
cmd->SCp.Message = readb(&regs->fifo);
- cmd->result = CMD_ACCEPT_MSG;
writeb(CMD_ACCEPT_MSG, &regs->command);
state->phase = busfreeing;
break;
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 4a27ac869f2e..d57e93872d7b 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -1165,7 +1165,7 @@ megaraid_mbox_setup_dma_pools(adapter_t *adapter)
* structure
* Since passthru and extended passthru commands are exclusive, they
* share common memory pool. Passthru structures piggyback on memory
- * allocted to extended passthru since passthru is smaller of the two
+ * allocated to extended passthru since passthru is smaller of the two
*/
raid_dev->epthru_pool_handle = dma_pool_create("megaraid mbox pthru",
&adapter->pdev->dev, sizeof(mraid_epassthru_t), 128, 0);
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index fd607287608e..38fc9467c625 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -3920,8 +3920,7 @@ megasas_free_host_crash_buffer(struct megasas_instance *instance)
{
unsigned int i;
for (i = 0; i < instance->drv_buf_alloc; i++) {
- if (instance->crash_buf[i])
- vfree(instance->crash_buf[i]);
+ vfree(instance->crash_buf[i]);
}
instance->drv_buf_index = 0;
instance->drv_buf_alloc = 0;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 6e23dc3209fe..f5582c8e77c9 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -2505,8 +2505,8 @@ _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
}
/* Check if we need to build a native SG list. */
- if (base_is_prp_possible(ioc, pcie_device,
- scmd, sges_left) == 0) {
+ if (!base_is_prp_possible(ioc, pcie_device,
+ scmd, sges_left)) {
/* We built a native SG list, just return. */
goto out;
}
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a920eced92ec..1acea528f27f 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -216,11 +216,11 @@ void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
}
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i, gfp_t gfp_flags)
{
struct mvs_phy *phy = &mvi->phy[i];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
- struct sas_ha_struct *sas_ha;
+
if (!phy->phy_attached)
return;
@@ -229,8 +229,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
return;
}
- sas_ha = mvi->sas;
- sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(sas_phy, PHYE_OOB_DONE, gfp_flags);
if (sas_phy->phy) {
struct sas_phy *sphy = sas_phy->phy;
@@ -262,8 +261,7 @@ static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- mvi->sas->notify_port_event(sas_phy,
- PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, gfp_flags);
}
void mvs_scan_start(struct Scsi_Host *shost)
@@ -279,7 +277,7 @@ void mvs_scan_start(struct Scsi_Host *shost)
for (j = 0; j < core_nr; j++) {
mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
for (i = 0; i < mvi->chip->n_phy; ++i)
- mvs_bytes_dmaed(mvi, i);
+ mvs_bytes_dmaed(mvi, i, GFP_KERNEL);
}
mvs_prv->scan_finished = 1;
}
@@ -1880,7 +1878,6 @@ static void mvs_work_queue(struct work_struct *work)
struct mvs_info *mvi = mwq->mvi;
unsigned long flags;
u32 phy_no = (unsigned long) mwq->data;
- struct sas_ha_struct *sas_ha = mvi->sas;
struct mvs_phy *phy = &mvi->phy[phy_no];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -1895,21 +1892,21 @@ static void mvs_work_queue(struct work_struct *work)
if (!(tmp & PHY_READY_MASK)) {
sas_phy_disconnected(sas_phy);
mvs_phy_disconnected(phy);
- sas_ha->notify_phy_event(sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
mv_dprintk("phy%d Removed Device\n", phy_no);
} else {
MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
mvs_update_phyinfo(mvi, phy_no, 1);
- mvs_bytes_dmaed(mvi, phy_no);
+ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
mvs_port_notify_formed(sas_phy, 0);
mv_dprintk("phy%d Attached Device\n", phy_no);
}
}
} else if (mwq->handler & EXP_BRCT_CHG) {
phy->phy_event &= ~EXP_BRCT_CHG;
- sas_ha->notify_port_event(sas_phy,
- PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy,
+ PORTE_BROADCAST_RCVD, GFP_ATOMIC);
mv_dprintk("phy%d Got Broadcast Change\n", phy_no);
}
list_del(&mwq->entry);
@@ -2026,7 +2023,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
mdelay(10);
}
- mvs_bytes_dmaed(mvi, phy_no);
+ mvs_bytes_dmaed(mvi, phy_no, GFP_ATOMIC);
/* whether driver is going to handle hot plug */
if (phy->phy_event & PHY_PLUG_OUT) {
mvs_port_notify_formed(&phy->sas_phy, 0);
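The mvsas conversion above is part of a libsas-wide change: the notification helpers now take the caller's allocation context, so process-context callers may sleep while interrupt and tasklet paths pass GFP_ATOMIC. A minimal sketch using the helpers as they appear in this series (assuming <scsi/libsas.h> exports them with these signatures):

#include <scsi/libsas.h>

/* Process context (e.g. the scan path): sleeping allocations are fine. */
static void example_scan_path(struct asd_sas_phy *sas_phy)
{
	sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_KERNEL);
}

/* Interrupt / tasklet context: must not sleep. */
static void example_irq_path(struct asd_sas_phy *sas_phy)
{
	sas_notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
}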
diff --git a/drivers/scsi/ncr53c8xx.c b/drivers/scsi/ncr53c8xx.c
index 03d70138ad58..c76e9f05d042 100644
--- a/drivers/scsi/ncr53c8xx.c
+++ b/drivers/scsi/ncr53c8xx.c
@@ -148,6 +148,11 @@ static int ncr_debug = SCSI_NCR_DEBUG_FLAGS;
#define DEBUG_FLAGS SCSI_NCR_DEBUG_FLAGS
#endif
+/*
+ * Locally used status flag
+ */
+#define SAM_STAT_ILLEGAL 0xff
+
static inline struct list_head *ncr_list_pop(struct list_head *head)
{
if (!list_empty(head)) {
@@ -998,8 +1003,6 @@ typedef u32 tagmap_t;
** Other definitions
*/
-#define ScsiResult(host_code, scsi_code) (((host_code) << 16) + ((scsi_code) & 0x7f))
-
#define initverbose (driver_setup.verbose)
#define bootverbose (np->verbose)
@@ -2430,7 +2433,7 @@ static struct script script0 __initdata = {
*/
SCR_FROM_REG (SS_REG),
0,
- SCR_CALL ^ IFFALSE (DATA (S_GOOD)),
+ SCR_CALL ^ IFFALSE (DATA (SAM_STAT_GOOD)),
PADDRH (bad_status),
#ifndef SCSI_NCR_CCB_DONE_SUPPORT
@@ -2879,7 +2882,7 @@ static struct scripth scripth0 __initdata = {
8,
SCR_TO_REG (HS_REG),
0,
- SCR_LOAD_REG (SS_REG, S_GOOD),
+ SCR_LOAD_REG (SS_REG, SAM_STAT_GOOD),
0,
SCR_JUMP,
PADDR (cleanup_ok),
@@ -3341,15 +3344,15 @@ static struct scripth scripth0 __initdata = {
PADDRH (reset),
}/*-------------------------< BAD_STATUS >-----------------*/,{
/*
- ** If command resulted in either QUEUE FULL,
+ ** If command resulted in either TASK_SET FULL,
** CHECK CONDITION or COMMAND TERMINATED,
** call the C code.
*/
- SCR_INT ^ IFTRUE (DATA (S_QUEUE_FULL)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_TASK_SET_FULL)),
SIR_BAD_STATUS,
- SCR_INT ^ IFTRUE (DATA (S_CHECK_COND)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_CHECK_CONDITION)),
SIR_BAD_STATUS,
- SCR_INT ^ IFTRUE (DATA (S_TERMINATED)),
+ SCR_INT ^ IFTRUE (DATA (SAM_STAT_COMMAND_TERMINATED)),
SIR_BAD_STATUS,
SCR_RETURN,
0,
@@ -4371,7 +4374,7 @@ static int ncr_queue_command (struct ncb *np, struct scsi_cmnd *cmd)
*/
cp->actualquirks = 0;
cp->host_status = cp->nego_status ? HS_NEGOTIATE : HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
cp->parity_status = 0;
cp->xerr_status = XE_OK;
@@ -4602,7 +4605,7 @@ static int ncr_reset_bus (struct ncb *np, struct scsi_cmnd *cmd, int sync_reset)
* in order to keep it alive.
*/
if (!found && sync_reset && !retrieve_from_waiting_list(0, np, cmd)) {
- cmd->result = DID_RESET << 16;
+ set_host_byte(cmd, DID_RESET);
ncr_queue_done_cmd(np, cmd);
}
@@ -4630,7 +4633,7 @@ static int ncr_abort_command (struct ncb *np, struct scsi_cmnd *cmd)
* First, look for the scsi command in the waiting list
*/
if (remove_from_waiting_list(np, cmd)) {
- cmd->result = ScsiResult(DID_ABORT, 0);
+ set_host_byte(cmd, DID_ABORT);
ncr_queue_done_cmd(np, cmd);
return SCSI_ABORT_SUCCESS;
}
@@ -4895,7 +4898,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
** Print out any error for debugging purpose.
*/
if (DEBUG_FLAGS & (DEBUG_RESULT|DEBUG_TINY)) {
- if (cp->host_status!=HS_COMPLETE || cp->scsi_status!=S_GOOD) {
+ if (cp->host_status != HS_COMPLETE ||
+ cp->scsi_status != SAM_STAT_GOOD) {
PRINT_ADDR(cmd, "ERROR: cmd=%x host_status=%x "
"scsi_status=%x\n", cmd->cmnd[0],
cp->host_status, cp->scsi_status);
@@ -4905,15 +4909,16 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** Check the status.
*/
+ cmd->result = 0;
if ( (cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_GOOD ||
- cp->scsi_status == S_COND_MET)) {
+ && (cp->scsi_status == SAM_STAT_GOOD ||
+ cp->scsi_status == SAM_STAT_CONDITION_MET)) {
/*
* All went well (GOOD status).
- * CONDITION MET status is returned on
+ * CONDITION MET status is returned on
* `Pre-Fetch' or `Search data' success.
*/
- cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
/*
** @RESID@
@@ -4944,11 +4949,11 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
}
}
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_CHECK_COND)) {
+ && (cp->scsi_status == SAM_STAT_CHECK_CONDITION)) {
/*
** Check condition code
*/
- cmd->result = DID_OK << 16 | S_CHECK_COND;
+ set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
/*
** Copy back sense data to caller's buffer.
@@ -4965,20 +4970,20 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
printk (".\n");
}
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_CONFLICT)) {
+ && (cp->scsi_status == SAM_STAT_RESERVATION_CONFLICT)) {
/*
** Reservation Conflict condition code
*/
- cmd->result = DID_OK << 16 | S_CONFLICT;
-
+ set_status_byte(cmd, SAM_STAT_RESERVATION_CONFLICT);
+
} else if ((cp->host_status == HS_COMPLETE)
- && (cp->scsi_status == S_BUSY ||
- cp->scsi_status == S_QUEUE_FULL)) {
+ && (cp->scsi_status == SAM_STAT_BUSY ||
+ cp->scsi_status == SAM_STAT_TASK_SET_FULL)) {
/*
** Target is busy.
*/
- cmd->result = ScsiResult(DID_OK, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
} else if ((cp->host_status == HS_SEL_TIMEOUT)
|| (cp->host_status == HS_TIMEOUT)) {
@@ -4986,21 +4991,24 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
/*
** No response
*/
- cmd->result = ScsiResult(DID_TIME_OUT, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_TIME_OUT);
} else if (cp->host_status == HS_RESET) {
/*
** SCSI bus reset
*/
- cmd->result = ScsiResult(DID_RESET, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_RESET);
} else if (cp->host_status == HS_ABORTED) {
/*
** Transfer aborted
*/
- cmd->result = ScsiResult(DID_ABORT, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ABORT);
} else {
@@ -5010,7 +5018,8 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
PRINT_ADDR(cmd, "COMMAND FAILED (%x %x) @%p.\n",
cp->host_status, cp->scsi_status, cp);
- cmd->result = ScsiResult(DID_ERROR, cp->scsi_status);
+ set_status_byte(cmd, cp->scsi_status);
+ set_host_byte(cmd, DID_ERROR);
}
/*
@@ -5026,10 +5035,10 @@ void ncr_complete (struct ncb *np, struct ccb *cp)
if (cp->host_status==HS_COMPLETE) {
switch (cp->scsi_status) {
- case S_GOOD:
+ case SAM_STAT_GOOD:
printk (" GOOD");
break;
- case S_CHECK_COND:
+ case SAM_STAT_CHECK_CONDITION:
printk (" SENSE:");
p = (u_char*) &cmd->sense_buffer;
for (i=0; i<14; i++)
@@ -6564,7 +6573,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
switch(s_status) {
default: /* Just for safety, should never happen */
- case S_QUEUE_FULL:
+ case SAM_STAT_TASK_SET_FULL:
/*
** Decrease number of tags to the number of
** disconnected commands.
@@ -6588,15 +6597,15 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
*/
cp->phys.header.savep = cp->startp;
cp->host_status = HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
ncr_put_start_queue(np, cp);
if (disc_cnt)
INB (nc_ctest2); /* Clear SIGP */
OUTL_DSP (NCB_SCRIPT_PHYS (np, reselect));
return;
- case S_TERMINATED:
- case S_CHECK_COND:
+ case SAM_STAT_COMMAND_TERMINATED:
+ case SAM_STAT_CHECK_CONDITION:
/*
** If we were requesting sense, give up.
*/
@@ -6646,7 +6655,7 @@ static void ncr_sir_to_redo(struct ncb *np, int num, struct ccb *cp)
cp->phys.header.wlastp = startp;
cp->host_status = HS_BUSY;
- cp->scsi_status = S_ILLEGAL;
+ cp->scsi_status = SAM_STAT_ILLEGAL;
cp->auto_sense = s_status;
cp->start.schedule.l_paddr =
@@ -8035,7 +8044,7 @@ printk("ncr53c8xx_queue_command\n");
spin_lock_irqsave(&np->smp_lock, flags);
if ((sts = ncr_queue_command(np, cmd)) != DID_OK) {
- cmd->result = sts << 16;
+ set_host_byte(cmd, sts);
#ifdef DEBUG_NCR53C8XX
printk("ncr53c8xx : command not queued - result=%d\n", sts);
#endif
@@ -8226,7 +8235,7 @@ static void process_waiting_list(struct ncb *np, int sts)
#ifdef DEBUG_WAITING_LIST
printk("%s: cmd %lx done forced sts=%d\n", ncr_name(np), (u_long) wcmd, sts);
#endif
- wcmd->result = sts << 16;
+ set_host_byte(wcmd, sts);
ncr_queue_done_cmd(np, wcmd);
}
}
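The removed ScsiResult() macro built scsi_cmnd::result in one shot; the accessor helpers used in its place each touch only their own byte of the result word (SCSI status in bits 0-7, host code in bits 16-23), which is why the timeout, reset, and abort branches above can set them independently. A minimal sketch of the equivalence (example_set_result is illustrative, not driver code):

#include <scsi/scsi_cmnd.h>

static void example_set_result(struct scsi_cmnd *cmd, u8 scsi_status, int host_code)
{
	/* Old style, as in the removed macro:
	 * cmd->result = (host_code << 16) + (scsi_status & 0x7f);
	 */

	/* New style: set each byte through its helper */
	cmd->result = 0;
	set_status_byte(cmd, scsi_status);	/* bits 0-7   */
	set_host_byte(cmd, host_code);		/* bits 16-23 */
}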
diff --git a/drivers/scsi/ncr53c8xx.h b/drivers/scsi/ncr53c8xx.h
index 8326f5f01e07..fa14b5ca8783 100644
--- a/drivers/scsi/ncr53c8xx.h
+++ b/drivers/scsi/ncr53c8xx.h
@@ -1239,22 +1239,6 @@ struct scr_tblsel {
*/
/*
-** Status
-*/
-
-#define S_GOOD (0x00)
-#define S_CHECK_COND (0x02)
-#define S_COND_MET (0x04)
-#define S_BUSY (0x08)
-#define S_INT (0x10)
-#define S_INT_COND_MET (0x14)
-#define S_CONFLICT (0x18)
-#define S_TERMINATED (0x20)
-#define S_QUEUE_FULL (0x28)
-#define S_ILLEGAL (0xff)
-#define S_SENSE (0x80)
-
-/*
* End of ncrreg from FreeBSD
*/
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index da814c2da16d..e44b1a0f6709 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -935,7 +935,7 @@ static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct s
SCpnt->scsi_done = done;
data->CurrentSC = SCpnt;
- SCpnt->SCp.Status = CHECK_CONDITION;
+ SCpnt->SCp.Status = SAM_STAT_CHECK_CONDITION;
SCpnt->SCp.Message = 0;
scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index bb3b3884f968..5d5f50d6a02d 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1132,7 +1132,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//*sync_neg = SYNC_NOT_YET;
/* all command complete and return status */
- if (tmpSC->SCp.Message == MSG_COMMAND_COMPLETE) {
+ if (tmpSC->SCp.Message == COMMAND_COMPLETE) {
tmpSC->result = (DID_OK << 16) |
((tmpSC->SCp.Message & 0xff) << 8) |
((tmpSC->SCp.Status & 0xff) << 0);
@@ -1226,9 +1226,9 @@ static irqreturn_t nspintr(int irq, void *dev_id)
data->Sync[target].SyncOffset = 0;
/**/
- data->MsgBuffer[i] = MSG_EXTENDED; i++;
+ data->MsgBuffer[i] = EXTENDED_MESSAGE; i++;
data->MsgBuffer[i] = 3; i++;
- data->MsgBuffer[i] = MSG_EXT_SDTR; i++;
+ data->MsgBuffer[i] = EXTENDED_SDTR; i++;
data->MsgBuffer[i] = 0x0c; i++;
data->MsgBuffer[i] = 15; i++;
/**/
@@ -1255,9 +1255,9 @@ static irqreturn_t nspintr(int irq, void *dev_id)
//nsp_dbg(NSP_DEBUG_INTR, "sync target=%d,lun=%d",target,lun);
if (data->MsgLen >= 5 &&
- data->MsgBuffer[0] == MSG_EXTENDED &&
+ data->MsgBuffer[0] == EXTENDED_MESSAGE &&
data->MsgBuffer[1] == 3 &&
- data->MsgBuffer[2] == MSG_EXT_SDTR ) {
+ data->MsgBuffer[2] == EXTENDED_SDTR ) {
data->Sync[target].SyncPeriod = data->MsgBuffer[3];
data->Sync[target].SyncOffset = data->MsgBuffer[4];
//nsp_dbg(NSP_DEBUG_INTR, "sync ok, %d %d", data->MsgBuffer[3], data->MsgBuffer[4]);
@@ -1275,7 +1275,7 @@ static irqreturn_t nspintr(int irq, void *dev_id)
tmp = -1;
for (i = 0; i < data->MsgLen; i++) {
tmp = data->MsgBuffer[i];
- if (data->MsgBuffer[i] == MSG_EXTENDED) {
+ if (data->MsgBuffer[i] == EXTENDED_MESSAGE) {
i += (1 + data->MsgBuffer[i+1]);
}
}
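The private MSG_* constants this patch drops from nsp_cs.h duplicate values the midlayer already defines (COMMAND_COMPLETE, EXTENDED_MESSAGE, EXTENDED_SDTR). The SDTR negotiation buffer built in nspintr() follows the standard extended-message layout; a small sketch using the common constants (build_sdtr is an illustrative helper):

#include <scsi/scsi.h>

static int build_sdtr(unsigned char *buf, unsigned char period, unsigned char offset)
{
	int i = 0;

	buf[i++] = EXTENDED_MESSAGE;	/* 0x01 */
	buf[i++] = 3;			/* length of the extended message body */
	buf[i++] = EXTENDED_SDTR;	/* 0x01 */
	buf[i++] = period;		/* transfer period factor */
	buf[i++] = offset;		/* REQ/ACK offset */

	return i;			/* 5 bytes total */
}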
diff --git a/drivers/scsi/pcmcia/nsp_cs.h b/drivers/scsi/pcmcia/nsp_cs.h
index ea5122f3396d..665bf8d0faf7 100644
--- a/drivers/scsi/pcmcia/nsp_cs.h
+++ b/drivers/scsi/pcmcia/nsp_cs.h
@@ -370,17 +370,6 @@ enum _burst_mode {
BURST_MEM32 = 2,
};
-/**************************************************************************
- * SCSI messaage
- */
-#define MSG_COMMAND_COMPLETE 0x00
-#define MSG_EXTENDED 0x01
-#define MSG_ABORT 0x06
-#define MSG_NO_OPERATION 0x08
-#define MSG_BUS_DEVICE_RESET 0x0c
-
-#define MSG_EXT_SDTR 0x01
-
/* scatter-gather table */
# define BUFFER_ADDR ((char *)((sg_virt(SCpnt->SCp.buffer))))
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index c8d4d87c5473..49bf2f70a470 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3038,8 +3038,8 @@ void pm8001_mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
complete(pm8001_ha->nvmd_completion);
pm8001_dbg(pm8001_ha, MSG, "Set nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error!\n");
- return;
+ pm8001_dbg(pm8001_ha, FAIL, "Set nvm data error %x\n",
+ dlen_status);
}
ccb->task = NULL;
ccb->ccb_tag = 0xFFFFFFFF;
@@ -3062,11 +3062,17 @@ pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, MSG, "Get nvm data complete!\n");
if ((dlen_status & NVMD_STAT) != 0) {
- pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error!\n");
+ pm8001_dbg(pm8001_ha, FAIL, "Get nvm data error %x\n",
+ dlen_status);
complete(pm8001_ha->nvmd_completion);
+ /* Free the tag on failure as well; the requesting path does
+ * not free it anywhere.
+ */
+ ccb->task = NULL;
+ ccb->ccb_tag = 0xFFFFFFFF;
+ pm8001_tag_free(pm8001_ha, tag);
return;
}
-
if (ir_tds_bn_dps_das_nvm & IPMode) {
/* indirect mode - IR bit set */
pm8001_dbg(pm8001_ha, MSG, "Get NVMD success, IR=1\n");
@@ -3179,7 +3185,7 @@ void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i)
pm8001_dbg(pm8001_ha, MSG, "phy %d byte dmaded.\n", i);
sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
- pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED);
+ sas_notify_port_event(sas_phy, PORTE_BYTES_DMAED, GFP_ATOMIC);
}
/* Get the link rate speed */
@@ -3293,7 +3299,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
@@ -3337,7 +3342,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, &pPayload->sas_identify,
sizeof(struct sas_identify_frame)-4);
@@ -3369,7 +3374,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
u8 portstate = (u8)(npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
pm8001_dbg(pm8001_ha, DEVIO, "HW_EVENT_SATA_PHY_UP port id = %d, phy id = %d\n",
@@ -3381,7 +3385,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
phy->sas_phy.oob_mode = SATA_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
sizeof(struct dev_to_host_fis));
@@ -3728,11 +3732,13 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_SATA_SPINUP_HOLD:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
phy->phy_attached = 0;
phy->phy_state = 0;
hw_event_phy_down(pm8001_ha, piomb);
@@ -3741,7 +3747,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
/* the broadcast change primitive received, tell the LIBSAS this event
to revalidate the sas domain*/
@@ -3752,20 +3759,22 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
case HW_EVENT_BROADCAST_EXP:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
pm8001_dbg(pm8001_ha, MSG,
@@ -3774,7 +3783,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_DISPARITY_ERROR:
pm8001_dbg(pm8001_ha, MSG,
@@ -3784,7 +3794,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_CODE_VIOLATION:
pm8001_dbg(pm8001_ha, MSG,
@@ -3794,7 +3805,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH:
pm8001_dbg(pm8001_ha, MSG,
@@ -3804,7 +3816,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_MALFUNCTION:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_MALFUNCTION\n");
@@ -3814,7 +3827,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
@@ -3824,13 +3838,14 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
break;
case HW_EVENT_HARD_RESET_RECEIVED:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
pm8001_dbg(pm8001_ha, MSG,
@@ -3840,20 +3855,23 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RECOVERY_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG,
"HW_EVENT_PORT_RECOVERY_TIMER_TMO\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RECOVER:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RECOVER\n");
@@ -4998,4 +5016,5 @@ const struct pm8001_dispatch pm8001_8001_dispatch = {
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
.sas_re_init_req = pm8001_chip_sas_re_initialization,
+ .fatal_errors = pm80xx_fatal_errors,
};
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index ee2de177d0d0..bd626ef876da 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -423,7 +423,7 @@ err_out_shost:
err_out_nodev:
for (i = 0; i < pm8001_ha->max_memcnt; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
- pci_free_consistent(pm8001_ha->pdev,
+ dma_free_coherent(&pm8001_ha->pdev->dev,
(pm8001_ha->memoryMap.region[i].total_len +
pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
@@ -466,9 +466,12 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->io_mem[logicalBar].memvirtaddr =
ioremap(pm8001_ha->io_mem[logicalBar].membase,
pm8001_ha->io_mem[logicalBar].memsize);
- pm8001_dbg(pm8001_ha, INIT,
- "PCI: bar %d, logicalBar %d\n",
+ if (!pm8001_ha->io_mem[logicalBar].memvirtaddr) {
+ pm8001_dbg(pm8001_ha, INIT,
+ "Failed to ioremap bar %d, logicalBar %d",
bar, logicalBar);
+ return -ENOMEM;
+ }
pm8001_dbg(pm8001_ha, INIT,
"base addr %llx virt_addr=%llx len=%d\n",
(u64)pm8001_ha->io_mem[logicalBar].membase,
@@ -540,9 +543,11 @@ static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
tasklet_init(&pm8001_ha->tasklet[j], pm8001_tasklet,
(unsigned long)&(pm8001_ha->irq_vector[j]));
#endif
- pm8001_ioremap(pm8001_ha);
+ if (pm8001_ioremap(pm8001_ha))
+ goto failed_pci_alloc;
if (!pm8001_alloc(pm8001_ha, ent))
return pm8001_ha;
+failed_pci_alloc:
pm8001_free(pm8001_ha);
return NULL;
}
@@ -1192,12 +1197,13 @@ pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost,
goto err_out_noccb;
}
for (i = 0; i < ccb_count; i++) {
- pm8001_ha->ccb_info[i].buf_prd = pci_alloc_consistent(pdev,
+ pm8001_ha->ccb_info[i].buf_prd = dma_alloc_coherent(&pdev->dev,
sizeof(struct pm8001_prd) * PM8001_MAX_DMA_SG,
- &pm8001_ha->ccb_info[i].ccb_dma_handle);
+ &pm8001_ha->ccb_info[i].ccb_dma_handle,
+ GFP_KERNEL);
if (!pm8001_ha->ccb_info[i].buf_prd) {
pm8001_dbg(pm8001_ha, FAIL,
- "pm80xx: ccb prd memory allocation error\n");
+ "ccb prd memory allocation error\n");
goto err_out;
}
pm8001_ha->ccb_info[i].task = NULL;
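pci_alloc_consistent()/pci_free_consistent() are legacy wrappers around the generic DMA API; the conversion above calls dma_alloc_coherent()/dma_free_coherent() directly and gains an explicit GFP argument. A minimal sketch of the equivalent pair (helper names are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static void *alloc_prd_table(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
{
	/* GFP_KERNEL is fine here: called from probe, process context */
	return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
}

static void free_prd_table(struct pci_dev *pdev, size_t size, void *vaddr, dma_addr_t dma)
{
	dma_free_coherent(&pdev->dev, size, vaddr, dma);
}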
diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
index d1e9dba2ef19..a98d4496ff8b 100644
--- a/drivers/scsi/pm8001/pm8001_sas.c
+++ b/drivers/scsi/pm8001/pm8001_sas.c
@@ -158,7 +158,6 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
int rc = 0, phy_id = sas_phy->id;
struct pm8001_hba_info *pm8001_ha = NULL;
struct sas_phy_linkrates *rates;
- struct sas_ha_struct *sas_ha;
struct pm8001_phy *phy;
DECLARE_COMPLETION_ONSTACK(completion);
unsigned long flags;
@@ -207,19 +206,17 @@ int pm8001_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
if (pm8001_ha->chip_id != chip_8001) {
if (pm8001_ha->phy[phy_id].phy_state ==
PHY_STATE_LINK_UP_SPCV) {
- sas_ha = pm8001_ha->sas;
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
phy->phy_attached = 0;
}
} else {
if (pm8001_ha->phy[phy_id].phy_state ==
PHY_STATE_LINK_UP_SPC) {
- sas_ha = pm8001_ha->sas;
sas_phy_disconnected(&phy->sas_phy);
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_KERNEL);
phy->phy_attached = 0;
}
}
@@ -1183,12 +1180,21 @@ int pm8001_abort_task(struct sas_task *task)
int rc = TMF_RESP_FUNC_FAILED, ret;
u32 phy_id;
struct sas_task_slow slow_task;
+
if (unlikely(!task || !task->lldd_task || !task->dev))
return TMF_RESP_FUNC_FAILED;
+
dev = task->dev;
pm8001_dev = dev->lldd_dev;
pm8001_ha = pm8001_find_ha_by_dev(dev);
phy_id = pm8001_dev->attached_phy;
+
+ if (PM8001_CHIP_DISP->fatal_errors(pm8001_ha)) {
+ // If the controller is seeing fatal errors
+ // abort task will not get a response from the controller
+ return TMF_RESP_FUNC_FAILED;
+ }
+
ret = pm8001_find_tag(task, &tag);
if (ret == 0) {
pm8001_info(pm8001_ha, "no tag for task:%p\n", task);
@@ -1344,4 +1350,3 @@ int pm8001_clear_task_set(struct domain_device *dev, u8 *lun)
tmf_task.tmf = TMF_CLEAR_TASK_SET;
return pm8001_issue_ssp_tmf(dev, lun, &tmf_task);
}
-
diff --git a/drivers/scsi/pm8001/pm8001_sas.h b/drivers/scsi/pm8001/pm8001_sas.h
index f2c8cbad3853..039ed91e9841 100644
--- a/drivers/scsi/pm8001/pm8001_sas.h
+++ b/drivers/scsi/pm8001/pm8001_sas.h
@@ -215,6 +215,7 @@ struct pm8001_dispatch {
int (*sas_diag_execute_req)(struct pm8001_hba_info *pm8001_ha,
u32 state);
int (*sas_re_init_req)(struct pm8001_hba_info *pm8001_ha);
+ int (*fatal_errors)(struct pm8001_hba_info *pm8001_ha);
};
struct pm8001_chip_info {
@@ -725,6 +726,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev,
ssize_t pm80xx_get_non_fatal_dump(struct device *cdev,
struct device_attribute *attr, char *buf);
ssize_t pm8001_get_gsm_dump(struct device *cdev, u32, char *buf);
+int pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha);
/* ctl shared API */
extern struct device_attribute *pm8001_host_attrs[];
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index 6772b0924dac..84315560e8e1 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -349,37 +349,37 @@ moreData:
sprintf(
pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 0xFFFFFFFF);
- pm8001_cw32(pm8001_ha, 0,
+ return((char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf);
+ }
+ /* reset fatal_forensic_shift_offset back to zero and reset MEMBASE 2 register to zero */
+ pm8001_ha->fatal_forensic_shift_offset = 0; /* location in 64k region */
+ pm8001_cw32(pm8001_ha, 0,
MEMBASE_II_SHIFT_REGISTER,
pm8001_ha->fatal_forensic_shift_offset);
- }
- /* Read the next block of the debug data.*/
- length_to_read = pm8001_mr32(fatal_table_address,
- MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
- pm8001_ha->forensic_preserved_accumulated_transfer;
- if (length_to_read != 0x0) {
- pm8001_ha->forensic_fatal_step = 0;
- goto moreData;
- } else {
- pm8001_ha->forensic_info.data_buf.direct_data +=
- sprintf(
- pm8001_ha->forensic_info.data_buf.direct_data,
+ }
+ /* Read the next block of the debug data.*/
+ length_to_read = pm8001_mr32(fatal_table_address,
+ MPI_FATAL_EDUMP_TABLE_ACCUM_LEN) -
+ pm8001_ha->forensic_preserved_accumulated_transfer;
+ if (length_to_read != 0x0) {
+ pm8001_ha->forensic_fatal_step = 0;
+ goto moreData;
+ } else {
+ pm8001_ha->forensic_info.data_buf.direct_data +=
+ sprintf(pm8001_ha->forensic_info.data_buf.direct_data,
"%08x ", 4);
- pm8001_ha->forensic_info.data_buf.read_len
- = 0xFFFFFFFF;
- pm8001_ha->forensic_info.data_buf.direct_len
- = 0;
- pm8001_ha->forensic_info.data_buf.direct_offset
- = 0;
- pm8001_ha->forensic_info.data_buf.read_len = 0;
- }
+ pm8001_ha->forensic_info.data_buf.read_len = 0xFFFFFFFF;
+ pm8001_ha->forensic_info.data_buf.direct_len = 0;
+ pm8001_ha->forensic_info.data_buf.direct_offset = 0;
+ pm8001_ha->forensic_info.data_buf.read_len = 0;
}
}
offset = (int)((char *)pm8001_ha->forensic_info.data_buf.direct_data
- (char *)buf);
pm8001_dbg(pm8001_ha, IO, "get_fatal_spcv: return4 0x%x\n", offset);
- return (char *)pm8001_ha->forensic_info.data_buf.direct_data -
- (char *)buf;
+ return ((char *)pm8001_ha->forensic_info.data_buf.direct_data -
+ (char *)buf);
}
/* pm80xx_get_non_fatal_dump - dump the nonfatal data from the dma
@@ -997,7 +997,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
max_wait_count = SPC_DOORBELL_CLEAR_TIMEOUT;
}
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
value &= SPCv_MSGU_CFG_TABLE_UPDATE;
} while ((value != 0) && (--max_wait_count));
@@ -1010,9 +1010,9 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
return -EBUSY;
}
/* check the MPI-State for initialization upto 100ms*/
- max_wait_count = 100 * 1000;/* 100 msec */
+ max_wait_count = 5;/* 100 msec */
do {
- udelay(1);
+ msleep(FW_READY_INTERVAL);
gst_len_mpistate =
pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
GST_GSTLEN_MPIS_OFFSET);
@@ -1039,6 +1039,7 @@ static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
u32 value;
u32 max_wait_count;
u32 max_wait_time;
+ u32 expected_mask;
int ret = 0;
/* reset / PCIe ready */
@@ -1048,74 +1049,39 @@ static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
} while ((value == 0xFFFFFFFF) && (--max_wait_count));
- /* check ila status */
- max_wait_time = max_wait_count = 50; /* 1000 milli sec */
- do {
- msleep(FW_READY_INTERVAL);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_ILA_READY) !=
- SCRATCH_PAD_ILA_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- pm8001_dbg(pm8001_ha, MSG,
- " ila ready status in %d millisec\n",
- (max_wait_time - max_wait_count));
- }
-
- /* check RAAE status */
- max_wait_time = max_wait_count = 90; /* 1800 milli sec */
- do {
- msleep(FW_READY_INTERVAL);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_RAAE_READY) !=
- SCRATCH_PAD_RAAE_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- pm8001_dbg(pm8001_ha, MSG,
- " raae ready status in %d millisec\n",
- (max_wait_time - max_wait_count));
+ /* check ila, RAAE and iops status */
+ if ((pm8001_ha->chip_id != chip_8008) &&
+ (pm8001_ha->chip_id != chip_8009)) {
+ max_wait_time = max_wait_count = 180; /* 3600 milli sec */
+ expected_mask = SCRATCH_PAD_ILA_READY |
+ SCRATCH_PAD_RAAE_READY |
+ SCRATCH_PAD_IOP0_READY |
+ SCRATCH_PAD_IOP1_READY;
+ } else {
+ max_wait_time = max_wait_count = 170; /* 3400 milli sec */
+ expected_mask = SCRATCH_PAD_ILA_READY |
+ SCRATCH_PAD_RAAE_READY |
+ SCRATCH_PAD_IOP0_READY;
}
-
- /* check iop0 status */
- max_wait_time = max_wait_count = 30; /* 600 milli sec */
do {
msleep(FW_READY_INTERVAL);
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_IOP0_READY) != SCRATCH_PAD_IOP0_READY) &&
- (--max_wait_count));
- if (!max_wait_count)
+ } while (((value & expected_mask) !=
+ expected_mask) && (--max_wait_count));
+ if (!max_wait_count) {
+ pm8001_dbg(pm8001_ha, INIT,
+ "At least one FW component failed to load within %d millisec: Scratchpad1: 0x%x\n",
+ max_wait_time * FW_READY_INTERVAL, value);
ret = -1;
- else {
+ } else {
pm8001_dbg(pm8001_ha, MSG,
- " iop0 ready status in %d millisec\n",
- (max_wait_time - max_wait_count));
+ "All FW components ready by %d ms\n",
+ (max_wait_time - max_wait_count) * FW_READY_INTERVAL);
}
-
- /* check iop1 status only for 16 port controllers */
- if ((pm8001_ha->chip_id != chip_8008) &&
- (pm8001_ha->chip_id != chip_8009)) {
- /* 200 milli sec */
- max_wait_time = max_wait_count = 10;
- do {
- msleep(FW_READY_INTERVAL);
- value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
- } while (((value & SCRATCH_PAD_IOP1_READY) !=
- SCRATCH_PAD_IOP1_READY) && (--max_wait_count));
- if (!max_wait_count)
- ret = -1;
- else {
- pm8001_dbg(pm8001_ha, MSG,
- "iop1 ready status in %d millisec\n",
- (max_wait_time - max_wait_count));
- }
- }
-
return ret;
}
-static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
+static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *base_addr;
u32 value;
@@ -1124,15 +1090,48 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
u32 pcilogic;
value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
+
+ /**
+ * lower 26 bits of SCRATCHPAD0 register describes offset within the
+ * PCIe BAR where the MPI configuration table is present
+ */
offset = value & 0x03FFFFFF; /* scratch pad 0 TBL address */
pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n",
offset, value);
+ /**
+ * Upper 6 bits describe the offset within PCI config space where BAR
+ * is located.
+ */
pcilogic = (value & 0xFC000000) >> 26;
pcibar = get_pci_bar_index(pcilogic);
pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar);
+
+ /**
+ * Make sure the offset falls inside the ioremapped PCI BAR
+ */
+ if (offset > pm8001_ha->io_mem[pcibar].memsize) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Main cfg tbl offset outside %u > %u\n",
+ offset, pm8001_ha->io_mem[pcibar].memsize);
+ return -EBUSY;
+ }
pm8001_ha->main_cfg_tbl_addr = base_addr =
pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
+
+ /**
+ * Validate main configuration table address: first DWord should read
+ * "PMCS"
+ */
+ value = pm8001_mr32(pm8001_ha->main_cfg_tbl_addr, 0);
+ if (memcmp(&value, "PMCS", 4) != 0) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "BAD main config signature 0x%x\n",
+ value);
+ return -EBUSY;
+ }
+ pm8001_dbg(pm8001_ha, INIT,
+ "VALID main config signature 0x%x\n", value);
pm8001_ha->general_stat_tbl_addr =
base_addr + (pm8001_cr32(pm8001_ha, pcibar, offset + 0x18) &
0xFFFFFF);
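The comments added above describe how SCRATCHPAD0 is decoded before the MPI configuration table is trusted: bits 25:0 carry the table offset inside the BAR, bits 31:26 select the logical BAR, the offset is bounds-checked against the ioremapped size, and the first DWord must read "PMCS". A compact sketch of that decode (decode_scratchpad0 is illustrative, not driver API):

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/string.h>
#include <linux/types.h>

static int decode_scratchpad0(u32 value, u32 bar_size, void __iomem *bar_base,
			      void __iomem **cfg_tbl)
{
	u32 offset = value & 0x03FFFFFF;		/* bits 25:0 - table offset */
	u32 pcilogic = (value & 0xFC000000) >> 26;	/* bits 31:26 - logical BAR */
	u32 sig;

	(void)pcilogic;			/* caller maps this to a physical BAR */

	if (offset > bar_size)		/* never dereference past the mapping */
		return -EBUSY;

	*cfg_tbl = bar_base + offset;
	sig = readl(*cfg_tbl);		/* first DWord of the table */
	if (memcmp(&sig, "PMCS", 4) != 0)
		return -EBUSY;

	return 0;
}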
@@ -1171,6 +1170,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
pm8001_dbg(pm8001_ha, INIT, "addr - pspa %p ivt %p\n",
pm8001_ha->pspa_q_tbl_addr,
pm8001_ha->ivt_tbl_addr);
+ return 0;
}
/**
@@ -1438,7 +1438,12 @@ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha)
pm8001_ha->controller_fatal_error = false;
/* Initialize pci space address eg: mpi offset */
- init_pci_device_addresses(pm8001_ha);
+ ret = init_pci_device_addresses(pm8001_ha);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Failed to init pci addresses");
+ return ret;
+ }
init_default_table_values(pm8001_ha);
read_main_config_table(pm8001_ha);
read_general_status_table(pm8001_ha);
@@ -1482,7 +1487,15 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
u32 max_wait_count;
u32 value;
u32 gst_len_mpistate;
- init_pci_device_addresses(pm8001_ha);
+ int ret;
+
+ ret = init_pci_device_addresses(pm8001_ha);
+ if (ret) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Failed to init pci addresses");
+ return ret;
+ }
+
	/* Write bit1=1 to the Inbound DoorBell Register to tell the SPC FW that
	   the table is stopped */
pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPCv_MSGU_CFG_TABLE_RESET);
@@ -1526,6 +1539,41 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
}
/**
+ * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors are detected
+ * @pm8001_ha: our hba card information
+ *
+ * Fatal errors are recoverable only after a host reboot.
+ */
+int
+pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha)
+{
+ int ret = 0;
+ u32 scratch_pad_rsvd0 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_6);
+ u32 scratch_pad_rsvd1 = pm8001_cr32(pm8001_ha, 0,
+ MSGU_HOST_SCRATCH_PAD_7);
+ u32 scratch_pad1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
+ u32 scratch_pad2 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
+ u32 scratch_pad3 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
+
+ if (pm8001_ha->chip_id != chip_8006 &&
+ pm8001_ha->chip_id != chip_8074 &&
+ pm8001_ha->chip_id != chip_8076) {
+ return 0;
+ }
+
+ if (MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(scratch_pad1)) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "Fatal error SCRATCHPAD1 = 0x%x SCRATCHPAD2 = 0x%x SCRATCHPAD3 = 0x%x SCRATCHPAD_RSVD0 = 0x%x SCRATCHPAD_RSVD1 = 0x%x\n",
+ scratch_pad1, scratch_pad2, scratch_pad3,
+ scratch_pad_rsvd0, scratch_pad_rsvd1);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+/**
 * pm8001_chip_soft_rst - soft reset the PM8001 chip so that all FW register
 * status is cleared back to the original state.
* @pm8001_ha: our hba card information
@@ -2385,10 +2433,11 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
return;
}
- if (unlikely(status))
- pm8001_dbg(pm8001_ha, IOERR,
- "status:0x%x, tag:0x%x, task::0x%p\n",
- status, tag, t);
+ if (status != IO_SUCCESS) {
+ pm8001_dbg(pm8001_ha, FAIL,
+ "IO failed device_id %u status 0x%x tag %d\n",
+ pm8001_dev->device_id, status, tag);
+ }
/* Print sas address of IO failed device */
if ((status != IO_SUCCESS) && (status != IO_OVERFLOW) &&
@@ -2710,7 +2759,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
atomic_dec(&pm8001_dev->running_req);
break;
default:
- pm8001_dbg(pm8001_ha, DEVIO, "Unknown status 0x%x\n", status);
+ pm8001_dbg(pm8001_ha, DEVIO,
+ "Unknown status device_id %u status 0x%x tag %d\n",
+ pm8001_dev->device_id, status, tag);
/* not allowed case. Therefore, return failed status */
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAS_DEV_NO_RESPONSE;
@@ -3243,7 +3294,6 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
u8 deviceType = pPayload->sas_identify.dev_type;
@@ -3288,7 +3338,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
else if (phy->identify.device_type != SAS_PHY_UNUSED)
phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
phy->sas_phy.oob_mode = SAS_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, &pPayload->sas_identify,
sizeof(struct sas_identify_frame)-4);
@@ -3322,7 +3372,6 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
u8 portstate = (u8)(phyid_npip_portstate & 0x0000000F);
struct pm8001_port *port = &pm8001_ha->port[port_id];
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
unsigned long flags;
pm8001_dbg(pm8001_ha, DEVIO,
@@ -3336,7 +3385,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
phy->phy_type |= PORT_TYPE_SATA;
phy->phy_attached = 1;
phy->sas_phy.oob_mode = SATA_OOB_MODE;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE, GFP_ATOMIC);
spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
sizeof(struct dev_to_host_fis));
@@ -3418,11 +3467,9 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
}
- if (port_sata && (portstate != PORT_IN_RESET)) {
- struct sas_ha_struct *sas_ha = pm8001_ha->sas;
-
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL);
- }
+ if (port_sata && (portstate != PORT_IN_RESET))
+ sas_notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL,
+ GFP_ATOMIC);
}
static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
@@ -3520,7 +3567,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case HW_EVENT_SATA_SPINUP_HOLD:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_SATA_SPINUP_HOLD\n");
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_DOWN:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_DOWN\n");
@@ -3536,7 +3584,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_INVALID\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
/* the broadcast change primitive received, tell the LIBSAS this event
to revalidate the sas domain*/
@@ -3547,20 +3596,22 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_PHY_ERROR:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PHY_ERROR\n");
sas_phy_disconnected(&phy->sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR);
+ sas_notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR, GFP_ATOMIC);
break;
case HW_EVENT_BROADCAST_EXP:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_BROADCAST_EXP\n");
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_INVALID_DWORD:
pm8001_dbg(pm8001_ha, MSG,
@@ -3597,7 +3648,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
spin_lock_irqsave(&sas_phy->sas_prim_lock, flags);
sas_phy->sas_prim = HW_EVENT_BROADCAST_SES;
spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags);
- sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+ sas_notify_port_event(sas_phy, PORTE_BROADCAST_RCVD,
+ GFP_ATOMIC);
break;
case HW_EVENT_INBOUND_CRC_ERROR:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_INBOUND_CRC_ERROR\n");
@@ -3607,13 +3659,14 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
break;
case HW_EVENT_HARD_RESET_RECEIVED:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_HARD_RESET_RECEIVED\n");
- sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET);
+ sas_notify_port_event(sas_phy, PORTE_HARD_RESET, GFP_ATOMIC);
break;
case HW_EVENT_ID_FRAME_TIMEOUT:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_ID_FRAME_TIMEOUT\n");
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_LINK_ERR_PHY_RESET_FAILED:
pm8001_dbg(pm8001_ha, MSG,
@@ -3623,7 +3676,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
break;
case HW_EVENT_PORT_RESET_TIMER_TMO:
pm8001_dbg(pm8001_ha, MSG, "HW_EVENT_PORT_RESET_TIMER_TMO\n");
@@ -3631,7 +3685,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
port_id, phy_id, 0, 0);
sas_phy_disconnected(sas_phy);
phy->phy_attached = 0;
- sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR);
+ sas_notify_port_event(sas_phy, PORTE_LINK_RESET_ERR,
+ GFP_ATOMIC);
if (pm8001_ha->phy[phy_id].reset_completion) {
pm8001_ha->phy[phy_id].port_reset_status =
PORT_RESET_TMO;
@@ -3648,8 +3703,8 @@ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb)
for (i = 0; i < pm8001_ha->chip->n_phy; i++) {
if (port->wide_port_phymap & (1 << i)) {
phy = &pm8001_ha->phy[i];
- sas_ha->notify_phy_event(&phy->sas_phy,
- PHYE_LOSS_OF_SIGNAL);
+ sas_notify_phy_event(&phy->sas_phy,
+ PHYE_LOSS_OF_SIGNAL, GFP_ATOMIC);
port->wide_port_phymap &= ~(1 << i);
}
}
@@ -4959,4 +5014,5 @@ const struct pm8001_dispatch pm8001_80xx_dispatch = {
.set_nvmd_req = pm8001_chip_set_nvmd_req,
.fw_flash_update_req = pm8001_chip_fw_flash_update_req,
.set_dev_state_req = pm8001_chip_set_dev_state_req,
+ .fatal_errors = pm80xx_fatal_errors,
};
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.h b/drivers/scsi/pm8001/pm80xx_hwi.h
index ec48bc276de6..2c8e85cfdbc4 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.h
+++ b/drivers/scsi/pm8001/pm80xx_hwi.h
@@ -220,8 +220,8 @@
#define SAS_DOPNRJT_RTRY_TMO 128
#define SAS_COPNRJT_RTRY_TMO 128
-#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 1000 * 1000) /* 30 sec */
-#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 1000 * 1000) /* 15 sec */
+#define SPCV_DOORBELL_CLEAR_TIMEOUT (30 * 50) /* 30 sec */
+#define SPC_DOORBELL_CLEAR_TIMEOUT (15 * 50) /* 15 sec */
/*
 Making ORR bigger than IT NEXUS LOSS, which is 2000000 us = 2 seconds.
@@ -1368,6 +1368,19 @@ typedef struct SASProtocolTimerConfig SASProtocolTimerConfig_t;
#define MSGU_HOST_SCRATCH_PAD_6 0x6C
#define MSGU_HOST_SCRATCH_PAD_7 0x70
+#define MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) ((x & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) (((x >> 2) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) ((((x >> 4) & 0x7) == 0x7) || \
+ (((x >> 4) & 0x7) == 0x4))
+#define MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) (((x >> 10) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x) (((x >> 12) & 0x3) == 0x2)
+#define MSGU_SCRATCHPAD1_STATE_FATAL_ERROR(x) \
+ (MSGU_SCRATCHPAD1_RAAE_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_ILA_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_BOOTLDR_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_IOP0_STATE_ERR(x) || \
+ MSGU_SCRATCHPAD1_IOP1_STATE_ERR(x))
+
/* bit definition for ODMR register */
#define ODMR_MASK_ALL 0xFFFFFFFF/* mask all
interrupt vector */
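
For reference, the SCRATCHPAD1 fields tested by the new macros, with bit positions read off the shifts and masks above (summary only, not part of the patch):

	/*
	 *   bits [1:0]    RAAE state        - 0x2 means error
	 *   bits [3:2]    ILA state         - 0x2 means error
	 *   bits [6:4]    boot loader state - 0x7 or 0x4 means error
	 *   bits [11:10]  IOP0 state        - 0x2 means error
	 *   bits [13:12]  IOP1 state        - 0x2 means error
	 *
	 * MSGU_SCRATCHPAD1_STATE_FATAL_ERROR() is the OR of the five checks and is
	 * what pm80xx_fatal_errors() evaluates.
	 */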
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 46d185cb9ea8..cec27f2ef70d 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3713,7 +3713,7 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
else
fc_fabric_logoff(qedf->lport);
- if (qedf_wait_for_upload(qedf) == false)
+ if (!qedf_wait_for_upload(qedf))
QEDF_ERR(&qedf->dbg_ctx, "Could not upload all sessions.\n");
#ifdef CONFIG_DEBUG_FS
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 545936cb3980..46de2541af25 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -3906,18 +3906,18 @@ qla1280_get_target_parameters(struct scsi_qla_host *ha,
printk(KERN_INFO "scsi(%li:%d:%d:%d):", ha->host_no, bus, target, lun);
if (mb[3] != 0) {
- printk(" Sync: period %d, offset %d",
+ printk(KERN_CONT " Sync: period %d, offset %d",
(mb[3] & 0xff), (mb[3] >> 8));
if (mb[2] & BIT_13)
- printk(", Wide");
+ printk(KERN_CONT ", Wide");
if ((mb[2] & BIT_5) && ((mb[6] >> 8) & 0xff) >= 2)
- printk(", DT");
+ printk(KERN_CONT ", DT");
} else
- printk(" Async");
+ printk(KERN_CONT " Async");
if (device->simple_tags)
- printk(", Tagged queuing: depth %d", device->queue_depth);
- printk("\n");
+ printk(KERN_CONT ", Tagged queuing: depth %d", device->queue_depth);
+ printk(KERN_CONT "\n");
}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index ab45ac1e5a72..63391c9be05d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -710,6 +710,12 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x706e,
"Issuing ISP reset.\n");
+ if (vha->hw->flags.port_isolated) {
+ ql_log(ql_log_info, vha, 0x706e,
+ "Port is isolated, returning.\n");
+ return -EINVAL;
+ }
+
scsi_block_requests(vha->host);
if (IS_QLA82XX(ha)) {
ha->flags.isp82xx_no_md_cap = 1;
@@ -2717,6 +2723,9 @@ qla2x00_issue_lip(struct Scsi_Host *shost)
if (IS_QLAFX00(vha->hw))
return 0;
+ if (vha->hw->flags.port_isolated)
+ return 0;
+
qla2x00_loop_reset(vha);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 23b604832a54..bee8cf9f8123 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -4,6 +4,7 @@
* Copyright (c) 2003-2014 QLogic Corporation
*/
#include "qla_def.h"
+#include "qla_gbl.h"
#include <linux/kthread.h>
#include <linux/vmalloc.h>
@@ -2445,6 +2446,323 @@ qla2x00_get_flash_image_status(struct bsg_job *bsg_job)
}
static int
+qla2x00_manage_host_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_mng_host_stats_param *req_data;
+ struct ql_vnd_mng_host_stats_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
+ return -EIO;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_mng_host_stats_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data,
+ req_data_len);
+
+ switch (req_data->action) {
+ case QLA_STOP:
+ ret = qla2xxx_stop_stats(vha->host, req_data->stat_type);
+ break;
+ case QLA_START:
+ ret = qla2xxx_start_stats(vha->host, req_data->stat_type);
+ break;
+ case QLA_CLEAR:
+ ret = qla2xxx_reset_stats(vha->host, req_data->stat_type);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
+ ret = -EIO;
+ break;
+ }
+
+ kfree(req_data);
+
+ /* Prepare response */
+ rsp_data.status = ret;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ &rsp_data,
+ sizeof(struct ql_vnd_mng_host_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
+static int
+qla2x00_get_host_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_stats_param *req_data;
+ struct ql_vnd_host_stats_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+ u64 ini_entry_count = 0;
+ u64 entry_count = 0;
+ u64 tgt_num = 0;
+ u64 tmp_stat_type = 0;
+ u64 response_len = 0;
+ void *data;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_stats_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ /* Copy stat type to work on it */
+ tmp_stat_type = req_data->stat_type;
+
+ if (tmp_stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
+ /* Num of tgts connected to this host */
+ tgt_num = qla2x00_get_num_tgts(vha);
+ /* unset BIT_17 */
+ tmp_stat_type &= ~(1 << 17);
+ }
+
+ /* Total ini stats */
+ ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
+
+ /* Total number of entries */
+ entry_count = ini_entry_count + tgt_num;
+
+ response_len = sizeof(struct ql_vnd_host_stats_resp) +
+ (sizeof(struct ql_vnd_stat_entry) * entry_count);
+
+ if (response_len > bsg_job->reply_payload.payload_len) {
+ rsp_data.status = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &rsp_data,
+ sizeof(struct ql_vnd_mng_host_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ goto host_stat_out;
+ }
+
+	data = kzalloc(response_len, GFP_KERNEL);
+	if (!data) {
+		ret = -ENOMEM;
+		goto host_stat_out;
+	}
+
+ ret = qla2xxx_get_ini_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
+ data, response_len);
+
+ rsp_data.status = EXT_STATUS_OK;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+
+ bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt,
+ data, response_len);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ kfree(data);
+host_stat_out:
+ kfree(req_data);
+ return ret;
+}
+
+static struct fc_rport *
+qla2xxx_find_rport(scsi_qla_host_t *vha, uint32_t tgt_num)
+{
+ fc_port_t *fcport = NULL;
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+		if (fcport->rport && fcport->rport->number == tgt_num)
+ return fcport->rport;
+ }
+ return NULL;
+}
+
+static int
+qla2x00_get_tgt_stats(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_tgt_stats_param *req_data;
+ u32 req_data_len;
+ int ret = 0;
+ u64 response_len = 0;
+ struct ql_vnd_tgt_stats_resp *data = NULL;
+ struct fc_rport *rport = NULL;
+
+ if (!vha->flags.online) {
+ ql_log(ql_log_warn, vha, 0x0000, "Host is not online.\n");
+ return -EIO;
+ }
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_stat_entry)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+	/* allocate the full validated payload length; only the param fields are used */
+	req_data = kzalloc(req_data_len, GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt,
+ req_data, req_data_len);
+
+ response_len = sizeof(struct ql_vnd_tgt_stats_resp) +
+ sizeof(struct ql_vnd_stat_entry);
+
+ /* structure + size for one entry */
+ data = kzalloc(response_len, GFP_KERNEL);
+ if (!data) {
+ kfree(req_data);
+ return -ENOMEM;
+ }
+
+ if (response_len > bsg_job->reply_payload.payload_len) {
+ data->status = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_BUFFER_TOO_SMALL;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_stats_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, data,
+ sizeof(struct ql_vnd_tgt_stats_resp));
+
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+ goto tgt_stat_out;
+ }
+
+ rport = qla2xxx_find_rport(vha, req_data->tgt_id);
+ if (!rport) {
+ ql_log(ql_log_warn, vha, 0x0000, "target %d not found.\n", req_data->tgt_id);
+ ret = EXT_STATUS_INVALID_PARAM;
+ data->status = EXT_STATUS_INVALID_PARAM;
+ goto reply;
+ }
+
+ ret = qla2xxx_get_tgt_stats(fc_bsg_to_shost(bsg_job), req_data->stat_type,
+ rport, (void *)data, response_len);
+
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+reply:
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, data,
+ response_len);
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+tgt_stat_out:
+ kfree(data);
+ kfree(req_data);
+
+ return ret;
+}
+
+static int
+qla2x00_manage_host_port(struct bsg_job *bsg_job)
+{
+ scsi_qla_host_t *vha = shost_priv(fc_bsg_to_shost(bsg_job));
+ struct fc_bsg_reply *bsg_reply = bsg_job->reply;
+ struct ql_vnd_mng_host_port_param *req_data;
+ struct ql_vnd_mng_host_port_resp rsp_data;
+ u32 req_data_len;
+ int ret = 0;
+
+ req_data_len = bsg_job->request_payload.payload_len;
+
+ if (req_data_len != sizeof(struct ql_vnd_mng_host_port_param)) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data_len invalid.\n");
+ return -EIO;
+ }
+
+ req_data = kzalloc(sizeof(*req_data), GFP_KERNEL);
+ if (!req_data) {
+ ql_log(ql_log_warn, vha, 0x0000, "req_data memory allocation failure.\n");
+ return -ENOMEM;
+ }
+
+ /* Copy the request buffer in req_data */
+ sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+ bsg_job->request_payload.sg_cnt, req_data, req_data_len);
+
+ switch (req_data->action) {
+ case QLA_ENABLE:
+ ret = qla2xxx_enable_port(vha->host);
+ break;
+ case QLA_DISABLE:
+ ret = qla2xxx_disable_port(vha->host);
+ break;
+ default:
+ ql_log(ql_log_warn, vha, 0x0000, "Invalid action.\n");
+ ret = -EIO;
+ break;
+ }
+
+ kfree(req_data);
+
+ /* Prepare response */
+ rsp_data.status = ret;
+ bsg_reply->reply_data.vendor_reply.vendor_rsp[0] = EXT_STATUS_OK;
+ bsg_job->reply_payload.payload_len = sizeof(struct ql_vnd_mng_host_port_resp);
+
+ bsg_reply->reply_payload_rcv_len =
+ sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+ bsg_job->reply_payload.sg_cnt, &rsp_data,
+ sizeof(struct ql_vnd_mng_host_port_resp));
+ bsg_reply->result = DID_OK;
+ bsg_job_done(bsg_job, bsg_reply->result,
+ bsg_reply->reply_payload_rcv_len);
+
+ return ret;
+}
+
+static int
qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
{
struct fc_bsg_request *bsg_request = bsg_job->request;
@@ -2520,6 +2838,18 @@ qla2x00_process_vendor_specific(struct bsg_job *bsg_job)
case QL_VND_SS_GET_FLASH_IMAGE_STATUS:
return qla2x00_get_flash_image_status(bsg_job);
+ case QL_VND_MANAGE_HOST_STATS:
+ return qla2x00_manage_host_stats(bsg_job);
+
+ case QL_VND_GET_HOST_STATS:
+ return qla2x00_get_host_stats(bsg_job);
+
+ case QL_VND_GET_TGT_STATS:
+ return qla2x00_get_tgt_stats(bsg_job);
+
+ case QL_VND_MANAGE_HOST_PORT:
+ return qla2x00_manage_host_port(bsg_job);
+
default:
return -ENOSYS;
}
@@ -2547,6 +2877,17 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
vha = shost_priv(host);
}
+	/* Disabling the port brings down the chip; let the enable and get-stats commands through */
+ if (bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_MANAGE_HOST_PORT ||
+ bsg_request->rqst_data.h_vendor.vendor_cmd[0] == QL_VND_GET_HOST_STATS)
+ goto skip_chip_chk;
+
+ if (vha->hw->flags.port_isolated) {
+ bsg_reply->result = DID_ERROR;
+ /* operation not permitted */
+ return -EPERM;
+ }
+
if (qla2x00_chip_is_down(vha)) {
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
@@ -2554,6 +2895,7 @@ qla24xx_bsg_request(struct bsg_job *bsg_job)
return -EBUSY;
}
+skip_chip_chk:
ql_dbg(ql_dbg_user, vha, 0x7000,
"Entered %s msgcode=0x%x.\n", __func__, bsg_request->msgcode);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 1a09b5512267..0274e99e4a12 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -31,6 +31,10 @@
#define QL_VND_DPORT_DIAGNOSTICS 0x19
#define QL_VND_GET_PRIV_STATS_EX 0x1A
#define QL_VND_SS_GET_FLASH_IMAGE_STATUS 0x1E
+#define QL_VND_MANAGE_HOST_STATS 0x23
+#define QL_VND_GET_HOST_STATS 0x24
+#define QL_VND_GET_TGT_STATS 0x25
+#define QL_VND_MANAGE_HOST_PORT 0x26
/* BSG Vendor specific subcode returns */
#define EXT_STATUS_OK 0
@@ -40,6 +44,7 @@
#define EXT_STATUS_DATA_OVERRUN 7
#define EXT_STATUS_DATA_UNDERRUN 8
#define EXT_STATUS_MAILBOX 11
+#define EXT_STATUS_BUFFER_TOO_SMALL 16
#define EXT_STATUS_NO_MEMORY 17
#define EXT_STATUS_DEVICE_OFFLINE 22
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index bb7431912d41..144a893e7335 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -202,6 +202,7 @@ qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
wrt_reg_word(&reg->mailbox1, LSW(addr));
wrt_reg_word(&reg->mailbox8, MSW(addr));
+ wrt_reg_word(&reg->mailbox10, 0);
wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 30c7e5e63851..49b42b430df4 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2101,6 +2101,7 @@ typedef struct {
#define CS_COMPLETE_CHKCOND 0x30 /* Error? */
#define CS_IOCB_ERROR 0x31 /* Generic error for IOCB request
failure */
+#define CS_REJECT_RECEIVED 0x4E /* Reject received */
#define CS_BAD_PAYLOAD 0x80 /* Driver defined */
#define CS_UNKNOWN 0x81 /* Driver defined */
#define CS_RETRY 0x82 /* Driver defined */
@@ -2557,6 +2558,10 @@ typedef struct fc_port {
u16 n2n_chip_reset;
struct dentry *dfs_rport_dir;
+
+ u64 tgt_short_link_down_cnt;
+ u64 tgt_link_down_time;
+ u64 dev_loss_tmo;
} fc_port_t;
enum {
@@ -3922,6 +3927,7 @@ struct qla_hw_data {
uint32_t scm_enabled:1;
uint32_t max_req_queue_warned:1;
uint32_t plogi_template_valid:1;
+ uint32_t port_isolated:1;
} flags;
uint16_t max_exchg;
@@ -4145,6 +4151,17 @@ struct qla_hw_data {
/* Bit 21 of fw_attributes decides the MCTP capabilities */
#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
((ha)->fw_attributes_ext[0] & BIT_0))
+#define QLA_ABTS_FW_ENABLED(_ha) ((_ha)->fw_attributes_ext[0] & BIT_14)
+#define QLA_SRB_NVME_LS(_sp) ((_sp)->type == SRB_NVME_LS)
+#define QLA_SRB_NVME_CMD(_sp) ((_sp)->type == SRB_NVME_CMD)
+#define QLA_NVME_IOS(_sp) (QLA_SRB_NVME_CMD(_sp) || QLA_SRB_NVME_LS(_sp))
+#define QLA_LS_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_SRB_NVME_LS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+#define QLA_CMD_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_SRB_NVME_CMD(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+#define QLA_ABTS_WAIT_ENABLED(_sp) \
+ (QLA_NVME_IOS(_sp) && QLA_ABTS_FW_ENABLED(_sp->fcport->vha->hw))
+
#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
@@ -4851,6 +4868,13 @@ typedef struct scsi_qla_host {
uint8_t scm_fabric_connection_flags;
unsigned int irq_offset;
+
+ u64 hw_err_cnt;
+ u64 interface_err_cnt;
+ u64 cmd_timeout_cnt;
+ u64 reset_cmd_err_cnt;
+ u64 link_down_time;
+ u64 short_link_down_cnt;
} scsi_qla_host_t;
struct qla27xx_image_status {
@@ -5174,6 +5198,65 @@ struct sff_8247_a0 {
#define PRLI_PHASE(_cls) \
((_cls == DSC_LS_PRLI_PEND) || (_cls == DSC_LS_PRLI_COMP))
+enum ql_vnd_host_stat_action {
+ QLA_STOP = 0,
+ QLA_START,
+ QLA_CLEAR,
+};
+
+struct ql_vnd_mng_host_stats_param {
+ u32 stat_type;
+ enum ql_vnd_host_stat_action action;
+} __packed;
+
+struct ql_vnd_mng_host_stats_resp {
+ u32 status;
+} __packed;
+
+struct ql_vnd_stats_param {
+ u32 stat_type;
+} __packed;
+
+struct ql_vnd_tgt_stats_param {
+ s32 tgt_id;
+ u32 stat_type;
+} __packed;
+
+enum ql_vnd_host_port_action {
+ QLA_ENABLE = 0,
+ QLA_DISABLE,
+};
+
+struct ql_vnd_mng_host_port_param {
+ enum ql_vnd_host_port_action action;
+} __packed;
+
+struct ql_vnd_mng_host_port_resp {
+ u32 status;
+} __packed;
+
+struct ql_vnd_stat_entry {
+ u32 stat_type; /* Failure type */
+ u32 tgt_num; /* Target Num */
+ u64 cnt; /* Counter value */
+} __packed;
+
+struct ql_vnd_stats {
+ u64 entry_count; /* Num of entries */
+ u64 rservd;
+	struct ql_vnd_stat_entry entry[0]; /* Placeholder for entries */
+} __packed;
+
+struct ql_vnd_host_stats_resp {
+ u32 status;
+ struct ql_vnd_stats stats;
+} __packed;
+
+struct ql_vnd_tgt_stats_resp {
+ u32 status;
+ struct ql_vnd_stats stats;
+} __packed;
+
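
How the new vendor subcommands pair up with these structures, going by the bsg handlers added earlier in this patch:

	/*
	 *   QL_VND_MANAGE_HOST_STATS  ->  ql_vnd_mng_host_stats_param / ql_vnd_mng_host_stats_resp
	 *   QL_VND_GET_HOST_STATS     ->  ql_vnd_stats_param          / ql_vnd_host_stats_resp
	 *   QL_VND_GET_TGT_STATS      ->  ql_vnd_tgt_stats_param      / ql_vnd_tgt_stats_resp
	 *   QL_VND_MANAGE_HOST_PORT   ->  ql_vnd_mng_host_port_param  / ql_vnd_mng_host_port_resp
	 */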
#include "qla_target.h"
#include "qla_gbl.h"
#include "qla_dbg.h"
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index d5ebcf7d70ff..85bd0e468d43 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -286,6 +286,10 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
core_qla_snd_status, qla_core_ret_sta_ctio, core_qla_free_cmd,
num_q_full_sent, num_alloc_iocb_failed, num_term_xchg_sent;
u16 i;
+ fc_port_t *fcport = NULL;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
qla_core_sbt_cmd = qpair->tgt_counters.qla_core_sbt_cmd;
core_qla_que_buf = qpair->tgt_counters.core_qla_que_buf;
@@ -349,6 +353,30 @@ qla_dfs_tgt_counters_show(struct seq_file *s, void *unused)
vha->qla_stats.qla_dif_stats.dif_ref_tag_err);
seq_printf(s, "DIF App tag err = %d\n",
vha->qla_stats.qla_dif_stats.dif_app_tag_err);
+
+ seq_puts(s, "\n");
+ seq_puts(s, "Initiator Error Counters\n");
+ seq_printf(s, "HW Error Count = %14lld\n",
+ vha->hw_err_cnt);
+ seq_printf(s, "Link Down Count = %14lld\n",
+ vha->short_link_down_cnt);
+ seq_printf(s, "Interface Err Count = %14lld\n",
+ vha->interface_err_cnt);
+ seq_printf(s, "Cmd Timeout Count = %14lld\n",
+ vha->cmd_timeout_cnt);
+ seq_printf(s, "Reset Count = %14lld\n",
+ vha->reset_cmd_err_cnt);
+ seq_puts(s, "\n");
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (!fcport->rport)
+ continue;
+
+ seq_printf(s, "Target Num = %7d Link Down Count = %14lld\n",
+ fcport->rport->number, fcport->tgt_short_link_down_cnt);
+ }
+ seq_puts(s, "\n");
+
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 12b689e32883..49df418030e4 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -982,11 +982,18 @@ struct abort_entry_24xx {
uint32_t handle; /* System handle. */
- __le16 nport_handle; /* N_PORT handle. */
- /* or Completion status. */
+ union {
+ __le16 nport_handle; /* N_PORT handle. */
+ __le16 comp_status; /* Completion status. */
+ };
__le16 options; /* Options. */
#define AOF_NO_ABTS BIT_0 /* Do not send any ABTS. */
+#define AOF_NO_RRQ BIT_1 /* Do not send RRQ. */
+#define AOF_ABTS_TIMEOUT BIT_2 /* Disable logout on ABTS timeout. */
+#define AOF_ABTS_RTY_CNT BIT_3 /* Use driver specified retry count. */
+#define AOF_RSP_TIMEOUT BIT_4 /* Use specified response timeout. */
+
uint32_t handle_to_abort; /* System handle to abort. */
@@ -995,8 +1002,20 @@ struct abort_entry_24xx {
uint8_t port_id[3]; /* PortID of destination port. */
uint8_t vp_index;
-
- uint8_t reserved_2[12];
+ u8 reserved_2[4];
+ union {
+ struct {
+ __le16 abts_rty_cnt;
+ __le16 rsp_timeout;
+ } drv;
+ struct {
+ u8 ba_rjt_vendorUnique;
+ u8 ba_rjt_reasonCodeExpl;
+ u8 ba_rjt_reasonCode;
+ u8 reserved_3;
+ } fw;
+ };
+ u8 reserved_4[4];
};
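
The reworked abort_entry_24xx is now used in both directions; a short note on the overlay, as exercised by the qla_nvme changes later in this series:

	/*
	 * Request (driver -> firmware): options carries the AOF_* bits; when
	 * AOF_ABTS_RTY_CNT / AOF_RSP_TIMEOUT are set, drv.abts_rty_cnt and
	 * drv.rsp_timeout supply the values.
	 *
	 * Response (firmware -> driver): nport_handle is overlaid by comp_status,
	 * and for CS_REJECT_RECEIVED the fw member carries the BA_RJT reason code,
	 * explanation and vendor-unique byte.
	 */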
#define ABTS_RCV_TYPE 0x54
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index e39b4f2da73a..6486f97d649e 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -177,6 +177,7 @@ extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
extern int ql2xenforce_iocb_limit;
+extern int ql2xabts_wait_nvme;
extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -941,8 +942,36 @@ int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
extern void qla24xx_process_purex_list(struct purex_list *);
extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);
+extern void qla_wait_nvme_release_cmd_kref(srb_t *sp);
+extern void qla_nvme_abort_set_option
+ (struct abort_entry_24xx *abt, srb_t *sp);
+extern void qla_nvme_abort_process_comp_status
+ (struct abort_entry_24xx *abt, srb_t *sp);
/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
+
+#define QLA2XX_HW_ERROR BIT_0
+#define QLA2XX_SHT_LNK_DWN BIT_1
+#define QLA2XX_INT_ERR BIT_2
+#define QLA2XX_CMD_TIMEOUT BIT_3
+#define QLA2XX_RESET_CMD_ERR BIT_4
+#define QLA2XX_TGT_SHT_LNK_DOWN BIT_17
+
+#define QLA2XX_MAX_LINK_DOWN_TIME 100
+
+int qla2xxx_start_stats(struct Scsi_Host *shost, u32 flags);
+int qla2xxx_stop_stats(struct Scsi_Host *shost, u32 flags);
+int qla2xxx_reset_stats(struct Scsi_Host *shost, u32 flags);
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *shost, u32 flags, void *data, u64 size);
+int qla2xxx_get_tgt_stats(struct Scsi_Host *shost, u32 flags,
+ struct fc_rport *rport, void *data, u64 size);
+int qla2xxx_disable_port(struct Scsi_Host *shost);
+int qla2xxx_enable_port(struct Scsi_Host *shost);
+
+uint64_t qla2x00_get_num_tgts(scsi_qla_host_t *vha);
+uint64_t qla2x00_count_set_bits(u32 num);
+
#endif /* _QLA_GBL_H */
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 391ac75e3de3..517d358b0031 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -3563,6 +3563,7 @@ login_logout:
__func__, __LINE__,
fcport->port_name);
+ fcport->tgt_link_down_time = 0;
qlt_schedule_sess_for_deletion(fcport);
continue;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index dcc0f0d823db..f01f07116bd3 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -136,6 +136,10 @@ static void qla24xx_abort_iocb_timeout(void *data)
static void qla24xx_abort_sp_done(srb_t *sp, int res)
{
struct srb_iocb *abt = &sp->u.iocb_cmd;
+ srb_t *orig_sp = sp->cmd_sp;
+
+ if (orig_sp)
+ qla_wait_nvme_release_cmd_kref(orig_sp);
del_timer(&sp->u.iocb_cmd.timer);
if (sp->flags & SRB_WAKEUP_ON_COMP)
@@ -347,11 +351,11 @@ qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
if (NVME_TARGET(vha->hw, fcport))
lio->u.logio.flags |= SRB_LOGIN_SKIP_PRLI;
- ql_dbg(ql_dbg_disc, vha, 0x2072,
- "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x "
- "retries=%d.\n", fcport->port_name, sp->handle, fcport->loop_id,
- fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
- fcport->login_retry);
+ ql_log(ql_log_warn, vha, 0x2072,
+ "Async-login - %8phC hdl=%x, loopid=%x portid=%02x%02x%02x retries=%d.\n",
+ fcport->port_name, sp->handle, fcport->loop_id,
+ fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+ fcport->login_retry);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
@@ -3371,8 +3375,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
"Re-Allocated (%d KB) and save firmware dump.\n",
dump_size / 1024);
} else {
- if (ha->fw_dump)
- vfree(ha->fw_dump);
+ vfree(ha->fw_dump);
ha->fw_dump = fw_dump;
ha->fw_dump_len = ha->fw_dump_alloc_len =
@@ -4993,6 +4996,9 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->login_retry = vha->hw->login_retry_count;
fcport->chip_reset = vha->hw->base_qpair->chip_reset;
fcport->logout_on_delete = 1;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ fcport->tgt_short_link_down_cnt = 0;
+ fcport->dev_loss_tmo = 0;
if (!fcport->ct_desc.ct_sns) {
ql_log(ql_log_warn, vha, 0xd049,
@@ -5490,6 +5496,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
spin_lock_irqsave(fcport->vha->host->host_lock, flags);
*((fc_port_t **)rport->dd_data) = fcport;
spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
+ fcport->dev_loss_tmo = rport->dev_loss_tmo;
rport->supported_classes = fcport->supported_classes;
@@ -5548,6 +5555,11 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->logout_on_delete = 1;
fcport->n2n_chip_reset = fcport->n2n_link_reset_cnt = 0;
+ if (fcport->tgt_link_down_time < fcport->dev_loss_tmo) {
+ fcport->tgt_short_link_down_cnt++;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ }
+
switch (vha->hw->current_topology) {
case ISP_CFG_N:
case ISP_CFG_NL:
@@ -6908,6 +6920,9 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
if (vha->flags.online) {
qla2x00_abort_isp_cleanup(vha);
+ if (vha->hw->flags.port_isolated)
+ return status;
+
if (test_and_clear_bit(ISP_ABORT_TO_ROM, &vha->dpc_flags)) {
ha->flags.chip_reset_done = 1;
vha->flags.online = 1;
@@ -7029,6 +7044,11 @@ qla2x00_abort_isp(scsi_qla_host_t *vha)
}
+ if (vha->hw->flags.port_isolated) {
+ qla2x00_abort_isp_cleanup(vha);
+ return status;
+ }
+
if (!status) {
ql_dbg(ql_dbg_taskm, vha, 0x8022, "%s succeeded.\n", __func__);
qla2x00_configure_hba(vha);
@@ -7855,8 +7875,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
templates = (risc_attr & BIT_9) ? 2 : 1;
ql_dbg(ql_dbg_init, vha, 0x0160, "-> templates = %u\n", templates);
for (j = 0; j < templates; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -7916,8 +7935,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return QLA_SUCCESS;
failed:
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -8113,8 +8131,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
templates = (risc_attr & BIT_9) ? 2 : 1;
ql_dbg(ql_dbg_init, vha, 0x0170, "-> templates = %u\n", templates);
for (j = 0; j < templates; j++, fwdt++) {
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -8174,8 +8191,7 @@ qla24xx_load_risc_blob(scsi_qla_host_t *vha, uint32_t *srisc_addr)
return QLA_SUCCESS;
failed:
- if (fwdt->template)
- vfree(fwdt->template);
+ vfree(fwdt->template);
fwdt->template = NULL;
fwdt->length = 0;
@@ -9171,3 +9187,202 @@ int qla2xxx_delete_qpair(struct scsi_qla_host *vha, struct qla_qpair *qpair)
fail:
return ret;
}
+
+uint64_t
+qla2x00_count_set_bits(uint32_t num)
+{
+ /* Brian Kernighan's Algorithm */
+ u64 count = 0;
+
+ while (num) {
+ num &= (num - 1);
+ count++;
+ }
+ return count;
+}
+
+uint64_t
+qla2x00_get_num_tgts(scsi_qla_host_t *vha)
+{
+ fc_port_t *f, *tf;
+ u64 count = 0;
+
+ f = NULL;
+ tf = NULL;
+
+ list_for_each_entry_safe(f, tf, &vha->vp_fcports, list) {
+ if (f->port_type != FCT_TARGET)
+ continue;
+ count++;
+ }
+ return count;
+}
+
+int qla2xxx_reset_stats(struct Scsi_Host *host, u32 flags)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ fc_port_t *fcport = NULL;
+ unsigned long int_flags;
+
+ if (flags & QLA2XX_HW_ERROR)
+ vha->hw_err_cnt = 0;
+ if (flags & QLA2XX_SHT_LNK_DWN)
+ vha->short_link_down_cnt = 0;
+ if (flags & QLA2XX_INT_ERR)
+ vha->interface_err_cnt = 0;
+ if (flags & QLA2XX_CMD_TIMEOUT)
+ vha->cmd_timeout_cnt = 0;
+ if (flags & QLA2XX_RESET_CMD_ERR)
+ vha->reset_cmd_err_cnt = 0;
+ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->tgt_short_link_down_cnt = 0;
+ fcport->tgt_link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+ }
+ vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ return 0;
+}
+
+int qla2xxx_start_stats(struct Scsi_Host *host, u32 flags)
+{
+ return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_stop_stats(struct Scsi_Host *host, u32 flags)
+{
+ return qla2xxx_reset_stats(host, flags);
+}
+
+int qla2xxx_get_ini_stats(struct Scsi_Host *host, u32 flags,
+ void *data, u64 size)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+ struct ql_vnd_host_stats_resp *resp = (struct ql_vnd_host_stats_resp *)data;
+ struct ql_vnd_stats *rsp_data = &resp->stats;
+ u64 ini_entry_count = 0;
+ u64 i = 0;
+ u64 entry_count = 0;
+ u64 num_tgt = 0;
+ u32 tmp_stat_type = 0;
+ fc_port_t *fcport = NULL;
+ unsigned long int_flags;
+
+ /* Copy stat type to work on it */
+ tmp_stat_type = flags;
+
+ if (tmp_stat_type & BIT_17) {
+ num_tgt = qla2x00_get_num_tgts(vha);
+ /* unset BIT_17 */
+ tmp_stat_type &= ~(1 << 17);
+ }
+ ini_entry_count = qla2x00_count_set_bits(tmp_stat_type);
+
+ entry_count = ini_entry_count + num_tgt;
+
+ rsp_data->entry_count = entry_count;
+
+ i = 0;
+ if (flags & QLA2XX_HW_ERROR) {
+ rsp_data->entry[i].stat_type = QLA2XX_HW_ERROR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->hw_err_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_SHT_LNK_DWN) {
+ rsp_data->entry[i].stat_type = QLA2XX_SHT_LNK_DWN;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->short_link_down_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_INT_ERR) {
+ rsp_data->entry[i].stat_type = QLA2XX_INT_ERR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->interface_err_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_CMD_TIMEOUT) {
+ rsp_data->entry[i].stat_type = QLA2XX_CMD_TIMEOUT;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->cmd_timeout_cnt;
+ i++;
+ }
+
+ if (flags & QLA2XX_RESET_CMD_ERR) {
+ rsp_data->entry[i].stat_type = QLA2XX_RESET_CMD_ERR;
+ rsp_data->entry[i].tgt_num = 0x0;
+ rsp_data->entry[i].cnt = vha->reset_cmd_err_cnt;
+ i++;
+ }
+
+	/* i continues from the previous loop, as target
+	 * entries come after the initiator entries
+ */
+ if (flags & QLA2XX_TGT_SHT_LNK_DOWN) {
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, int_flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->port_type != FCT_TARGET)
+ continue;
+ if (!fcport->rport)
+ continue;
+ rsp_data->entry[i].stat_type = QLA2XX_TGT_SHT_LNK_DOWN;
+ rsp_data->entry[i].tgt_num = fcport->rport->number;
+ rsp_data->entry[i].cnt = fcport->tgt_short_link_down_cnt;
+ i++;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, int_flags);
+ }
+ resp->status = EXT_STATUS_OK;
+
+ return 0;
+}
+
+int qla2xxx_get_tgt_stats(struct Scsi_Host *host, u32 flags,
+ struct fc_rport *rport, void *data, u64 size)
+{
+ struct ql_vnd_tgt_stats_resp *tgt_data = data;
+ fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
+
+ tgt_data->status = 0;
+ tgt_data->stats.entry_count = 1;
+ tgt_data->stats.entry[0].stat_type = flags;
+ tgt_data->stats.entry[0].tgt_num = rport->number;
+ tgt_data->stats.entry[0].cnt = fcport->tgt_short_link_down_cnt;
+
+ return 0;
+}
+
+int qla2xxx_disable_port(struct Scsi_Host *host)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+
+ vha->hw->flags.port_isolated = 1;
+
+ if (qla2x00_chip_is_down(vha))
+ return 0;
+
+ if (vha->flags.online) {
+ qla2x00_abort_isp_cleanup(vha);
+ qla2x00_wait_for_sess_deletion(vha);
+ }
+
+ return 0;
+}
+
+int qla2xxx_enable_port(struct Scsi_Host *host)
+{
+ scsi_qla_host_t *vha = shost_priv(host);
+
+ vha->hw->flags.port_isolated = 0;
+ /* Set the flag to 1, so that isp_abort can proceed */
+ vha->flags.online = 1;
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ qla2xxx_wake_dpc(vha);
+
+ return 0;
+}
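
A minimal sizing sketch for the host-stats reply, mirroring the arithmetic in qla2x00_get_host_stats() and qla2xxx_get_ini_stats(); the helper name ql_vnd_host_stats_size() is made up for illustration and only the structures and flag bits defined in this patch are assumed:

	static u64 ql_vnd_host_stats_size(scsi_qla_host_t *vha, u32 stat_type)
	{
		u64 entries = 0;

		/* one entry per target when the per-target link-down counter is requested */
		if (stat_type & QLA2XX_TGT_SHT_LNK_DOWN) {
			entries += qla2x00_get_num_tgts(vha);
			stat_type &= ~QLA2XX_TGT_SHT_LNK_DOWN;
		}

		/* one entry per remaining initiator counter bit */
		entries += qla2x00_count_set_bits(stat_type);

		return sizeof(struct ql_vnd_host_stats_resp) +
		       entries * sizeof(struct ql_vnd_stat_entry);
	}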
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index c532c74ca1ab..8b41cbaf8535 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2378,6 +2378,8 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
logio->io_parameter[0] =
cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
if (sp->vha->flags.nvme2_enabled) {
+ /* Set service parameter BIT_7 for NVME CONF support */
+ logio->io_parameter[0] |= NVME_PRLI_SP_CONF;
/* Set service parameter BIT_8 for SLER support */
logio->io_parameter[0] |=
cpu_to_le32(NVME_PRLI_SP_SLER);
@@ -3571,6 +3573,7 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
struct srb_iocb *aio = &sp->u.iocb_cmd;
scsi_qla_host_t *vha = sp->vha;
struct req_que *req = sp->qpair->req;
+ srb_t *orig_sp = sp->cmd_sp;
memset(abt_iocb, 0, sizeof(struct abort_entry_24xx));
abt_iocb->entry_type = ABORT_IOCB_TYPE;
@@ -3587,6 +3590,11 @@ qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
aio->u.abt.cmd_hndl);
abt_iocb->vp_index = vha->vp_idx;
abt_iocb->req_que_no = aio->u.abt.req_que_no;
+
+ /* need to pass original sp */
+ if (orig_sp)
+ qla_nvme_abort_set_option(abt_iocb, orig_sp);
+
/* Send the command to the firmware */
wmb();
}
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index f9142dbec112..5e188375c871 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
*/
#include "qla_def.h"
#include "qla_target.h"
+#include "qla_gbl.h"
#include <linux/delay.h>
#include <linux/slab.h>
@@ -761,7 +762,7 @@ static void
qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
struct qla_hw_data *ha = vha->hw;
- bool reset_isp_needed = 0;
+ bool reset_isp_needed = false;
ql_log(ql_log_warn, vha, 0x02f0,
"MPI Heartbeat stop. MPI reset is%s needed. "
@@ -777,7 +778,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
if (ql2xfulldump_on_mpifail) {
ha->isp_ops->fw_dump(vha);
- reset_isp_needed = 1;
+ reset_isp_needed = true;
}
ha->isp_ops->mpi_fw_dump(vha, 1);
@@ -1059,6 +1060,9 @@ skip_rio:
case MBA_SYSTEM_ERR: /* System Error */
mbx = 0;
+
+ vha->hw_err_cnt++;
+
if (IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
u16 m[4];
@@ -1112,6 +1116,8 @@ skip_rio:
ql_log(ql_log_warn, vha, 0x5006,
"ISP Request Transfer Error (%x).\n", mb[1]);
+ vha->hw_err_cnt++;
+
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -1119,6 +1125,8 @@ skip_rio:
ql_log(ql_log_warn, vha, 0x5007,
"ISP Response Transfer Error (%x).\n", mb[1]);
+ vha->hw_err_cnt++;
+
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
break;
@@ -1176,12 +1184,18 @@ skip_rio:
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
+ if (vha->link_down_time < vha->hw->port_down_retry_count) {
+ vha->short_link_down_cnt++;
+ vha->link_down_time = QLA2XX_MAX_LINK_DOWN_TIME;
+ }
+
break;
case MBA_LOOP_DOWN: /* Loop Down Event */
SAVE_TOPO(ha);
ha->flags.lip_ae = 0;
ha->current_topology = 0;
+ vha->link_down_time = 0;
mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
? rd_reg_word(&reg24->mailbox4) : 0;
@@ -1442,9 +1456,9 @@ global_port_update:
if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
break;
- ql_dbg(ql_dbg_async, vha, 0x5013,
- "RSCN database changed -- %04x %04x %04x.\n",
- mb[1], mb[2], mb[3]);
+ ql_log(ql_log_warn, vha, 0x5013,
+ "RSCN database changed -- %04x %04x %04x.\n",
+ mb[1], mb[2], mb[3]);
rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
@@ -1503,6 +1517,7 @@ global_port_update:
ql_dbg(ql_dbg_async, vha, 0x5016,
"Discard RND Frame -- %04x %04x %04x.\n",
mb[1], mb[2], mb[3]);
+ vha->interface_err_cnt++;
break;
case MBA_TRACE_NOTIFICATION:
@@ -1592,6 +1607,7 @@ global_port_update:
case MBA_IDC_AEN:
if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
+ vha->hw_err_cnt++;
qla27xx_handle_8200_aen(vha, mb);
} else if (IS_QLA83XX(ha)) {
mb[4] = rd_reg_word(&reg24->mailbox4);
@@ -2206,12 +2222,12 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
break;
}
- ql_dbg(ql_dbg_async, sp->vha, 0x5037,
- "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
- type, sp->handle, fcport->d_id.b24, fcport->port_name,
- le16_to_cpu(logio->comp_status),
- le32_to_cpu(logio->io_parameter[0]),
- le32_to_cpu(logio->io_parameter[1]));
+ ql_log(ql_log_warn, sp->vha, 0x5037,
+ "Async-%s failed: handle=%x pid=%06x wwpn=%8phC comp_status=%x iop0=%x iop1=%x\n",
+ type, sp->handle, fcport->d_id.b24, fcport->port_name,
+ le16_to_cpu(logio->comp_status),
+ le32_to_cpu(logio->io_parameter[0]),
+ le32_to_cpu(logio->io_parameter[1]));
logio_done:
sp->done(sp, 0);
@@ -2374,9 +2390,9 @@ static void qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
tgt_xfer_len = be32_to_cpu(rsp_iu->xfrd_len);
if (fd->transferred_length != tgt_xfer_len) {
- ql_dbg(ql_dbg_io, fcport->vha, 0x3079,
- "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
- tgt_xfer_len, fd->transferred_length);
+ ql_log(ql_log_warn, fcport->vha, 0x3079,
+ "Dropped frame(s) detected (sent/rcvd=%u/%u).\n",
+ tgt_xfer_len, fd->transferred_length);
logit = 1;
} else if (le16_to_cpu(comp_status) == CS_DATA_UNDERRUN) {
/*
@@ -3097,9 +3113,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
scsi_set_resid(cp, resid);
if (scsi_status & SS_RESIDUAL_UNDER) {
if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
- ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
- "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
- resid, scsi_bufflen(cp));
+ ql_log(ql_log_warn, fcport->vha, 0x301d,
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ vha->interface_err_cnt++;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -3122,9 +3140,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
* task not completed.
*/
- ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
- "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
- resid, scsi_bufflen(cp));
+ ql_log(ql_log_warn, fcport->vha, 0x301f,
+ "Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
+ resid, scsi_bufflen(cp));
+
+ vha->interface_err_cnt++;
res = DID_ERROR << 16 | lscsi_status;
goto check_scsi_status;
@@ -3208,6 +3228,7 @@ check_scsi_status:
case CS_TRANSPORT:
res = DID_ERROR << 16;
+ vha->hw_err_cnt++;
if (!IS_PI_SPLIT_DET_CAPABLE(ha))
break;
@@ -3228,6 +3249,7 @@ check_scsi_status:
ql_dump_buffer(ql_dbg_tgt + ql_dbg_verbose, vha, 0xe0ee,
pkt, sizeof(*sts24));
res = DID_ERROR << 16;
+ vha->hw_err_cnt++;
break;
default:
res = DID_ERROR << 16;
@@ -3236,15 +3258,13 @@ check_scsi_status:
out:
if (logit)
- ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
- "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu "
- "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
- "rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
- comp_status, scsi_status, res, vha->host_no,
- cp->device->id, cp->device->lun, fcport->d_id.b.domain,
- fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
- cp->cmnd, scsi_bufflen(cp), rsp_info_len,
- resid_len, fw_resid_len, sp, cp);
+ ql_log(ql_log_warn, fcport->vha, 0x3022,
+ "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%llu portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x rsp_info=0x%x resid=0x%x fw_resid=0x%x sp=%p cp=%p.\n",
+ comp_status, scsi_status, res, vha->host_no,
+ cp->device->id, cp->device->lun, fcport->d_id.b.domain,
+ fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
+ cp->cmnd, scsi_bufflen(cp), rsp_info_len,
+ resid_len, fw_resid_len, sp, cp);
if (rsp->status_srb == NULL)
sp->done(sp, res);
@@ -3412,6 +3432,7 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
{
const char func[] = "ABT_IOCB";
srb_t *sp;
+ srb_t *orig_sp = NULL;
struct srb_iocb *abt;
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
@@ -3419,7 +3440,12 @@ qla24xx_abort_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
return;
abt = &sp->u.iocb_cmd;
- abt->u.abt.comp_status = pkt->nport_handle;
+ abt->u.abt.comp_status = le16_to_cpu(pkt->comp_status);
+ orig_sp = sp->cmd_sp;
+ /* Need to pass original sp */
+ if (orig_sp)
+ qla_nvme_abort_process_comp_status(pkt, orig_sp);
+
sp->done(sp, 0);
}
@@ -3839,6 +3865,7 @@ qla24xx_msix_default(int irq, void *dev_id)
hccr);
qla2xxx_check_risc_status(vha);
+ vha->hw_err_cnt++;
ha->isp_ops->fw_dump(vha);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d7d4ab65009c..06c99963b2c9 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -180,6 +180,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_log(ql_log_warn, vha, 0xd035,
"Cmd access timeout, cmd=0x%x, Exiting.\n",
mcp->mb[0]);
+ vha->hw_err_cnt++;
atomic_dec(&ha->num_pend_mbx_stage1);
return QLA_FUNCTION_TIMEOUT;
}
@@ -307,6 +308,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
atomic_dec(&ha->num_pend_mbx_stage2);
ql_dbg(ql_dbg_mbx, vha, 0x1012,
"Pending mailbox timeout, exiting.\n");
+ vha->hw_err_cnt++;
rval = QLA_FUNCTION_TIMEOUT;
goto premature_exit;
}
@@ -418,6 +420,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
mb[7], host_status, hccr);
+ vha->hw_err_cnt++;
} else {
mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
@@ -425,6 +428,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
"MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
"mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
+ vha->hw_err_cnt++;
}
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
@@ -497,6 +501,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
"abort.\n", command, mcp->mb[0],
ha->flags.eeh_busy);
+ vha->hw_err_cnt++;
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
}
@@ -521,6 +526,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
"Mailbox cmd timeout occurred, cmd=0x%x, "
"mb[0]=0x%x. Scheduling ISP abort ",
command, mcp->mb[0]);
+ vha->hw_err_cnt++;
set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
/* Allow next mbx cmd to come in. */
@@ -625,6 +631,7 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
ql_dbg(ql_dbg_mbx, vha, 0x1023,
"Failed=%x mb[0]=%x mb[1]=%x.\n",
rval, mcp->mb[0], mcp->mb[1]);
+ vha->hw_err_cnt++;
} else {
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
"Done %s.\n", __func__);
@@ -736,6 +743,7 @@ again:
ql_dbg(ql_dbg_mbx, vha, 0x1026,
"Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ vha->hw_err_cnt++;
return rval;
}
@@ -1313,6 +1321,7 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
if (rval != QLA_SUCCESS) {
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
+ vha->hw_err_cnt++;
} else {
/*EMPTY*/
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
@@ -3234,6 +3243,8 @@ qla24xx_abort_command(srb_t *sp)
abt->vp_index = fcport->vha->vp_idx;
abt->req_que_no = cpu_to_le16(req->id);
+ /* Need to pass original sp */
+ qla_nvme_abort_set_option(abt, sp);
rval = qla2x00_issue_iocb(vha, abt, abt_dma, 0);
if (rval != QLA_SUCCESS) {
@@ -3256,6 +3267,10 @@ qla24xx_abort_command(srb_t *sp)
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
"Done %s.\n", __func__);
}
+ if (rval == QLA_SUCCESS)
+ qla_nvme_abort_process_comp_status(abt, sp);
+
+ qla_wait_nvme_release_cmd_kref(sp);
dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -4276,7 +4291,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
mcp->mb[8] = MSW(addr);
- mcp->out_mb = MBX_8|MBX_0;
+ mcp->mb[10] = 0;
+ mcp->out_mb = MBX_10|MBX_8|MBX_0;
} else {
mcp->mb[0] = MBC_DUMP_RISC_RAM;
mcp->out_mb = MBX_0;
diff --git a/drivers/scsi/qla2xxx/qla_nvme.c b/drivers/scsi/qla2xxx/qla_nvme.c
index eab559b3b257..0237588f48b0 100644
--- a/drivers/scsi/qla2xxx/qla_nvme.c
+++ b/drivers/scsi/qla2xxx/qla_nvme.c
@@ -245,6 +245,13 @@ static void qla_nvme_abort_work(struct work_struct *work)
__func__, (rval != QLA_SUCCESS) ? "Failed to abort" : "Aborted",
sp, sp->handle, fcport, rval);
+ /*
+	 * Return before decreasing the kref so that I/O requests wait
+	 * until the ABTS completes. The kref is decreased in the
+	 * qla24xx_abort_sp_done() function.
+ */
+ if (ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(sp))
+ return;
out:
/* kref_get was done before work was schedule. */
kref_put(&sp->cmd_kref, sp->put_fn);
@@ -284,8 +291,7 @@ static int qla_nvme_ls_req(struct nvme_fc_local_port *lport,
struct qla_hw_data *ha;
srb_t *sp;
-
- if (!fcport || (fcport && fcport->deleted))
+ if (!fcport || fcport->deleted)
return rval;
vha = fcport->vha;
@@ -591,6 +597,7 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
sp->put_fn = qla_nvme_release_fcp_cmd_kref;
sp->qpair = qpair;
sp->vha = vha;
+ sp->cmd_sp = sp;
nvme = &sp->u.iocb_cmd;
nvme->u.nvme.desc = fd;
@@ -744,3 +751,85 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
return ret;
}
+
+void qla_nvme_abort_set_option(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ struct qla_hw_data *ha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ ha = orig_sp->fcport->vha->hw;
+
+ WARN_ON_ONCE(abt->options & cpu_to_le16(BIT_0));
+ /* Use Driver Specified Retry Count */
+ abt->options |= cpu_to_le16(AOF_ABTS_RTY_CNT);
+ abt->drv.abts_rty_cnt = cpu_to_le16(2);
+ /* Use specified response timeout */
+ abt->options |= cpu_to_le16(AOF_RSP_TIMEOUT);
+ /* set it to 2 * r_a_tov in secs */
+ abt->drv.rsp_timeout = cpu_to_le16(2 * (ha->r_a_tov / 10));
+}
+
+void qla_nvme_abort_process_comp_status(struct abort_entry_24xx *abt, srb_t *orig_sp)
+{
+ u16 comp_status;
+ struct scsi_qla_host *vha;
+
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+
+ vha = orig_sp->fcport->vha;
+
+ comp_status = le16_to_cpu(abt->comp_status);
+ switch (comp_status) {
+ case CS_RESET: /* reset event aborted */
+ case CS_ABORTED: /* IOCB was cleaned */
+	case CS_TIMEOUT:
+	/* N_Port handle is not currently logged in */
+	case CS_PORT_UNAVAILABLE:
+	/* N_Port handle was logged out while waiting for ABTS to complete */
+	case CS_PORT_LOGGED_OUT:
+	/* Firmware found that the port name changed */
+	case CS_PORT_CONFIG_CHG:
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09d,
+ "Abort I/O IOCB completed with error, comp_status=%x\n",
+ comp_status);
+ break;
+
+ /* BA_RJT was received for the ABTS */
+ case CS_REJECT_RECEIVED:
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+ "BA_RJT was received for the ABTS rjt_vendorUnique = %u",
+ abt->fw.ba_rjt_vendorUnique);
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09e,
+ "ba_rjt_reasonCodeExpl = %u, ba_rjt_reasonCode = %u\n",
+ abt->fw.ba_rjt_reasonCodeExpl, abt->fw.ba_rjt_reasonCode);
+ break;
+
+ case CS_COMPLETE:
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf09f,
+		       "IOCB request completed successfully, comp_status=%x\n",
+ comp_status);
+ break;
+
+ case CS_IOCB_ERROR:
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a0,
+		       "IOCB request failed, comp_status=%x\n", comp_status);
+ break;
+
+ default:
+ ql_dbg(ql_dbg_async + ql_dbg_mbx, vha, 0xf0a1,
+ "Invalid Abort IO IOCB Completion Status %x\n",
+ comp_status);
+ break;
+ }
+}
+
+inline void qla_wait_nvme_release_cmd_kref(srb_t *orig_sp)
+{
+ if (!(ql2xabts_wait_nvme && QLA_ABTS_WAIT_ENABLED(orig_sp)))
+ return;
+ kref_put(&orig_sp->cmd_kref, orig_sp->put_fn);
+}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 0e0fe5b09496..074392560f3d 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -327,6 +327,11 @@ MODULE_PARM_DESC(ql2xrdpenable,
"Enables RDP responses. "
"0 - no RDP responses (default). "
"1 - provide RDP responses.");
+int ql2xabts_wait_nvme = 1;
+module_param(ql2xabts_wait_nvme, int, 0444);
+MODULE_PARM_DESC(ql2xabts_wait_nvme,
+		 "Wait for ABTS response on I/O timeouts for NVMe. (default: 1)");
+
static void qla2x00_clear_drv_active(struct qla_hw_data *);
static void qla2x00_free_device(scsi_qla_host_t *);
@@ -957,7 +962,7 @@ qla2xxx_mqueuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd,
srb_t *sp;
int rval;
- rval = rport ? fc_remote_port_chkready(rport) : FC_PORTSTATE_OFFLINE;
+ rval = rport ? fc_remote_port_chkready(rport) : (DID_NO_CONNECT << 16);
if (rval) {
cmd->result = rval;
ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3076,
@@ -1274,6 +1279,8 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
sp = scsi_cmd_priv(cmd);
qpair = sp->qpair;
+ vha->cmd_timeout_cnt++;
+
if ((sp->fcport && sp->fcport->deleted) || !qpair)
return SUCCESS;
@@ -1442,6 +1449,7 @@ eh_reset_failed:
"%s RESET FAILED: %s nexus=%ld:%d:%llu cmd=%p.\n", name,
reset_errors[err], vha->host_no, cmd->device->id, cmd->device->lun,
cmd);
+ vha->reset_cmd_err_cnt++;
return FAILED;
}
@@ -3141,6 +3149,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mr.fcport.supported_classes = FC_COS_UNSPECIFIED;
ha->mr.fcport.scan_state = 1;
+ qla2xxx_reset_stats(host, QLA2XX_HW_ERROR | QLA2XX_SHT_LNK_DWN |
+ QLA2XX_INT_ERR | QLA2XX_CMD_TIMEOUT |
+ QLA2XX_RESET_CMD_ERR | QLA2XX_TGT_SHT_LNK_DOWN);
+
/* Set the SG table size based on ISP type */
if (!IS_FWI2_CAPABLE(ha)) {
if (IS_QLA2100(ha))
@@ -5090,6 +5102,7 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)
fcport->d_id = e->u.new_sess.id;
fcport->flags |= FCF_FABRIC_DEVICE;
fcport->fw_login_state = DSC_LS_PLOGI_PEND;
+ fcport->tgt_short_link_down_cnt = 0;
memcpy(fcport->port_name, e->u.new_sess.port_name,
WWN_SIZE);
@@ -7061,6 +7074,8 @@ qla2x00_timer(struct timer_list *t)
uint16_t w;
struct qla_hw_data *ha = vha->hw;
struct req_que *req;
+ unsigned long flags;
+ fc_port_t *fcport = NULL;
if (ha->flags.eeh_busy) {
ql_dbg(ql_dbg_timer, vha, 0x6000,
@@ -7092,6 +7107,16 @@ qla2x00_timer(struct timer_list *t)
if (!vha->vp_idx && IS_QLAFX00(ha))
qlafx00_timer_routine(vha);
+ if (vha->link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
+ vha->link_down_time++;
+
+ spin_lock_irqsave(&vha->hw->tgt.sess_lock, flags);
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (fcport->tgt_link_down_time < QLA2XX_MAX_LINK_DOWN_TIME)
+ fcport->tgt_link_down_time++;
+ }
+ spin_unlock_irqrestore(&vha->hw->tgt.sess_lock, flags);
+
/* Loop down handler. */
if (atomic_read(&vha->loop_down_timer) > 0 &&
!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) &&
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index ccec858875dd..72c648442e8d 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -6,9 +6,9 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "10.02.00.104-k"
+#define QLA2XXX_VERSION "10.02.00.105-k"
#define QLA_DRIVER_MAJOR_VER 10
#define QLA_DRIVER_MINOR_VER 2
#define QLA_DRIVER_PATCH_VER 0
-#define QLA_DRIVER_BETA_VER 104
+#define QLA_DRIVER_BETA_VER 105
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index b9142464d3f0..4e1764df0a73 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -1181,7 +1181,6 @@ struct status_entry {
uint32_t handle; /* 04-07 */
uint8_t scsiStatus; /* 08 */
-#define SCSI_CHECK_CONDITION 0x02
uint8_t iscsiFlags; /* 09 */
#define ISCSI_FLAG_RESIDUAL_UNDER 0x02
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index a51910ae9525..6f0e77dc2a34 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -182,7 +182,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
cmd->result = DID_OK << 16 | scsi_status;
- if (scsi_status != SCSI_CHECK_CONDITION)
+ if (scsi_status != SAM_STAT_CHECK_CONDITION)
break;
/* Copy Sense Data into sense buffer. */
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index b6540b92f566..3cdeaeb92933 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -853,7 +853,7 @@ static const int illegal_condition_result =
(DRIVER_SENSE << 24) | (DID_ABORT << 16) | SAM_STAT_CHECK_CONDITION;
static const int device_qfull_result =
- (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_TASK_SET_FULL;
+ (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
static const int condition_met_result = SAM_STAT_CONDITION_MET;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index c00f06e9ecb0..08c06c56331c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -124,6 +124,17 @@ static bool scsi_cmd_retry_allowed(struct scsi_cmnd *cmd)
return ++cmd->retries <= cmd->allowed;
}
+static bool scsi_eh_should_retry_cmd(struct scsi_cmnd *cmd)
+{
+ struct scsi_device *sdev = cmd->device;
+ struct Scsi_Host *host = sdev->host;
+
+ if (host->hostt->eh_should_retry_cmd)
+ return host->hostt->eh_should_retry_cmd(cmd);
+
+ return true;
+}
+
/**
* scmd_eh_abort_handler - Handle command aborts
* @work: command to be aborted.
@@ -159,7 +170,8 @@ scmd_eh_abort_handler(struct work_struct *work)
"eh timeout, not retrying "
"aborted command\n"));
} else if (!scsi_noretry_cmd(scmd) &&
- scsi_cmd_retry_allowed(scmd)) {
+ scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_WARNING, scmd,
"retry aborted command\n"));
@@ -1861,6 +1873,12 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
* the fast io fail tmo fired), so send IO directly upwards.
*/
return SUCCESS;
+ case DID_TRANSPORT_MARGINAL:
+ /*
+		 * The caller has decided not to retry after a successful
+		 * abort, so send the I/O directly upwards.
+ */
+ return SUCCESS;
case DID_ERROR:
if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
status_byte(scmd->result) == RESERVATION_CONFLICT)
@@ -2105,7 +2123,8 @@ void scsi_eh_flush_done_q(struct list_head *done_q)
list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
list_del_init(&scmd->eh_entry);
if (scsi_device_online(scmd->device) &&
- !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd)) {
+ !scsi_noretry_cmd(scmd) && scsi_cmd_retry_allowed(scmd) &&
+ scsi_eh_should_retry_cmd(scmd)) {
SCSI_LOG_ERROR_RECOVERY(3,
scmd_printk(KERN_INFO, scmd,
"%s: flush retry cmd\n",
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 4d2280658559..7d52a11e1b61 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -630,6 +630,7 @@ static blk_status_t scsi_result_to_blk_status(struct scsi_cmnd *cmd, int result)
return BLK_STS_OK;
return BLK_STS_IOERR;
case DID_TRANSPORT_FAILFAST:
+ case DID_TRANSPORT_MARGINAL:
return BLK_STS_TRANSPORT;
case DID_TARGET_FAILURE:
set_host_byte(cmd, DID_OK);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index a926e8f9e56e..da5b503dc7a1 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -148,20 +148,23 @@ fc_enum_name_search(host_event_code, fc_host_event_code,
static struct {
enum fc_port_state value;
char *name;
+ int matchlen;
} fc_port_state_names[] = {
- { FC_PORTSTATE_UNKNOWN, "Unknown" },
- { FC_PORTSTATE_NOTPRESENT, "Not Present" },
- { FC_PORTSTATE_ONLINE, "Online" },
- { FC_PORTSTATE_OFFLINE, "Offline" },
- { FC_PORTSTATE_BLOCKED, "Blocked" },
- { FC_PORTSTATE_BYPASSED, "Bypassed" },
- { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics" },
- { FC_PORTSTATE_LINKDOWN, "Linkdown" },
- { FC_PORTSTATE_ERROR, "Error" },
- { FC_PORTSTATE_LOOPBACK, "Loopback" },
- { FC_PORTSTATE_DELETED, "Deleted" },
+ { FC_PORTSTATE_UNKNOWN, "Unknown", 7},
+ { FC_PORTSTATE_NOTPRESENT, "Not Present", 11 },
+ { FC_PORTSTATE_ONLINE, "Online", 6 },
+ { FC_PORTSTATE_OFFLINE, "Offline", 7 },
+ { FC_PORTSTATE_BLOCKED, "Blocked", 7 },
+ { FC_PORTSTATE_BYPASSED, "Bypassed", 8 },
+ { FC_PORTSTATE_DIAGNOSTICS, "Diagnostics", 11 },
+ { FC_PORTSTATE_LINKDOWN, "Linkdown", 8 },
+ { FC_PORTSTATE_ERROR, "Error", 5 },
+ { FC_PORTSTATE_LOOPBACK, "Loopback", 8 },
+ { FC_PORTSTATE_DELETED, "Deleted", 7 },
+ { FC_PORTSTATE_MARGINAL, "Marginal", 8 },
};
fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
+fc_enum_name_match(port_state, fc_port_state, fc_port_state_names)
#define FC_PORTSTATE_MAX_NAMELEN 20
@@ -1235,7 +1238,59 @@ show_fc_rport_roles (struct device *dev, struct device_attribute *attr,
static FC_DEVICE_ATTR(rport, roles, S_IRUGO,
show_fc_rport_roles, NULL);
-fc_private_rport_rd_enum_attr(port_state, FC_PORTSTATE_MAX_NAMELEN);
+static ssize_t fc_rport_set_marginal_state(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct fc_rport *rport = transport_class_to_rport(dev);
+ enum fc_port_state port_state;
+ int ret = 0;
+
+ ret = get_fc_port_state_match(buf, &port_state);
+ if (ret)
+ return -EINVAL;
+ if (port_state == FC_PORTSTATE_MARGINAL) {
+ /*
+ * Change the state to Marginal only if the
+ * current rport state is Online
+ * Allow only Online->Marginal
+ */
+ if (rport->port_state == FC_PORTSTATE_ONLINE)
+ rport->port_state = port_state;
+ else
+ return -EINVAL;
+ } else if (port_state == FC_PORTSTATE_ONLINE) {
+ /*
+ * Change the state to Online only if the
+ * current rport state is Marginal
+ * Allow only Marginal->Online
+ */
+ if (rport->port_state == FC_PORTSTATE_MARGINAL)
+ rport->port_state = port_state;
+ else
+ return -EINVAL;
+ } else
+ return -EINVAL;
+ return count;
+}
+
+static ssize_t
+show_fc_rport_port_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *name;
+ struct fc_rport *rport = transport_class_to_rport(dev);
+
+ name = get_fc_port_state_name(rport->port_state);
+ if (!name)
+ return -EINVAL;
+
+ return snprintf(buf, 20, "%s\n", name);
+}
+
+static FC_DEVICE_ATTR(rport, port_state, 0444 | 0200,
+ show_fc_rport_port_state, fc_rport_set_marginal_state);
+
fc_private_rport_rd_attr(scsi_target_id, "%d\n", 20);
/*
@@ -2509,7 +2564,8 @@ fc_user_scan_tgt(struct Scsi_Host *shost, uint channel, uint id, u64 lun)
if (rport->scsi_target_id == -1)
continue;
- if (rport->port_state != FC_PORTSTATE_ONLINE)
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL))
continue;
if ((channel == rport->channel) &&
@@ -2677,7 +2733,7 @@ fc_attach_transport(struct fc_function_template *ft)
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_name);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_id);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(roles);
- SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(port_state);
+ SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(port_state);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RD(scsi_target_id);
SETUP_PRIVATE_RPORT_ATTRIBUTE_RW(fast_io_fail_tmo);
@@ -3373,7 +3429,8 @@ fc_remote_port_delete(struct fc_rport *rport)
spin_lock_irqsave(shost->host_lock, flags);
- if (rport->port_state != FC_PORTSTATE_ONLINE) {
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL)) {
spin_unlock_irqrestore(shost->host_lock, flags);
return;
}
@@ -3515,7 +3572,8 @@ fc_timeout_deleted_rport(struct work_struct *work)
* target, validate it still is. If not, tear down the
* scsi_target on it.
*/
- if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
+ (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
(rport->scsi_target_id != -1) &&
!(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
dev_printk(KERN_ERR, &rport->dev,
@@ -3658,7 +3716,8 @@ fc_scsi_scan_rport(struct work_struct *work)
struct fc_internal *i = to_fc_internal(shost->transportt);
unsigned long flags;
- if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
+ if (((rport->port_state == FC_PORTSTATE_ONLINE) ||
+ (rport->port_state == FC_PORTSTATE_MARGINAL)) &&
(rport->roles & FC_PORT_ROLE_FCP_TARGET) &&
!(i->f->disable_target_scan)) {
scsi_scan_target(&rport->dev, rport->channel,
@@ -3731,6 +3790,28 @@ int fc_block_scsi_eh(struct scsi_cmnd *cmnd)
}
EXPORT_SYMBOL(fc_block_scsi_eh);
+/*
+ * fc_eh_should_retry_cmd - check whether a command should be retried
+ * @scmd: The SCSI command to be checked
+ *
+ * Checks the rport state to decide whether the command is
+ * retryable.
+ *
+ * Returns: true if the rport is not in the marginal state.
+ */
+bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(scmd->device));
+
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT)) {
+ set_host_byte(scmd, DID_TRANSPORT_MARGINAL);
+ return false;
+ }
+ return true;
+}
+EXPORT_SYMBOL_GPL(fc_eh_should_retry_cmd);
+
/**
* fc_vport_setup - allocates and creates a FC virtual port.
* @shost: scsi host the virtual port is connected to.
@@ -4162,7 +4243,8 @@ static blk_status_t fc_bsg_rport_prep(struct fc_rport *rport)
!(rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT))
return BLK_STS_RESOURCE;
- if (rport->port_state != FC_PORTSTATE_ONLINE)
+ if ((rport->port_state != FC_PORTSTATE_ONLINE) &&
+ (rport->port_state != FC_PORTSTATE_MARGINAL))
return BLK_STS_IOERR;
return BLK_STS_OK;
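
Taken together, the scsi_error.c and scsi_transport_fc.c hunks above add an opt-in retry veto: the error handler now consults the new eh_should_retry_cmd host-template hook before retrying an aborted or flushed command, and the FC transport exports fc_eh_should_retry_cmd so that fast-failing commands on a Marginal rport complete with DID_TRANSPORT_MARGINAL (which the scsi_lib.c hunk earlier maps to BLK_STS_TRANSPORT) instead of being retried. A minimal sketch of how an FC low-level driver might opt in; the template name and all fields other than .eh_should_retry_cmd are hypothetical:

#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

static struct scsi_host_template example_fc_sht = {
	.module			= THIS_MODULE,
	.name			= "example_fc",
	.this_id		= -1,
	/* Let the FC transport veto EH retries once the rport is Marginal. */
	.eh_should_retry_cmd	= fc_eh_should_retry_cmd,
};
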
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 40473e4f850f..12471208c7a8 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -625,7 +625,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
if (page == 0x8 || page == 0x3f) {
scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
sizeof(ms10_caching_page));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
} else
stex_invalid_field(cmd, done);
@@ -644,7 +644,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
break;
case TEST_UNIT_READY:
if (id == host->max_id - 1) {
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
return 0;
}
@@ -661,7 +661,7 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
sizeof(console_inq_page));
- cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
+ cmd->result = DID_OK << 16;
done(cmd);
} else
stex_invalid_field(cmd, done);
@@ -679,9 +679,10 @@ stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
ver.console_id = host->max_id - 1;
ver.host_no = hba->host->host_no;
cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
- cmd->result = sizeof(ver) == cp_len ?
- DID_OK << 16 | COMMAND_COMPLETE << 8 :
- DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ if (sizeof(ver) == cp_len)
+ cmd->result = DID_OK << 16;
+ else
+ cmd->result = DID_ERROR << 16;
done(cmd);
return 0;
}
@@ -736,16 +737,16 @@ static void stex_scsi_done(struct st_ccb *ccb)
result = ccb->scsi_status;
switch (ccb->scsi_status) {
case SAM_STAT_GOOD:
- result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_OK << 16;
break;
case SAM_STAT_CHECK_CONDITION:
result |= DRIVER_SENSE << 24;
break;
case SAM_STAT_BUSY:
- result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_BUS_BUSY << 16;
break;
default:
- result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ result |= DID_ERROR << 16;
break;
}
}
@@ -753,15 +754,15 @@ static void stex_scsi_done(struct st_ccb *ccb)
result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
else switch (ccb->srb_status) {
case SRB_STATUS_SELECTION_TIMEOUT:
- result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
+ result = DID_NO_CONNECT << 16;
break;
case SRB_STATUS_BUSY:
- result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
+ result = DID_BUS_BUSY << 16;
break;
case SRB_STATUS_INVALID_REQUEST:
case SRB_STATUS_ERROR:
default:
- result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
+ result = DID_ERROR << 16;
break;
}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 2e4fa77445fd..6bc5453cea8a 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -216,18 +216,6 @@ struct vmscsi_request {
} __attribute((packed));
-
-/*
- * The size of the vmscsi_request has changed in win8. The
- * additional size is because of new elements added to the
- * structure. These elements are valid only when we are talking
- * to a win8 host.
- * Track the correction to size we need to apply. This value
- * will likely change during protocol negotiation but it is
- * valid to start by assuming pre-Win8.
- */
-static int vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
-
/*
* The list of storage protocols in order of preference.
*/
@@ -451,6 +439,17 @@ struct storvsc_device {
unsigned char target_id;
/*
+ * The size of the vmscsi_request has changed in win8. The
+ * additional size is because of new elements added to the
+ * structure. These elements are valid only when we are talking
+ * to a win8 host.
+ * Track the correction to size we need to apply. This value
+ * will likely change during protocol negotiation but it is
+ * valid to start by assuming pre-Win8.
+ */
+ int vmscsi_size_delta;
+
+ /*
* Max I/O, the device can support.
*/
u32 max_transfer_bytes;
@@ -769,7 +768,7 @@ static void handle_multichannel_storage(struct hv_device *device, int max_chns)
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ stor_device->vmscsi_size_delta),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -823,9 +822,14 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
struct storvsc_cmd_request *request,
bool status_check)
{
+ struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
int ret, t;
+ stor_device = get_out_stor_device(device);
+ if (!stor_device)
+ return -ENODEV;
+
vstor_packet = &request->vstor_packet;
init_completion(&request->wait_event);
@@ -833,7 +837,7 @@ static int storvsc_execute_vstor_op(struct hv_device *device,
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ stor_device->vmscsi_size_delta),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -910,7 +914,7 @@ static int storvsc_channel_init(struct hv_device *device, bool is_fc)
sense_buffer_size =
vmstor_protocols[i].sense_buffer_size;
- vmscsi_size_delta =
+ stor_device->vmscsi_size_delta =
vmstor_protocols[i].vmscsi_size_delta;
break;
@@ -1261,10 +1265,16 @@ static void storvsc_on_channel_callback(void *context)
request = (struct storvsc_cmd_request *)(unsigned long)cmd_rqst;
+ if (hv_pkt_datalen(desc) < sizeof(struct vstor_packet) -
+ stor_device->vmscsi_size_delta) {
+ dev_err(&device->device, "Invalid packet len\n");
+ continue;
+ }
+
if (request == &stor_device->init_request ||
request == &stor_device->reset_request) {
memcpy(&request->vstor_packet, packet,
- (sizeof(struct vstor_packet) - vmscsi_size_delta));
+ (sizeof(struct vstor_packet) - stor_device->vmscsi_size_delta));
complete(&request->wait_event);
} else {
storvsc_on_receive(stor_device, packet, request);
@@ -1482,7 +1492,7 @@ found_channel:
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
vstor_packet->vm_srb.length = (sizeof(struct vmscsi_request) -
- vmscsi_size_delta);
+ stor_device->vmscsi_size_delta);
vstor_packet->vm_srb.sense_info_length = sense_buffer_size;
@@ -1499,12 +1509,12 @@ found_channel:
request->payload, request->payload_sz,
vstor_packet,
(sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ stor_device->vmscsi_size_delta),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(outgoing_channel, vstor_packet,
(sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ stor_device->vmscsi_size_delta),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1609,7 +1619,7 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
ret = vmbus_sendpacket(device->channel, vstor_packet,
(sizeof(struct vstor_packet) -
- vmscsi_size_delta),
+ stor_device->vmscsi_size_delta),
(unsigned long)&stor_device->reset_request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
@@ -1661,7 +1671,7 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
* this. So, don't send it.
*/
case SET_WINDOW:
- scmnd->result = ILLEGAL_REQUEST << 16;
+ scmnd->result = DID_ERROR << 16;
allowed = false;
break;
default:
@@ -1959,6 +1969,7 @@ static int storvsc_probe(struct hv_device *device,
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
stor_device->host = host;
+ stor_device->vmscsi_size_delta = sizeof(struct vmscsi_win8_extension);
spin_lock_init(&stor_device->lock);
hv_set_drvdata(device, stor_device);
@@ -2161,12 +2172,15 @@ static int __init storvsc_drv_init(void)
* than the ring buffer size since that page is reserved for
* the ring buffer indices) by the max request size (which is
* vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
+ *
+ * The computation underestimates max_outstanding_req_per_channel
+ * for Win7 and older hosts because it does not take into account
+ * the vmscsi_size_delta correction to the max request size.
*/
max_outstanding_req_per_channel =
((storvsc_ringbuffer_size - PAGE_SIZE) /
ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
- sizeof(struct vstor_packet) + sizeof(u64) -
- vmscsi_size_delta,
+ sizeof(struct vstor_packet) + sizeof(u64),
sizeof(u64)));
#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index b915b38c2b27..07cf415367b4 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -39,7 +39,7 @@ config SCSI_UFSHCD
select DEVFREQ_GOV_SIMPLE_ONDEMAND
select NLS
help
- This selects the support for UFS devices in Linux, say Y and make
+ This selects the support for UFS devices in Linux, say Y and make
sure that you know the name of your UFS host adapter (the card
inside your computer that "speaks" the UFS protocol, also
called UFS Host Controller), because you will be asked for it.
@@ -54,8 +54,8 @@ config SCSI_UFSHCD_PCI
tristate "PCI bus based UFS Controller support"
depends on SCSI_UFSHCD && PCI
help
- This selects the PCI UFS Host Controller Interface. Select this if
- you have UFS Host Controller with PCI Interface.
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have UFS Host Controller with PCI Interface.
If you have a controller with this interface, say Y or M here.
@@ -74,10 +74,10 @@ config SCSI_UFSHCD_PLATFORM
depends on SCSI_UFSHCD
depends on HAS_IOMEM
help
- This selects the UFS host controller support. Select this if
- you have an UFS controller on Platform bus.
+ This selects the UFS host controller support. Select this if
+ you have an UFS controller on Platform bus.
- If you have a controller with this interface, say Y or M here.
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
@@ -85,7 +85,7 @@ config SCSI_UFS_CDNS_PLATFORM
tristate "Cadence UFS Controller platform driver"
depends on SCSI_UFSHCD_PLATFORM
help
- This selects the Cadence-specific additions to UFSHCD platform driver.
+ This selects the Cadence-specific additions to UFSHCD platform driver.
If unsure, say N.
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index 4679af1b564e..06f3a3fe4a44 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,5 +1,14 @@
# SPDX-License-Identifier: GPL-2.0
# UFSHCD makefile
+
+# The link order is important here. ufshcd-core must initialize
+# before vendor drivers.
+obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o
+ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
+ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
+ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
+
obj-$(CONFIG_SCSI_UFS_DWC_TC_PCI) += tc-dwc-g210-pci.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_DWC_TC_PLATFORM) += tc-dwc-g210-pltfrm.o ufshcd-dwc.o tc-dwc-g210.o
obj-$(CONFIG_SCSI_UFS_CDNS_PLATFORM) += cdns-pltfrm.o
@@ -7,10 +16,6 @@ obj-$(CONFIG_SCSI_UFS_QCOM) += ufs_qcom.o
ufs_qcom-y += ufs-qcom.o
ufs_qcom-$(CONFIG_SCSI_UFS_CRYPTO) += ufs-qcom-ice.o
obj-$(CONFIG_SCSI_UFS_EXYNOS) += ufs-exynos.o
-obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o
-ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
-ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o
obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
obj-$(CONFIG_SCSI_UFSHCD_PLATFORM) += ufshcd-pltfrm.o
obj-$(CONFIG_SCSI_UFS_HISI) += ufs-hisi.o
diff --git a/drivers/scsi/ufs/ufs-debugfs.c b/drivers/scsi/ufs/ufs-debugfs.c
new file mode 100644
index 000000000000..dee98dc72d29
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2020 Intel Corporation
+
+#include <linux/debugfs.h>
+
+#include "ufs-debugfs.h"
+#include "ufshcd.h"
+
+static struct dentry *ufs_debugfs_root;
+
+void __init ufs_debugfs_init(void)
+{
+ ufs_debugfs_root = debugfs_create_dir("ufshcd", NULL);
+}
+
+void __exit ufs_debugfs_exit(void)
+{
+ debugfs_remove_recursive(ufs_debugfs_root);
+}
+
+static int ufs_debugfs_stats_show(struct seq_file *s, void *data)
+{
+ struct ufs_hba *hba = s->private;
+ struct ufs_event_hist *e = hba->ufs_stats.event;
+
+#define PRT(fmt, typ) \
+ seq_printf(s, fmt, e[UFS_EVT_ ## typ].cnt)
+
+ PRT("PHY Adapter Layer errors (except LINERESET): %llu\n", PA_ERR);
+ PRT("Data Link Layer errors: %llu\n", DL_ERR);
+ PRT("Network Layer errors: %llu\n", NL_ERR);
+ PRT("Transport Layer errors: %llu\n", TL_ERR);
+ PRT("Generic DME errors: %llu\n", DME_ERR);
+ PRT("Auto-hibernate errors: %llu\n", AUTO_HIBERN8_ERR);
+ PRT("IS Fatal errors (CEFES, SBFES, HCFES, DFES): %llu\n", FATAL_ERR);
+ PRT("DME Link Startup errors: %llu\n", LINK_STARTUP_FAIL);
+ PRT("PM Resume errors: %llu\n", RESUME_ERR);
+	PRT("PM Suspend errors: %llu\n", SUSPEND_ERR);
+ PRT("Logical Unit Resets: %llu\n", DEV_RESET);
+ PRT("Host Resets: %llu\n", HOST_RESET);
+ PRT("SCSI command aborts: %llu\n", ABORT);
+#undef PRT
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats);
+
+void ufs_debugfs_hba_init(struct ufs_hba *hba)
+{
+ hba->debugfs_root = debugfs_create_dir(dev_name(hba->dev), ufs_debugfs_root);
+ debugfs_create_file("stats", 0400, hba->debugfs_root, hba, &ufs_debugfs_stats_fops);
+}
+
+void ufs_debugfs_hba_exit(struct ufs_hba *hba)
+{
+ debugfs_remove_recursive(hba->debugfs_root);
+}
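
The "stats" file above relies on DEFINE_SHOW_ATTRIBUTE(ufs_debugfs_stats) to generate ufs_debugfs_stats_fops from ufs_debugfs_stats_show(). Roughly, that macro from include/linux/seq_file.h expands to the following; shown here only as an illustrative sketch:

static int ufs_debugfs_stats_open(struct inode *inode, struct file *file)
{
	/* inode->i_private is the hba pointer passed to debugfs_create_file(). */
	return single_open(file, ufs_debugfs_stats_show, inode->i_private);
}

static const struct file_operations ufs_debugfs_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= ufs_debugfs_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
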
diff --git a/drivers/scsi/ufs/ufs-debugfs.h b/drivers/scsi/ufs/ufs-debugfs.h
new file mode 100644
index 000000000000..f35b39c4b4f5
--- /dev/null
+++ b/drivers/scsi/ufs/ufs-debugfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Intel Corporation
+ */
+
+#ifndef __UFS_DEBUGFS_H__
+#define __UFS_DEBUGFS_H__
+
+struct ufs_hba;
+
+#ifdef CONFIG_DEBUG_FS
+void __init ufs_debugfs_init(void);
+void __exit ufs_debugfs_exit(void);
+void ufs_debugfs_hba_init(struct ufs_hba *hba);
+void ufs_debugfs_hba_exit(struct ufs_hba *hba);
+#else
+static inline void ufs_debugfs_init(void) {}
+static inline void ufs_debugfs_exit(void) {}
+static inline void ufs_debugfs_hba_init(struct ufs_hba *hba) {}
+static inline void ufs_debugfs_hba_exit(struct ufs_hba *hba) {}
+#endif
+
+#endif
diff --git a/drivers/scsi/ufs/ufs-exynos.c b/drivers/scsi/ufs/ufs-exynos.c
index a8770ff14588..267943a13a94 100644
--- a/drivers/scsi/ufs/ufs-exynos.c
+++ b/drivers/scsi/ufs/ufs-exynos.c
@@ -640,6 +640,11 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
}
}
+	/* set three timeout values for traffic class #0 */
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0), 8064);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1), 28224);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2), 20160);
+
return 0;
out:
return ret;
@@ -1236,7 +1241,9 @@ struct exynos_ufs_drv_data exynos_ufs_drvs = {
UFSHCI_QUIRK_BROKEN_HCE |
UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR |
UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
- UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL,
+ UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
+ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
.opts = EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/drivers/scsi/ufs/ufs-mediatek.c b/drivers/scsi/ufs/ufs-mediatek.c
index 80618af7c872..c55202b92a43 100644
--- a/drivers/scsi/ufs/ufs-mediatek.c
+++ b/drivers/scsi/ufs/ufs-mediatek.c
@@ -661,6 +661,7 @@ static int ufs_mtk_init(struct ufs_hba *hba)
/* Enable WriteBooster */
hba->caps |= UFSHCD_CAP_WB_EN;
+ hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL;
hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80);
if (host->caps & UFS_MTK_CAP_DISABLE_AH8)
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2206b1e4b774..f97d7b0ae3b6 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -568,6 +568,17 @@ out:
return err;
}
+static void ufs_qcom_device_reset_ctrl(struct ufs_hba *hba, bool asserted)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
+ /* reset gpio is optional */
+ if (!host->device_reset)
+ return;
+
+ gpiod_set_value_cansleep(host->device_reset, asserted);
+}
+
static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -582,6 +593,9 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
+ /* reset the connected UFS device during power down */
+ ufs_qcom_device_reset_ctrl(hba, true);
+
} else if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
}
@@ -1421,10 +1435,10 @@ static int ufs_qcom_device_reset(struct ufs_hba *hba)
* The UFS device shall detect reset pulses of 1us, sleep for 10us to
* be on the safe side.
*/
- gpiod_set_value_cansleep(host->device_reset, 1);
+ ufs_qcom_device_reset_ctrl(hba, true);
usleep_range(10, 15);
- gpiod_set_value_cansleep(host->device_reset, 0);
+ ufs_qcom_device_reset_ctrl(hba, false);
usleep_range(10, 15);
return 0;
diff --git a/drivers/scsi/ufs/ufs-sysfs.c b/drivers/scsi/ufs/ufs-sysfs.c
index 08e72b7eef6a..acc54f530f2d 100644
--- a/drivers/scsi/ufs/ufs-sysfs.c
+++ b/drivers/scsi/ufs/ufs-sysfs.c
@@ -67,7 +67,7 @@ static ssize_t rpm_lvl_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hba->rpm_lvl);
+ return sysfs_emit(buf, "%d\n", hba->rpm_lvl);
}
static ssize_t rpm_lvl_store(struct device *dev,
@@ -81,7 +81,7 @@ static ssize_t rpm_target_dev_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].dev_state));
}
@@ -90,7 +90,7 @@ static ssize_t rpm_target_link_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->rpm_lvl].link_state));
}
@@ -99,7 +99,7 @@ static ssize_t spm_lvl_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%d\n", hba->spm_lvl);
+ return sysfs_emit(buf, "%d\n", hba->spm_lvl);
}
static ssize_t spm_lvl_store(struct device *dev,
@@ -113,7 +113,7 @@ static ssize_t spm_target_dev_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_ufs_dev_pwr_mode_to_string(
ufs_pm_lvl_states[hba->spm_lvl].dev_state));
}
@@ -122,7 +122,7 @@ static ssize_t spm_target_link_state_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", ufschd_uic_link_state_to_string(
+ return sysfs_emit(buf, "%s\n", ufschd_uic_link_state_to_string(
ufs_pm_lvl_states[hba->spm_lvl].link_state));
}
@@ -154,18 +154,29 @@ static ssize_t auto_hibern8_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
u32 ahit;
+ int ret;
struct ufs_hba *hba = dev_get_drvdata(dev);
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
- return scnprintf(buf, PAGE_SIZE, "%d\n", ufshcd_ahit_to_us(ahit));
+ ret = sysfs_emit(buf, "%d\n", ufshcd_ahit_to_us(ahit));
+
+out:
+ up(&hba->host_sem);
+ return ret;
}
static ssize_t auto_hibern8_store(struct device *dev,
@@ -174,6 +185,7 @@ static ssize_t auto_hibern8_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
unsigned int timer;
+ int ret = 0;
if (!ufshcd_is_auto_hibern8_supported(hba))
return -EOPNOTSUPP;
@@ -184,9 +196,61 @@ static ssize_t auto_hibern8_store(struct device *dev,
if (timer > UFSHCI_AHIBERN8_MAX)
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
ufshcd_auto_hibern8_update(hba, ufshcd_us_to_ahit(timer));
- return count;
+out:
+ up(&hba->host_sem);
+ return ret ? ret : count;
+}
+
+static ssize_t wb_on_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+
+ return sysfs_emit(buf, "%d\n", hba->dev_info.wb_enabled);
+}
+
+static ssize_t wb_on_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct ufs_hba *hba = dev_get_drvdata(dev);
+ unsigned int wb_enable;
+ ssize_t res;
+
+ if (!ufshcd_is_wb_allowed(hba) || ufshcd_is_clkscaling_supported(hba)) {
+ /*
+		 * If the platform supports UFSHCD_CAP_CLK_SCALING, WB is
+		 * toggled on/off as part of clock scaling up/down.
+ */
+ dev_warn(dev, "To control WB through wb_on is not allowed!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (kstrtouint(buf, 0, &wb_enable))
+ return -EINVAL;
+
+ if (wb_enable != 0 && wb_enable != 1)
+ return -EINVAL;
+
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ res = -EBUSY;
+ goto out;
+ }
+
+ pm_runtime_get_sync(hba->dev);
+ res = ufshcd_wb_ctrl(hba, wb_enable);
+ pm_runtime_put_sync(hba->dev);
+out:
+ up(&hba->host_sem);
+ return res < 0 ? res : count;
}
static DEVICE_ATTR_RW(rpm_lvl);
@@ -196,6 +260,7 @@ static DEVICE_ATTR_RW(spm_lvl);
static DEVICE_ATTR_RO(spm_target_dev_state);
static DEVICE_ATTR_RO(spm_target_link_state);
static DEVICE_ATTR_RW(auto_hibern8);
+static DEVICE_ATTR_RW(wb_on);
static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_rpm_lvl.attr,
@@ -205,6 +270,7 @@ static struct attribute *ufs_sysfs_ufshcd_attrs[] = {
&dev_attr_spm_target_dev_state.attr,
&dev_attr_spm_target_link_state.attr,
&dev_attr_auto_hibern8.attr,
+ &dev_attr_wb_on.attr,
NULL
};
@@ -225,30 +291,41 @@ static ssize_t ufs_sysfs_read_desc_param(struct ufs_hba *hba,
if (param_size > 8)
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ret = ufshcd_read_desc_param(hba, desc_id, desc_index,
param_offset, desc_buf, param_size);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
switch (param_size) {
case 1:
- ret = sprintf(sysfs_buf, "0x%02X\n", *desc_buf);
+ ret = sysfs_emit(sysfs_buf, "0x%02X\n", *desc_buf);
break;
case 2:
- ret = sprintf(sysfs_buf, "0x%04X\n",
+ ret = sysfs_emit(sysfs_buf, "0x%04X\n",
get_unaligned_be16(desc_buf));
break;
case 4:
- ret = sprintf(sysfs_buf, "0x%08X\n",
+ ret = sysfs_emit(sysfs_buf, "0x%08X\n",
get_unaligned_be32(desc_buf));
break;
case 8:
- ret = sprintf(sysfs_buf, "0x%016llX\n",
+ ret = sysfs_emit(sysfs_buf, "0x%016llX\n",
get_unaligned_be64(desc_buf));
break;
}
+out:
+ up(&hba->host_sem);
return ret;
}
@@ -591,9 +668,16 @@ static ssize_t _name##_show(struct device *dev, \
int desc_len = QUERY_DESC_MAX_SIZE; \
u8 *desc_buf; \
\
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_ATOMIC); \
- if (!desc_buf) \
- return -ENOMEM; \
+ if (!desc_buf) { \
+ up(&hba->host_sem); \
+ return -ENOMEM; \
+ } \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_descriptor_retry(hba, \
UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_DEVICE, \
@@ -609,10 +693,11 @@ static ssize_t _name##_show(struct device *dev, \
SD_ASCII_STD); \
if (ret < 0) \
goto out; \
- ret = snprintf(buf, PAGE_SIZE, "%s\n", desc_buf); \
+ ret = sysfs_emit(buf, "%s\n", desc_buf); \
out: \
pm_runtime_put_sync(hba->dev); \
kfree(desc_buf); \
+ up(&hba->host_sem); \
return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -651,15 +736,26 @@ static ssize_t _name##_show(struct device *dev, \
u8 index = 0; \
int ret; \
struct ufs_hba *hba = dev_get_drvdata(dev); \
+ \
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
if (ufshcd_is_wb_flags(QUERY_FLAG_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG, \
QUERY_FLAG_IDN##_uname, index, &flag); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sprintf(buf, "%s\n", flag ? "true" : "false"); \
+ if (ret) { \
+ ret = -EINVAL; \
+ goto out; \
+ } \
+ ret = sysfs_emit(buf, "%s\n", flag ? "true" : "false"); \
+out: \
+ up(&hba->host_sem); \
+ return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -709,15 +805,26 @@ static ssize_t _name##_show(struct device *dev, \
u32 value; \
int ret; \
u8 index = 0; \
+ \
+ down(&hba->host_sem); \
+ if (!ufshcd_is_user_access_allowed(hba)) { \
+ up(&hba->host_sem); \
+ return -EBUSY; \
+ } \
if (ufshcd_is_wb_attrs(QUERY_ATTR_IDN##_uname)) \
index = ufshcd_wb_get_query_index(hba); \
pm_runtime_get_sync(hba->dev); \
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR, \
QUERY_ATTR_IDN##_uname, index, 0, &value); \
pm_runtime_put_sync(hba->dev); \
- if (ret) \
- return -EINVAL; \
- return sprintf(buf, "0x%08X\n", value); \
+ if (ret) { \
+ ret = -EINVAL; \
+ goto out; \
+ } \
+ ret = sysfs_emit(buf, "0x%08X\n", value); \
+out: \
+ up(&hba->host_sem); \
+ return ret; \
} \
static DEVICE_ATTR_RO(_name)
@@ -792,7 +899,8 @@ static ssize_t _pname##_show(struct device *dev, \
struct scsi_device *sdev = to_scsi_device(dev); \
struct ufs_hba *hba = shost_priv(sdev->host); \
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); \
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun)) \
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, \
+ _duname##_DESC_PARAM##_puname)) \
return -EINVAL; \
return ufs_sysfs_read_desc_param(hba, QUERY_DESC_IDN_##_duname, \
lun, _duname##_DESC_PARAM##_puname, buf, _size); \
@@ -850,13 +958,26 @@ static ssize_t dyn_cap_needed_attribute_show(struct device *dev,
u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun);
int ret;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ ret = -EBUSY;
+ goto out;
+ }
+
pm_runtime_get_sync(hba->dev);
ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
QUERY_ATTR_IDN_DYN_CAP_NEEDED, lun, 0, &value);
pm_runtime_put_sync(hba->dev);
- if (ret)
- return -EINVAL;
- return sprintf(buf, "0x%08X\n", value);
+ if (ret) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sysfs_emit(buf, "0x%08X\n", value);
+
+out:
+ up(&hba->host_sem);
+ return ret;
}
static DEVICE_ATTR_RO(dyn_cap_needed_attribute);
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index 14dfda735adf..bf1897a72532 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -527,22 +527,42 @@ struct ufs_vreg_info {
};
struct ufs_dev_info {
- bool f_power_on_wp_en;
+ bool f_power_on_wp_en;
/* Keeps information if any of the LU is power on write protected */
- bool is_lu_power_on_wp;
+ bool is_lu_power_on_wp;
/* Maximum number of general LU supported by the UFS device */
- u8 max_lu_supported;
- u8 wb_dedicated_lu;
- u16 wmanufacturerid;
+ u8 max_lu_supported;
+ u16 wmanufacturerid;
/*UFS device Product Name */
- u8 *model;
- u16 wspecversion;
- u32 clk_gating_wait_us;
- u32 d_ext_ufs_feature_sup;
- u8 b_wb_buffer_type;
- u32 d_wb_alloc_units;
- bool b_rpm_dev_flush_capable;
- u8 b_presrv_uspc_en;
+ u8 *model;
+ u16 wspecversion;
+ u32 clk_gating_wait_us;
+
+ /* UFS WB related flags */
+ bool wb_enabled;
+ bool wb_buf_flush_enabled;
+ u8 wb_dedicated_lu;
+ u8 wb_buffer_type;
+
+ bool b_rpm_dev_flush_capable;
+ u8 b_presrv_uspc_en;
+};
+
+/*
+ * This enum is used in string mapping in include/trace/events/ufs.h.
+ */
+enum ufs_trace_str_t {
+ UFS_CMD_SEND, UFS_CMD_COMP, UFS_DEV_COMP,
+ UFS_QUERY_SEND, UFS_QUERY_COMP, UFS_QUERY_ERR,
+ UFS_TM_SEND, UFS_TM_COMP, UFS_TM_ERR
+};
+
+/*
+ * Transaction Specific Fields (TSF) type in the UPIU package, this enum is
+ * used in include/trace/events/ufs.h for UFS command trace.
+ */
+enum ufs_trace_tsf_t {
+ UFS_TSF_CDB, UFS_TSF_OSF, UFS_TSF_TM_INPUT, UFS_TSF_TM_OUTPUT
};
/**
@@ -552,13 +572,15 @@ struct ufs_dev_info {
* @return: true if the lun has a matching unit descriptor, false otherwise
*/
static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info,
- u8 lun)
+ u8 lun, u8 param_offset)
{
if (!dev_info || !dev_info->max_lu_supported) {
pr_err("Max General LU supported by UFS isn't initialized\n");
return false;
}
-
+	/* WB is available only for logical units 0 through 7 */
+ if (param_offset == UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS)
+ return lun < UFS_UPIU_MAX_WB_LUN_ID;
return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported);
}
diff --git a/drivers/scsi/ufs/ufshcd-crypto.c b/drivers/scsi/ufs/ufshcd-crypto.c
index 153dd5765d9c..d70cdcd35e43 100644
--- a/drivers/scsi/ufs/ufshcd-crypto.c
+++ b/drivers/scsi/ufs/ufshcd-crypto.c
@@ -182,7 +182,7 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
err = devm_blk_ksm_init(hba->dev, &hba->ksm,
hba->crypto_capabilities.config_count + 1);
if (err)
- goto out_free_caps;
+ goto out;
hba->ksm.ksm_ll_ops = ufshcd_ksm_ops;
/* UFS only supports 8 bytes for any DUN */
@@ -208,8 +208,6 @@ int ufshcd_hba_init_crypto_capabilities(struct ufs_hba *hba)
return 0;
-out_free_caps:
- devm_kfree(hba->dev, hba->crypto_cap_array);
out:
/* Indicate that init failed by clearing UFSHCD_CAP_CRYPTO */
hba->caps &= ~UFSHCD_CAP_CRYPTO;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4c24eb782835..721f55db181f 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -20,6 +20,7 @@
#include "ufs_quirks.h"
#include "unipro.h"
#include "ufs-sysfs.h"
+#include "ufs-debugfs.h"
#include "ufs_bsg.h"
#include "ufshcd-crypto.h"
#include <asm/unaligned.h>
@@ -94,6 +95,8 @@
16, 4, buf, __len, false); \
} while (0)
+static bool early_suspend;
+
int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
const char *prefix)
{
@@ -244,11 +247,8 @@ static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
struct ufs_vreg *vreg);
static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba);
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba);
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set);
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable);
static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba);
static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba);
@@ -306,53 +306,67 @@ static void ufshcd_scsi_block_requests(struct ufs_hba *hba)
}
static void ufshcd_add_cmd_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->sc.cdb);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq->header, &rq->sc.cdb,
+ UFS_TSF_CDB);
}
-static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+static void ufshcd_add_query_upiu_trace(struct ufs_hba *hba,
+ enum ufs_trace_str_t str_t,
+ struct utp_upiu_req *rq_rsp)
{
- struct utp_upiu_req *rq = hba->lrb[tag].ucd_req_ptr;
+ if (!trace_ufshcd_upiu_enabled())
+ return;
- trace_ufshcd_upiu(dev_name(hba->dev), str, &rq->header, &rq->qr);
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &rq_rsp->header,
+ &rq_rsp->qr, UFS_TSF_OSF);
}
static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
int off = (int)tag - hba->nutrs;
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[off];
- trace_ufshcd_upiu(dev_name(hba->dev), str, &descp->req_header,
- &descp->input_param1);
+ if (!trace_ufshcd_upiu_enabled())
+ return;
+
+ if (str_t == UFS_TM_SEND)
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->req_header,
+ &descp->input_param1, UFS_TSF_TM_INPUT);
+ else
+ trace_ufshcd_upiu(dev_name(hba->dev), str_t, &descp->rsp_header,
+ &descp->output_param1, UFS_TSF_TM_OUTPUT);
}
static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
struct uic_command *ucmd,
- const char *str)
+ enum ufs_trace_str_t str_t)
{
u32 cmd;
if (!trace_ufshcd_uic_command_enabled())
return;
- if (!strcmp(str, "send"))
+ if (str_t == UFS_CMD_SEND)
cmd = ucmd->command;
else
cmd = ufshcd_readl(hba, REG_UIC_COMMAND);
- trace_ufshcd_uic_command(dev_name(hba->dev), str, cmd,
+ trace_ufshcd_uic_command(dev_name(hba->dev), str_t, cmd,
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_1),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_2),
ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3));
}
-static void ufshcd_add_command_trace(struct ufs_hba *hba,
- unsigned int tag, const char *str)
+static void ufshcd_add_command_trace(struct ufs_hba *hba, unsigned int tag,
+ enum ufs_trace_str_t str_t)
{
sector_t lba = -1;
u8 opcode = 0, group_id = 0;
@@ -364,13 +378,13 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
if (!trace_ufshcd_command_enabled()) {
/* trace UPIU W/O tracing command */
if (cmd)
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
return;
}
if (cmd) { /* data phase exists */
/* trace UPIU also */
- ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ ufshcd_add_cmd_upiu_trace(hba, tag, str_t);
opcode = cmd->cmnd[0];
if ((opcode == READ_10) || (opcode == WRITE_10)) {
/*
@@ -393,7 +407,7 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
intr = ufshcd_readl(hba, REG_INTERRUPT_STATUS);
doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL);
- trace_ufshcd_command(dev_name(hba->dev), str, tag,
+ trace_ufshcd_command(dev_name(hba->dev), str_t, tag,
doorbell, transfer_len, intr, lba, opcode, group_id);
}
@@ -591,8 +605,8 @@ static void ufshcd_device_reset(struct ufs_hba *hba)
if (!err) {
ufshcd_set_ufs_dev_active(hba);
if (ufshcd_is_wb_allowed(hba)) {
- hba->wb_enabled = false;
- hba->wb_buf_flush_enabled = false;
+ hba->dev_info.wb_enabled = false;
+ hba->dev_info.wb_buf_flush_enabled = false;
}
}
if (err != -EOPNOTSUPP)
@@ -1182,19 +1196,30 @@ static int ufshcd_clock_scaling_prepare(struct ufs_hba *hba)
*/
ufshcd_scsi_block_requests(hba);
down_write(&hba->clk_scaling_lock);
- if (ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
+
+ if (!hba->clk_scaling.is_allowed ||
+ ufshcd_wait_for_doorbell_clr(hba, DOORBELL_CLR_TOUT_US)) {
ret = -EBUSY;
up_write(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ goto out;
}
+ /* let's not get into low power until clock scaling is completed */
+ ufshcd_hold(hba, false);
+
+out:
return ret;
}
-static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
+static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba, bool writelock)
{
- up_write(&hba->clk_scaling_lock);
+ if (writelock)
+ up_write(&hba->clk_scaling_lock);
+ else
+ up_read(&hba->clk_scaling_lock);
ufshcd_scsi_unblock_requests(hba);
+ ufshcd_release(hba);
}
/**
@@ -1209,13 +1234,11 @@ static void ufshcd_clock_scaling_unprepare(struct ufs_hba *hba)
static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
{
int ret = 0;
-
- /* let's not get into low power until clock scaling is completed */
- ufshcd_hold(hba, false);
+ bool is_writelock = true;
ret = ufshcd_clock_scaling_prepare(hba);
if (ret)
- goto out;
+ return ret;
/* scale down the gear before scaling down clocks */
if (!scale_up) {
@@ -1241,14 +1264,12 @@ static int ufshcd_devfreq_scale(struct ufs_hba *hba, bool scale_up)
}
/* Enable Write Booster if we have scaled up else disable it */
- up_write(&hba->clk_scaling_lock);
+ downgrade_write(&hba->clk_scaling_lock);
+ is_writelock = false;
ufshcd_wb_ctrl(hba, scale_up);
- down_write(&hba->clk_scaling_lock);
out_unprepare:
- ufshcd_clock_scaling_unprepare(hba);
-out:
- ufshcd_release(hba);
+ ufshcd_clock_scaling_unprepare(hba, is_writelock);
return ret;
}
@@ -1329,15 +1350,8 @@ static int ufshcd_devfreq_target(struct device *dev,
}
spin_unlock_irqrestore(hba->host->host_lock, irq_flags);
- pm_runtime_get_noresume(hba->dev);
- if (!pm_runtime_active(hba->dev)) {
- pm_runtime_put_noidle(hba->dev);
- ret = -EAGAIN;
- goto out;
- }
start = ktime_get();
ret = ufshcd_devfreq_scale(hba, scale_up);
- pm_runtime_put(hba->dev);
trace_ufshcd_profile_clk_scaling(dev_name(hba->dev),
(scale_up ? "up" : "down"),
@@ -1484,8 +1498,8 @@ static void ufshcd_suspend_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool suspend = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
+ cancel_work_sync(&hba->clk_scaling.suspend_work);
+ cancel_work_sync(&hba->clk_scaling.resume_work);
spin_lock_irqsave(hba->host->host_lock, flags);
if (!hba->clk_scaling.is_suspended) {
@@ -1503,9 +1517,6 @@ static void ufshcd_resume_clkscaling(struct ufs_hba *hba)
unsigned long flags;
bool resume = false;
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_scaling.is_suspended) {
resume = true;
@@ -1522,7 +1533,7 @@ static ssize_t ufshcd_clkscale_enable_show(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_allowed);
+ return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_scaling.is_enabled);
}
static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
@@ -1530,22 +1541,25 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
{
struct ufs_hba *hba = dev_get_drvdata(dev);
u32 value;
- int err;
+ int err = 0;
if (kstrtou32(buf, 0, &value))
return -EINVAL;
+ down(&hba->host_sem);
+ if (!ufshcd_is_user_access_allowed(hba)) {
+ err = -EBUSY;
+ goto out;
+ }
+
value = !!value;
- if (value == hba->clk_scaling.is_allowed)
+ if (value == hba->clk_scaling.is_enabled)
goto out;
pm_runtime_get_sync(hba->dev);
ufshcd_hold(hba, false);
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
-
- hba->clk_scaling.is_allowed = value;
+ hba->clk_scaling.is_enabled = value;
if (value) {
ufshcd_resume_clkscaling(hba);
@@ -1560,10 +1574,11 @@ static ssize_t ufshcd_clkscale_enable_store(struct device *dev,
ufshcd_release(hba);
pm_runtime_put_sync(hba->dev);
out:
- return count;
+ up(&hba->host_sem);
+ return err ? err : count;
}
-static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
+static void ufshcd_init_clk_scaling_sysfs(struct ufs_hba *hba)
{
hba->clk_scaling.enable_attr.show = ufshcd_clkscale_enable_show;
hba->clk_scaling.enable_attr.store = ufshcd_clkscale_enable_store;
@@ -1574,6 +1589,45 @@ static void ufshcd_clkscaling_init_sysfs(struct ufs_hba *hba)
dev_err(hba->dev, "Failed to create sysfs for clkscale_enable\n");
}
+static void ufshcd_remove_clk_scaling_sysfs(struct ufs_hba *hba)
+{
+ if (hba->clk_scaling.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
+}
+
+static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+{
+ char wq_name[sizeof("ufs_clkscaling_00")];
+
+ if (!ufshcd_is_clkscaling_supported(hba))
+ return;
+
+ if (!hba->clk_scaling.min_gear)
+ hba->clk_scaling.min_gear = UFS_HS_G1;
+
+ INIT_WORK(&hba->clk_scaling.suspend_work,
+ ufshcd_clk_scaling_suspend_work);
+ INIT_WORK(&hba->clk_scaling.resume_work,
+ ufshcd_clk_scaling_resume_work);
+
+ snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
+ hba->host->host_no);
+ hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+
+ hba->clk_scaling.is_initialized = true;
+}
+
+static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+{
+ if (!hba->clk_scaling.is_initialized)
+ return;
+
+ ufshcd_remove_clk_scaling_sysfs(hba);
+ destroy_workqueue(hba->clk_scaling.workq);
+ ufshcd_devfreq_remove(hba);
+ hba->clk_scaling.is_initialized = false;
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1865,35 +1919,31 @@ out:
return count;
}
-static void ufshcd_init_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_init_clk_gating_sysfs(struct ufs_hba *hba)
{
- char wq_name[sizeof("ufs_clkscaling_00")];
-
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- if (!hba->clk_scaling.min_gear)
- hba->clk_scaling.min_gear = UFS_HS_G1;
-
- INIT_WORK(&hba->clk_scaling.suspend_work,
- ufshcd_clk_scaling_suspend_work);
- INIT_WORK(&hba->clk_scaling.resume_work,
- ufshcd_clk_scaling_resume_work);
-
- snprintf(wq_name, sizeof(wq_name), "ufs_clkscaling_%d",
- hba->host->host_no);
- hba->clk_scaling.workq = create_singlethread_workqueue(wq_name);
+ hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+ hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+ sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+ hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+ hba->clk_gating.delay_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
- ufshcd_clkscaling_init_sysfs(hba);
+ hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
+ hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
+ sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
+ hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
+ hba->clk_gating.enable_attr.attr.mode = 0644;
+ if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
+ dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
}
-static void ufshcd_exit_clk_scaling(struct ufs_hba *hba)
+static void ufshcd_remove_clk_gating_sysfs(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkscaling_supported(hba))
- return;
-
- destroy_workqueue(hba->clk_scaling.workq);
- ufshcd_devfreq_remove(hba);
+ if (hba->clk_gating.delay_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+ if (hba->clk_gating.enable_attr.attr.name)
+ device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
}
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
@@ -1914,34 +1964,21 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
hba->clk_gating.clk_gating_workq = alloc_ordered_workqueue(wq_name,
WQ_MEM_RECLAIM | WQ_HIGHPRI);
- hba->clk_gating.is_enabled = true;
-
- hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
- hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
- sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
- hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
- hba->clk_gating.delay_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+ ufshcd_init_clk_gating_sysfs(hba);
- hba->clk_gating.enable_attr.show = ufshcd_clkgate_enable_show;
- hba->clk_gating.enable_attr.store = ufshcd_clkgate_enable_store;
- sysfs_attr_init(&hba->clk_gating.enable_attr.attr);
- hba->clk_gating.enable_attr.attr.name = "clkgate_enable";
- hba->clk_gating.enable_attr.attr.mode = 0644;
- if (device_create_file(hba->dev, &hba->clk_gating.enable_attr))
- dev_err(hba->dev, "Failed to create sysfs for clkgate_enable\n");
+ hba->clk_gating.is_enabled = true;
+ hba->clk_gating.is_initialized = true;
}
static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
{
- if (!ufshcd_is_clkgating_allowed(hba))
+ if (!hba->clk_gating.is_initialized)
return;
- device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
- device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_remove_clk_gating_sysfs(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
cancel_delayed_work_sync(&hba->clk_gating.gate_work);
destroy_workqueue(hba->clk_gating.clk_gating_workq);
+ hba->clk_gating.is_initialized = false;
}
/* Must be called with host lock acquired */
@@ -1956,7 +1993,7 @@ static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
if (!hba->clk_scaling.active_reqs++)
queue_resume_work = true;
- if (!hba->clk_scaling.is_allowed || hba->pm_op_in_progress)
+ if (!hba->clk_scaling.is_enabled || hba->pm_op_in_progress)
return;
if (queue_resume_work)
@@ -2002,7 +2039,7 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
lrbp->issue_time_stamp = ktime_get();
lrbp->compl_time_stamp = ktime_set(0, 0);
ufshcd_vops_setup_xfer_req(hba, task_tag, (lrbp->cmd ? true : false));
- ufshcd_add_command_trace(hba, task_tag, "send");
+ ufshcd_add_command_trace(hba, task_tag, UFS_CMD_SEND);
ufshcd_clk_scaling_start_busy(hba);
__set_bit(task_tag, &hba->outstanding_reqs);
ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
@@ -2138,7 +2175,7 @@ ufshcd_dispatch_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
ufshcd_writel(hba, uic_cmd->argument2, REG_UIC_COMMAND_ARG_2);
ufshcd_writel(hba, uic_cmd->argument3, REG_UIC_COMMAND_ARG_3);
- ufshcd_add_uic_command_trace(hba, uic_cmd, "send");
+ ufshcd_add_uic_command_trace(hba, uic_cmd, UFS_CMD_SEND);
/* Write UIC Cmd */
ufshcd_writel(hba, uic_cmd->command & COMMAND_OPCODE_MASK,
@@ -2857,7 +2894,7 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
hba->dev_cmd.complete = &wait;
- ufshcd_add_query_upiu_trace(hba, tag, "query_send");
+ ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr);
/* Make sure descriptors are ready before ringing the doorbell */
wmb();
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -2867,8 +2904,8 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba,
err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout);
out:
- ufshcd_add_query_upiu_trace(hba, tag,
- err ? "query_complete_err" : "query_complete");
+ ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP,
+ (struct utp_upiu_req *)lrbp->ucd_rsp_ptr);
out_put_tag:
blk_put_request(req);
@@ -3425,7 +3462,7 @@ static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
* Unit descriptors are only available for general purpose LUs (LUN id
* from 0 to 7) and RPMB Well known LU.
*/
- if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun))
+ if (!ufs_is_valid_unit_desc_lun(&hba->dev_info, lun, param_offset))
return -EOPNOTSUPP;
return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
@@ -4218,25 +4255,27 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
pwr_mode->hs_rate);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
- DL_AFC0ReqTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
- DL_FC1ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
- DL_TC1ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
- DL_AFC1ReqTimeOutVal_Default);
-
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
- DL_FC0ProtectionTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
- DL_TC0ReplayTimeOutVal_Default);
- ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
- DL_AFC0ReqTimeOutVal_Default);
+ if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+ DL_AFC0ReqTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+ DL_FC1ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+ DL_TC1ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+ DL_AFC1ReqTimeOutVal_Default);
+
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+ DL_FC0ProtectionTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+ DL_TC0ReplayTimeOutVal_Default);
+ ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+ DL_AFC0ReqTimeOutVal_Default);
+ }
ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
| pwr_mode->pwr_tx);
@@ -4543,6 +4582,7 @@ void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val)
e = &hba->ufs_stats.event[id];
e->val[e->pos] = val;
e->tstamp[e->pos] = ktime_get();
+ e->cnt += 1;
e->pos = (e->pos + 1) % UFS_EVENT_HIST_LENGTH;
ufshcd_vops_event_notify(hba, id, &val);
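
The added cnt field gives each event-history ring a running total, so the number of recorded events stays visible even after the fixed-size buffer wraps. A standalone sketch of that bookkeeping (HIST_LENGTH and the struct here are stand-ins for UFS_EVENT_HIST_LENGTH and struct ufs_event_hist):

/* Sketch: cyclic event history with a monotonically increasing counter,
 * mirroring the e->pos / e->cnt bookkeeping in ufshcd_update_evt_hist(). */
#include <stdio.h>

#define HIST_LENGTH 8   /* plays the role of UFS_EVENT_HIST_LENGTH */

struct event_hist {
        int pos;                        /* next slot to overwrite */
        unsigned int val[HIST_LENGTH];  /* cyclic buffer of values */
        unsigned long long cnt;         /* total events ever recorded */
};

static void update_evt_hist(struct event_hist *e, unsigned int val)
{
        e->val[e->pos] = val;
        e->cnt += 1;                    /* keeps counting after the ring wraps */
        e->pos = (e->pos + 1) % HIST_LENGTH;
}

int main(void)
{
        struct event_hist e = { 0 };

        for (unsigned int i = 0; i < 11; i++)
                update_evt_hist(&e, i);

        /* cnt (11) > HIST_LENGTH (8) tells us the ring has wrapped. */
        printf("recorded %llu events, ring holds the last %d\n", e.cnt, HIST_LENGTH);
        return 0;
}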
@@ -4827,6 +4867,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
struct request_queue *q = sdev->request_queue;
blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
+ if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
+ blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
if (ufshcd_is_rpm_autosuspend_allowed(hba))
sdev->rpm_autosuspend = 1;
@@ -4872,9 +4914,7 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status)
ufshcd_copy_sense_data(lrbp);
fallthrough;
case SAM_STAT_GOOD:
- result |= DID_OK << 16 |
- COMMAND_COMPLETE << 8 |
- scsi_status;
+ result |= DID_OK << 16 | scsi_status;
break;
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_BUSY:
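
The hunk above stops folding COMMAND_COMPLETE into the message byte and builds the result from just the host byte and the SAM status. A small sketch of how such a 32-bit result word can be packed and inspected; the helper names and masks are illustrative, with set_status() loosely mirroring the set_status_byte() helper added to scsi_cmnd.h further down:

/* Sketch: the classic SCSI result word is four bytes -
 * driver(31:24) host(23:16) message(15:8) status(7:0).
 * The patch keeps only host byte + status for the GOOD case. */
#include <stdio.h>

#define DID_OK                   0x00u
#define SAM_STAT_GOOD            0x00u
#define SAM_STAT_CHECK_CONDITION 0x02u

static unsigned int make_result(unsigned int host, unsigned int status)
{
        return (host << 16) | status;   /* message byte intentionally left 0 */
}

static unsigned int host_byte(unsigned int result)  { return (result >> 16) & 0xff; }
static unsigned int sam_status(unsigned int result) { return result & 0xff; }

/* loosely mirrors the new set_status_byte() helper */
static unsigned int set_status(unsigned int result, unsigned int status)
{
        return (result & 0xffffff00u) | status;
}

int main(void)
{
        unsigned int result = make_result(DID_OK, SAM_STAT_GOOD);

        result = set_status(result, SAM_STAT_CHECK_CONDITION);
        printf("host=0x%02x status=0x%02x\n", host_byte(result), sam_status(result));
        return 0;
}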
@@ -5032,7 +5072,7 @@ static irqreturn_t ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status)
if (retval == IRQ_HANDLED)
ufshcd_add_uic_command_trace(hba, hba->active_uic_cmd,
- "complete");
+ UFS_CMD_COMP);
return retval;
}
@@ -5056,7 +5096,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->compl_time_stamp = ktime_get();
cmd = lrbp->cmd;
if (cmd) {
- ufshcd_add_command_trace(hba, index, "complete");
+ ufshcd_add_command_trace(hba, index, UFS_CMD_COMP);
result = ufshcd_transfer_rsp_status(hba, lrbp);
scsi_dma_unmap(cmd);
cmd->result = result;
@@ -5070,7 +5110,7 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
if (hba->dev_cmd.complete) {
ufshcd_add_command_trace(hba, index,
- "dev_complete");
+ UFS_DEV_COMP);
complete(hba->dev_cmd.complete);
update_scaling = true;
}
@@ -5390,7 +5430,7 @@ out:
__func__, err);
}
-static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
@@ -5399,7 +5439,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
if (!ufshcd_is_wb_allowed(hba))
return 0;
- if (!(enable ^ hba->wb_enabled))
+ if (!(enable ^ hba->dev_info.wb_enabled))
return 0;
if (enable)
opcode = UPIU_QUERY_OPCODE_SET_FLAG;
@@ -5415,7 +5455,7 @@ static int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable)
return ret;
}
- hba->wb_enabled = enable;
+ hba->dev_info.wb_enabled = enable;
dev_dbg(hba->dev, "%s write booster %s %d\n",
__func__, enable ? "enable" : "disable", ret);
@@ -5438,58 +5478,37 @@ static int ufshcd_wb_toggle_flush_during_h8(struct ufs_hba *hba, bool set)
index, NULL);
}
-static inline void ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
-{
- if (enable)
- ufshcd_wb_buf_flush_enable(hba);
- else
- ufshcd_wb_buf_flush_disable(hba);
-
-}
-
-static int ufshcd_wb_buf_flush_enable(struct ufs_hba *hba)
+static inline int ufshcd_wb_toggle_flush(struct ufs_hba *hba, bool enable)
{
int ret;
u8 index;
+ enum query_opcode opcode;
- if (!ufshcd_is_wb_allowed(hba) || hba->wb_buf_flush_enabled)
+ if (!ufshcd_is_wb_allowed(hba) ||
+ hba->dev_info.wb_buf_flush_enabled == enable)
return 0;
- index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_SET_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
- if (ret)
- dev_err(hba->dev, "%s WB - buf flush enable failed %d\n",
- __func__, ret);
+ if (enable)
+ opcode = UPIU_QUERY_OPCODE_SET_FLAG;
else
- hba->wb_buf_flush_enabled = true;
-
- dev_dbg(hba->dev, "WB - Flush enabled: %d\n", ret);
- return ret;
-}
-
-static int ufshcd_wb_buf_flush_disable(struct ufs_hba *hba)
-{
- int ret;
- u8 index;
-
- if (!ufshcd_is_wb_allowed(hba) || !hba->wb_buf_flush_enabled)
- return 0;
+ opcode = UPIU_QUERY_OPCODE_CLEAR_FLAG;
index = ufshcd_wb_get_query_index(hba);
- ret = ufshcd_query_flag_retry(hba, UPIU_QUERY_OPCODE_CLEAR_FLAG,
- QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN,
- index, NULL);
+ ret = ufshcd_query_flag_retry(hba, opcode,
+ QUERY_FLAG_IDN_WB_BUFF_FLUSH_EN, index,
+ NULL);
if (ret) {
- dev_warn(hba->dev, "%s: WB - buf flush disable failed %d\n",
- __func__, ret);
- } else {
- hba->wb_buf_flush_enabled = false;
- dev_dbg(hba->dev, "WB - Flush disabled: %d\n", ret);
+ dev_err(hba->dev, "%s WB-Buf Flush %s failed %d\n", __func__,
+ enable ? "enable" : "disable", ret);
+ goto out;
}
+ hba->dev_info.wb_buf_flush_enabled = enable;
+
+ dev_dbg(hba->dev, "WB-Buf Flush %s\n", enable ? "enabled" : "disabled");
+out:
return ret;
+
}
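
The former enable/disable pair for the WriteBooster flush flag is collapsed above into one toggle that only varies the query opcode and the cached state. A compressed sketch of that "parameterize instead of duplicate" refactor, with query_flag() standing in for ufshcd_query_flag_retry() and the rest invented for illustration:

/* Sketch: one toggle routine replaces an enable and a disable helper;
 * only the opcode and the cached state differ by the 'enable' flag. */
#include <stdbool.h>
#include <stdio.h>

enum query_opcode { OP_SET_FLAG, OP_CLEAR_FLAG };

struct dev_info { bool wb_buf_flush_enabled; };

/* stands in for ufshcd_query_flag_retry(); always succeeds here */
static int query_flag(enum query_opcode op)
{
        printf("issuing %s\n", op == OP_SET_FLAG ? "SET_FLAG" : "CLEAR_FLAG");
        return 0;
}

static int wb_toggle_flush(struct dev_info *di, bool enable)
{
        int ret;

        if (di->wb_buf_flush_enabled == enable)   /* already in that state */
                return 0;

        ret = query_flag(enable ? OP_SET_FLAG : OP_CLEAR_FLAG);
        if (ret)
                return ret;

        di->wb_buf_flush_enabled = enable;        /* cache the new state */
        return 0;
}

int main(void)
{
        struct dev_info di = { .wb_buf_flush_enabled = false };

        wb_toggle_flush(&di, true);
        wb_toggle_flush(&di, true);   /* no-op, state unchanged */
        wb_toggle_flush(&di, false);
        return 0;
}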
static bool ufshcd_wb_presrv_usrspc_keep_vcc_on(struct ufs_hba *hba,
@@ -5714,6 +5733,26 @@ static inline void ufshcd_schedule_eh_work(struct ufs_hba *hba)
}
}
+static void ufshcd_clk_scaling_allow(struct ufs_hba *hba, bool allow)
+{
+ down_write(&hba->clk_scaling_lock);
+ hba->clk_scaling.is_allowed = allow;
+ up_write(&hba->clk_scaling_lock);
+}
+
+static void ufshcd_clk_scaling_suspend(struct ufs_hba *hba, bool suspend)
+{
+ if (suspend) {
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_suspend_clkscaling(hba);
+ ufshcd_clk_scaling_allow(hba, false);
+ } else {
+ ufshcd_clk_scaling_allow(hba, true);
+ if (hba->clk_scaling.is_enabled)
+ ufshcd_resume_clkscaling(hba);
+ }
+}
+
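
ufshcd_clk_scaling_allow() flips is_allowed with clk_scaling_lock held for write, so scaling paths that take the lock for read observe a consistent value. A pthread-based sketch of that reader/writer gate; only the lock and flag names echo the driver, everything else is illustrative:

/* Sketch: a writer flips the "allowed" flag under a write lock; scaling
 * work checks it under a read lock, mirroring clk_scaling_lock usage. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_rwlock_t clk_scaling_lock = PTHREAD_RWLOCK_INITIALIZER;
static bool is_allowed = true;

static void clk_scaling_allow(bool allow)       /* error-handler / suspend side */
{
        pthread_rwlock_wrlock(&clk_scaling_lock);
        is_allowed = allow;
        pthread_rwlock_unlock(&clk_scaling_lock);
}

static void try_scale(void)                     /* devfreq / scaling side */
{
        pthread_rwlock_rdlock(&clk_scaling_lock);
        if (is_allowed)
                printf("scaling clocks\n");
        else
                printf("scaling blocked\n");
        pthread_rwlock_unlock(&clk_scaling_lock);
}

int main(void)
{
        try_scale();              /* allowed */
        clk_scaling_allow(false); /* e.g. error handling in progress */
        try_scale();              /* blocked */
        clk_scaling_allow(true);
        try_scale();
        return 0;
}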
static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
{
pm_runtime_get_sync(hba->dev);
@@ -5738,27 +5777,27 @@ static void ufshcd_err_handling_prepare(struct ufs_hba *hba)
ufshcd_vops_resume(hba, pm_op);
} else {
ufshcd_hold(hba, false);
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
+ if (ufshcd_is_clkscaling_supported(hba) &&
+ hba->clk_scaling.is_enabled)
ufshcd_suspend_clkscaling(hba);
- }
+ ufshcd_clk_scaling_allow(hba, false);
}
}
static void ufshcd_err_handling_unprepare(struct ufs_hba *hba)
{
ufshcd_release(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
pm_runtime_put(hba->dev);
}
static inline bool ufshcd_err_handling_should_stop(struct ufs_hba *hba)
{
- return (!hba->is_powered || hba->ufshcd_state == UFSHCD_STATE_ERROR ||
+ return (!hba->is_powered || hba->shutting_down ||
+ hba->ufshcd_state == UFSHCD_STATE_ERROR ||
(!(hba->saved_err || hba->saved_uic_err || hba->force_reset ||
- ufshcd_is_link_broken(hba))));
+ ufshcd_is_link_broken(hba))));
}
#ifdef CONFIG_PM
@@ -5828,13 +5867,13 @@ static void ufshcd_err_handler(struct work_struct *work)
hba = container_of(work, struct ufs_hba, eh_work);
- down(&hba->eh_sem);
+ down(&hba->host_sem);
spin_lock_irqsave(hba->host->host_lock, flags);
if (ufshcd_err_handling_should_stop(hba)) {
if (hba->ufshcd_state != UFSHCD_STATE_ERROR)
hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
spin_unlock_irqrestore(hba->host->host_lock, flags);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return;
}
ufshcd_set_eh_in_progress(hba);
@@ -6003,7 +6042,7 @@ skip_err_handling:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_scsi_unblock_requests(hba);
ufshcd_err_handling_unprepare(hba);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
if (!err && needs_reset)
ufshcd_clear_ua_wluns(hba);
@@ -6293,8 +6332,7 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba)
while (intr_status && retries--) {
enabled_intr_status =
intr_status & ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
- if (intr_status)
- ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
+ ufshcd_writel(hba, intr_status, REG_INTERRUPT_STATUS);
if (enabled_intr_status)
retval |= ufshcd_sl_intr(hba, enabled_intr_status);
@@ -6380,7 +6418,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
spin_unlock_irqrestore(host->host_lock, flags);
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_send");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_SEND);
/* wait until the task management command is completed */
err = wait_for_completion_io_timeout(&wait,
@@ -6391,7 +6429,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
* use-after-free.
*/
req->end_io_data = NULL;
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete_err");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_ERR);
dev_err(hba->dev, "%s: task management cmd 0x%.2x timed-out\n",
__func__, tm_function);
if (ufshcd_clear_tm_cmd(hba, free_slot))
@@ -6402,7 +6440,7 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba,
err = 0;
memcpy(treq, hba->utmrdl_base_addr + free_slot, sizeof(*treq));
- ufshcd_add_tm_upiu_trace(hba, task_tag, "tm_complete");
+ ufshcd_add_tm_upiu_trace(hba, task_tag, UFS_TM_COMP);
}
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7248,6 +7286,7 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
struct ufs_dev_info *dev_info = &hba->dev_info;
u8 lun;
u32 d_lu_wb_buf_alloc;
+ u32 ext_ufs_feature;
if (!ufshcd_is_wb_allowed(hba))
return;
@@ -7265,30 +7304,25 @@ static void ufshcd_wb_probe(struct ufs_hba *hba, u8 *desc_buf)
DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP + 4)
goto wb_disabled;
- dev_info->d_ext_ufs_feature_sup =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
+ ext_ufs_feature = get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP);
- if (!(dev_info->d_ext_ufs_feature_sup & UFS_DEV_WRITE_BOOSTER_SUP))
+ if (!(ext_ufs_feature & UFS_DEV_WRITE_BOOSTER_SUP))
goto wb_disabled;
/*
- * WB may be supported but not configured while provisioning.
- * The spec says, in dedicated wb buffer mode,
- * a max of 1 lun would have wb buffer configured.
- * Now only shared buffer mode is supported.
+ * WB may be supported but not configured while provisioning. The spec
+ * says, in dedicated wb buffer mode, a max of 1 lun would have wb
+ * buffer configured.
*/
- dev_info->b_wb_buffer_type =
- desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
+ dev_info->wb_buffer_type = desc_buf[DEVICE_DESC_PARAM_WB_TYPE];
dev_info->b_presrv_uspc_en =
desc_buf[DEVICE_DESC_PARAM_WB_PRESRV_USRSPC_EN];
- if (dev_info->b_wb_buffer_type == WB_BUF_MODE_SHARED) {
- dev_info->d_wb_alloc_units =
- get_unaligned_be32(desc_buf +
- DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS);
- if (!dev_info->d_wb_alloc_units)
+ if (dev_info->wb_buffer_type == WB_BUF_MODE_SHARED) {
+ if (!get_unaligned_be32(desc_buf +
+ DEVICE_DESC_PARAM_WB_SHARED_ALLOC_UNITS))
goto wb_disabled;
} else {
for (lun = 0; lun < UFS_UPIU_MAX_WB_LUN_ID; lun++) {
@@ -7734,13 +7768,14 @@ static int ufshcd_add_lus(struct ufs_hba *hba)
&hba->pwr_info,
sizeof(struct ufs_pa_layer_attr));
hba->clk_scaling.saved_pwr_info.is_valid = true;
- if (!hba->devfreq) {
- ret = ufshcd_devfreq_init(hba);
- if (ret)
- goto out;
- }
-
hba->clk_scaling.is_allowed = true;
+
+ ret = ufshcd_devfreq_init(hba);
+ if (ret)
+ goto out;
+
+ hba->clk_scaling.is_enabled = true;
+ ufshcd_init_clk_scaling_sysfs(hba);
}
ufs_bsg_probe(hba);
@@ -7911,10 +7946,10 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie)
struct ufs_hba *hba = (struct ufs_hba *)data;
int ret;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
/* Initialize hba, detect and initialize UFS device */
ret = ufshcd_probe_hba(hba, true);
- up(&hba->eh_sem);
+ up(&hba->host_sem);
if (ret)
goto out;
@@ -7927,7 +7962,6 @@ out:
*/
if (ret) {
pm_runtime_put_sync(hba->dev);
- ufshcd_exit_clk_scaling(hba);
ufshcd_hba_exit(hba);
}
}
@@ -8339,6 +8373,8 @@ static int ufshcd_hba_init(struct ufs_hba *hba)
if (err)
goto out_disable_vreg;
+ ufs_debugfs_hba_init(hba);
+
hba->is_powered = true;
goto out;
@@ -8355,12 +8391,13 @@ out:
static void ufshcd_hba_exit(struct ufs_hba *hba)
{
if (hba->is_powered) {
+ ufshcd_exit_clk_scaling(hba);
+ ufshcd_exit_clk_gating(hba);
+ if (hba->eh_wq)
+ destroy_workqueue(hba->eh_wq);
+ ufs_debugfs_hba_exit(hba);
ufshcd_variant_hba_exit(hba);
ufshcd_setup_vreg(hba, false);
- ufshcd_suspend_clkscaling(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- if (hba->devfreq)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
ufshcd_setup_hba_vreg(hba, false);
hba->is_powered = false;
@@ -8655,11 +8692,8 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
ufshcd_hold(hba, false);
hba->clk_gating.is_suspended = true;
- if (hba->clk_scaling.is_allowed) {
- cancel_work_sync(&hba->clk_scaling.suspend_work);
- cancel_work_sync(&hba->clk_scaling.resume_work);
- ufshcd_suspend_clkscaling(hba);
- }
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, true);
if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
req_link_state == UIC_LINK_ACTIVE_STATE) {
@@ -8726,8 +8760,6 @@ static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
if (ret)
goto set_dev_active;
- ufshcd_vreg_set_lpm(hba);
-
disable_clks:
/*
* Call vendor specific suspend callback. As these callbacks may access
@@ -8751,13 +8783,13 @@ disable_clks:
hba->clk_gating.state);
}
+ ufshcd_vreg_set_lpm(hba);
+
/* Put the host controller in low power mode if possible */
ufshcd_hba_vreg_set_lpm(hba);
goto out;
set_link_active:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
ufshcd_vreg_set_hpm(hba);
/*
* Device hardware reset is required to exit DeepSleep. Also, for
@@ -8781,8 +8813,9 @@ set_dev_active:
if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
ufshcd_disable_auto_bkops(hba);
enable_gating:
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
+
hba->clk_gating.is_suspended = false;
hba->dev_info.b_rpm_dev_flush_capable = false;
ufshcd_clear_ua_wluns(hba);
@@ -8819,18 +8852,18 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
old_link_state = hba->uic_link_state;
ufshcd_hba_vreg_set_hpm(hba);
+ ret = ufshcd_vreg_set_hpm(hba);
+ if (ret)
+ goto out;
+
/* Make sure clocks are enabled before accessing controller */
ret = ufshcd_setup_clocks(hba, true);
if (ret)
- goto out;
+ goto disable_vreg;
/* enable the host irq as host controller would be active soon */
ufshcd_enable_irq(hba);
- ret = ufshcd_vreg_set_hpm(hba);
- if (ret)
- goto disable_irq_and_vops_clks;
-
/*
* Call vendor specific resume callback. As these callbacks may access
* vendor specific host controller register space call them when the
@@ -8838,7 +8871,7 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
*/
ret = ufshcd_vops_resume(hba, pm_op);
if (ret)
- goto disable_vreg;
+ goto disable_irq_and_vops_clks;
/* For DeepSleep, the only supported option is to have the link off */
WARN_ON(ufshcd_is_ufs_dev_deepsleep(hba) && !ufshcd_is_link_off(hba));
@@ -8885,8 +8918,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
hba->clk_gating.is_suspended = false;
- if (hba->clk_scaling.is_allowed)
- ufshcd_resume_clkscaling(hba);
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_clk_scaling_suspend(hba, false);
/* Enable Auto-Hibernate if configured */
ufshcd_auto_hibern8_enable(hba);
@@ -8907,18 +8940,16 @@ set_old_link_state:
ufshcd_link_state_transition(hba, old_link_state, 0);
vendor_suspend:
ufshcd_vops_suspend(hba, pm_op);
-disable_vreg:
- ufshcd_vreg_set_lpm(hba);
disable_irq_and_vops_clks:
ufshcd_disable_irq(hba);
- if (hba->clk_scaling.is_allowed)
- ufshcd_suspend_clkscaling(hba);
ufshcd_setup_clocks(hba, false);
if (ufshcd_is_clkgating_allowed(hba)) {
hba->clk_gating.state = CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
}
+disable_vreg:
+ ufshcd_vreg_set_lpm(hba);
out:
hba->pm_op_in_progress = 0;
if (ret)
@@ -8939,8 +8970,14 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- down(&hba->eh_sem);
- if (!hba || !hba->is_powered)
+ if (!hba) {
+ early_suspend = true;
+ return 0;
+ }
+
+ down(&hba->host_sem);
+
+ if (!hba->is_powered)
return 0;
if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
@@ -8972,7 +9009,7 @@ out:
if (!ret)
hba->is_sys_suspended = true;
else
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_suspend);
@@ -8989,9 +9026,12 @@ int ufshcd_system_resume(struct ufs_hba *hba)
int ret = 0;
ktime_t start = ktime_get();
- if (!hba) {
- up(&hba->eh_sem);
+ if (!hba)
return -EINVAL;
+
+ if (unlikely(early_suspend)) {
+ early_suspend = false;
+ down(&hba->host_sem);
}
if (!hba->is_powered || pm_runtime_suspended(hba->dev))
@@ -9008,7 +9048,7 @@ out:
hba->curr_dev_pwr_mode, hba->uic_link_state);
if (!ret)
hba->is_sys_suspended = false;
- up(&hba->eh_sem);
+ up(&hba->host_sem);
return ret;
}
EXPORT_SYMBOL(ufshcd_system_resume);
@@ -9100,7 +9140,10 @@ int ufshcd_shutdown(struct ufs_hba *hba)
{
int ret = 0;
- down(&hba->eh_sem);
+ down(&hba->host_sem);
+ hba->shutting_down = true;
+ up(&hba->host_sem);
+
if (!hba->is_powered)
goto out;
@@ -9114,7 +9157,6 @@ out:
if (ret)
dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
hba->is_powered = false;
- up(&hba->eh_sem);
/* allow force shutdown even in case of errors */
return 0;
}
@@ -9133,15 +9175,9 @@ void ufshcd_remove(struct ufs_hba *hba)
blk_mq_free_tag_set(&hba->tmf_tag_set);
blk_cleanup_queue(hba->cmd_queue);
scsi_remove_host(hba->host);
- destroy_workqueue(hba->eh_wq);
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba);
-
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- if (ufshcd_is_clkscaling_supported(hba))
- device_remove_file(hba->dev, &hba->clk_scaling.enable_attr);
ufshcd_hba_exit(hba);
}
EXPORT_SYMBOL_GPL(ufshcd_remove);
@@ -9309,7 +9345,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
- sema_init(&hba->eh_sem, 1);
+ sema_init(&hba->host_sem, 1);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
@@ -9341,7 +9377,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
dev_err(hba->dev, "request irq failed\n");
- goto exit_gating;
+ goto out_disable;
} else {
hba->is_irq_enabled = true;
}
@@ -9349,7 +9385,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
err = scsi_add_host(host, hba->dev);
if (err) {
dev_err(hba->dev, "scsi_add_host failed\n");
- goto exit_gating;
+ goto out_disable;
}
hba->cmd_queue = blk_mq_init_queue(&hba->host->tag_set);
@@ -9432,10 +9468,6 @@ free_cmd_queue:
blk_cleanup_queue(hba->cmd_queue);
out_remove_scsi_host:
scsi_remove_host(hba->host);
-exit_gating:
- ufshcd_exit_clk_scaling(hba);
- ufshcd_exit_clk_gating(hba);
- destroy_workqueue(hba->eh_wq);
out_disable:
hba->is_irq_enabled = false;
ufshcd_hba_exit(hba);
@@ -9444,6 +9476,20 @@ out_error:
}
EXPORT_SYMBOL_GPL(ufshcd_init);
+static int __init ufshcd_core_init(void)
+{
+ ufs_debugfs_init();
+ return 0;
+}
+
+static void __exit ufshcd_core_exit(void)
+{
+ ufs_debugfs_exit();
+}
+
+module_init(ufshcd_core_init);
+module_exit(ufshcd_core_exit);
+
MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
MODULE_DESCRIPTION("Generic UFS host controller driver Core");
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index aa9ea3552323..ee61f821f75d 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -383,6 +383,7 @@ enum clk_gating_state {
* @delay_attr: sysfs attribute to control delay_attr
* @enable_attr: sysfs attribute to enable/disable clock gating
* @is_enabled: Indicates the current status of clock gating
+ * @is_initialized: Indicates whether clock gating is initialized or not
* @active_reqs: number of requests that are pending and should be waited for
* completion before gating clocks.
*/
@@ -395,6 +396,7 @@ struct ufs_clk_gating {
struct device_attribute delay_attr;
struct device_attribute enable_attr;
bool is_enabled;
+ bool is_initialized;
int active_reqs;
struct workqueue_struct *clk_gating_workq;
};
@@ -419,7 +421,11 @@ struct ufs_saved_pwr_info {
* @suspend_work: worker to suspend devfreq
* @resume_work: worker to resume devfreq
* @min_gear: lowest HS gear to scale down to
- * @is_allowed: tracks if scaling is currently allowed or not
+ * @is_enabled: tracks if scaling is currently enabled or not, controlled by
+ *	the clkscale_enable sysfs node
+ * @is_allowed: tracks if scaling is currently allowed or not, used to block
+ *	clock scaling that is not invoked from the devfreq governor
+ * @is_initialized: Indicates whether clock scaling is initialized or not
* @is_busy_started: tracks if busy period has started or not
* @is_suspended: tracks if devfreq is suspended or not
*/
@@ -434,7 +440,9 @@ struct ufs_clk_scaling {
struct work_struct suspend_work;
struct work_struct resume_work;
u32 min_gear;
+ bool is_enabled;
bool is_allowed;
+ bool is_initialized;
bool is_busy_started;
bool is_suspended;
};
@@ -445,11 +453,13 @@ struct ufs_clk_scaling {
* @pos: index to indicate cyclic buffer position
* @reg: cyclic buffer for registers value
* @tstamp: cyclic buffer for time stamp
+ * @cnt: error counter
*/
struct ufs_event_hist {
int pos;
u32 val[UFS_EVENT_HIST_LENGTH];
ktime_t tstamp[UFS_EVENT_HIST_LENGTH];
+ unsigned long long cnt;
};
/**
@@ -551,6 +561,16 @@ enum ufshcd_quirks {
*/
UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL = 1 << 12,
+ /*
+ * This quirk skips programming the default UniPro timeout values
+ * before a power mode change
+ */
+ UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING = 1 << 13,
+
+ /*
+ * This quirk allows only sg entries aligned with page size.
+ */
+ UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
};
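
Each UFSHCD_QUIRK_* value occupies its own bit of hba->quirks, so a platform can OR several quirks together and the core can test them independently. A tiny sketch of that bitmask pattern with made-up quirk names:

/* Sketch: quirks as independent bits in one mask. */
#include <stdio.h>

enum demo_quirks {
        DEMO_QUIRK_SKIP_TIMEOUTS = 1 << 13,
        DEMO_QUIRK_ALIGN_SG      = 1 << 14,   /* must not reuse 1 << 13 */
};

int main(void)
{
        unsigned int quirks = DEMO_QUIRK_ALIGN_SG;   /* a platform sets its quirks */

        if (quirks & DEMO_QUIRK_SKIP_TIMEOUTS)
                printf("skipping default UniPro timeouts\n");
        if (quirks & DEMO_QUIRK_ALIGN_SG)
                printf("aligning sg entries to the page size\n");
        return 0;
}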
enum ufshcd_caps {
@@ -657,6 +677,8 @@ struct ufs_hba_variant_params {
* @intr_mask: Interrupt Mask Bits
* @ee_ctrl_mask: Exception event control mask
* @is_powered: flag to check if HBA is powered
+ * @shutting_down: flag to check if shutdown has been invoked
+ * @host_sem: semaphore used to serialize concurrent contexts
* @eh_wq: Workqueue that eh_work works on
* @eh_work: Worker to handle UFS errors that require s/w attention
* @eeh_work: Worker to handle exception events
@@ -753,7 +775,8 @@ struct ufs_hba {
u32 intr_mask;
u16 ee_ctrl_mask;
bool is_powered;
- struct semaphore eh_sem;
+ bool shutting_down;
+ struct semaphore host_sem;
/* Work Queues */
struct workqueue_struct *eh_wq;
@@ -807,8 +830,6 @@ struct ufs_hba {
struct device bsg_dev;
struct request_queue *bsg_queue;
- bool wb_buf_flush_enabled;
- bool wb_enabled;
struct delayed_work rpm_dev_flush_recheck_work;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -817,6 +838,9 @@ struct ufs_hba {
u32 crypto_cfg_register;
struct blk_keyslot_manager ksm;
#endif
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs_root;
+#endif
};
/* Returns true if clocks can be gated. Otherwise false */
@@ -877,6 +901,11 @@ static inline bool ufshcd_is_wb_allowed(struct ufs_hba *hba)
return hba->caps & UFSHCD_CAP_WB_EN;
}
+static inline bool ufshcd_is_user_access_allowed(struct ufs_hba *hba)
+{
+ return !hba->shutting_down;
+}
+
#define ufshcd_writel(hba, val, reg) \
writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg) \
@@ -948,7 +977,7 @@ static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
static inline u8 ufshcd_wb_get_query_index(struct ufs_hba *hba)
{
- if (hba->dev_info.b_wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
+ if (hba->dev_info.wb_buffer_type == WB_BUF_MODE_LU_DEDICATED)
return hba->dev_info.wb_dedicated_lu;
return 0;
}
@@ -1070,6 +1099,8 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
u8 *desc_buff, int *buff_len,
enum query_opcode desc_op);
+int ufshcd_wb_ctrl(struct ufs_hba *hba, bool enable);
+
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c
index 87dafbc942d3..a23277bb870e 100644
--- a/drivers/scsi/wd33c93.c
+++ b/drivers/scsi/wd33c93.c
@@ -1176,7 +1176,7 @@ wd33c93_intr(struct Scsi_Host *instance)
if (cmd->SCp.Status == ILLEGAL_STATUS_BYTE)
cmd->SCp.Status = lun;
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != GOOD)
+ && cmd->SCp.Status != SAM_STAT_GOOD)
cmd->result =
(cmd->
result & 0x00ffff) | (DID_ERROR << 16);
@@ -1262,7 +1262,7 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[cmd->device->id] &= ~(1 << (cmd->device->lun & 0xff));
hostdata->state = S_UNCONNECTED;
- if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != GOOD)
+ if (cmd->cmnd[0] == REQUEST_SENSE && cmd->SCp.Status != SAM_STAT_GOOD)
cmd->result =
(cmd->result & 0x00ffff) | (DID_ERROR << 16);
else
@@ -1296,7 +1296,7 @@ wd33c93_intr(struct Scsi_Host *instance)
hostdata->state = S_UNCONNECTED;
DB(DB_INTR, printk(":%d", cmd->SCp.Status))
if (cmd->cmnd[0] == REQUEST_SENSE
- && cmd->SCp.Status != GOOD)
+ && cmd->SCp.Status != SAM_STAT_GOOD)
cmd->result =
(cmd->
result & 0x00ffff) | (DID_ERROR << 16);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 518fac4864cf..d0e7ed8f28cc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -4327,7 +4327,7 @@ int iscsit_close_connection(
atomic_read(&sess->session_fall_back_to_erl0)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
- iscsit_close_session(sess);
+ iscsit_close_session(sess, true);
return 0;
} else if (atomic_read(&sess->session_logout)) {
@@ -4337,7 +4337,7 @@ int iscsit_close_connection(
if (atomic_read(&sess->session_close)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
- iscsit_close_session(sess);
+ iscsit_close_session(sess, true);
} else {
spin_unlock_bh(&sess->conn_lock);
}
@@ -4353,7 +4353,7 @@ int iscsit_close_connection(
if (atomic_read(&sess->session_close)) {
spin_unlock_bh(&sess->conn_lock);
complete_all(&sess->session_wait_comp);
- iscsit_close_session(sess);
+ iscsit_close_session(sess, true);
} else {
spin_unlock_bh(&sess->conn_lock);
}
@@ -4366,7 +4366,7 @@ int iscsit_close_connection(
* If the iSCSI Session for the iSCSI Initiator Node exists,
* forcefully shutdown the iSCSI NEXUS.
*/
-int iscsit_close_session(struct iscsi_session *sess)
+int iscsit_close_session(struct iscsi_session *sess, bool can_sleep)
{
struct iscsi_portal_group *tpg = sess->tpg;
struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
@@ -4399,14 +4399,10 @@ int iscsit_close_session(struct iscsi_session *sess)
* time2retain handler) and contain an active session usage count, we
* restart the timer and exit.
*/
- if (!in_interrupt()) {
- iscsit_check_session_usage_count(sess);
- } else {
- if (iscsit_check_session_usage_count(sess) == 2) {
- atomic_set(&sess->session_logout, 0);
- iscsit_start_time2retain_handler(sess);
- return 0;
- }
+ if (iscsit_check_session_usage_count(sess, can_sleep)) {
+ atomic_set(&sess->session_logout, 0);
+ iscsit_start_time2retain_handler(sess);
+ return 0;
}
transport_deregister_session(sess->se_sess);
diff --git a/drivers/target/iscsi/iscsi_target.h b/drivers/target/iscsi/iscsi_target.h
index 7409ce2a6607..b35a96ded9c1 100644
--- a/drivers/target/iscsi/iscsi_target.h
+++ b/drivers/target/iscsi/iscsi_target.h
@@ -41,7 +41,7 @@ extern void iscsit_thread_get_cpumask(struct iscsi_conn *);
extern int iscsi_target_tx_thread(void *);
extern int iscsi_target_rx_thread(void *);
extern int iscsit_close_connection(struct iscsi_conn *);
-extern int iscsit_close_session(struct iscsi_session *);
+extern int iscsit_close_session(struct iscsi_session *, bool can_sleep);
extern void iscsit_fail_session(struct iscsi_session *);
extern void iscsit_stop_session(struct iscsi_session *, int, int);
extern int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *, int);
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index b4abd7b68e6d..102c9cbf59f3 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -765,7 +765,7 @@ void iscsit_handle_time2retain_timeout(struct timer_list *t)
iscsit_fill_cxn_timeout_err_stats(sess);
spin_unlock_bh(&se_tpg->session_lock);
- iscsit_close_session(sess);
+ iscsit_close_session(sess, false);
}
void iscsit_start_time2retain_handler(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 45ba07c6ec27..9468b017b4a7 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -779,21 +779,22 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown)
}
EXPORT_SYMBOL(iscsit_free_cmd);
-int iscsit_check_session_usage_count(struct iscsi_session *sess)
+bool iscsit_check_session_usage_count(struct iscsi_session *sess,
+ bool can_sleep)
{
spin_lock_bh(&sess->session_usage_lock);
if (sess->session_usage_count != 0) {
sess->session_waiting_on_uc = 1;
spin_unlock_bh(&sess->session_usage_lock);
- if (in_interrupt())
- return 2;
+ if (!can_sleep)
+ return true;
wait_for_completion(&sess->session_waiting_on_uc_comp);
- return 1;
+ return false;
}
spin_unlock_bh(&sess->session_usage_lock);
- return 0;
+ return false;
}
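
The in_interrupt() probe is replaced by an explicit can_sleep argument: callers that may block wait for the usage count to drain, while timer context bails out and lets time2retain retry. A userspace sketch of that shape, with a mutex and condition variable standing in for the kernel completion (all names here are illustrative):

/* Sketch: "wait if the caller can sleep, otherwise report busy",
 * modelled with a mutex + condvar instead of a kernel completion. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int usage_count;

/* returns true if the session is still in use and we could not wait */
static bool check_usage_count(bool can_sleep)
{
        pthread_mutex_lock(&lock);
        while (usage_count != 0) {
                if (!can_sleep) {              /* timer/atomic context: don't block */
                        pthread_mutex_unlock(&lock);
                        return true;
                }
                pthread_cond_wait(&drained, &lock);
        }
        pthread_mutex_unlock(&lock);
        return false;
}

static void put_usage(void)
{
        pthread_mutex_lock(&lock);
        if (--usage_count == 0)
                pthread_cond_broadcast(&drained);
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        usage_count = 1;
        printf("non-sleeping caller busy: %d\n", check_usage_count(false));
        put_usage();
        printf("after drop, busy: %d\n", check_usage_count(true));
        return 0;
}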
void iscsit_dec_session_usage_count(struct iscsi_session *sess)
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h
index 68e84803b0a1..8ee1c133a9b7 100644
--- a/drivers/target/iscsi/iscsi_target_util.h
+++ b/drivers/target/iscsi/iscsi_target_util.h
@@ -40,7 +40,7 @@ extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);
extern void iscsit_release_cmd(struct iscsi_cmd *);
extern void __iscsit_free_cmd(struct iscsi_cmd *, bool);
extern void iscsit_free_cmd(struct iscsi_cmd *, bool);
-extern int iscsit_check_session_usage_count(struct iscsi_session *);
+extern bool iscsit_check_session_usage_count(struct iscsi_session *sess, bool can_sleep);
extern void iscsit_dec_session_usage_count(struct iscsi_session *);
extern void iscsit_inc_session_usage_count(struct iscsi_session *);
extern struct iscsi_conn *iscsit_get_conn_from_cid(struct iscsi_session *, u16);
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 6b72afee2f8b..5517c7dd5144 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -860,8 +860,6 @@ int core_alua_check_nonop_delay(
{
if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
return 0;
- if (in_interrupt())
- return 0;
/*
* The ALUA Active/NonOptimized access state delay can be disabled
* via configfs with a value of zero
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index cce455929778..5a66854def95 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -267,7 +267,7 @@ fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct fd_dev *fd_dev = FD_DEV(dev);
struct file *file = fd_dev->fd_file;
struct target_core_file_cmd *aio_cmd;
- struct iov_iter iter = {};
+ struct iov_iter iter;
struct scatterlist *sg;
ssize_t len = 0;
int ret = 0, i;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index fca4bd079d02..93ea17cbad79 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1313,12 +1313,32 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
" %u does not match SCSI CDB Length: %u for SAM Opcode:"
" 0x%02x\n", cmd->se_tfo->fabric_name,
cmd->data_length, size, cmd->t_task_cdb[0]);
+ /*
+ * For READ command for the overflow case keep the existing
+ * fabric provided ->data_length. Otherwise for the underflow
+ * case, reset ->data_length to the smaller SCSI expected data
+ * transfer length.
+ */
+ if (size > cmd->data_length) {
+ cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
+ cmd->residual_count = (size - cmd->data_length);
+ } else {
+ cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+ cmd->residual_count = (cmd->data_length - size);
+ /*
+ * Do not truncate ->data_length for WRITE command to
+ * dump all payload
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ cmd->data_length = size;
+ }
+ }
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
pr_err_ratelimited("Rejecting underflow/overflow"
" for WRITE data CDB\n");
- return TCM_INVALID_CDB_FIELD;
+ return TCM_INVALID_FIELD_IN_COMMAND_IU;
}
/*
* Some fabric drivers like iscsi-target still expect to
@@ -1332,31 +1352,6 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
return TCM_INVALID_CDB_FIELD;
}
}
- /*
- * Reject READ_* or WRITE_* with overflow/underflow for
- * type SCF_SCSI_DATA_CDB.
- */
- if (dev->dev_attrib.block_size != 512) {
- pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
- " CDB on non 512-byte sector setup subsystem"
- " plugin: %s\n", dev->transport->name);
- /* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
- return TCM_INVALID_CDB_FIELD;
- }
- /*
- * For the overflow case keep the existing fabric provided
- * ->data_length. Otherwise for the underflow case, reset
- * ->data_length to the smaller SCSI expected data transfer
- * length.
- */
- if (size > cmd->data_length) {
- cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
- cmd->residual_count = (size - cmd->data_length);
- } else {
- cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
- cmd->residual_count = (cmd->data_length - size);
- cmd->data_length = size;
- }
}
return target_check_max_data_sg_nents(cmd, dev, size);
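
The relocated block computes residuals for both directions instead of rejecting over/underflow on non-512-byte-sector backends. A plain-C sketch of the arithmetic, with field names borrowed from se_cmd purely for illustration:

/* Sketch: overflow/underflow residual bookkeeping between the CDB's
 * transfer length ("size") and the fabric-provided data_length. */
#include <stdbool.h>
#include <stdio.h>

struct demo_cmd {
        unsigned int data_length;     /* what the fabric said it will move */
        unsigned int residual_count;
        bool overflow, underflow;
        bool is_read;                 /* DMA_FROM_DEVICE in the real code */
};

static void size_check(struct demo_cmd *cmd, unsigned int size)
{
        if (size > cmd->data_length) {            /* CDB asks for more */
                cmd->overflow = true;
                cmd->residual_count = size - cmd->data_length;
        } else {                                  /* CDB asks for less */
                cmd->underflow = true;
                cmd->residual_count = cmd->data_length - size;
                /* READs are truncated to the CDB length; WRITE payload is kept */
                if (cmd->is_read)
                        cmd->data_length = size;
        }
}

int main(void)
{
        struct demo_cmd rd = { .data_length = 8192, .is_read = true };
        struct demo_cmd wr = { .data_length = 512,  .is_read = false };

        size_check(&rd, 4096);   /* underflow: residual 4096, length trimmed */
        size_check(&wr, 4096);   /* overflow:  residual 3584, length kept */
        printf("read: residual=%u len=%u\n", rd.residual_count, rd.data_length);
        printf("write: residual=%u len=%u\n", wr.residual_count, wr.data_length);
        return 0;
}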
@@ -1512,17 +1507,14 @@ int transport_handle_cdb_direct(
{
sense_reason_t ret;
+ might_sleep();
+
if (!cmd->se_lun) {
dump_stack();
pr_err("cmd->se_lun is NULL\n");
return -EINVAL;
}
- if (in_interrupt()) {
- dump_stack();
- pr_err("transport_generic_handle_cdb cannot be called"
- " from interrupt context\n");
- return -EINVAL;
- }
+
/*
* Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
* outstanding descriptors are handled correctly during shutdown via
@@ -1613,10 +1605,11 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
sense_reason_t rc;
int ret;
+ might_sleep();
+
se_tpg = se_sess->se_tpg;
BUG_ON(!se_tpg);
BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
- BUG_ON(in_interrupt());
if (flags & TARGET_SCF_USE_CPUID)
se_cmd->se_cmd_flags |= SCF_USE_CPUID;
@@ -1884,6 +1877,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
case TCM_TOO_MANY_SEGMENT_DESCS:
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
+ case TCM_INVALID_FIELD_IN_COMMAND_IU:
break;
case TCM_OUT_OF_RESOURCES:
cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
@@ -3210,6 +3204,11 @@ static const struct sense_detail sense_detail_table[] = {
.asc = 0x55,
.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
},
+ [TCM_INVALID_FIELD_IN_COMMAND_IU] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x0e,
+ .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
+ },
};
/**
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 4e2d61e8fb1e..9271d7a49b90 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -391,10 +391,6 @@ struct sas_ha_struct {
int strict_wide_ports; /* both sas_addr and attached_sas_addr must match
* their siblings when forming wide ports */
- /* LLDD calls these to notify the class of an event. */
- int (*notify_port_event)(struct asd_sas_phy *, enum port_event);
- int (*notify_phy_event)(struct asd_sas_phy *, enum phy_event);
-
void *lldd_ha; /* not touched by sas class code */
struct list_head eh_done_q; /* complete via scsi_eh_flush_done_q */
@@ -706,4 +702,9 @@ struct sas_phy *sas_get_local_phy(struct domain_device *dev);
int sas_request_addr(struct Scsi_Host *shost, u8 *addr);
+int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event,
+ gfp_t gfp_flags);
+int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event,
+ gfp_t gfp_flags);
+
#endif /* _SASLIB_H_ */
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 5339baadc082..e75cca25338a 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -116,6 +116,7 @@ static inline int scsi_is_wlun(u64 lun)
#define CLEAR_TASK_SET 0x0e
#define INITIATE_RECOVERY 0x0f /* SCSI-II only */
#define RELEASE_RECOVERY 0x10 /* SCSI-II only */
+#define TERMINATE_IO_PROC 0x11 /* SCSI-II only */
#define CLEAR_ACA 0x16
#define LOGICAL_UNIT_RESET 0x17
#define SIMPLE_QUEUE_TAG 0x20
@@ -159,6 +160,7 @@ static inline int scsi_is_wlun(u64 lun)
* paths might yield different results */
#define DID_ALLOC_FAILURE 0x12 /* Space allocation on the device failed */
#define DID_MEDIUM_ERROR 0x13 /* Medium error */
+#define DID_TRANSPORT_MARGINAL 0x14 /* Transport marginal errors */
#define DRIVER_OK 0x00 /* Driver status */
/*
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 69ade4fb71aa..ace15b5dc956 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -308,6 +308,11 @@ static inline struct scsi_data_buffer *scsi_prot(struct scsi_cmnd *cmd)
#define scsi_for_each_prot_sg(cmd, sg, nseg, __i) \
for_each_sg(scsi_prot_sglist(cmd), sg, nseg, __i)
+static inline void set_status_byte(struct scsi_cmnd *cmd, char status)
+{
+ cmd->result = (cmd->result & 0xffffff00) | status;
+}
+
static inline void set_msg_byte(struct scsi_cmnd *cmd, char status)
{
cmd->result = (cmd->result & 0xffff00ff) | (status << 8);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 701f178b20ae..e30fd963b97d 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -314,6 +314,12 @@ struct scsi_host_template {
* Status: OPTIONAL
*/
enum blk_eh_timer_return (*eh_timed_out)(struct scsi_cmnd *);
+ /*
+ * Optional routine that allows the transport to decide if a cmd
+ * is retryable. Return true if the transport is in a state the
+ * cmd should be retried on.
+ */
+ bool (*eh_should_retry_cmd)(struct scsi_cmnd *scmd);
/* This is an optional routine that allows transport to initiate
* LLD adapter or firmware reset using sysfs attribute.
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index c759b29e46c7..14214ee121ad 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -67,6 +67,7 @@ enum fc_port_state {
FC_PORTSTATE_ERROR,
FC_PORTSTATE_LOOPBACK,
FC_PORTSTATE_DELETED,
+ FC_PORTSTATE_MARGINAL,
};
@@ -742,7 +743,6 @@ struct fc_function_template {
unsigned long disable_target_scan:1;
};
-
/**
* fc_remote_port_chkready - called to validate the remote port state
* prior to initiating io to the port.
@@ -758,6 +758,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
switch (rport->port_state) {
case FC_PORTSTATE_ONLINE:
+ case FC_PORTSTATE_MARGINAL:
if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
@@ -839,6 +840,7 @@ int fc_vport_terminate(struct fc_vport *vport);
int fc_block_rport(struct fc_rport *rport);
int fc_block_scsi_eh(struct scsi_cmnd *cmnd);
enum blk_eh_timer_return fc_eh_timed_out(struct scsi_cmnd *scmd);
+bool fc_eh_should_retry_cmd(struct scsi_cmnd *scmd);
static inline struct Scsi_Host *fc_bsg_to_shost(struct bsg_job *job)
{
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 63dd12124139..54dcc0eb25fa 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -187,6 +187,7 @@ enum tcm_sense_reason_table {
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
TCM_LUN_BUSY = R(0x1e),
+ TCM_INVALID_FIELD_IN_COMMAND_IU = R(0x1f),
#undef R
};
diff --git a/include/trace/events/ufs.h b/include/trace/events/ufs.h
index 0bd54a184391..e151477d645c 100644
--- a/include/trace/events/ufs.h
+++ b/include/trace/events/ufs.h
@@ -20,32 +20,51 @@
{ SYNCHRONIZE_CACHE, "SYNC" }, \
{ UNMAP, "UNMAP" })
-#define UFS_LINK_STATES \
- EM(UIC_LINK_OFF_STATE) \
- EM(UIC_LINK_ACTIVE_STATE) \
- EMe(UIC_LINK_HIBERN8_STATE)
-
-#define UFS_PWR_MODES \
- EM(UFS_ACTIVE_PWR_MODE) \
- EM(UFS_SLEEP_PWR_MODE) \
- EM(UFS_POWERDOWN_PWR_MODE) \
- EMe(UFS_DEEPSLEEP_PWR_MODE)
-
-#define UFSCHD_CLK_GATING_STATES \
- EM(CLKS_OFF) \
- EM(CLKS_ON) \
- EM(REQ_CLKS_OFF) \
- EMe(REQ_CLKS_ON)
+#define UFS_LINK_STATES \
+ EM(UIC_LINK_OFF_STATE, "UIC_LINK_OFF_STATE") \
+ EM(UIC_LINK_ACTIVE_STATE, "UIC_LINK_ACTIVE_STATE") \
+ EMe(UIC_LINK_HIBERN8_STATE, "UIC_LINK_HIBERN8_STATE")
+
+#define UFS_PWR_MODES \
+ EM(UFS_ACTIVE_PWR_MODE, "UFS_ACTIVE_PWR_MODE") \
+ EM(UFS_SLEEP_PWR_MODE, "UFS_SLEEP_PWR_MODE") \
+ EM(UFS_POWERDOWN_PWR_MODE, "UFS_POWERDOWN_PWR_MODE") \
+ EMe(UFS_DEEPSLEEP_PWR_MODE, "UFS_DEEPSLEEP_PWR_MODE")
+
+#define UFSCHD_CLK_GATING_STATES \
+ EM(CLKS_OFF, "CLKS_OFF") \
+ EM(CLKS_ON, "CLKS_ON") \
+ EM(REQ_CLKS_OFF, "REQ_CLKS_OFF") \
+ EMe(REQ_CLKS_ON, "REQ_CLKS_ON")
+
+#define UFS_CMD_TRACE_STRINGS \
+ EM(UFS_CMD_SEND, "send_req") \
+ EM(UFS_CMD_COMP, "complete_rsp") \
+ EM(UFS_DEV_COMP, "dev_complete") \
+ EM(UFS_QUERY_SEND, "query_send") \
+ EM(UFS_QUERY_COMP, "query_complete") \
+ EM(UFS_QUERY_ERR, "query_complete_err") \
+ EM(UFS_TM_SEND, "tm_send") \
+ EM(UFS_TM_COMP, "tm_complete") \
+ EMe(UFS_TM_ERR, "tm_complete_err")
+
+#define UFS_CMD_TRACE_TSF_TYPES \
+ EM(UFS_TSF_CDB, "CDB") \
+ EM(UFS_TSF_OSF, "OSF") \
+ EM(UFS_TSF_TM_INPUT, "TM_INPUT") \
+ EMe(UFS_TSF_TM_OUTPUT, "TM_OUTPUT")
/* Enums require being exported to userspace, for user tool parsing */
#undef EM
#undef EMe
-#define EM(a) TRACE_DEFINE_ENUM(a);
-#define EMe(a) TRACE_DEFINE_ENUM(a);
+#define EM(a, b) TRACE_DEFINE_ENUM(a);
+#define EMe(a, b) TRACE_DEFINE_ENUM(a);
UFS_LINK_STATES;
UFS_PWR_MODES;
UFSCHD_CLK_GATING_STATES;
+UFS_CMD_TRACE_STRINGS
+UFS_CMD_TRACE_TSF_TYPES
/*
* Now redefine the EM() and EMe() macros to map the enums to the strings
@@ -53,8 +72,13 @@ UFSCHD_CLK_GATING_STATES;
*/
#undef EM
#undef EMe
-#define EM(a) { a, #a },
-#define EMe(a) { a, #a }
+#define EM(a, b) {a, b},
+#define EMe(a, b) {a, b}
+
+#define show_ufs_cmd_trace_str(str_t) \
+ __print_symbolic(str_t, UFS_CMD_TRACE_STRINGS)
+#define show_ufs_cmd_trace_tsf(tsf) \
+ __print_symbolic(tsf, UFS_CMD_TRACE_TSF_TYPES)
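
The EM()/EMe() pairs above use the usual X-macro trick: one list, expanded twice with different macro definitions (here, once for TRACE_DEFINE_ENUM() and once for the value-to-string pairs fed to __print_symbolic()). A self-contained sketch of the dual-expansion technique with invented names, where the first pass declares the enum and the second builds the name table:

/* Sketch: one list macro expanded twice - once to declare enum values,
 * once to build a matching name table - in the spirit of EM()/EMe(). */
#include <stdio.h>

#define DEMO_CMD_STRINGS \
        EM(DEMO_CMD_SEND, "send_req")     \
        EM(DEMO_CMD_COMP, "complete_rsp") \
        EMe(DEMO_TM_ERR,  "tm_complete_err")

/* first expansion: the enum constants */
#define EM(a, b)  a,
#define EMe(a, b) a
enum demo_cmd_str { DEMO_CMD_STRINGS };
#undef EM
#undef EMe

/* second expansion: enum -> string table */
#define EM(a, b)  [a] = b,
#define EMe(a, b) [a] = b
static const char *const demo_cmd_names[] = { DEMO_CMD_STRINGS };
#undef EM
#undef EMe

int main(void)
{
        printf("%d maps to \"%s\"\n", DEMO_CMD_COMP, demo_cmd_names[DEMO_CMD_COMP]);
        return 0;
}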
TRACE_EVENT(ufshcd_clk_gating,
@@ -223,16 +247,16 @@ DEFINE_EVENT(ufshcd_template, ufshcd_init,
TP_ARGS(dev_name, err, usecs, dev_state, link_state));
TRACE_EVENT(ufshcd_command,
- TP_PROTO(const char *dev_name, const char *str, unsigned int tag,
- u32 doorbell, int transfer_len, u32 intr, u64 lba,
- u8 opcode, u8 group_id),
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t,
+ unsigned int tag, u32 doorbell, int transfer_len, u32 intr,
+ u64 lba, u8 opcode, u8 group_id),
- TP_ARGS(dev_name, str, tag, doorbell, transfer_len,
+ TP_ARGS(dev_name, str_t, tag, doorbell, transfer_len,
intr, lba, opcode, group_id),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__field(unsigned int, tag)
__field(u32, doorbell)
__field(int, transfer_len)
@@ -244,7 +268,7 @@ TRACE_EVENT(ufshcd_command,
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
__entry->tag = tag;
__entry->doorbell = doorbell;
__entry->transfer_len = transfer_len;
@@ -256,22 +280,22 @@ TRACE_EVENT(ufshcd_command,
TP_printk(
"%s: %s: tag: %u, DB: 0x%x, size: %d, IS: %u, LBA: %llu, opcode: 0x%x (%s), group_id: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->tag,
- __entry->doorbell, __entry->transfer_len,
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->tag, __entry->doorbell, __entry->transfer_len,
__entry->intr, __entry->lba, (u32)__entry->opcode,
str_opcode(__entry->opcode), (u32)__entry->group_id
)
);
TRACE_EVENT(ufshcd_uic_command,
- TP_PROTO(const char *dev_name, const char *str, u32 cmd,
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, u32 cmd,
u32 arg1, u32 arg2, u32 arg3),
- TP_ARGS(dev_name, str, cmd, arg1, arg2, arg3),
+ TP_ARGS(dev_name, str_t, cmd, arg1, arg2, arg3),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__field(u32, cmd)
__field(u32, arg1)
__field(u32, arg2)
@@ -280,7 +304,7 @@ TRACE_EVENT(ufshcd_uic_command,
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
__entry->cmd = cmd;
__entry->arg1 = arg1;
__entry->arg2 = arg2;
@@ -289,34 +313,38 @@ TRACE_EVENT(ufshcd_uic_command,
TP_printk(
"%s: %s: cmd: 0x%x, arg1: 0x%x, arg2: 0x%x, arg3: 0x%x",
- __get_str(str), __get_str(dev_name), __entry->cmd,
- __entry->arg1, __entry->arg2, __entry->arg3
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
+ __entry->cmd, __entry->arg1, __entry->arg2, __entry->arg3
)
);
TRACE_EVENT(ufshcd_upiu,
- TP_PROTO(const char *dev_name, const char *str, void *hdr, void *tsf),
+ TP_PROTO(const char *dev_name, enum ufs_trace_str_t str_t, void *hdr,
+ void *tsf, enum ufs_trace_tsf_t tsf_t),
- TP_ARGS(dev_name, str, hdr, tsf),
+ TP_ARGS(dev_name, str_t, hdr, tsf, tsf_t),
TP_STRUCT__entry(
__string(dev_name, dev_name)
- __string(str, str)
+ __field(enum ufs_trace_str_t, str_t)
__array(unsigned char, hdr, 12)
__array(unsigned char, tsf, 16)
+ __field(enum ufs_trace_tsf_t, tsf_t)
),
TP_fast_assign(
__assign_str(dev_name, dev_name);
- __assign_str(str, str);
+ __entry->str_t = str_t;
memcpy(__entry->hdr, hdr, sizeof(__entry->hdr));
memcpy(__entry->tsf, tsf, sizeof(__entry->tsf));
+ __entry->tsf_t = tsf_t;
),
TP_printk(
- "%s: %s: HDR:%s, CDB:%s",
- __get_str(str), __get_str(dev_name),
+ "%s: %s: HDR:%s, %s:%s",
+ show_ufs_cmd_trace_str(__entry->str_t), __get_str(dev_name),
__print_hex(__entry->hdr, sizeof(__entry->hdr)),
+ show_ufs_cmd_trace_tsf(__entry->tsf_t),
__print_hex(__entry->tsf, sizeof(__entry->tsf))
)
);