Diffstat (limited to 'drivers/scsi')
-rw-r--r--  drivers/scsi/3w-9xxx.c | 4
-rw-r--r--  drivers/scsi/3w-sas.c | 4
-rw-r--r--  drivers/scsi/3w-xxxx.c | 4
-rw-r--r--  drivers/scsi/BusLogic.c | 2
-rw-r--r--  drivers/scsi/Kconfig | 2
-rw-r--r--  drivers/scsi/Makefile | 2
-rw-r--r--  drivers/scsi/NCR5380.c | 2
-rw-r--r--  drivers/scsi/NCR_D700.c | 12
-rw-r--r--  drivers/scsi/NCR_Q720.c | 2
-rw-r--r--  drivers/scsi/a100u2w.c | 8
-rw-r--r--  drivers/scsi/a2091.c | 9
-rw-r--r--  drivers/scsi/aacraid/aachba.c | 87
-rw-r--r--  drivers/scsi/aacraid/aacraid.h | 8
-rw-r--r--  drivers/scsi/aacraid/comminit.c | 11
-rw-r--r--  drivers/scsi/aacraid/linit.c | 15
-rw-r--r--  drivers/scsi/aacraid/src.c | 4
-rw-r--r--  drivers/scsi/advansys.c | 152
-rw-r--r--  drivers/scsi/aha152x.c | 2
-rw-r--r--  drivers/scsi/aha1740.c | 4
-rw-r--r--  drivers/scsi/aic94xx/aic94xx_init.c | 23
-rw-r--r--  drivers/scsi/arm/acornscsi.c | 7
-rw-r--r--  drivers/scsi/arm/arxescsi.c | 7
-rw-r--r--  drivers/scsi/arm/cumana_1.c | 8
-rw-r--r--  drivers/scsi/arm/cumana_2.c | 8
-rw-r--r--  drivers/scsi/arm/eesox.c | 7
-rw-r--r--  drivers/scsi/arm/oak.c | 7
-rw-r--r--  drivers/scsi/arm/powertec.c | 8
-rw-r--r--  drivers/scsi/atp870u.c | 2
-rw-r--r--  drivers/scsi/be2iscsi/be.h | 7
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.c | 236
-rw-r--r--  drivers/scsi/be2iscsi/be_cmds.h | 93
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.c | 124
-rw-r--r--  drivers/scsi/be2iscsi/be_iscsi.h | 2
-rw-r--r--  drivers/scsi/be2iscsi/be_main.c | 1063
-rw-r--r--  drivers/scsi/be2iscsi/be_main.h | 152
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.c | 424
-rw-r--r--  drivers/scsi/be2iscsi/be_mgmt.h | 23
-rw-r--r--  drivers/scsi/bfa/bfad.c | 4
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc.h | 27
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 23
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_hwi.c | 31
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_io.c | 8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_tgt.c | 95
-rw-r--r--  drivers/scsi/bnx2i/bnx2i.h | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_hwi.c | 2
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_init.c | 45
-rw-r--r--  drivers/scsi/bnx2i/bnx2i_iscsi.c | 2
-rw-r--r--  drivers/scsi/bvme6000_scsi.c | 6
-rw-r--r--  drivers/scsi/csiostor/Kconfig | 19
-rw-r--r--  drivers/scsi/csiostor/Makefile | 11
-rw-r--r--  drivers/scsi/csiostor/csio_attr.c | 796
-rw-r--r--  drivers/scsi/csiostor/csio_defs.h | 121
-rw-r--r--  drivers/scsi/csiostor/csio_hw.c | 4398
-rw-r--r--  drivers/scsi/csiostor/csio_hw.h | 665
-rw-r--r--  drivers/scsi/csiostor/csio_init.c | 1262
-rw-r--r--  drivers/scsi/csiostor/csio_init.h | 158
-rw-r--r--  drivers/scsi/csiostor/csio_isr.c | 624
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.c | 2135
-rw-r--r--  drivers/scsi/csiostor/csio_lnode.h | 255
-rw-r--r--  drivers/scsi/csiostor/csio_mb.c | 1750
-rw-r--r--  drivers/scsi/csiostor/csio_mb.h | 278
-rw-r--r--  drivers/scsi/csiostor/csio_rnode.c | 913
-rw-r--r--  drivers/scsi/csiostor/csio_rnode.h | 141
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.c | 2555
-rw-r--r--  drivers/scsi/csiostor/csio_scsi.h | 342
-rw-r--r--  drivers/scsi/csiostor/csio_wr.c | 1632
-rw-r--r--  drivers/scsi/csiostor/csio_wr.h | 512
-rw-r--r--  drivers/scsi/csiostor/t4fw_api_stor.h | 539
-rw-r--r--  drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 1
-rw-r--r--  drivers/scsi/dc395x.c | 53
-rw-r--r--  drivers/scsi/dmx3191d.c | 8
-rw-r--r--  drivers/scsi/fcoe/fcoe_ctlr.c | 4
-rw-r--r--  drivers/scsi/fdomain.c | 2
-rw-r--r--  drivers/scsi/fnic/Makefile | 2
-rw-r--r--  drivers/scsi/fnic/fnic.h | 60
-rw-r--r--  drivers/scsi/fnic/fnic_debugfs.c | 314
-rw-r--r--  drivers/scsi/fnic/fnic_fcs.c | 6
-rw-r--r--  drivers/scsi/fnic/fnic_io.h | 6
-rw-r--r--  drivers/scsi/fnic/fnic_main.c | 24
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c | 721
-rw-r--r--  drivers/scsi/fnic/fnic_trace.c | 273
-rw-r--r--  drivers/scsi/fnic/fnic_trace.h | 90
-rw-r--r--  drivers/scsi/g_NCR5380.c | 2
-rw-r--r--  drivers/scsi/gdth.c | 27
-rw-r--r--  drivers/scsi/gvp11.c | 11
-rw-r--r--  drivers/scsi/hpsa.c | 209
-rw-r--r--  drivers/scsi/hptiop.c | 416
-rw-r--r--  drivers/scsi/hptiop.h | 72
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvfc.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvscsi.c | 2
-rw-r--r--  drivers/scsi/ibmvscsi/ibmvstgt.c | 2
-rw-r--r--  drivers/scsi/initio.c | 2
-rw-r--r--  drivers/scsi/ipr.c | 1357
-rw-r--r--  drivers/scsi/ipr.h | 102
-rw-r--r--  drivers/scsi/ips.c | 10
-rw-r--r--  drivers/scsi/isci/init.c | 8
-rw-r--r--  drivers/scsi/jazz_esp.c | 6
-rw-r--r--  drivers/scsi/lasi700.c | 2
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc.h | 19
-rw-r--r--  drivers/scsi/lpfc/lpfc_attr.c | 85
-rw-r--r--  drivers/scsi/lpfc/lpfc_bsg.c | 51
-rw-r--r--  drivers/scsi/lpfc/lpfc_crtn.h | 5
-rw-r--r--  drivers/scsi/lpfc/lpfc_ct.c | 39
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c | 33
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw.h | 2
-rw-r--r--  drivers/scsi/lpfc/lpfc_hw4.h | 179
-rw-r--r--  drivers/scsi/lpfc/lpfc_init.c | 322
-rw-r--r--  drivers/scsi/lpfc/lpfc_nportdisc.c | 10
-rw-r--r--  drivers/scsi/lpfc/lpfc_scsi.c | 43
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c | 450
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.h | 4
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli4.h | 11
-rw-r--r--  drivers/scsi/lpfc/lpfc_version.h | 2
-rw-r--r--  drivers/scsi/mac_esp.c | 6
-rw-r--r--  drivers/scsi/megaraid.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_mbox.c | 6
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas.h | 8
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_base.c | 24
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.c | 5
-rw-r--r--  drivers/scsi/megaraid/megaraid_sas_fusion.h | 1
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.c | 27
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_base.h | 7
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_scsih.c | 4
-rw-r--r--  drivers/scsi/mpt3sas/Kconfig | 67
-rw-r--r--  drivers/scsi/mpt3sas/Makefile | 8
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2.h | 1164
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h | 3323
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_init.h | 560
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_ioc.h | 1665
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_raid.h | 346
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_sas.h | 295
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_tool.h | 437
-rw-r--r--  drivers/scsi/mpt3sas/mpi/mpi2_type.h | 56
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.c | 4837
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h | 1139
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_config.c | 1649
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.c | 3296
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_ctl.h | 418
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_debug.h | 219
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c | 8163
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 2128
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c | 433
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h | 193
-rw-r--r--  drivers/scsi/mvme16x_scsi.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_64xx.c | 8
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.c | 7
-rw-r--r--  drivers/scsi/mvsas/mv_94xx.h | 14
-rw-r--r--  drivers/scsi/mvsas/mv_chips.h | 2
-rw-r--r--  drivers/scsi/mvsas/mv_init.c | 19
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c | 14
-rw-r--r--  drivers/scsi/mvsas/mv_sas.h | 7
-rw-r--r--  drivers/scsi/mvumi.c | 5
-rw-r--r--  drivers/scsi/nsp32.c | 16
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 4
-rw-r--r--  drivers/scsi/osd/osd_uld.c | 28
-rw-r--r--  drivers/scsi/pm8001/pm8001_hwi.c | 36
-rw-r--r--  drivers/scsi/pm8001/pm8001_init.c | 31
-rw-r--r--  drivers/scsi/pmcraid.c | 31
-rw-r--r--  drivers/scsi/ps3rom.c | 2
-rw-r--r--  drivers/scsi/qla1280.c | 8
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c | 38
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.c | 271
-rw-r--r--  drivers/scsi/qla2xxx/qla_bsg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.c | 23
-rw-r--r--  drivers/scsi/qla2xxx/qla_dbg.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h | 58
-rw-r--r--  drivers/scsi/qla2xxx/qla_dfs.c | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_fw.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_gbl.h | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_gs.c | 16
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c | 353
-rw-r--r--  drivers/scsi/qla2xxx/qla_inline.h | 28
-rw-r--r--  drivers/scsi/qla2xxx/qla_iocb.c | 54
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c | 124
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c | 213
-rw-r--r--  drivers/scsi/qla2xxx/qla_mid.c | 9
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c | 96
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c | 206
-rw-r--r--  drivers/scsi/qla2xxx/qla_settings.h | 2
-rw-r--r--  drivers/scsi/qla2xxx/qla_sup.c | 18
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c | 204
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h | 19
-rw-r--r--  drivers/scsi/qla2xxx/qla_version.h | 4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_83xx.c | 55
-rw-r--r--  drivers/scsi/qla4xxx/ql4_attr.c | 16
-rw-r--r--  drivers/scsi/qla4xxx/ql4_def.h | 10
-rw-r--r--  drivers/scsi/qla4xxx/ql4_fw.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_glbl.h | 6
-rw-r--r--  drivers/scsi/qla4xxx/ql4_init.c | 17
-rw-r--r--  drivers/scsi/qla4xxx/ql4_iocb.c | 2
-rw-r--r--  drivers/scsi/qla4xxx/ql4_isr.c | 68
-rw-r--r--  drivers/scsi/qla4xxx/ql4_mbx.c | 67
-rw-r--r--  drivers/scsi/qla4xxx/ql4_nx.c | 14
-rw-r--r--  drivers/scsi/qla4xxx/ql4_os.c | 166
-rw-r--r--  drivers/scsi/qla4xxx/ql4_version.h | 2
-rw-r--r--  drivers/scsi/qlogicfas.c | 2
-rw-r--r--  drivers/scsi/qlogicpti.c | 20
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
-rw-r--r--  drivers/scsi/scsi_pm.c | 98
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 11
-rw-r--r--  drivers/scsi/scsi_transport_iscsi.c | 12
-rw-r--r--  drivers/scsi/scsi_transport_sas.c | 1
-rw-r--r--  drivers/scsi/scsi_transport_srp.c | 51
-rw-r--r--  drivers/scsi/sd.c | 35
-rw-r--r--  drivers/scsi/sgiwd93.c | 4
-rw-r--r--  drivers/scsi/sim710.c | 11
-rw-r--r--  drivers/scsi/sni_53c710.c | 4
-rw-r--r--  drivers/scsi/stex.c | 5
-rw-r--r--  drivers/scsi/storvsc_drv.c | 137
-rw-r--r--  drivers/scsi/sun3x_esp.c | 6
-rw-r--r--  drivers/scsi/sun_esp.c | 30
-rw-r--r--  drivers/scsi/sym53c416.c | 2
-rw-r--r--  drivers/scsi/sym53c8xx_2/sym_glue.c | 23
-rw-r--r--  drivers/scsi/tmscsim.c | 23
-rw-r--r--  drivers/scsi/ufs/Kconfig | 74
-rw-r--r--  drivers/scsi/ufs/Makefile | 1
-rw-r--r--  drivers/scsi/ufs/ufs.h | 44
-rw-r--r--  drivers/scsi/ufs/ufshcd-pci.c | 211
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 424
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 202
-rw-r--r--  drivers/scsi/ufs/ufshci.h | 44
-rw-r--r--  drivers/scsi/virtio_scsi.c | 32
-rw-r--r--  drivers/scsi/vmw_pvscsi.c | 9
-rw-r--r--  drivers/scsi/zalon.c | 2
-rw-r--r--  drivers/scsi/zorro7xx.c | 12
228 files changed, 58084 insertions, 3342 deletions
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 3868ab2397c6..d1f0120cdb98 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -2029,7 +2029,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -2305,7 +2305,7 @@ out_disable_device:
#endif
/* PCI Devices supported by this driver */
-static struct pci_device_id twa_pci_tbl[] __devinitdata = {
+static struct pci_device_id twa_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 13e39e1fdfe2..52a2f0580d97 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -1604,7 +1604,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int twl_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -1893,7 +1893,7 @@ out_disable_device:
#endif
/* PCI Devices supported by this driver */
-static struct pci_device_id twl_pci_tbl[] __devinitdata = {
+static struct pci_device_id twl_pci_tbl[] = {
{ PCI_VDEVICE(3WARE, PCI_DEVICE_ID_3WARE_9750) },
{ }
};
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 7fe96ff60c58..62071d2fc1ce 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2281,7 +2281,7 @@ static struct scsi_host_template driver_template = {
};
/* This function will probe and initialize a card */
-static int __devinit tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
+static int tw_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct Scsi_Host *host = NULL;
TW_Device_Extension *tw_dev;
@@ -2422,7 +2422,7 @@ static void tw_remove(struct pci_dev *pdev)
} /* End tw_remove() */
/* PCI Devices supported by this driver */
-static struct pci_device_id tw_pci_tbl[] __devinitdata = {
+static struct pci_device_id tw_pci_tbl[] = {
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_1000,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_7000,
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index d4da3708763b..d7ca247efa35 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -3615,7 +3615,7 @@ static void __exit BusLogic_exit(void)
__setup("BusLogic=", BusLogic_Setup);
#ifdef MODULE
-static struct pci_device_id BusLogic_pci_tbl[] __devinitdata = {
+static struct pci_device_id BusLogic_pci_tbl[] = {
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ PCI_VENDOR_ID_BUSLOGIC, PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 74bf1aa7af46..142f632e2a2e 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -603,6 +603,7 @@ config SCSI_ARCMSR
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt2sas/Kconfig"
+source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/ufs/Kconfig"
config SCSI_HPTIOP
@@ -1812,6 +1813,7 @@ config SCSI_VIRTIO
This is the virtual HBA driver for virtio. If the kernel will
be used in a virtual machine, say Y or M.
+source "drivers/scsi/csiostor/Kconfig"
endif # SCSI_LOWLEVEL
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 888f73a4aae1..b607ba4f5630 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -90,6 +90,7 @@ obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_T128) += t128.o
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o
@@ -106,6 +107,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT2SAS) += mpt2sas/
+obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index 165e4dd865d9..450353e04dde 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -814,7 +814,7 @@ static char *lprint_opcode(int opcode, char *pos, char *buffer, int length)
* Locks: interrupts must be enabled when we are called
*/
-static int __devinit NCR5380_init(struct Scsi_Host *instance, int flags)
+static int NCR5380_init(struct Scsi_Host *instance, int flags)
{
NCR5380_local_declare();
int i, pass;
diff --git a/drivers/scsi/NCR_D700.c b/drivers/scsi/NCR_D700.c
index 8647256ad66d..b39a2409a507 100644
--- a/drivers/scsi/NCR_D700.c
+++ b/drivers/scsi/NCR_D700.c
@@ -114,7 +114,7 @@ MODULE_DESCRIPTION("NCR Dual700 SCSI Driver");
MODULE_LICENSE("GPL");
module_param(NCR_D700, charp, 0);
-static __u8 __devinitdata id_array[2*(MCA_MAX_SLOT_NR + 1)] =
+static __u8 id_array[2*(MCA_MAX_SLOT_NR + 1)] =
{ [0 ... 2*(MCA_MAX_SLOT_NR + 1)-1] = 7 };
#ifdef MODULE
@@ -173,7 +173,7 @@ struct NCR_D700_private {
char pad;
};
-static int __devinit
+static int
NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
int slot, u32 region, int differential)
{
@@ -243,7 +243,7 @@ NCR_D700_intr(int irq, void *data)
* essentially connected to the MCA bus independently, it is easier
* to set them up as two separate host adapters, rather than one
* adapter with two channels */
-static int __devinit
+static int
NCR_D700_probe(struct device *dev)
{
struct NCR_D700_private *p;
@@ -349,7 +349,7 @@ NCR_D700_probe(struct device *dev)
return 0;
}
-static void __devexit
+static void
NCR_D700_remove_one(struct Scsi_Host *host)
{
scsi_remove_host(host);
@@ -359,7 +359,7 @@ NCR_D700_remove_one(struct Scsi_Host *host)
release_region(host->base, 64);
}
-static int __devexit
+static int
NCR_D700_remove(struct device *dev)
{
struct NCR_D700_private *p = dev_get_drvdata(dev);
@@ -380,7 +380,7 @@ static struct mca_driver NCR_D700_driver = {
.name = "NCR_D700",
.bus = &mca_bus_type,
.probe = NCR_D700_probe,
- .remove = __devexit_p(NCR_D700_remove),
+ .remove = NCR_D700_remove,
},
};
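
Dropping the __devexit_p() wrapper from .remove is equally mechanical: the macro existed only so that a reference to a discarded __devexit function would not dangle on !CONFIG_HOTPLUG builds. Roughly how the annotations were defined before their removal (a sketch from memory, not part of this patch):

#ifdef CONFIG_HOTPLUG
#define __devexit			/* function kept in normal .text */
#define __devexit_p(x)	x		/* reference it directly */
#else
#define __devexit	__section(.devexit.text)	/* discarded at link time */
#define __devexit_p(x)	NULL		/* so .remove became a NULL pointer */
#endif

With hotplug support now unconditional, only the first branch survives, and the wrapper is pure noise.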
diff --git a/drivers/scsi/NCR_Q720.c b/drivers/scsi/NCR_Q720.c
index afdbb9addf18..05835bf1bf9c 100644
--- a/drivers/scsi/NCR_Q720.c
+++ b/drivers/scsi/NCR_Q720.c
@@ -351,7 +351,7 @@ static struct mca_driver NCR_Q720_driver = {
.name = "NCR_Q720",
.bus = &mca_bus_type,
.probe = NCR_Q720_probe,
- .remove = __devexit_p(NCR_Q720_remove),
+ .remove = NCR_Q720_remove,
},
};
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index a391090a17c5..0163457c12bb 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -1082,8 +1082,8 @@ static struct scsi_host_template inia100_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit inia100_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int inia100_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct Scsi_Host *shost;
struct orc_host *host;
@@ -1197,7 +1197,7 @@ out:
return error;
}
-static void __devexit inia100_remove_one(struct pci_dev *pdev)
+static void inia100_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct orc_host *host = (struct orc_host *)shost->hostdata;
@@ -1224,7 +1224,7 @@ static struct pci_driver inia100_pci_driver = {
.name = "inia100",
.id_table = inia100_pci_tbl,
.probe = inia100_probe_one,
- .remove = __devexit_p(inia100_remove_one),
+ .remove = inia100_remove_one,
};
static int __init inia100_init(void)
diff --git a/drivers/scsi/a2091.c b/drivers/scsi/a2091.c
index 79a30633d4aa..3e09aa21c1ca 100644
--- a/drivers/scsi/a2091.c
+++ b/drivers/scsi/a2091.c
@@ -179,8 +179,7 @@ static struct scsi_host_template a2091_scsi_template = {
.use_clustering = DISABLE_CLUSTERING
};
-static int __devinit a2091_probe(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int a2091_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
{
struct Scsi_Host *instance;
int error;
@@ -239,7 +238,7 @@ fail_alloc:
return error;
}
-static void __devexit a2091_remove(struct zorro_dev *z)
+static void a2091_remove(struct zorro_dev *z)
{
struct Scsi_Host *instance = zorro_get_drvdata(z);
struct a2091_hostdata *hdata = shost_priv(instance);
@@ -251,7 +250,7 @@ static void __devexit a2091_remove(struct zorro_dev *z)
release_mem_region(z->resource.start, 256);
}
-static struct zorro_device_id a2091_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id a2091_zorro_tbl[] = {
{ ZORRO_PROD_CBM_A590_A2091_1 },
{ ZORRO_PROD_CBM_A590_A2091_2 },
{ 0 }
@@ -262,7 +261,7 @@ static struct zorro_driver a2091_driver = {
.name = "a2091",
.id_table = a2091_zorro_tbl,
.probe = a2091_probe,
- .remove = __devexit_p(a2091_remove),
+ .remove = a2091_remove,
};
static int __init a2091_init(void)
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index d79457ac8bef..681434e2dfe9 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -132,11 +132,13 @@ struct inquiry_data {
* M O D U L E G L O B A L S
*/
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* sgmap);
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg);
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg);
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max);
-static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int nseg_new);
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *sgmap);
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg);
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg);
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max);
+static int aac_convert_sgraw2(struct aac_raw_io2 *rio2,
+ int pages, int nseg, int nseg_new);
static int aac_send_srb_fib(struct scsi_cmnd* scsicmd);
#ifdef AAC_DETAILED_STATUS_INFO
static char *aac_get_status_string(u32 status);
@@ -971,6 +973,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -982,7 +985,10 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd2->byteCount = cpu_to_le32(count<<9);
readcmd2->cid = cpu_to_le16(scmd_id(cmd));
readcmd2->flags = cpu_to_le16(RIO2_IO_TYPE_READ);
- aac_build_sgraw2(cmd, readcmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, readcmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -996,7 +1002,9 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
readcmd->flags = cpu_to_le16(RIO_TYPE_READ);
readcmd->bpTotal = 0;
readcmd->bpComplete = 0;
- aac_build_sgraw(cmd, &readcmd->sg);
+ ret = aac_build_sgraw(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(readcmd->sg.count)-1) * sizeof(struct sgentryraw));
@@ -1019,6 +1027,8 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
u16 fibsize;
struct aac_read64 *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read64 *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtHostRead64);
@@ -1028,7 +1038,9 @@ static int aac_read_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
readcmd->pad = 0;
readcmd->flags = 0;
- aac_build_sg64(cmd, &readcmd->sg);
+ ret = aac_build_sg64(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read64) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1050,6 +1062,8 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
{
u16 fibsize;
struct aac_read *readcmd;
+ long ret;
+
aac_fib_init(fib);
readcmd = (struct aac_read *) fib_data(fib);
readcmd->command = cpu_to_le32(VM_CtBlockRead);
@@ -1057,7 +1071,9 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
readcmd->block = cpu_to_le32((u32)(lba&0xffffffff));
readcmd->count = cpu_to_le32(count * 512);
- aac_build_sg(cmd, &readcmd->sg);
+ ret = aac_build_sg(cmd, &readcmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_read) +
((le32_to_cpu(readcmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1079,6 +1095,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
{
struct aac_dev *dev = fib->dev;
u16 fibsize, command;
+ long ret;
aac_fib_init(fib);
if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 && !dev->sync_mode) {
@@ -1093,7 +1110,10 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
(((aac_cache & 5) != 5) || !fib->dev->cache_protected)) ?
cpu_to_le16(RIO2_IO_TYPE_WRITE|RIO2_IO_SUREWRITE) :
cpu_to_le16(RIO2_IO_TYPE_WRITE);
- aac_build_sgraw2(cmd, writecmd2, dev->scsi_host_ptr->sg_tablesize);
+ ret = aac_build_sgraw2(cmd, writecmd2,
+ dev->scsi_host_ptr->sg_tablesize);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
@@ -1110,7 +1130,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
cpu_to_le16(RIO_TYPE_WRITE);
writecmd->bpTotal = 0;
writecmd->bpComplete = 0;
- aac_build_sgraw(cmd, &writecmd->sg);
+ ret = aac_build_sgraw(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
command = ContainerRawIo;
fibsize = sizeof(struct aac_raw_io) +
((le32_to_cpu(writecmd->sg.count)-1) * sizeof (struct sgentryraw));
@@ -1133,6 +1155,8 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
{
u16 fibsize;
struct aac_write64 *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write64 *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtHostWrite64);
@@ -1142,7 +1166,9 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
writecmd->pad = 0;
writecmd->flags = 0;
- aac_build_sg64(cmd, &writecmd->sg);
+ ret = aac_build_sg64(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write64) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry64));
@@ -1164,6 +1190,8 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
{
u16 fibsize;
struct aac_write *writecmd;
+ long ret;
+
aac_fib_init(fib);
writecmd = (struct aac_write *) fib_data(fib);
writecmd->command = cpu_to_le32(VM_CtBlockWrite);
@@ -1173,7 +1201,9 @@ static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
writecmd->sg.count = cpu_to_le32(1);
/* ->stable is not used - it did mean which type of write */
- aac_build_sg(cmd, &writecmd->sg);
+ ret = aac_build_sg(cmd, &writecmd->sg);
+ if (ret < 0)
+ return ret;
fibsize = sizeof(struct aac_write) +
((le32_to_cpu(writecmd->sg.count) - 1) *
sizeof (struct sgentry));
@@ -1235,8 +1265,11 @@ static int aac_scsi_64(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg64(cmd, (struct sgmap64*) &srbcmd->sg);
+ ret = aac_build_sg64(cmd, (struct sgmap64 *) &srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -1263,8 +1296,11 @@ static int aac_scsi_32(struct fib * fib, struct scsi_cmnd * cmd)
{
u16 fibsize;
struct aac_srb * srbcmd = aac_scsi_common(fib, cmd);
+ long ret;
- aac_build_sg(cmd, (struct sgmap*)&srbcmd->sg);
+ ret = aac_build_sg(cmd, (struct sgmap *)&srbcmd->sg);
+ if (ret < 0)
+ return ret;
srbcmd->count = cpu_to_le32(scsi_bufflen(cmd));
memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
@@ -2870,7 +2906,7 @@ static int aac_send_srb_fib(struct scsi_cmnd* scsicmd)
return -1;
}
-static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
+static long aac_build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2883,7 +2919,8 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2912,7 +2949,7 @@ static unsigned long aac_build_sg(struct scsi_cmnd* scsicmd, struct sgmap* psg)
}
-static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* psg)
+static long aac_build_sg64(struct scsi_cmnd *scsicmd, struct sgmap64 *psg)
{
struct aac_dev *dev;
unsigned long byte_count = 0;
@@ -2927,7 +2964,8 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
psg->sg[0].count = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -2957,7 +2995,7 @@ static unsigned long aac_build_sg64(struct scsi_cmnd* scsicmd, struct sgmap64* p
return byte_count;
}
-static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw* psg)
+static long aac_build_sgraw(struct scsi_cmnd *scsicmd, struct sgmapraw *psg)
{
unsigned long byte_count = 0;
int nseg;
@@ -2972,7 +3010,8 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
psg->sg[0].flags = 0;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i;
@@ -3005,13 +3044,15 @@ static unsigned long aac_build_sgraw(struct scsi_cmnd* scsicmd, struct sgmapraw*
return byte_count;
}
-static unsigned long aac_build_sgraw2(struct scsi_cmnd *scsicmd, struct aac_raw_io2 *rio2, int sg_max)
+static long aac_build_sgraw2(struct scsi_cmnd *scsicmd,
+ struct aac_raw_io2 *rio2, int sg_max)
{
unsigned long byte_count = 0;
int nseg;
nseg = scsi_dma_map(scsicmd);
- BUG_ON(nseg < 0);
+ if (nseg < 0)
+ return nseg;
if (nseg) {
struct scatterlist *sg;
int i, conformable = 0;
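
The aachba.c hunks go beyond annotation cleanup: each scatter/gather builder used to BUG_ON(nseg < 0) when scsi_dma_map() failed, crashing the machine on a DMA mapping failure. The builders' return type widens from unsigned long to long so a negative errno can be told apart from a byte count, and every caller now bails out early. The pattern, reduced to a minimal sketch with simplified names:

static long build_sg(struct scsi_cmnd *scsicmd, struct sgmap *psg)
{
	unsigned long byte_count = 0;
	int nseg = scsi_dma_map(scsicmd);

	if (nseg < 0)		/* was: BUG_ON(nseg < 0) */
		return nseg;	/* propagate the negative errno */

	/* ... translate the mapped scatterlist into psg entries ... */

	return byte_count;	/* >= 0 on success */
}

Callers such as aac_read_block() then return the error, failing a single command instead of taking down the host.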
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index 9e933a88a8bc..a6f7190c09a4 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -12,13 +12,13 @@
*----------------------------------------------------------------------------*/
#ifndef AAC_DRIVER_BUILD
-# define AAC_DRIVER_BUILD 29800
+# define AAC_DRIVER_BUILD 30000
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32
#define AAC_NUM_MGT_FIB 8
-#define AAC_NUM_IO_FIB (512 - AAC_NUM_MGT_FIB)
+#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)
#define AAC_MAX_LUN (8)
@@ -36,6 +36,10 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)
+#define PMC_DEVICE_S7 0x28c
+#define PMC_DEVICE_S8 0x28d
+#define PMC_DEVICE_S9 0x28f
+
#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
diff --git a/drivers/scsi/aacraid/comminit.c b/drivers/scsi/aacraid/comminit.c
index 8e5d3be16127..3f759957f4b4 100644
--- a/drivers/scsi/aacraid/comminit.c
+++ b/drivers/scsi/aacraid/comminit.c
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
- host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
+ if (dev->pdev->device == PMC_DEVICE_S7 ||
+ dev->pdev->device == PMC_DEVICE_S8 ||
+ dev->pdev->device == PMC_DEVICE_S9)
+ host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
+ (status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
+ else
+ host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
dev->max_num_aif = status[4] & 0xFFFF;
/*
* NOTE:
@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
}
}
+ if (host->can_queue > AAC_NUM_IO_FIB)
+ host->can_queue = AAC_NUM_IO_FIB;
+
/*
* Ok now init the communication subsystem
*/
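
Two related aacraid changes meet at this point: aacraid.h raises AAC_NUM_IO_FIB from 504 (512 - 8) to 1016 (1024 - 8), and on the new PMC S7/S8/S9 controllers (PCI device IDs 0x28c, 0x28d, 0x28f, also added in aacraid.h) the INIT response reports a larger preferred FIB count in the upper 16 bits of status[3], which is used whenever it is non-zero. The clamp added afterwards keeps can_queue within the driver's own FIB pool. Condensed into a hypothetical helper (aac_can_queue is illustrative, not a function in the driver):

static int aac_can_queue(u16 pci_device, u32 status3)
{
	u32 fibs = status3 & 0xFFFF;		/* legacy: low word only */

	if (pci_device == PMC_DEVICE_S7 ||	/* 0x28c */
	    pci_device == PMC_DEVICE_S8 ||	/* 0x28d */
	    pci_device == PMC_DEVICE_S9)	/* 0x28f */
		fibs = (status3 >> 16) ? (status3 >> 16) : fibs;

	/* the later clamp: never exceed the AAC_NUM_IO_FIB pool */
	return min_t(int, fibs - AAC_NUM_MGT_FIB, AAC_NUM_IO_FIB);
}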
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index cb7f1582a6d1..408a42ef787a 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -88,13 +88,7 @@ char aac_driver_version[] = AAC_DRIVER_FULL_VERSION;
*
* Note: The last field is used to index into aac_drivers below.
*/
-#ifdef DECLARE_PCI_DEVICE_TABLE
-static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
-#elif defined(__devinitconst)
-static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
-#else
-static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
-#endif
+static const struct pci_device_id aac_pci_tbl[] = {
{ 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
{ 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
{ 0x1028, 0x0003, 0x1028, 0x0003, 0, 0, 2 }, /* PERC 3/Si (SlimFast/PERC3Si */
@@ -1107,8 +1101,7 @@ static void __aac_shutdown(struct aac_dev * aac)
pci_disable_msi(aac->pdev);
}
-static int __devinit aac_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
unsigned index = id->driver_data;
struct Scsi_Host *shost;
@@ -1310,7 +1303,7 @@ static void aac_shutdown(struct pci_dev *dev)
__aac_shutdown((struct aac_dev *)shost->hostdata);
}
-static void __devexit aac_remove_one(struct pci_dev *pdev)
+static void aac_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct aac_dev *aac = (struct aac_dev *)shost->hostdata;
@@ -1341,7 +1334,7 @@ static struct pci_driver aac_pci_driver = {
.name = AAC_DRIVERNAME,
.id_table = aac_pci_tbl,
.probe = aac_probe_one,
- .remove = __devexit_p(aac_remove_one),
+ .remove = aac_remove_one,
.shutdown = aac_shutdown,
};
diff --git a/drivers/scsi/aacraid/src.c b/drivers/scsi/aacraid/src.c
index 3b021ec63255..e2e349204e7d 100644
--- a/drivers/scsi/aacraid/src.c
+++ b/drivers/scsi/aacraid/src.c
@@ -407,7 +407,7 @@ static int aac_src_deliver_message(struct fib *fib)
fib->hw_fib_va->header.StructType = FIB_MAGIC2;
fib->hw_fib_va->header.SenderFibAddress = (u32)address;
fib->hw_fib_va->header.u.TimeStamp = 0;
- BUG_ON((u32)(address >> 32) != 0L);
+ BUG_ON(upper_32_bits(address) != 0L);
address |= fibsize;
} else {
/* Calculate the amount to the fibsize bits */
@@ -431,7 +431,7 @@ static int aac_src_deliver_message(struct fib *fib)
address |= fibsize;
}
- src_writel(dev, MUnit.IQ_H, (address >> 32) & 0xffffffff);
+ src_writel(dev, MUnit.IQ_H, upper_32_bits(address) & 0xffffffff);
src_writel(dev, MUnit.IQ_L, address & 0xffffffff);
return 0;
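
The src.c hunks replace the open-coded (address >> 32) casts with upper_32_bits(), which documents intent and avoids shift-width pitfalls if the operand's type ever changes. For reference, the helpers in <linux/kernel.h> read as follows; the double 16-bit shift sidesteps a compiler warning when n is only 32 bits wide:

#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
#define lower_32_bits(n) ((u32)(n))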
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 374c4edf4fcb..dcfaee66a8b9 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -9526,7 +9526,7 @@ advansys_queuecommand_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *
static DEF_SCSI_QCMD(advansys_queuecommand)
-static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
+static ushort AscGetEisaChipCfg(PortAddr iop_base)
{
PortAddr eisa_cfg_iop = (PortAddr) ASC_GET_EISA_SLOT(iop_base) |
(PortAddr) (ASC_EISA_CFG_IOP_MASK);
@@ -9537,8 +9537,8 @@ static ushort __devinit AscGetEisaChipCfg(PortAddr iop_base)
* Return the BIOS address of the adapter at the specified
* I/O port and with the specified bus type.
*/
-static unsigned short __devinit
-AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type)
+static unsigned short AscGetChipBiosAddress(PortAddr iop_base,
+ unsigned short bus_type)
{
unsigned short cfg_lsw;
unsigned short bios_addr;
@@ -9569,7 +9569,7 @@ AscGetChipBiosAddress(PortAddr iop_base, unsigned short bus_type)
return bios_addr;
}
-static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
+static uchar AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
{
ushort cfg_lsw;
@@ -9583,7 +9583,7 @@ static uchar __devinit AscSetChipScsiID(PortAddr iop_base, uchar new_host_id)
return (AscGetChipScsiID(iop_base));
}
-static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
+static unsigned char AscGetChipScsiCtrl(PortAddr iop_base)
{
unsigned char sc;
@@ -9593,8 +9593,8 @@ static unsigned char __devinit AscGetChipScsiCtrl(PortAddr iop_base)
return sc;
}
-static unsigned char __devinit
-AscGetChipVersion(PortAddr iop_base, unsigned short bus_type)
+static unsigned char AscGetChipVersion(PortAddr iop_base,
+ unsigned short bus_type)
{
if (bus_type & ASC_IS_EISA) {
PortAddr eisa_iop;
@@ -9608,7 +9608,7 @@ AscGetChipVersion(PortAddr iop_base, unsigned short bus_type)
}
#ifdef CONFIG_ISA
-static void __devinit AscEnableIsaDma(uchar dma_channel)
+static void AscEnableIsaDma(uchar dma_channel)
{
if (dma_channel < 4) {
outp(0x000B, (ushort)(0xC0 | dma_channel));
@@ -9638,7 +9638,7 @@ static int AscStopQueueExe(PortAddr iop_base)
return (0);
}
-static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
+static ASC_DCNT AscGetMaxDmaCount(ushort bus_type)
{
if (bus_type & ASC_IS_ISA)
return ASC_MAX_ISA_DMA_COUNT;
@@ -9648,7 +9648,7 @@ static ASC_DCNT __devinit AscGetMaxDmaCount(ushort bus_type)
}
#ifdef CONFIG_ISA
-static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
+static ushort AscGetIsaDmaChannel(PortAddr iop_base)
{
ushort channel;
@@ -9660,7 +9660,7 @@ static ushort __devinit AscGetIsaDmaChannel(PortAddr iop_base)
return (channel + 4);
}
-static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
+static ushort AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channel)
{
ushort cfg_lsw;
uchar value;
@@ -9678,7 +9678,7 @@ static ushort __devinit AscSetIsaDmaChannel(PortAddr iop_base, ushort dma_channe
return 0;
}
-static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
+static uchar AscGetIsaDmaSpeed(PortAddr iop_base)
{
uchar speed_value;
@@ -9689,7 +9689,7 @@ static uchar __devinit AscGetIsaDmaSpeed(PortAddr iop_base)
return speed_value;
}
-static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
+static uchar AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
{
speed_value &= 0x07;
AscSetBank(iop_base, 1);
@@ -9699,7 +9699,7 @@ static uchar __devinit AscSetIsaDmaSpeed(PortAddr iop_base, uchar speed_value)
}
#endif /* CONFIG_ISA */
-static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
+static ushort AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
{
int i;
PortAddr iop_base;
@@ -9786,7 +9786,7 @@ static ushort __devinit AscInitAscDvcVar(ASC_DVC_VAR *asc_dvc)
return warn_code;
}
-static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
+static int AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
{
int retry;
@@ -9801,12 +9801,12 @@ static int __devinit AscWriteEEPCmdReg(PortAddr iop_base, uchar cmd_reg)
return 0;
}
-static void __devinit AscWaitEEPRead(void)
+static void AscWaitEEPRead(void)
{
mdelay(1);
}
-static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
+static ushort AscReadEEPWord(PortAddr iop_base, uchar addr)
{
ushort read_wval;
uchar cmd_reg;
@@ -9821,8 +9821,8 @@ static ushort __devinit AscReadEEPWord(PortAddr iop_base, uchar addr)
return read_wval;
}
-static ushort __devinit
-AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static ushort AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
ushort wval;
ushort sum;
@@ -9868,7 +9868,7 @@ AscGetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return sum;
}
-static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
+static int AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
{
PortAddr iop_base;
ushort q_addr;
@@ -9890,12 +9890,12 @@ static int __devinit AscTestExternalLram(ASC_DVC_VAR *asc_dvc)
return (sta);
}
-static void __devinit AscWaitEEPWrite(void)
+static void AscWaitEEPWrite(void)
{
mdelay(20);
}
-static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
+static int AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
{
ushort read_back;
int retry;
@@ -9914,8 +9914,7 @@ static int __devinit AscWriteEEPDataReg(PortAddr iop_base, ushort data_reg)
}
}
-static ushort __devinit
-AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
+static ushort AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
{
ushort read_wval;
@@ -9935,8 +9934,8 @@ AscWriteEEPWord(PortAddr iop_base, uchar addr, ushort word_val)
return (read_wval);
}
-static int __devinit
-AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static int AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
int n_error;
ushort *wbuf;
@@ -10031,8 +10030,8 @@ AscSetEEPConfigOnce(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return n_error;
}
-static int __devinit
-AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
+static int AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf,
+ ushort bus_type)
{
int retry;
int n_error;
@@ -10050,7 +10049,7 @@ AscSetEEPConfig(PortAddr iop_base, ASCEEP_CONFIG *cfg_buf, ushort bus_type)
return n_error;
}
-static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
+static ushort AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
{
ASCEEP_CONFIG eep_config_buf;
ASCEEP_CONFIG *eep_config;
@@ -10215,7 +10214,7 @@ static ushort __devinit AscInitFromEEP(ASC_DVC_VAR *asc_dvc)
return (warn_code);
}
-static int __devinit AscInitGetConfig(struct Scsi_Host *shost)
+static int AscInitGetConfig(struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
@@ -10269,7 +10268,7 @@ static int __devinit AscInitGetConfig(struct Scsi_Host *shost)
return asc_dvc->err_code;
}
-static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+static int AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ASC_DVC_VAR *asc_dvc = &board->dvc_var.asc_dvc_var;
@@ -10383,7 +10382,7 @@ static int __devinit AscInitSetConfig(struct pci_dev *pdev, struct Scsi_Host *sh
* on big-endian platforms so char fields read as words are actually being
* unswapped on big-endian platforms.
*/
-static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = {
+static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* cfg_lsw */
0x0000, /* cfg_msw */
0xFFFF, /* disc_enable */
@@ -10421,7 +10420,7 @@ static ADVEEP_3550_CONFIG Default_3550_EEPROM_Config __devinitdata = {
0 /* num_of_err */
};
-static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = {
+static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar = {
0, /* cfg_lsw */
0, /* cfg_msw */
0, /* -disc_enable */
@@ -10459,7 +10458,7 @@ static ADVEEP_3550_CONFIG ADVEEP_3550_Config_Field_IsChar __devinitdata = {
0 /* num_of_err */
};
-static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = {
+static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
0x0000, /* 01 cfg_msw */
0xFFFF, /* 02 disc_enable */
@@ -10524,7 +10523,7 @@ static ADVEEP_38C0800_CONFIG Default_38C0800_EEPROM_Config __devinitdata = {
0 /* 63 reserved */
};
-static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata = {
+static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar = {
0, /* 00 cfg_lsw */
0, /* 01 cfg_msw */
0, /* 02 disc_enable */
@@ -10589,7 +10588,7 @@ static ADVEEP_38C0800_CONFIG ADVEEP_38C0800_Config_Field_IsChar __devinitdata =
0 /* 63 reserved */
};
-static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = {
+static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config = {
ADV_EEPROM_BIOS_ENABLE, /* 00 cfg_lsw */
0x0000, /* 01 cfg_msw */
0xFFFF, /* 02 disc_enable */
@@ -10654,7 +10653,7 @@ static ADVEEP_38C1600_CONFIG Default_38C1600_EEPROM_Config __devinitdata = {
0 /* 63 reserved */
};
-static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata = {
+static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar = {
0, /* 00 cfg_lsw */
0, /* 01 cfg_msw */
0, /* 02 disc_enable */
@@ -10723,7 +10722,7 @@ static ADVEEP_38C1600_CONFIG ADVEEP_38C1600_Config_Field_IsChar __devinitdata =
/*
* Wait for EEPROM command to complete
*/
-static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
+static void AdvWaitEEPCmd(AdvPortAddr iop_base)
{
int eep_delay_ms;
@@ -10742,7 +10741,7 @@ static void __devinit AdvWaitEEPCmd(AdvPortAddr iop_base)
/*
* Read the EEPROM from specified location
*/
-static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
+static ushort AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
{
AdvWriteWordRegister(iop_base, IOPW_EE_CMD,
ASC_EEP_CMD_READ | eep_word_addr);
@@ -10753,8 +10752,8 @@ static ushort __devinit AdvReadEEPWord(AdvPortAddr iop_base, int eep_word_addr)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
+static void AdvSet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort addr, chksum;
@@ -10820,8 +10819,8 @@ AdvSet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
+static void AdvSet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort *charfields;
@@ -10887,8 +10886,8 @@ AdvSet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
/*
* Write the EEPROM from 'cfg_buf'.
*/
-static void __devinit
-AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
+static void AdvSet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort *wbuf;
ushort *charfields;
@@ -10956,8 +10955,8 @@ AdvSet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
+static ushort AdvGet3550EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_3550_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -10999,8 +10998,8 @@ AdvGet3550EEPConfig(AdvPortAddr iop_base, ADVEEP_3550_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
+static ushort AdvGet38C0800EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C0800_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -11042,8 +11041,8 @@ AdvGet38C0800EEPConfig(AdvPortAddr iop_base, ADVEEP_38C0800_CONFIG *cfg_buf)
*
* Return a checksum based on the EEPROM configuration read.
*/
-static ushort __devinit
-AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
+static ushort AdvGet38C1600EEPConfig(AdvPortAddr iop_base,
+ ADVEEP_38C1600_CONFIG *cfg_buf)
{
ushort wval, chksum;
ushort *wbuf;
@@ -11092,7 +11091,7 @@ AdvGet38C1600EEPConfig(AdvPortAddr iop_base, ADVEEP_38C1600_CONFIG *cfg_buf)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11242,7 +11241,7 @@ static int __devinit AdvInitFrom3550EEP(ADV_DVC_VAR *asc_dvc)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11441,7 +11440,7 @@ static int __devinit AdvInitFrom38C0800EEP(ADV_DVC_VAR *asc_dvc)
*
* Note: Chip is stopped on entry.
*/
-static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
+static int AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
{
AdvPortAddr iop_base;
ushort warn_code;
@@ -11661,8 +11660,7 @@ static int __devinit AdvInitFrom38C1600EEP(ADV_DVC_VAR *asc_dvc)
* For a non-fatal error return a warning code. If there are no warnings
* then 0 is returned.
*/
-static int __devinit
-AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
+static int AdvInitGetConfig(struct pci_dev *pdev, struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
ADV_DVC_VAR *asc_dvc = &board->dvc_var.adv_dvc_var;
@@ -11769,7 +11767,7 @@ static struct scsi_host_template advansys_template = {
.use_clustering = ENABLE_CLUSTERING,
};
-static int __devinit advansys_wide_init_chip(struct Scsi_Host *shost)
+static int advansys_wide_init_chip(struct Scsi_Host *shost)
{
struct asc_board *board = shost_priv(shost);
struct adv_dvc_var *adv_dvc = &board->dvc_var.adv_dvc_var;
@@ -11882,8 +11880,8 @@ static void advansys_wide_free_mem(struct asc_board *board)
}
}
-static int __devinit advansys_board_found(struct Scsi_Host *shost,
- unsigned int iop, int bus_type)
+static int advansys_board_found(struct Scsi_Host *shost, unsigned int iop,
+ int bus_type)
{
struct pci_dev *pdev;
struct asc_board *boardp = shost_priv(shost);
@@ -12428,7 +12426,7 @@ static PortAddr _asc_def_iop_base[ASC_IOADR_TABLE_MAX_IX] = {
* 10: 12
* 11: 15
*/
-static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base)
+static unsigned int advansys_isa_irq_no(PortAddr iop_base)
{
unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
unsigned int chip_irq = ((cfg_lsw >> 2) & 0x03) + 10;
@@ -12437,7 +12435,7 @@ static unsigned int __devinit advansys_isa_irq_no(PortAddr iop_base)
return chip_irq;
}
-static int __devinit advansys_isa_probe(struct device *dev, unsigned int id)
+static int advansys_isa_probe(struct device *dev, unsigned int id)
{
int err = -ENODEV;
PortAddr iop_base = _asc_def_iop_base[id];
@@ -12477,7 +12475,7 @@ static int __devinit advansys_isa_probe(struct device *dev, unsigned int id)
return err;
}
-static int __devexit advansys_isa_remove(struct device *dev, unsigned int id)
+static int advansys_isa_remove(struct device *dev, unsigned int id)
{
int ioport = _asc_def_iop_base[id];
advansys_release(dev_get_drvdata(dev));
@@ -12487,7 +12485,7 @@ static int __devexit advansys_isa_remove(struct device *dev, unsigned int id)
static struct isa_driver advansys_isa_driver = {
.probe = advansys_isa_probe,
- .remove = __devexit_p(advansys_isa_remove),
+ .remove = advansys_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = DRV_NAME,
@@ -12505,7 +12503,7 @@ static struct isa_driver advansys_isa_driver = {
* 110: 15
* 111: invalid
*/
-static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base)
+static unsigned int advansys_vlb_irq_no(PortAddr iop_base)
{
unsigned short cfg_lsw = AscGetChipCfgLsw(iop_base);
unsigned int chip_irq = ((cfg_lsw >> 2) & 0x07) + 9;
@@ -12514,7 +12512,7 @@ static unsigned int __devinit advansys_vlb_irq_no(PortAddr iop_base)
return chip_irq;
}
-static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id)
+static int advansys_vlb_probe(struct device *dev, unsigned int id)
{
int err = -ENODEV;
PortAddr iop_base = _asc_def_iop_base[id];
@@ -12561,14 +12559,14 @@ static int __devinit advansys_vlb_probe(struct device *dev, unsigned int id)
static struct isa_driver advansys_vlb_driver = {
.probe = advansys_vlb_probe,
- .remove = __devexit_p(advansys_isa_remove),
+ .remove = advansys_isa_remove,
.driver = {
.owner = THIS_MODULE,
.name = "advansys_vlb",
},
};
-static struct eisa_device_id advansys_eisa_table[] __devinitdata = {
+static struct eisa_device_id advansys_eisa_table[] = {
{ "ABP7401" },
{ "ABP7501" },
{ "" }
@@ -12595,7 +12593,7 @@ struct eisa_scsi_data {
* 110: invalid
* 111: invalid
*/
-static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev)
+static unsigned int advansys_eisa_irq_no(struct eisa_device *edev)
{
unsigned short cfg_lsw = inw(edev->base_addr + 0xc86);
unsigned int chip_irq = ((cfg_lsw >> 8) & 0x07) + 10;
@@ -12604,7 +12602,7 @@ static unsigned int __devinit advansys_eisa_irq_no(struct eisa_device *edev)
return chip_irq;
}
-static int __devinit advansys_eisa_probe(struct device *dev)
+static int advansys_eisa_probe(struct device *dev)
{
int i, ioport, irq = 0;
int err;
@@ -12677,7 +12675,7 @@ static int __devinit advansys_eisa_probe(struct device *dev)
return err;
}
-static __devexit int advansys_eisa_remove(struct device *dev)
+static int advansys_eisa_remove(struct device *dev)
{
int i;
struct eisa_scsi_data *data = dev_get_drvdata(dev);
@@ -12701,12 +12699,12 @@ static struct eisa_driver advansys_eisa_driver = {
.driver = {
.name = DRV_NAME,
.probe = advansys_eisa_probe,
- .remove = __devexit_p(advansys_eisa_remove),
+ .remove = advansys_eisa_remove,
}
};
/* PCI Devices supported by this driver */
-static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
+static struct pci_device_id advansys_pci_tbl[] = {
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_1200A,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_ASP, PCI_DEVICE_ID_ASP_ABP940,
@@ -12724,7 +12722,7 @@ static struct pci_device_id advansys_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, advansys_pci_tbl);
-static void __devinit advansys_set_latency(struct pci_dev *pdev)
+static void advansys_set_latency(struct pci_dev *pdev)
{
if ((pdev->device == PCI_DEVICE_ID_ASP_1200A) ||
(pdev->device == PCI_DEVICE_ID_ASP_ABP940)) {
@@ -12737,8 +12735,8 @@ static void __devinit advansys_set_latency(struct pci_dev *pdev)
}
}
-static int __devinit
-advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int advansys_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int err, ioport;
struct Scsi_Host *shost;
@@ -12791,7 +12789,7 @@ advansys_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return err;
}
-static void __devexit advansys_pci_remove(struct pci_dev *pdev)
+static void advansys_pci_remove(struct pci_dev *pdev)
{
advansys_release(pci_get_drvdata(pdev));
pci_release_regions(pdev);
@@ -12802,7 +12800,7 @@ static struct pci_driver advansys_pci_driver = {
.name = DRV_NAME,
.id_table = advansys_pci_tbl,
.probe = advansys_pci_probe,
- .remove = __devexit_p(advansys_pci_remove),
+ .remove = advansys_pci_remove,
};
static int __init advansys_init(void)
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index dd4547bf6881..a284be17699f 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -420,7 +420,7 @@ MODULE_PARM_DESC(aha152x1, "parameters for second controller");
#endif /* MODULE */
#ifdef __ISAPNP__
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1502), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1505), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID, ISAPNP_VENDOR('A', 'D', 'P'), ISAPNP_FUNCTION(0x1510), 0 },
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index a3e6ed353917..df775e6ba579 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -646,7 +646,7 @@ static int aha1740_probe (struct device *dev)
return -ENODEV;
}
-static __devexit int aha1740_remove (struct device *dev)
+static int aha1740_remove (struct device *dev)
{
struct Scsi_Host *shpnt = dev_get_drvdata(dev);
struct aha1740_hostdata *host = HOSTDATA (shpnt);
@@ -677,7 +677,7 @@ static struct eisa_driver aha1740_driver = {
.driver = {
.name = "aha1740",
.probe = aha1740_probe,
- .remove = __devexit_p (aha1740_remove),
+ .remove = aha1740_remove,
},
};
diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
index 1c4120c3db41..c56741fc4b99 100644
--- a/drivers/scsi/aic94xx/aic94xx_init.c
+++ b/drivers/scsi/aic94xx/aic94xx_init.c
@@ -85,7 +85,7 @@ static struct scsi_host_template aic94xx_sht = {
.ioctl = sas_ioctl,
};
-static int __devinit asd_map_memio(struct asd_ha_struct *asd_ha)
+static int asd_map_memio(struct asd_ha_struct *asd_ha)
{
int err, i;
struct asd_ha_addrspace *io_handle;
@@ -146,7 +146,7 @@ static void asd_unmap_memio(struct asd_ha_struct *asd_ha)
pci_release_region(asd_ha->pcidev, 0);
}
-static int __devinit asd_map_ioport(struct asd_ha_struct *asd_ha)
+static int asd_map_ioport(struct asd_ha_struct *asd_ha)
{
int i = PCI_IOBAR_OFFSET, err;
struct asd_ha_addrspace *io_handle = &asd_ha->io_handle[0];
@@ -175,7 +175,7 @@ static void asd_unmap_ioport(struct asd_ha_struct *asd_ha)
pci_release_region(asd_ha->pcidev, PCI_IOBAR_OFFSET);
}
-static int __devinit asd_map_ha(struct asd_ha_struct *asd_ha)
+static int asd_map_ha(struct asd_ha_struct *asd_ha)
{
int err;
u16 cmd_reg;
@@ -221,7 +221,7 @@ static const char *asd_dev_rev[30] = {
[8] = "B0",
};
-static int __devinit asd_common_setup(struct asd_ha_struct *asd_ha)
+static int asd_common_setup(struct asd_ha_struct *asd_ha)
{
int err, i;
@@ -257,7 +257,7 @@ Err:
return err;
}
-static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
+static int asd_aic9410_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
@@ -272,7 +272,7 @@ static int __devinit asd_aic9410_setup(struct asd_ha_struct *asd_ha)
return 0;
}
-static int __devinit asd_aic9405_setup(struct asd_ha_struct *asd_ha)
+static int asd_aic9405_setup(struct asd_ha_struct *asd_ha)
{
int err = asd_common_setup(asd_ha);
@@ -531,7 +531,7 @@ static void asd_remove_dev_attrs(struct asd_ha_struct *asd_ha)
static const struct asd_pcidev_struct {
const char * name;
int (*setup)(struct asd_ha_struct *asd_ha);
-} asd_pcidev_data[] __devinitconst = {
+} asd_pcidev_data[] = {
/* Id 0 is used for dynamic ids. */
{ .name = "Adaptec AIC-94xx SAS/SATA Host Adapter",
.setup = asd_aic9410_setup
@@ -731,8 +731,7 @@ static int asd_unregister_sas_ha(struct asd_ha_struct *asd_ha)
return err;
}
-static int __devinit asd_pci_probe(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int asd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
const struct asd_pcidev_struct *asd_dev;
unsigned asd_id = (unsigned) id->driver_data;
@@ -924,7 +923,7 @@ static void asd_turn_off_leds(struct asd_ha_struct *asd_ha)
}
}
-static void __devexit asd_pci_remove(struct pci_dev *dev)
+static void asd_pci_remove(struct pci_dev *dev)
{
struct asd_ha_struct *asd_ha = pci_get_drvdata(dev);
@@ -1012,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
.lldd_ata_set_dmamode = asd_set_dmamode,
};
-static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
+static const struct pci_device_id aic94xx_pci_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
{PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
@@ -1031,7 +1030,7 @@ static struct pci_driver aic94xx_pci_driver = {
.name = ASD_DRIVER_NAME,
.id_table = aic94xx_pci_table,
.probe = asd_pci_probe,
- .remove = __devexit_p(asd_pci_remove),
+ .remove = asd_pci_remove,
};
static int __init aic94xx_init(void)
diff --git a/drivers/scsi/arm/acornscsi.c b/drivers/scsi/arm/acornscsi.c
index b330438ac662..3e1172adb37b 100644
--- a/drivers/scsi/arm/acornscsi.c
+++ b/drivers/scsi/arm/acornscsi.c
@@ -2965,8 +2965,7 @@ static struct scsi_host_template acornscsi_template = {
.proc_name = "acornscsi",
};
-static int __devinit
-acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
AS_Host *ashost;
@@ -3032,7 +3031,7 @@ acornscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit acornscsi_remove(struct expansion_card *ec)
+static void acornscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
AS_Host *ashost = (AS_Host *)host->hostdata;
@@ -3063,7 +3062,7 @@ static const struct ecard_id acornscsi_cids[] = {
static struct ecard_driver acornscsi_driver = {
.probe = acornscsi_probe,
- .remove = __devexit_p(acornscsi_remove),
+ .remove = acornscsi_remove,
.id_table = acornscsi_cids,
.drv = {
.name = "acornscsi",
diff --git a/drivers/scsi/arm/arxescsi.c b/drivers/scsi/arm/arxescsi.c
index 2a28b4ad1975..9274510294ac 100644
--- a/drivers/scsi/arm/arxescsi.c
+++ b/drivers/scsi/arm/arxescsi.c
@@ -276,8 +276,7 @@ static struct scsi_host_template arxescsi_template = {
.proc_name = "arxescsi",
};
-static int __devinit
-arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
struct arxescsi_info *info;
@@ -340,7 +339,7 @@ arxescsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit arxescsi_remove(struct expansion_card *ec)
+static void arxescsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -359,7 +358,7 @@ static const struct ecard_id arxescsi_cids[] = {
static struct ecard_driver arxescsi_driver = {
.probe = arxescsi_probe,
- .remove = __devexit_p(arxescsi_remove),
+ .remove = arxescsi_remove,
.id_table = arxescsi_cids,
.drv = {
.name = "arxescsi",
diff --git a/drivers/scsi/arm/cumana_1.c b/drivers/scsi/arm/cumana_1.c
index c3b99c93637a..c93938b246d5 100644
--- a/drivers/scsi/arm/cumana_1.c
+++ b/drivers/scsi/arm/cumana_1.c
@@ -225,8 +225,8 @@ static struct scsi_host_template cumanascsi_template = {
.proc_name = "CumanaSCSI-1",
};
-static int __devinit
-cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int cumanascsi1_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
int ret;
@@ -298,7 +298,7 @@ cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit cumanascsi1_remove(struct expansion_card *ec)
+static void cumanascsi1_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -320,7 +320,7 @@ static const struct ecard_id cumanascsi1_cids[] = {
static struct ecard_driver cumanascsi1_driver = {
.probe = cumanascsi1_probe,
- .remove = __devexit_p(cumanascsi1_remove),
+ .remove = cumanascsi1_remove,
.id_table = cumanascsi1_cids,
.drv = {
.name = "cumanascsi1",
diff --git a/drivers/scsi/arm/cumana_2.c b/drivers/scsi/arm/cumana_2.c
index 547987b86384..e3bae93c3c22 100644
--- a/drivers/scsi/arm/cumana_2.c
+++ b/drivers/scsi/arm/cumana_2.c
@@ -397,8 +397,8 @@ static struct scsi_host_template cumanascsi2_template = {
.proc_name = "cumanascsi2",
};
-static int __devinit
-cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int cumanascsi2_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
struct cumanascsi2_info *info;
@@ -495,7 +495,7 @@ cumanascsi2_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit cumanascsi2_remove(struct expansion_card *ec)
+static void cumanascsi2_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct cumanascsi2_info *info = (struct cumanascsi2_info *)host->hostdata;
@@ -519,7 +519,7 @@ static const struct ecard_id cumanascsi2_cids[] = {
static struct ecard_driver cumanascsi2_driver = {
.probe = cumanascsi2_probe,
- .remove = __devexit_p(cumanascsi2_remove),
+ .remove = cumanascsi2_remove,
.id_table = cumanascsi2_cids,
.drv = {
.name = "cumanascsi2",
diff --git a/drivers/scsi/arm/eesox.c b/drivers/scsi/arm/eesox.c
index 968d08358d20..8e36908415ec 100644
--- a/drivers/scsi/arm/eesox.c
+++ b/drivers/scsi/arm/eesox.c
@@ -515,8 +515,7 @@ static struct scsi_host_template eesox_template = {
.proc_name = "eesox",
};
-static int __devinit
-eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
struct eesoxscsi_info *info;
@@ -617,7 +616,7 @@ eesoxscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit eesoxscsi_remove(struct expansion_card *ec)
+static void eesoxscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct eesoxscsi_info *info = (struct eesoxscsi_info *)host->hostdata;
@@ -643,7 +642,7 @@ static const struct ecard_id eesoxscsi_cids[] = {
static struct ecard_driver eesoxscsi_driver = {
.probe = eesoxscsi_probe,
- .remove = __devexit_p(eesoxscsi_remove),
+ .remove = eesoxscsi_remove,
.id_table = eesoxscsi_cids,
.drv = {
.name = "eesoxscsi",
diff --git a/drivers/scsi/arm/oak.c b/drivers/scsi/arm/oak.c
index fc6a5aabf66e..48facdc18002 100644
--- a/drivers/scsi/arm/oak.c
+++ b/drivers/scsi/arm/oak.c
@@ -129,8 +129,7 @@ static struct scsi_host_template oakscsi_template = {
.proc_name = "oakscsi",
};
-static int __devinit
-oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
struct Scsi_Host *host;
int ret = -ENOMEM;
@@ -182,7 +181,7 @@ oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit oakscsi_remove(struct expansion_card *ec)
+static void oakscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
@@ -202,7 +201,7 @@ static const struct ecard_id oakscsi_cids[] = {
static struct ecard_driver oakscsi_driver = {
.probe = oakscsi_probe,
- .remove = __devexit_p(oakscsi_remove),
+ .remove = oakscsi_remove,
.id_table = oakscsi_cids,
.drv = {
.name = "oakscsi",
diff --git a/drivers/scsi/arm/powertec.c b/drivers/scsi/arm/powertec.c
index 9274c0677b9c..246600b93555 100644
--- a/drivers/scsi/arm/powertec.c
+++ b/drivers/scsi/arm/powertec.c
@@ -309,8 +309,8 @@ static struct scsi_host_template powertecscsi_template = {
.proc_name = "powertec",
};
-static int __devinit
-powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
+static int powertecscsi_probe(struct expansion_card *ec,
+ const struct ecard_id *id)
{
struct Scsi_Host *host;
struct powertec_info *info;
@@ -409,7 +409,7 @@ powertecscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
return ret;
}
-static void __devexit powertecscsi_remove(struct expansion_card *ec)
+static void powertecscsi_remove(struct expansion_card *ec)
{
struct Scsi_Host *host = ecard_get_drvdata(ec);
struct powertec_info *info = (struct powertec_info *)host->hostdata;
@@ -435,7 +435,7 @@ static const struct ecard_id powertecscsi_cids[] = {
static struct ecard_driver powertecscsi_driver = {
.probe = powertecscsi_probe,
- .remove = __devexit_p(powertecscsi_remove),
+ .remove = powertecscsi_remove,
.id_table = powertecscsi_cids,
.drv = {
.name = "powertecscsi",
diff --git a/drivers/scsi/atp870u.c b/drivers/scsi/atp870u.c
index a540162ac59c..cfc73041f102 100644
--- a/drivers/scsi/atp870u.c
+++ b/drivers/scsi/atp870u.c
@@ -3210,7 +3210,7 @@ static struct pci_driver atp870u_driver = {
.id_table = atp870u_id_table,
.name = "atp870u",
.probe = atp870u_probe,
- .remove = __devexit_p(atp870u_remove),
+ .remove = atp870u_remove,
};
static int __init atp870u_init(void)
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index a50b6a9030e8..f1733dfa3ae2 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -28,7 +28,7 @@
/* BladeEngine Generation numbers */
#define BE_GEN2 2
#define BE_GEN3 3
-
+#define BE_GEN4 4
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -84,9 +84,12 @@ static inline void queue_tail_inc(struct be_queue_info *q)
/*ISCSI */
struct be_eq_obj {
+ bool todo_mcc_cq;
+ bool todo_cq;
struct be_queue_info q;
struct beiscsi_hba *phba;
struct be_queue_info *cq;
+ struct work_struct work_cqs; /* Work Item */
struct blk_iopoll iopoll;
};
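The new todo_mcc_cq/todo_cq flags and the work_cqs item move deferred CQ processing from one per-adapter work item to one per event queue. A minimal sketch of the setup this implies (the actual init site is in be_main.c, outside the hunks shown here; names are assumed from that file):

	pbe_eq->phba = phba;
	pbe_eq->todo_cq = false;
	pbe_eq->todo_mcc_cq = false;
	INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);

The handler side then recovers its EQ with container_of(work, struct be_eq_obj, work_cqs), as beiscsi_process_all_cqs() does later in this series.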
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 07d2cb126d93..5c87768c109c 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -56,7 +56,7 @@ int beiscsi_pci_soft_reset(struct beiscsi_hba *phba)
writel(pconline0, (void *)pci_online0_offset);
writel(pconline1, (void *)pci_online1_offset);
- sreset = BE2_SET_RESET;
+ sreset |= BE2_SET_RESET;
writel(sreset, (void *)pci_reset_offset);
i = 0;
@@ -133,6 +133,87 @@ unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
return tag;
}
+/*
+ * beiscsi_mccq_compl()- Wait for completion of MBX
+ * @phba: Driver private structure
+ * @tag: Tag for the MBX Command
+ * @wrb: the WRB used for the MBX Command
+ * @cmd_hdr: IOCTL Hdr for the MBX Cmd
+ *
+ * Waits for MBX completion with the passed TAG.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb,
+ void *cmd_hdr)
+{
+ int rc = 0;
+ uint32_t mcc_tag_response;
+ uint16_t status = 0, addl_status = 0, wrb_num = 0;
+ struct be_mcc_wrb *temp_wrb;
+ struct be_cmd_req_hdr *ioctl_hdr;
+ struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
+ /* wait for the mccq completion */
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
+
+ if (rc <= 0) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Completion timed out\n");
+ rc = -EAGAIN;
+ goto release_mcc_tag;
+ } else
+ rc = 0;
+
+ mcc_tag_response = phba->ctrl.mcc_numtag[tag];
+ status = (mcc_tag_response & CQE_STATUS_MASK);
+ addl_status = ((mcc_tag_response & CQE_STATUS_ADDL_MASK) >>
+ CQE_STATUS_ADDL_SHIFT);
+
+ if (cmd_hdr) {
+ ioctl_hdr = (struct be_cmd_req_hdr *)cmd_hdr;
+ } else {
+ wrb_num = (mcc_tag_response & CQE_STATUS_WRB_MASK) >>
+ CQE_STATUS_WRB_SHIFT;
+ temp_wrb = (struct be_mcc_wrb *)queue_get_wrb(mccq, wrb_num);
+ ioctl_hdr = embedded_payload(temp_wrb);
+
+ if (wrb)
+ *wrb = temp_wrb;
+ }
+
+ if (status || addl_status) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_INIT | BEISCSI_LOG_EH |
+ BEISCSI_LOG_CONFIG,
+ "BC_%d : MBX Cmd Failed for "
+ "Subsys : %d Opcode : %d with "
+ "Status : %d and Extd_Status : %d\n",
+ ioctl_hdr->subsystem,
+ ioctl_hdr->opcode,
+ status, addl_status);
+ rc = -EAGAIN;
+ }
+
+release_mcc_tag:
+ /* Release the MCC entry */
+ free_mcc_tag(&phba->ctrl, tag);
+
+ return rc;
+}
+
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
{
spin_lock(&ctrl->mbox_lock);
@@ -168,11 +249,24 @@ static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
compl->flags = 0;
}
+/*
+ * be_mcc_compl_process()- Check the MBX completion status
+ * @ctrl: Function specific MBX data structure
+ * @compl: Completion status of MBX Command
+ *
+ * Check for the MBX completion status when the BMBX method is used
+ *
+ * return
+ * Success: Zero
+ * Failure: Non-Zero
+ **/
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
struct be_mcc_compl *compl)
{
u16 compl_status, extd_status;
+ struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ struct be_cmd_req_hdr *hdr = embedded_payload(wrb);
be_dws_le_to_cpu(compl, 4);
@@ -184,7 +278,10 @@ static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : error in cmd completion: status(compl/extd)=%d/%d\n",
+ "BC_%d : error in cmd completion: "
+ "Subsystem : %d Opcode : %d "
+ "status(compl/extd)=%d/%d\n",
+ hdr->subsystem, hdr->opcode,
compl_status, extd_status);
return -EBUSY;
@@ -314,11 +411,24 @@ int beiscsi_process_mcc(struct beiscsi_hba *phba)
return status;
}
-/* Wait till no more pending mcc requests are present */
+/*
+ * be_mcc_wait_compl()- Wait for MBX completion
+ * @phba: driver private structure
+ *
+ * Wait till no more pending mcc requests are present
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
{
int i, status;
for (i = 0; i < mcc_timeout; i++) {
+ if (beiscsi_error(phba))
+ return -EIO;
+
status = beiscsi_process_mcc(phba);
if (status)
return status;
@@ -330,51 +440,83 @@ static int be_mcc_wait_compl(struct beiscsi_hba *phba)
if (i == mcc_timeout) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mccq poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
return 0;
}
-/* Notify MCC requests and wait for completion */
+/*
+ * be_mcc_notify_wait()- Notify and wait for Compl
+ * @phba: driver private structure
+ *
+ * Notify MCC requests and wait for completion
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mcc_notify_wait(struct beiscsi_hba *phba)
{
be_mcc_notify(phba);
return be_mcc_wait_compl(phba);
}
+/*
+ * be_mbox_db_ready_wait()- Check ready status
+ * @ctrl: Function specific MBX data structure
+ *
+ * Check for the ready status of FW to send BMBX
+ * commands to adapter.
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
{
-#define long_delay 2000
void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
- int cnt = 0, wait = 5; /* in usecs */
+ struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ int wait = 0;
u32 ready;
do {
+
+ if (beiscsi_error(phba))
+ return -EIO;
+
ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;
if (ready)
break;
- if (cnt > 12000000) {
- struct beiscsi_hba *phba = pci_get_drvdata(ctrl->pdev);
+ if (wait > BEISCSI_HOST_MBX_TIMEOUT) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : mbox_db poll timed out\n");
-
+ "BC_%d : FW Timed Out\n");
+ phba->fw_timeout = true;
+ beiscsi_ue_detect(phba);
return -EBUSY;
}
- if (cnt > 50) {
- wait = long_delay;
- mdelay(long_delay / 1000);
- } else
- udelay(wait);
- cnt += wait;
+ mdelay(1);
+ wait++;
} while (true);
return 0;
}
+/*
+ * be_mbox_notify()- Notify adapter of new BMBX command
+ * @ctrl: Function specific MBX data structure
+ *
+ * Ring doorbell to inform adapter of a BMBX command
+ * to process
+ *
+ * return
+ * Success: 0
+ * Failure: Non-Zero
+ **/
int be_mbox_notify(struct be_ctrl_info *ctrl)
{
int status;
@@ -391,13 +533,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
val = 0;
val &= ~MPU_MAILBOX_DB_RDY_MASK;
val &= ~MPU_MAILBOX_DB_HI_MASK;
@@ -405,13 +543,9 @@ int be_mbox_notify(struct be_ctrl_info *ctrl)
iowrite32(val, db);
status = be_mbox_db_ready_wait(ctrl);
- if (status != 0) {
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BC_%d : be_mbox_db_ready_wait failed\n");
-
+ if (status)
return status;
- }
+
if (be_mcc_compl_is_new(compl)) {
status = be_mcc_compl_process(ctrl, &mbox->compl);
be_mcc_compl_use(compl);
@@ -499,7 +633,7 @@ void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
req_hdr->opcode = opcode;
req_hdr->subsystem = subsystem;
req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
- req_hdr->timeout = 120;
+ req_hdr->timeout = BEISCSI_FW_MBX_TIMEOUT;
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
@@ -649,18 +783,34 @@ int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
OPCODE_COMMON_CQ_CREATE, sizeof(*req));
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
+ if (chip_skh_r(ctrl->pdev)) {
+ req->hdr.version = MBX_CMD_VER2;
+ req->page_size = 1;
+ AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context_v2, nodelay,
+ ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, armed, ctxt, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_cq_context, coalescwm,
+ ctxt, coalesce_wm);
+ AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
+ AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
+ __ilog2_u32(cq->len / 256));
+ AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
+ AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
+ PCI_FUNC(ctrl->pdev->devfn));
+ }
- AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
- AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
- AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
- __ilog2_u32(cq->len / 256));
- AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
- AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
- AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
- PCI_FUNC(ctrl->pdev->devfn));
be_dws_cpu_to_le(ctxt, sizeof(req->context));
be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
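beiscsi_mccq_compl() centralizes the wait/decode/free sequence that each MCC caller previously open-coded. The resulting caller shape, as the be_iscsi.c hunks later in this patch show (be_cmd_get_initname() stands in for any tag-returning MCC command):

	tag = be_cmd_get_initname(phba);
	if (!tag)
		return -EBUSY;		/* no MCC tag could be allocated */

	rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
	if (rc)
		return rc;		/* helper has already freed the tag */

	resp = embedded_payload(wrb);	/* response payload is in the returned WRB */

Passing a non-NULL cmd_hdr instead of a wrb pointer selects the non-embedded path, where the command header lives in the caller's DMA buffer.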
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index 2c8f98df1287..23397d51ac54 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -57,6 +57,16 @@ struct be_mcc_wrb {
#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
#define CQE_STATUS_EXTD_MASK 0xFFFF
#define CQE_STATUS_EXTD_SHIFT 16 /* bits 0 - 15 */
+#define CQE_STATUS_ADDL_MASK 0xFF00
+#define CQE_STATUS_MASK 0xFF
+#define CQE_STATUS_ADDL_SHIFT 0x08
+#define CQE_STATUS_WRB_MASK 0xFF0000
+#define CQE_STATUS_WRB_SHIFT 16
+#define BEISCSI_HOST_MBX_TIMEOUT (110 * 1000)
+#define BEISCSI_FW_MBX_TIMEOUT 100
+
+/* MBOX Command VER */
+#define MBX_CMD_VER2 0x02
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -183,7 +193,8 @@ struct be_cmd_req_hdr {
u8 domain; /* dword 0 */
u32 timeout; /* dword 1 */
u32 request_length; /* dword 2 */
- u32 rsvd0; /* dword 3 */
+ u8 version; /* dword 3 */
+ u8 rsvd0[3]; /* dword 3 */
};
struct be_cmd_resp_hdr {
@@ -483,10 +494,28 @@ struct amap_cq_context {
u8 rsvd5[32]; /* dword 3 */
} __packed;
+struct amap_cq_context_v2 {
+ u8 rsvd0[12]; /* dword 0 */
+ u8 coalescwm[2]; /* dword 0 */
+ u8 nodelay; /* dword 0 */
+ u8 rsvd1[12]; /* dword 0 */
+ u8 count[2]; /* dword 0 */
+ u8 valid; /* dword 0 */
+ u8 rsvd2; /* dword 0 */
+ u8 eventable; /* dword 0 */
+ u8 eqid[16]; /* dword 1 */
+ u8 rsvd3[15]; /* dword 1 */
+ u8 armed; /* dword 1 */
+ u8 cqecount[16];/* dword 2 */
+ u8 rsvd4[16]; /* dword 2 */
+ u8 rsvd5[32]; /* dword 3 */
+};
+
struct be_cmd_req_cq_create {
struct be_cmd_req_hdr hdr;
u16 num_pages;
- u16 rsvd0;
+ u8 page_size;
+ u8 rsvd0;
u8 context[sizeof(struct amap_cq_context) / 8];
struct phys_addr pages[4];
} __packed;
@@ -663,6 +692,9 @@ unsigned int be_cmd_get_initname(struct beiscsi_hba *phba);
unsigned int be_cmd_get_port_speed(struct beiscsi_hba *phba);
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag);
+
+int beiscsi_mccq_compl(struct beiscsi_hba *phba,
+ uint32_t tag, struct be_mcc_wrb **wrb, void *cmd_va);
/*ISCSI Functuions */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl);
@@ -804,6 +836,59 @@ struct amap_sol_cqe_ring {
u8 valid; /* dword 3 */
} __packed;
+struct amap_sol_cqe_v2 {
+ u8 hw_sts[8]; /* dword 0 */
+ u8 i_sts[8]; /* dword 0 */
+ u8 wrb_index[16]; /* dword 0 */
+ u8 i_exp_cmd_sn[32]; /* dword 1 */
+ u8 code[6]; /* dword 2 */
+ u8 cmd_cmpl; /* dword 2 */
+ u8 rsvd0; /* dword 2 */
+ u8 i_cmd_wnd[8]; /* dword 2 */
+ u8 cid[13]; /* dword 2 */
+ u8 u; /* dword 2 */
+ u8 o; /* dword 2 */
+ u8 s; /* dword 2 */
+ u8 i_res_cnt[31]; /* dword 3 */
+ u8 valid; /* dword 3 */
+} __packed;
+
+struct common_sol_cqe {
+ u32 exp_cmdsn;
+ u32 res_cnt;
+ u16 wrb_index;
+ u16 cid;
+ u8 hw_sts;
+ u8 cmd_wnd;
+ u8 res_flag; /* the s field of the structure */
+ u8 i_resp; /* for skh: if cmd_cmpl is set, i_sts is the response */
+ u8 i_flags; /* for skh: OR of the u and o fields */
+ u8 i_sts; /* for skh: if cmd_cmpl is not set, i_sts is the status */
+};
+
+/*** iSCSI ack/driver message completions ***/
+struct amap_it_dmsg_cqe {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 cid[10]; /* DWORD 2 */
+ u8 wrb_idx[8]; /* DWORD 2 */
+ u8 rsvd0[8]; /* DWORD 2*/
+ u8 rsvd1[31]; /* DWORD 3*/
+ u8 valid; /* DWORD 3 */
+} __packed;
+
+struct amap_it_dmsg_cqe_v2 {
+ u8 ack_num[32]; /* DWORD 0 */
+ u8 pdu_bytes_rcvd[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 rsvd0[10]; /* DWORD 2 */
+ u8 wrb_idx[16]; /* DWORD 2 */
+ u8 rsvd1[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd2[2]; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
/**
@@ -992,8 +1077,6 @@ struct be_cmd_get_all_if_id_req {
#define CONNECTION_UPLOAD_ABORT_WITH_SEQ 4 /* Abortive upload with reset,
* sequence number by driver */
-/* Returns byte size of given field with a structure. */
-
/* Returns the number of items in the field array. */
#define BE_NUMBER_OF_FIELD(_type_, _field_) \
(FIELD_SIZEOF(_type_, _field_)/sizeof((((_type_ *)0)->_field_[0])))\
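The new CQE_STATUS_* masks partition the 32-bit word that the MCC completion handler stores in ctrl->mcc_numtag[tag], and beiscsi_mccq_compl() decodes it as:

	status      = mcc_tag_response & CQE_STATUS_MASK;	/* bits 0-7   */
	addl_status = (mcc_tag_response & CQE_STATUS_ADDL_MASK)
			>> CQE_STATUS_ADDL_SHIFT;		/* bits 8-15  */
	wrb_num     = (mcc_tag_response & CQE_STATUS_WRB_MASK)
			>> CQE_STATUS_WRB_SHIFT;		/* bits 16-23 */

which replaces the 0x000000FF/0x0000FF00/0x00FF0000 literals scattered through the old callers.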
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index aedb0d9a9dae..214d691adb53 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -531,9 +531,9 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
break;
case ISCSI_NET_PARAM_IPV4_BOOTPROTO:
if (!if_info.dhcp_state)
- len = sprintf(buf, "static");
+ len = sprintf(buf, "static\n");
else
- len = sprintf(buf, "dhcp");
+ len = sprintf(buf, "dhcp\n");
break;
case ISCSI_NET_PARAM_IPV4_SUBNET:
len = sprintf(buf, "%pI4\n", &if_info.ip_addr.subnet_mask);
@@ -541,7 +541,7 @@ static int be2iscsi_get_if_param(struct beiscsi_hba *phba,
case ISCSI_NET_PARAM_VLAN_ENABLED:
len = sprintf(buf, "%s\n",
(if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
- ? "Disabled" : "Enabled");
+ ? "Disabled\n" : "Enabled\n");
break;
case ISCSI_NET_PARAM_VLAN_ID:
if (if_info.vlan_priority == BEISCSI_VLAN_DISABLE)
@@ -586,7 +586,7 @@ int be2iscsi_iface_get_param(struct iscsi_iface *iface,
len = be2iscsi_get_if_param(phba, iface, param, buf);
break;
case ISCSI_NET_PARAM_IFACE_ENABLE:
- len = sprintf(buf, "enabled");
+ len = sprintf(buf, "enabled\n");
break;
case ISCSI_NET_PARAM_IPV4_GW:
memset(&gateway, 0, sizeof(gateway));
@@ -690,11 +690,9 @@ int beiscsi_set_param(struct iscsi_cls_conn *cls_conn,
static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
{
int rc;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_hba_name *resp;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_initname(phba);
if (!tag) {
@@ -702,26 +700,16 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
"BS_%d : Getting Initiator Name Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Initiator Name MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
resp = embedded_payload(wrb);
rc = sprintf(buf, "%s\n", resp->initiator_name);
return rc;
@@ -731,7 +719,6 @@ static int beiscsi_get_initname(char *buf, struct beiscsi_hba *phba)
* beiscsi_get_port_state - Get the Port State
* @shost : pointer to scsi_host structure
*
- * returns number of bytes
*/
static void beiscsi_get_port_state(struct Scsi_Host *shost)
{
@@ -750,13 +737,12 @@ static void beiscsi_get_port_state(struct Scsi_Host *shost)
*/
static int beiscsi_get_port_speed(struct Scsi_Host *shost)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
struct be_mcc_wrb *wrb;
struct be_cmd_ntwk_link_status_resp *resp;
struct beiscsi_hba *phba = iscsi_host_priv(shost);
struct iscsi_cls_host *ihost = shost->shost_data;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
tag = be_cmd_get_port_speed(phba);
if (!tag) {
@@ -764,26 +750,14 @@ static int beiscsi_get_port_speed(struct Scsi_Host *shost)
"BS_%d : Getting Port Speed Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
-
- if (status || extd_status) {
+ }
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : MailBox Command Failed with "
- "status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+ "BS_%d : Port Speed MBX Failed\n");
+ return rc;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
resp = embedded_payload(wrb);
switch (resp->mac_speed) {
@@ -937,6 +911,14 @@ static void beiscsi_set_params_for_offld(struct beiscsi_conn *beiscsi_conn,
session->initial_r2t_en);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, imd, params,
session->imm_data_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ data_seq_inorder, params,
+ session->dataseq_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder, params,
+ session->pdu_inorder_en);
+ AMAP_SET_BITS(struct amap_beiscsi_offload_params, max_r2t, params,
+ session->max_r2t);
AMAP_SET_BITS(struct amap_beiscsi_offload_params, exp_statsn, params,
(conn->exp_statsn - 1));
}
@@ -1027,12 +1009,10 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
{
struct beiscsi_endpoint *beiscsi_ep = ep->dd_data;
struct beiscsi_hba *phba = beiscsi_ep->phba;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
struct be_mcc_wrb *wrb;
struct tcp_connect_and_offload_out *ptcpcnct_out;
- unsigned short status, extd_status;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
+ unsigned int tag;
int ret = -ENOMEM;
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
@@ -1084,35 +1064,26 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
}
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+
+ ret = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BS_%d : mgmt_open_connection Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
+ "BS_%d : mgmt_open_connection Failed");
- free_mcc_tag(&phba->ctrl, tag);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
goto free_ep;
- } else {
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
-
- ptcpcnct_out = embedded_payload(wrb);
- beiscsi_ep = ep->dd_data;
- beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
- beiscsi_ep->cid_vld = 1;
- beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
- "BS_%d : mgmt_open_connection Success\n");
}
+
+ ptcpcnct_out = embedded_payload(wrb);
+ beiscsi_ep = ep->dd_data;
+ beiscsi_ep->fw_handle = ptcpcnct_out->connection_handle;
+ beiscsi_ep->cid_vld = 1;
+ beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
+ "BS_%d : mgmt_open_connection Success\n");
+
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return 0;
@@ -1150,8 +1121,8 @@ beiscsi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
if (phba->state != BE_ADAPTER_UP) {
ret = -EBUSY;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BS_%d : The Adapter state is Not UP\n");
+ beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
+ "BS_%d : The Adapter Port state is Down!!!\n");
return ERR_PTR(ret);
}
@@ -1216,11 +1187,9 @@ static int beiscsi_close_conn(struct beiscsi_endpoint *beiscsi_ep, int flag)
beiscsi_ep->ep_cid);
ret = -EAGAIN;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
return ret;
}
@@ -1281,12 +1250,9 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : mgmt_invalidate_connection Failed for cid=%d\n",
beiscsi_ep->ep_cid);
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+ beiscsi_mccq_compl(phba, tag, NULL, NULL);
beiscsi_close_conn(beiscsi_ep, tcp_upload_flag);
beiscsi_free_ep(beiscsi_ep);
beiscsi_unbind_conn_to_cid(phba, beiscsi_ep->ep_cid);
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 8b826fc06bcc..38eab7232159 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
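The chip_skh_r() predicate used below in be_main.c (and in be_cmds.c above) to select the v2 CQE/WRB layouts is defined in be_main.h, whose hunk is not shown here; presumably it just matches the new OC_SKH_ID1 device id added to the PCI table, along the lines of:

	/* assumed shape; the real definition lives in be_main.h */
	static inline int chip_skh_r(struct pci_dev *pdev)
	{
		return (pdev->device == OC_SKH_ID1);
	}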
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index ff73f9500b01..4e2733d23003 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -47,8 +47,6 @@
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
-static unsigned int gcrashmode = 0;
-static unsigned int num_hba = 0;
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
@@ -153,11 +151,54 @@ BEISCSI_RW_ATTR(log_enable, 0x00,
"\t\t\t\tIO Path Events : 0x10\n"
"\t\t\t\tConfiguration Path : 0x20\n");
+DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
+DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
&dev_attr_beiscsi_log_enable,
+ &dev_attr_beiscsi_drvr_ver,
+ &dev_attr_beiscsi_adapter_family,
NULL,
};
+static char const *cqe_desc[] = {
+ "RESERVED_DESC",
+ "SOL_CMD_COMPLETE",
+ "SOL_CMD_KILLED_DATA_DIGEST_ERR",
+ "CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
+ "CXN_KILLED_BURST_LEN_MISMATCH",
+ "CXN_KILLED_AHS_RCVD",
+ "CXN_KILLED_HDR_DIGEST_ERR",
+ "CXN_KILLED_UNKNOWN_HDR",
+ "CXN_KILLED_STALE_ITT_TTT_RCVD",
+ "CXN_KILLED_INVALID_ITT_TTT_RCVD",
+ "CXN_KILLED_RST_RCVD",
+ "CXN_KILLED_TIMED_OUT",
+ "CXN_KILLED_RST_SENT",
+ "CXN_KILLED_FIN_RCVD",
+ "CXN_KILLED_BAD_UNSOL_PDU_RCVD",
+ "CXN_KILLED_BAD_WRB_INDEX_ERROR",
+ "CXN_KILLED_OVER_RUN_RESIDUAL",
+ "CXN_KILLED_UNDER_RUN_RESIDUAL",
+ "CMD_KILLED_INVALID_STATSN_RCVD",
+ "CMD_KILLED_INVALID_R2T_RCVD",
+ "CMD_CXN_KILLED_LUN_INVALID",
+ "CMD_CXN_KILLED_ICD_INVALID",
+ "CMD_CXN_KILLED_ITT_INVALID",
+ "CMD_CXN_KILLED_SEQ_OUTOFORDER",
+ "CMD_CXN_KILLED_INVALID_DATASN_RCVD",
+ "CXN_INVALIDATE_NOTIFY",
+ "CXN_INVALIDATE_INDEX_NOTIFY",
+ "CMD_INVALIDATED_NOTIFY",
+ "UNSOL_HDR_NOTIFY",
+ "UNSOL_DATA_NOTIFY",
+ "UNSOL_DATA_DIGEST_ERROR_NOTIFY",
+ "DRIVERMSG_NOTIFY",
+ "CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
+ "SOL_CMD_KILLED_DIF_ERR",
+ "CXN_KILLED_SYN_RCVD",
+ "CXN_KILLED_IMM_DATA_RCVD"
+};
+
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
blk_queue_max_segment_size(sdev->request_queue, 65536);
@@ -226,11 +267,9 @@ static int beiscsi_eh_abort(struct scsi_cmnd *sc)
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_abort(sc);
@@ -301,11 +340,9 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return FAILED;
- } else {
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
- free_mcc_tag(&phba->ctrl, tag);
}
+
+ beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return iscsi_eh_device_reset(sc);
@@ -482,6 +519,7 @@ static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
+ { PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
@@ -730,7 +768,7 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -738,8 +776,8 @@ static irqreturn_t be_isr_mcc(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_eq_processed)
hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
@@ -779,29 +817,26 @@ static irqreturn_t be_isr_msix(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
-
- return IRQ_HANDLED;
} else {
while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
& EQE_VALID_MASK) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
queue_tail_inc(eq);
eqe = queue_tail_node(eq);
num_eq_processed++;
}
- if (phba->todo_cq)
- queue_work(phba->wq, &phba->work_cqs);
- if (num_eq_processed)
- hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
-
- return IRQ_HANDLED;
+ if (pbe_eq->todo_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
}
+
+ if (num_eq_processed)
+ hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);
+
+ return IRQ_HANDLED;
}
/**
@@ -849,7 +884,7 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) == mcc->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
num_mcceq_processed++;
} else {
@@ -862,8 +897,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
}
if (num_ioeq_processed || num_mcceq_processed) {
- if (phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if ((num_mcceq_processed) && (!num_ioeq_processed))
hwi_ring_eq_db(phba, eq->id, 0,
@@ -886,11 +921,11 @@ static irqreturn_t be_isr(int irq, void *dev_id)
resource_id) / 32] &
EQE_RESID_MASK) >> 16) != cq->id) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 1;
+ pbe_eq->todo_mcc_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
} else {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 1;
+ pbe_eq->todo_cq = true;
spin_unlock_irqrestore(&phba->isr_lock, flags);
}
AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
@@ -898,8 +933,8 @@ static irqreturn_t be_isr(int irq, void *dev_id)
eqe = queue_tail_node(eq);
num_ioeq_processed++;
}
- if (phba->todo_cq || phba->todo_mcc_cq)
- queue_work(phba->wq, &phba->work_cqs);
+ if (pbe_eq->todo_cq || pbe_eq->todo_mcc_cq)
+ queue_work(phba->wq, &pbe_eq->work_cqs);
if (num_ioeq_processed) {
hwi_ring_eq_db(phba, eq->id, 0,
@@ -1211,7 +1246,8 @@ free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct be_status_bhs *sts_bhs =
@@ -1221,20 +1257,14 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
u32 resid = 0, exp_cmdsn, max_cmdsn;
u8 rsp, status, flags;
- exp_cmdsn = (psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- max_cmdsn = ((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
- rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
- & SOL_RESP_MASK) >> 16);
- status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
- & SOL_STS_MASK) >> 8);
- flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
+ exp_cmdsn = csol_cqe->exp_cmdsn;
+ max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+ rsp = csol_cqe->i_resp;
+ status = csol_cqe->i_sts;
+ flags = csol_cqe->i_flags;
+ resid = csol_cqe->res_cnt;
+
if (!task->sc) {
if (io_task->scsi_cmnd)
scsi_dma_unmap(io_task->scsi_cmnd);
@@ -1249,9 +1279,6 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
/* bidi not initially supported */
if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
- resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
- 32] & SOL_RES_CNT_MASK);
-
if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
task->sc->result = DID_ERROR << 16;
@@ -1273,13 +1300,8 @@ be_complete_io(struct beiscsi_conn *beiscsi_conn,
min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
}
- if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
- if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK)
- conn->rxdata_octets += (psol->
- dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
- & SOL_RES_CNT_MASK);
- }
+ if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
+ conn->rxdata_octets += resid;
unmap:
scsi_dma_unmap(io_task->scsi_cmnd);
iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
@@ -1287,7 +1309,8 @@ unmap:
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_logout_rsp *hdr;
struct beiscsi_io_task *io_task = task->dd_data;
@@ -1297,18 +1320,11 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
hdr->opcode = ISCSI_OP_LOGOUT_RSP;
hdr->t2wait = 5;
hdr->t2retain = 0;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->
- dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
- & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn + csol_cqe->cmd_wnd - 1);
+
hdr->dlength[0] = 0;
hdr->dlength[1] = 0;
hdr->dlength[2] = 0;
@@ -1319,7 +1335,8 @@ be_complete_logout(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_tm_rsp *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
@@ -1327,16 +1344,12 @@ be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
hdr = (struct iscsi_tm_rsp *)task->hdr;
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
- 32] & SOL_RESP_MASK);
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->response = csol_cqe->i_resp;
+ hdr->exp_cmdsn = csol_cqe->exp_cmdsn;
+ hdr->max_cmdsn = (csol_cqe->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
@@ -1352,15 +1365,24 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_io_task *io_task;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ uint16_t wrb_index, cid;
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->
- dw[offsetof(struct amap_sol_cqe, cid) / 32] &
- SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+ if (chip_skh_r(phba->pcidev)) {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
+ cid, psol);
+ } else {
+ wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ wrb_idx, psol);
+ cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
+ cid, psol);
+ }
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ cid - phba->fw_config.iscsi_cid_start];
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
task = pwrb_handle->pio_handle;
io_task = task->dd_data;
@@ -1374,26 +1396,78 @@ hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
- struct iscsi_task *task, struct sol_cqe *psol)
+ struct iscsi_task *task,
+ struct common_sol_cqe *csol_cqe)
{
struct iscsi_nopin *hdr;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct beiscsi_io_task *io_task = task->dd_data;
hdr = (struct iscsi_nopin *)task->hdr;
- hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
- & SOL_FLAGS_MASK) >> 24) | 0x80;
- hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
- hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
- i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
- ((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
- / 32] & SOL_CMD_WND_MASK) >> 24) - 1);
+ hdr->flags = csol_cqe->i_flags;
+ hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
+ hdr->max_cmdsn = be32_to_cpu(hdr->exp_cmdsn +
+ csol_cqe->cmd_wnd - 1);
+
hdr->opcode = ISCSI_OP_NOOP_IN;
hdr->itt = io_task->libiscsi_itt;
__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
+static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
+ struct sol_cqe *psol,
+ struct common_sol_cqe *csol_cqe)
+{
+ if (chip_skh_r(phba->pcidev)) {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_res_cnt, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ hw_sts, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_cmd_wnd, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cmd_cmpl, psol))
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ else
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ i_sts, psol);
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ u, psol))
+ csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;
+
+ if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ o, psol))
+ csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
+ } else {
+ csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_exp_cmd_sn, psol);
+ csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_res_cnt, psol);
+ csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_cmd_wnd, psol);
+ csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
+ wrb_index, psol);
+ csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
+ cid, psol);
+ csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ hw_sts, psol);
+ csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_resp, psol);
+ csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_sts, psol);
+ csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
+ i_flags, psol);
+ }
+}
+
+
static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_hba *phba, struct sol_cqe *psol)
{
@@ -1405,19 +1479,22 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
unsigned int type;
struct iscsi_conn *conn = beiscsi_conn->conn;
struct iscsi_session *session = conn->session;
+ struct common_sol_cqe csol_cqe = {0};
phwi_ctrlr = phba->phwi_ctrlr;
- pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
- (struct amap_sol_cqe, cid) / 32]
- & SOL_CID_MASK) >> 6) -
- phba->fw_config.iscsi_cid_start];
- pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
- dw[offsetof(struct amap_sol_cqe, wrb_index) /
- 32] & SOL_WRB_INDEX_MASK) >> 16)];
+
+ /* Copy the elements to a common structure */
+ adapter_get_sol_cqe(phba, psol, &csol_cqe);
+
+ pwrb_context = &phwi_ctrlr->wrb_context[
+ csol_cqe.cid - phba->fw_config.iscsi_cid_start];
+
+ pwrb_handle = pwrb_context->pwrb_handle_basestd[
+ csol_cqe.wrb_index];
+
task = pwrb_handle->pio_handle;
pwrb = pwrb_handle->pwrb;
- type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
- WRB_TYPE_MASK) >> 28;
+ type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;
spin_lock_bh(&session->lock);
switch (type) {
@@ -1425,17 +1502,16 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
case HWH_TYPE_IO_RD:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
ISCSI_OP_NOOP_OUT)
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
else
- be_complete_io(beiscsi_conn, task, psol);
+ be_complete_io(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGOUT:
if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
- be_complete_logout(beiscsi_conn, task, psol);
+ be_complete_logout(beiscsi_conn, task, &csol_cqe);
else
- be_complete_tmf(beiscsi_conn, task, psol);
-
+ be_complete_tmf(beiscsi_conn, task, &csol_cqe);
break;
case HWH_TYPE_LOGIN:
@@ -1446,7 +1522,7 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
break;
case HWH_TYPE_NOP:
- be_complete_nopin_resp(beiscsi_conn, task, psol);
+ be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
break;
default:
@@ -1454,10 +1530,8 @@ static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
"BM_%d : In hwi_complete_cmd, unknown type = %d"
"wrb_index 0x%x CID 0x%x\n", type,
- ((psol->dw[offsetof(struct amap_iscsi_wrb,
- type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
- ((psol->dw[offsetof(struct amap_sol_cqe,
- cid) / 32] & SOL_CID_MASK) >> 6));
+ csol_cqe.wrb_index,
+ csol_cqe.cid);
break;
}
@@ -1485,13 +1559,26 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
struct list_head *pbusy_list;
struct async_pdu_handle *pasync_handle = NULL;
unsigned char is_header = 0;
+ unsigned int index, dpl;
+
+ if (chip_skh_r(phba->pcidev)) {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
+ index, pdpdu_cqe);
+ } else {
+ dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ dpl, pdpdu_cqe);
+ index = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
+ index, pdpdu_cqe);
+ }
phys_addr.u.a32.address_lo =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
- ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_lo) / 32] - dpl);
phys_addr.u.a32.address_hi =
- pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
+ pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
+ db_addr_hi) / 32];
phys_addr.u.a64.address =
*((unsigned long long *)(&phys_addr.u.a64.address));
@@ -1501,14 +1588,12 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
case UNSOL_HDR_NOTIFY:
is_header = 1;
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
- (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
case UNSOL_DATA_NOTIFY:
- pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK));
+ pbusy_list = hwi_get_async_busy_list(pasync_ctx,
+ is_header, index);
break;
default:
pbusy_list = NULL;
@@ -1531,12 +1616,9 @@ hwi_get_async_handle(struct beiscsi_hba *phba,
pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start;
pasync_handle->is_header = is_header;
- pasync_handle->buffer_len = ((pdpdu_cqe->
- dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
- & PDUCQE_DPL_MASK) >> 16);
+ pasync_handle->buffer_len = dpl;
+ *pcq_index = index;
- *pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
- index) / 32] & PDUCQE_INDEX_MASK);
return pasync_handle;
}
@@ -1914,6 +1996,13 @@ static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
}
+/**
+ * beiscsi_process_cq()- Process the Completion Queue
+ * @pbe_eq: Event Q on which the Completion has come
+ *
+ * return
+ * Number of Completion Entries processed.
+ **/
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
struct be_queue_info *cq;
@@ -1935,12 +2024,24 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
CQE_VALID_MASK) {
be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
- cid = ((sol->dw[offsetof(struct amap_sol_cqe, cid)/32] &
- CQE_CID_MASK) >> 6);
- code = (sol->dw[offsetof(struct amap_sol_cqe, code)/32] &
- CQE_CODE_MASK);
- ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
+ code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
+ 32] & CQE_CODE_MASK);
+
+ /* Get the CID */
+ if (chip_skh_r(phba->pcidev)) {
+ if ((code == DRIVERMSG_NOTIFY) ||
+ (code == UNSOL_HDR_NOTIFY) ||
+ (code == UNSOL_DATA_NOTIFY))
+ cid = AMAP_GET_BITS(
+ struct amap_i_t_dpdu_cqe_v2,
+ cid, sol);
+ else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
+ cid, sol);
+ } else
+ cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
+ ep = phba->ep_array[cid - phba->fw_config.iscsi_cid_start];
beiscsi_ep = ep->dd_data;
beiscsi_conn = beiscsi_ep->conn;
@@ -1958,7 +2059,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case DRIVERMSG_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received DRIVERMSG_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
dmsg = (struct dmsg_cqe *)sol;
hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
@@ -1966,7 +2068,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_HDR_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Received UNSOL_HDR_ NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1974,7 +2077,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case UNSOL_DATA_NOTIFY:
beiscsi_log(phba, KERN_INFO,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : Received UNSOL_DATA_NOTIFY\n");
+ "BM_%d : Received %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_process_default_pdu_ring(beiscsi_conn, phba,
(struct i_t_dpdu_cqe *)sol);
@@ -1984,8 +2088,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_INVALIDATE_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Ignoring CQ Error notification for"
- " cmd/cxn invalidate\n");
+ "BM_%d : Ignoring %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case SOL_CMD_KILLED_DATA_DIGEST_ERR:
case CMD_KILLED_INVALID_STATSN_RCVD:
@@ -1997,14 +2101,14 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
- "BM_%d : CQ Error notification for cmd.. "
- "code %d cid 0x%x\n", code, cid);
+ "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
+ cqe_desc[code], code, cid);
break;
case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Digest error on def pdu ring,"
- " dropping..\n");
+ "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
+ cqe_desc[code], code, cid);
hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
(struct i_t_dpdu_cqe *) sol);
break;
@@ -2017,6 +2121,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_INVALID_ITT_TTT_RCVD:
case CXN_KILLED_TIMED_OUT:
case CXN_KILLED_FIN_RCVD:
+ case CXN_KILLED_RST_SENT:
+ case CXN_KILLED_RST_RCVD:
case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
case CXN_KILLED_BAD_WRB_INDEX_ERROR:
case CXN_KILLED_OVER_RUN_RESIDUAL:
@@ -2024,19 +2130,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset CID 0x%x...\n",
- code, cid);
- if (beiscsi_conn)
- iscsi_conn_failure(beiscsi_conn->conn,
- ISCSI_ERR_CONN_FAILED);
- break;
- case CXN_KILLED_RST_SENT:
- case CXN_KILLED_RST_RCVD:
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error %d, reset"
- "received/sent on CID 0x%x...\n",
- code, cid);
+ "BM_%d : Event %s[%d] received on CID : %d\n",
+ cqe_desc[code], code, cid);
if (beiscsi_conn)
iscsi_conn_failure(beiscsi_conn->conn,
ISCSI_ERR_CONN_FAILED);
@@ -2044,8 +2139,8 @@ static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
default:
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : CQ Error Invalid code= %d "
- "received on CID 0x%x...\n",
+ "BM_%d : Invalid CQE Event Received Code : %d"
+ "CID 0x%x...\n",
code, cid);
break;
}
@@ -2068,30 +2163,30 @@ void beiscsi_process_all_cqs(struct work_struct *work)
unsigned long flags;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
- struct be_eq_obj *pbe_eq;
- struct beiscsi_hba *phba =
- container_of(work, struct beiscsi_hba, work_cqs);
+ struct beiscsi_hba *phba;
+ struct be_eq_obj *pbe_eq =
+ container_of(work, struct be_eq_obj, work_cqs);
+ phba = pbe_eq->phba;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
- if (phba->msix_enabled)
- pbe_eq = &phwi_context->be_eq[phba->num_cpus];
- else
- pbe_eq = &phwi_context->be_eq[0];
- if (phba->todo_mcc_cq) {
+ if (pbe_eq->todo_mcc_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_mcc_cq = 0;
+ pbe_eq->todo_mcc_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_mcc_isr(phba);
}
- if (phba->todo_cq) {
+ if (pbe_eq->todo_cq) {
spin_lock_irqsave(&phba->isr_lock, flags);
- phba->todo_cq = 0;
+ pbe_eq->todo_cq = false;
spin_unlock_irqrestore(&phba->isr_lock, flags);
beiscsi_process_cq(pbe_eq);
}
+
+ /* rearm EQ for further interrupts */
+ hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
}
static int be_iopoll(struct blk_iopoll *iop, int budget)
@@ -2115,6 +2210,101 @@ static int be_iopoll(struct blk_iopoll *iop, int budget)
}
static void
+hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
+ unsigned int num_sg, struct beiscsi_io_task *io_task)
+{
+ struct iscsi_sge *psgl;
+ unsigned int sg_len, index;
+ unsigned int sge_len = 0;
+ unsigned long long addr;
+ struct scatterlist *l_sg;
+ unsigned int offset;
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
+ io_task->bhs_pa.u.a32.address_lo);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
+ io_task->bhs_pa.u.a32.address_hi);
+
+ l_sg = sg;
+ for (index = 0; (index < num_sg) && (index < 2); index++,
+ sg = sg_next(sg)) {
+ if (index == 0) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge0_len, pwrb,
+ sg_len);
+ sge_len = sg_len;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
+ pwrb, sge_len);
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_lo, pwrb,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_addr_hi, pwrb,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ sge1_len, pwrb,
+ sg_len);
+ }
+ }
+ psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
+ memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
+
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ io_task->bhs_pa.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ io_task->bhs_pa.u.a32.address_lo);
+
+ if (num_sg == 1) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 1);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ } else if (num_sg == 2) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
+ 0);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
+ 0);
+ }
+
+ sg = l_sg;
+ psgl++;
+ psgl++;
+ offset = 0;
+ for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
+ sg_len = sg_dma_len(sg);
+ addr = (u64) sg_dma_address(sg);
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
+ lower_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
+ upper_32_bits(addr));
+ AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
+ AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
+ offset += sg_len;
+ }
+ psgl--;
+ AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
+}
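+
+/*
+ * Usage sketch for hwi_write_sgl_v2() (illustrative; beiscsi_iotask_v2()
+ * below is the real caller, and the mapping step via scsi_dma_map() is
+ * assumed): the first two DMA fragments are carried inline in the WRB as
+ * sge0/sge1, the whole list is mirrored into the SGL table, and last_sge
+ * marks the final fragment.
+ *
+ *	num_sg = scsi_dma_map(sc);
+ *	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
+ */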
+
+static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
unsigned int num_sg, struct beiscsi_io_task *io_task)
{
@@ -2202,13 +2392,18 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
+/**
+ * hwi_write_buffer()- Populate the WRB with task info
+ * @pwrb: ptr to the WRB entry
+ * @task: iscsi task which is to be executed
+ **/
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
struct iscsi_sge *psgl;
- unsigned long long addr;
struct beiscsi_io_task *io_task = task->dd_data;
struct beiscsi_conn *beiscsi_conn = io_task->conn;
struct beiscsi_hba *phba = beiscsi_conn->phba;
+ uint8_t dsp_value = 0;
io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
@@ -2217,26 +2412,38 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
io_task->bhs_pa.u.a32.address_hi);
if (task->data) {
- if (task->data_count) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
- addr = (u64) pci_map_single(phba->pcidev,
- task->data,
- task->data_count, 1);
- } else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
- }
+
+ /* Check for the data_count */
+ dsp_value = (task->data_count) ? 1 : 0;
+
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
+ pwrb, dsp_value);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
+ pwrb, dsp_value);
+
+ /* Map addr only if there is data_count */
+ if (dsp_value) {
+ io_task->mtask_addr = pci_map_single(phba->pcidev,
+ task->data,
+ task->data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_data_count = task->data_count;
+ } else {
+ io_task->mtask_addr = 0;
+ }
+
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
task->data_count);
AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
} else {
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
- addr = 0;
+ io_task->mtask_addr = 0;
}
psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
@@ -2259,9 +2466,9 @@ static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
psgl++;
if (task->data) {
AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
- ((u32)(addr & 0xFFFFFFFF)));
+ lower_32_bits(io_task->mtask_addr));
AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
- ((u32)(addr >> 32)));
+ upper_32_bits(io_task->mtask_addr));
}
AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
}
@@ -2843,7 +3050,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
}
return 0;
create_eq_error:
- for (i = 0; i < (phba->num_cpus + 1); i++) {
+ for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
eq = &phwi_context->be_eq[i].q;
mem = &eq->dma_mem;
if (mem->va)
@@ -3268,15 +3475,31 @@ err:
return -ENOMEM;
}
-static int find_num_cpus(void)
+/**
+ * find_num_cpus()- Get the CPU online count
+ * @phba: ptr to priv structure
+ *
+ * CPU count is used for creating EQ.
+ **/
+static void find_num_cpus(struct beiscsi_hba *phba)
{
int num_cpus = 0;
num_cpus = num_online_cpus();
- if (num_cpus >= MAX_CPUS)
- num_cpus = MAX_CPUS - 1;
- return num_cpus;
+ switch (phba->generation) {
+ case BE_GEN2:
+ case BE_GEN3:
+ phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
+ BEISCSI_MAX_NUM_CPUS : num_cpus;
+ break;
+ case BE_GEN4:
+ phba->num_cpus = (num_cpus > OC_SKH_MAX_NUM_CPUS) ?
+ OC_SKH_MAX_NUM_CPUS : num_cpus;
+ break;
+ default:
+ phba->num_cpus = 1;
+ }
}
static int hwi_init_port(struct beiscsi_hba *phba)
@@ -3644,12 +3867,9 @@ static void hwi_disable_intr(struct beiscsi_hba *phba)
static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
{
struct be_cmd_get_session_resp *session_resp;
- struct be_mcc_wrb *wrb;
struct be_dma_mem nonemb_cmd;
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ unsigned int tag;
unsigned int s_handle;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
int ret = -ENOMEM;
/* Get the session handle of the boot target */
@@ -3682,25 +3902,16 @@ static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
" Failed\n");
goto boot_freemem;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ ret = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd.va);
+ if (ret) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BM_%d : beiscsi_get_session_info Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
-
- free_mcc_tag(&phba->ctrl, tag);
+ "BM_%d : beiscsi_get_session_info Failed");
goto boot_freemem;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
session_resp = nonemb_cmd.va ;
memcpy(&phba->boot_sess, &session_resp->session_info,
@@ -3853,6 +4064,11 @@ static void beiscsi_clean_port(struct beiscsi_hba *phba)
kfree(phba->ep_array);
}
+/**
+ * beiscsi_cleanup_task()- Free driver resources of the task
+ * @task: ptr to the iscsi task
+ *
+ **/
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
@@ -3900,6 +4116,13 @@ static void beiscsi_cleanup_task(struct iscsi_task *task)
spin_unlock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = NULL;
}
+ if (io_task->mtask_addr) {
+ pci_unmap_single(phba->pcidev,
+ io_task->mtask_addr,
+ io_task->mtask_data_count,
+ PCI_DMA_TODEVICE);
+ io_task->mtask_addr = 0;
+ }
}
}
}
@@ -3909,8 +4132,6 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
struct beiscsi_offload_params *params)
{
struct wrb_handle *pwrb_handle;
- struct iscsi_target_context_update_wrb *pwrb = NULL;
- struct be_mem_descriptor *mem_descr;
struct beiscsi_hba *phba = beiscsi_conn->phba;
struct iscsi_task *task = beiscsi_conn->task;
struct iscsi_session *session = task->conn->session;
@@ -3927,67 +4148,16 @@ beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start));
- pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
- memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_burst_length, pwrb, params->dw[offsetof
- (struct amap_beiscsi_offload_params,
- max_burst_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- max_send_data_segment_length, pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- max_send_data_segment_length) / 32]);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- first_burst_length,
- pwrb,
- params->dw[offsetof(struct amap_beiscsi_offload_params,
- first_burst_length) / 32]);
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- erl) / 32] & OFFLD_PARAMS_ERL));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
- pwrb,
- (params->dw[offsetof(struct amap_beiscsi_offload_params,
- exp_statsn) / 32] + 1));
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
- 0x7);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
- pwrb, pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
- pwrb, pwrb_handle->nxt_wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- session_state, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
- pwrb, 1);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
- 0);
- mem_descr = phba->init_mem;
- mem_descr += ISCSI_MEM_GLOBAL_HEADER;
-
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_hi, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_hi);
- AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
- pad_buffer_addr_lo, pwrb,
- mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+ /* Check for the adapter family */
+ if (chip_skh_r(phba->pcidev))
+ beiscsi_offload_cxn_v2(params, pwrb_handle);
+ else
+ beiscsi_offload_cxn_v0(params, pwrb_handle,
+ phba->init_mem);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
+ be_dws_le_to_cpu(pwrb_handle->pwrb,
+ sizeof(struct iscsi_target_context_update_wrb));
doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
@@ -4044,13 +4214,25 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->io_sgl_lock);
io_task->psgl_handle = alloc_io_sgl_handle(phba);
spin_unlock(&phba->io_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of IO_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle = alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_io_hndls;
+ }
} else {
io_task->scsi_cmnd = NULL;
if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
@@ -4059,8 +4241,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
io_task->psgl_handle = (struct sgl_handle *)
alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
beiscsi_conn->login_in_progress = 1;
beiscsi_conn->plogin_sgl_handle =
@@ -4069,8 +4259,16 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
- goto free_io_hndls;
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
+ goto free_mgmt_hndls;
+ }
beiscsi_conn->plogin_wrb_handle =
io_task->pwrb_handle;
@@ -4085,14 +4283,28 @@ static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
spin_lock(&phba->mgmt_sgl_lock);
io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
spin_unlock(&phba->mgmt_sgl_lock);
- if (!io_task->psgl_handle)
+ if (!io_task->psgl_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO |
+ BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of MGMT_SGL_ICD Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->
+ beiscsi_conn_cid);
goto free_hndls;
+ }
io_task->pwrb_handle =
alloc_wrb_handle(phba,
beiscsi_conn->beiscsi_conn_cid -
phba->fw_config.iscsi_cid_start);
- if (!io_task->pwrb_handle)
+ if (!io_task->pwrb_handle) {
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
+ "BM_%d : Alloc of WRB_HANDLE Failed"
+ "for the CID : %d\n",
+ beiscsi_conn->beiscsi_conn_cid);
goto free_mgmt_hndls;
+ }
}
}
@@ -4124,11 +4336,64 @@ free_hndls:
pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
io_task->bhs_pa.u.a64.address);
io_task->cmd_bhs = NULL;
- beiscsi_log(phba, KERN_ERR,
- BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
- "BM_%d : Alloc of SGL_ICD Failed\n");
return -ENOMEM;
}
+int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
+ unsigned int num_sg, unsigned int xferlen,
+ unsigned int writedir)
+{
+
+ struct beiscsi_io_task *io_task = task->dd_data;
+ struct iscsi_conn *conn = task->conn;
+ struct beiscsi_conn *beiscsi_conn = conn->dd_data;
+ struct beiscsi_hba *phba = beiscsi_conn->phba;
+ struct iscsi_wrb *pwrb = NULL;
+ unsigned int doorbell = 0;
+
+ pwrb = io_task->pwrb_handle->pwrb;
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
+ io_task->bhs_len = sizeof(struct be_cmd_bhs);
+
+ if (writedir) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_WR_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
+ INI_RD_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
+ }
+
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
+ type, pwrb);
+
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
+ cpu_to_be16(*(unsigned short *)
+ &io_task->cmd_bhs->iscsi_hdr.lun));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+
+ hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+
+ be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+
+ doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
+ doorbell |= (io_task->pwrb_handle->wrb_index &
+ DB_DEF_PDU_WRB_INDEX_MASK) <<
+ DB_DEF_PDU_WRB_INDEX_SHIFT;
+ doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
+ iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
+ return 0;
+}
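+
+/*
+ * Reading aid for the doorbell write above (no new behaviour implied):
+ * the CID occupies the low bits, the WRB index goes into the DEF_PDU
+ * index field, and one posted WRB is signalled via the TXULP0 doorbell.
+ *
+ *	doorbell = (cid & DB_WRB_POST_CID_MASK) |
+ *		   ((wrb_index & DB_DEF_PDU_WRB_INDEX_MASK) <<
+ *		    DB_DEF_PDU_WRB_INDEX_SHIFT) |
+ *		   (1 << DB_DEF_PDU_NUM_POSTED_SHIFT);
+ */
+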
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
unsigned int num_sg, unsigned int xferlen,
@@ -4156,6 +4421,9 @@ static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
}
+ io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
+ type, pwrb);
+
AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
cpu_to_be16(*(unsigned short *)
&io_task->cmd_bhs->iscsi_hdr.lun));
@@ -4191,55 +4459,75 @@ static int beiscsi_mtask(struct iscsi_task *task)
struct iscsi_wrb *pwrb = NULL;
unsigned int doorbell = 0;
unsigned int cid;
+ unsigned int pwrb_typeoffset = 0;
cid = beiscsi_conn->beiscsi_conn_cid;
pwrb = io_task->pwrb_handle->pwrb;
memset(pwrb, 0, sizeof(*pwrb));
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
- be32_to_cpu(task->cmdsn));
- AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
- io_task->pwrb_handle->wrb_index);
- AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
- io_task->psgl_handle->sgl_index);
+
+ if (chip_skh_r(phba->pcidev)) {
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
+ } else {
+ AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
+ be32_to_cpu(task->cmdsn));
+ AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
+ io_task->pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
+ io_task->psgl_handle->sgl_index);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
+ task->data_count);
+ AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
+ io_task->pwrb_handle->nxt_wrb_index);
+ pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
+ }
+
switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
case ISCSI_OP_LOGIN:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_NOOP_OUT:
if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
- pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 1);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 1);
} else {
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_RD_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
+ if (chip_skh_r(phba->pcidev))
+ AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
+ dmsg, pwrb, 0);
+ else
+ AMAP_SET_BITS(struct amap_iscsi_wrb,
+ dmsg, pwrb, 0);
}
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_TEXT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- TGT_DM_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_SCSI_TMFUNC:
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- INI_TMF_CMD);
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
+ ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
case ISCSI_OP_LOGOUT:
- AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
- AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
- HWH_TYPE_LOGOUT);
+ ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
hwi_write_buffer(pwrb, task);
break;
@@ -4251,11 +4539,10 @@ static int beiscsi_mtask(struct iscsi_task *task)
return -EINVAL;
}
- AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
- task->data_count);
- AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
- io_task->pwrb_handle->nxt_wrb_index);
- be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
+ /* Set the task type */
+ io_task->wrb_type = (chip_skh_r(phba->pcidev)) ?
+ AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb) :
+ AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb);
doorbell |= cid & DB_WRB_POST_CID_MASK;
doorbell |= (io_task->pwrb_handle->wrb_index &
@@ -4269,10 +4556,13 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
{
struct beiscsi_io_task *io_task = task->dd_data;
struct scsi_cmnd *sc = task->sc;
+ struct beiscsi_hba *phba = NULL;
struct scatterlist *sg;
int num_sg;
unsigned int writedir = 0, xferlen = 0;
+ phba = ((struct beiscsi_conn *)task->conn->dd_data)->phba;
+
if (!sc)
return beiscsi_mtask(task);
@@ -4295,7 +4585,7 @@ static int beiscsi_task_xmit(struct iscsi_task *task)
else
writedir = 0;
- return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
+ return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
}
/**
@@ -4326,20 +4616,24 @@ static int beiscsi_bsg_request(struct bsg_job *job)
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BM_%d : Failed to allocate memory for "
"beiscsi_bsg_request\n");
- return -EIO;
+ return -ENOMEM;
}
tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
&nonemb_cmd);
if (!tag) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed\n");
+ "BM_%d : MBX Tag Allocation Failed\n");
pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
nonemb_cmd.va, nonemb_cmd.dma);
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
+ }
+
+ rc = wait_event_interruptible_timeout(
+ phba->ctrl.mcc_wait[tag],
+ phba->ctrl.mcc_numtag[tag],
+ msecs_to_jiffies(
+ BEISCSI_HOST_MBX_TIMEOUT));
extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
free_mcc_tag(&phba->ctrl, tag);
@@ -4356,11 +4650,13 @@ static int beiscsi_bsg_request(struct bsg_job *job)
nonemb_cmd.va, nonemb_cmd.dma);
if (status || extd_status) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
- "BM_%d : be_cmd_get_mac_addr Failed"
+ "BM_%d : MBX Cmd Failed"
" status = %d extd_status = %d\n",
status, extd_status);
return -EIO;
+ } else {
+ rc = 0;
}
break;
@@ -4380,14 +4676,18 @@ void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
beiscsi_log_enable_init(phba, beiscsi_log_enable);
}
+/**
+ * beiscsi_quiesce()- Cleanup Driver resources
+ * @phba: Instance Priv structure
+ *
+ * Free the OS and HW resources held by the driver
+ **/
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
unsigned int i, msix_vec;
- u8 *real_offset = 0;
- u32 value = 0;
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
@@ -4411,19 +4711,14 @@ static void beiscsi_quiesce(struct beiscsi_hba *phba)
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
beiscsi_unmap_pci_function(phba);
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
phba->ctrl.mbox_mem_alloced.dma);
+
+ cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
}
static void beiscsi_remove(struct pci_dev *pcidev)
@@ -4476,16 +4771,33 @@ static void beiscsi_msix_enable(struct beiscsi_hba *phba)
return;
}
-static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
- const struct pci_device_id *id)
+/**
+ * beiscsi_hw_health_check()- Check adapter health
+ * @work: work item to check HW health
+ *
+ * Check if adapter in an unrecoverable state or not.
+ **/
+static void
+beiscsi_hw_health_check(struct work_struct *work)
+{
+ struct beiscsi_hba *phba =
+ container_of(work, struct beiscsi_hba,
+ beiscsi_hw_check_task.work);
+
+ beiscsi_ue_detect(phba);
+
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+}
+
+static int beiscsi_dev_probe(struct pci_dev *pcidev,
+ const struct pci_device_id *id)
{
struct beiscsi_hba *phba = NULL;
struct hwi_controller *phwi_ctrlr;
struct hwi_context_memory *phwi_context;
struct be_eq_obj *pbe_eq;
- int ret, num_cpus, i;
- u8 *real_offset = 0;
- u32 value = 0;
+ int ret, i;
ret = beiscsi_enable_pci(pcidev);
if (ret < 0) {
@@ -4504,25 +4816,33 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
/* Initialize Driver configuration Paramters */
beiscsi_hba_attrs_init(phba);
+ phba->fw_timeout = false;
+
switch (pcidev->device) {
case BE_DEVICE_ID1:
case OC_DEVICE_ID1:
case OC_DEVICE_ID2:
phba->generation = BE_GEN2;
+ phba->iotask_fn = beiscsi_iotask;
break;
case BE_DEVICE_ID2:
case OC_DEVICE_ID3:
phba->generation = BE_GEN3;
+ phba->iotask_fn = beiscsi_iotask;
break;
+ case OC_SKH_ID1:
+ phba->generation = BE_GEN4;
+ phba->iotask_fn = beiscsi_iotask_v2;
+ break;
default:
phba->generation = 0;
}
if (enable_msix)
- num_cpus = find_num_cpus();
+ find_num_cpus(phba);
else
- num_cpus = 1;
- phba->num_cpus = num_cpus;
+ phba->num_cpus = 1;
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"BM_%d : num_cpus = %d\n",
phba->num_cpus);
@@ -4540,31 +4860,18 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto hba_free;
}
- if (!num_hba) {
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
- value = readl((void *)real_offset);
- if (value & 0x00010000) {
- gcrashmode++;
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Loading Driver in crashdump mode\n");
- ret = beiscsi_cmd_reset_function(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Reset Failed. Aborting Crashdump\n");
- goto hba_free;
- }
- ret = be_chk_reset_complete(phba);
- if (ret) {
- beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
- "BM_%d : Failed to get out of reset."
- "Aborting Crashdump\n");
- goto hba_free;
- }
- } else {
- value |= 0x00010000;
- writel(value, (void *)real_offset);
- num_hba++;
- }
+ ret = beiscsi_cmd_reset_function(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Reset Failed. Aborting Crashdump\n");
+ goto hba_free;
+ }
+ ret = be_chk_reset_complete(phba);
+ if (ret) {
+ beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+ "BM_%d : Failed to get out of reset."
+ "Aborting Crashdump\n");
+ goto hba_free;
}
spin_lock_init(&phba->io_sgl_lock);
@@ -4596,7 +4903,7 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
- snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
+ snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_%02x_wq",
phba->shost->host_no);
phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
if (!phba->wq) {
@@ -4606,10 +4913,12 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
goto free_twq;
}
- INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
+ INIT_DELAYED_WORK(&phba->beiscsi_hw_check_task,
+ beiscsi_hw_health_check);
phwi_ctrlr = phba->phwi_ctrlr;
phwi_context = phwi_ctrlr->phwi_ctxt;
+
if (blk_iopoll_enabled) {
for (i = 0; i < phba->num_cpus; i++) {
pbe_eq = &phwi_context->be_eq[i];
@@ -4617,7 +4926,25 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
be_iopoll);
blk_iopoll_enable(&pbe_eq->iopoll);
}
+
+ i = (phba->msix_enabled) ? i : 0;
+ /* Work item for MCC handling */
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs, beiscsi_process_all_cqs);
+ } else {
+ if (phba->msix_enabled) {
+ for (i = 0; i <= phba->num_cpus; i++) {
+ pbe_eq = &phwi_context->be_eq[i];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
+ } else {
+ pbe_eq = &phwi_context->be_eq[0];
+ INIT_WORK(&pbe_eq->work_cqs,
+ beiscsi_process_all_cqs);
+ }
}
+
ret = beiscsi_init_irqs(phba);
if (ret < 0) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
@@ -4637,6 +4964,9 @@ static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
"iSCSI boot info.\n");
beiscsi_create_def_ifaces(phba);
+ schedule_delayed_work(&phba->beiscsi_hw_check_task,
+ msecs_to_jiffies(1000));
+
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
"\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
return 0;
@@ -4652,15 +4982,6 @@ free_twq:
beiscsi_clean_port(phba);
beiscsi_free_mem(phba);
free_port:
- real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
-
- value = readl((void *)real_offset);
-
- if (value & 0x00010000) {
- value &= 0xfffeffff;
- writel(value, (void *)real_offset);
- }
-
pci_free_consistent(phba->pcidev,
phba->ctrl.mbox_mem_alloced.size,
phba->ctrl.mbox_mem_alloced.va,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index b8912263ef4e..5946577d79d6 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -36,12 +36,13 @@
#include "be.h"
#define DRV_NAME "be2iscsi"
-#define BUILD_STR "4.4.58.0"
+#define BUILD_STR "10.0.272.0"
#define BE_NAME "Emulex OneConnect" \
"Open-iSCSI Driver version" BUILD_STR
#define DRV_DESC BE_NAME " " "Driver"
#define BE_VENDOR_ID 0x19A2
+#define ELX_VENDOR_ID 0x10DF
/* DEVICE ID's for BE2 */
#define BE_DEVICE_ID1 0x212
#define OC_DEVICE_ID1 0x702
@@ -51,6 +52,9 @@
#define BE_DEVICE_ID2 0x222
#define OC_DEVICE_ID3 0x712
+/* DEVICE ID for SKH */
+#define OC_SKH_ID1 0x722
+
#define BE2_IO_DEPTH 1024
#define BE2_MAX_SESSIONS 256
#define BE2_CMDS_PER_CXN 128
@@ -60,7 +64,11 @@
#define BE2_DEFPDU_HDR_SZ 64
#define BE2_DEFPDU_DATA_SZ 8192
-#define MAX_CPUS 31
+#define MAX_CPUS 64
+#define BEISCSI_MAX_NUM_CPUS 7
+#define OC_SKH_MAX_NUM_CPUS 63
+
#define BEISCSI_SGLIST_ELEMENTS 30
#define BEISCSI_CMD_PER_LUN 128 /* scsi_host->cmd_per_lun */
@@ -257,6 +265,7 @@ struct invalidate_command_table {
unsigned short cid;
} __packed;
+#define chip_skh_r(pdev) ((pdev)->device == OC_SKH_ID1)
struct beiscsi_hba {
struct hba_parameters params;
struct hwi_controller *phwi_ctrlr;
@@ -270,12 +279,11 @@ struct beiscsi_hba {
struct be_bus_address pci_pa; /* CSR */
/* PCI representation of our HBA */
struct pci_dev *pcidev;
- unsigned int state;
unsigned short asic_revision;
unsigned int num_cpus;
unsigned int nxt_cqid;
- struct msix_entry msix_entries[MAX_CPUS + 1];
- char *msi_name[MAX_CPUS + 1];
+ struct msix_entry msix_entries[MAX_CPUS];
+ char *msi_name[MAX_CPUS];
bool msix_enabled;
struct be_mem_descriptor *init_mem;
@@ -325,12 +333,14 @@ struct beiscsi_hba {
spinlock_t cid_lock;
} fw_config;
+ unsigned int state;
+ bool fw_timeout;
+ bool ue_detected;
+ struct delayed_work beiscsi_hw_check_task;
+
u8 mac_address[ETH_ALEN];
- unsigned short todo_cq;
- unsigned short todo_mcc_cq;
char wq_name[20];
struct workqueue_struct *wq; /* The actual work queue */
- struct work_struct work_cqs; /* The work being queued */
struct be_ctrl_info ctrl;
unsigned int generation;
unsigned int interface_handle;
@@ -338,7 +348,10 @@ struct beiscsi_hba {
struct invalidate_command_table inv_tbl[128];
unsigned int attr_log_enable;
-
+ int (*iotask_fn)(struct iscsi_task *,
+ struct scatterlist *sg,
+ uint32_t num_sg, uint32_t xferlen,
+ uint32_t writedir);
};
struct beiscsi_session {
@@ -410,6 +423,9 @@ struct beiscsi_io_task {
struct be_cmd_bhs *cmd_bhs;
struct be_bus_address bhs_pa;
unsigned short bhs_len;
+ dma_addr_t mtask_addr;
+ uint32_t mtask_data_count;
+ uint8_t wrb_type;
};
struct be_nonio_bhs {
@@ -457,6 +473,9 @@ struct beiscsi_offload_params {
#define OFFLD_PARAMS_HDE 0x00000008
#define OFFLD_PARAMS_IR2T 0x00000010
#define OFFLD_PARAMS_IMD 0x00000020
+#define OFFLD_PARAMS_DATA_SEQ_INORDER 0x00000040
+#define OFFLD_PARAMS_PDU_SEQ_INORDER 0x00000080
+#define OFFLD_PARAMS_MAX_R2T 0x00FFFF00
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -471,7 +490,10 @@ struct amap_beiscsi_offload_params {
u8 hde[1];
u8 ir2t[1];
u8 imd[1];
- u8 pad[26];
+ u8 data_seq_inorder[1];
+ u8 pdu_seq_inorder[1];
+ u8 max_r2t[16];
+ u8 pad[8];
u8 exp_statsn[32];
};
@@ -569,6 +591,20 @@ struct amap_i_t_dpdu_cqe {
u8 valid;
} __packed;
+struct amap_i_t_dpdu_cqe_v2 {
+ u8 db_addr_hi[32]; /* DWORD 0 */
+ u8 db_addr_lo[32]; /* DWORD 1 */
+ u8 code[6]; /* DWORD 2 */
+ u8 num_cons; /* DWORD 2 */
+ u8 rsvd0[8]; /* DWORD 2 */
+ u8 dpl[17]; /* DWORD 2 */
+ u8 index[16]; /* DWORD 3 */
+ u8 cid[13]; /* DWORD 3 */
+ u8 rsvd1; /* DWORD 3 */
+ u8 final; /* DWORD 3 */
+ u8 valid; /* DWORD 3 */
+} __packed;
+
#define CQE_VALID_MASK 0x80000000
#define CQE_CODE_MASK 0x0000003F
#define CQE_CID_MASK 0x0000FFC0
@@ -617,6 +653,11 @@ struct iscsi_wrb {
} __packed;
#define WRB_TYPE_MASK 0xF0000000
+#define SKH_WRB_TYPE_OFFSET 27
+#define BE_WRB_TYPE_OFFSET 28
+
+#define ADAPTER_SET_WRB_TYPE(pwrb, wrb_type, type_offset) \
+ ((pwrb)->dw[0] |= ((wrb_type) << (type_offset)))
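+
+/*
+ * Illustrative use (see beiscsi_mtask() in be_main.c): the offset picks
+ * the per-family bit position of the WRB 'type' field in DWORD 0.
+ *
+ *	pwrb_typeoffset = chip_skh_r(phba->pcidev) ?
+ *			SKH_WRB_TYPE_OFFSET : BE_WRB_TYPE_OFFSET;
+ *	ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
+ */
+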
/**
* Pseudo amap definition in which each bit of the actual structure is defined
@@ -663,12 +704,57 @@ struct amap_iscsi_wrb {
} __packed;
+struct amap_iscsi_wrb_v2 {
+ u8 r2t_exp_dtl[25]; /* DWORD 0 */
+ u8 rsvd0[2]; /* DWORD 0*/
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 lun[16]; /* DWORD 1 */
+ u8 sgl_idx[16]; /* DWORD 2 */
+ u8 ref_sgl_icd_idx[16]; /* DWORD 2 */
+ u8 exp_data_sn[32]; /* DWORD 3 */
+ u8 iscsi_bhs_addr_hi[32]; /* DWORD 4 */
+ u8 iscsi_bhs_addr_lo[32]; /* DWORD 5 */
+ u8 cq_id[16]; /* DWORD 6 */
+ u8 rsvd1[16]; /* DWORD 6 */
+ u8 cmdsn_itt[32]; /* DWORD 7 */
+ u8 sge0_addr_hi[32]; /* DWORD 8 */
+ u8 sge0_addr_lo[32]; /* DWORD 9 */
+ u8 sge0_offset[24]; /* DWORD 10 */
+ u8 rsvd2[7]; /* DWORD 10 */
+ u8 sge0_last; /* DWORD 10 */
+ u8 sge0_len[17]; /* DWORD 11 */
+ u8 rsvd3[7]; /* DWORD 11 */
+ u8 diff_enbl; /* DWORD 11 */
+ u8 u_run; /* DWORD 11 */
+ u8 o_run; /* DWORD 11 */
+ u8 invalid; /* DWORD 11 */
+ u8 dsp; /* DWORD 11 */
+ u8 dmsg; /* DWORD 11 */
+ u8 rsvd4; /* DWORD 11 */
+ u8 lt; /* DWORD 11 */
+ u8 sge1_addr_hi[32]; /* DWORD 12 */
+ u8 sge1_addr_lo[32]; /* DWORD 13 */
+ u8 sge1_r2t_offset[24]; /* DWORD 14 */
+ u8 rsvd5[7]; /* DWORD 14 */
+ u8 sge1_last; /* DWORD 14 */
+ u8 sge1_len[17]; /* DWORD 15 */
+ u8 rsvd6[15]; /* DWORD 15 */
+} __packed;
+
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid);
void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle);
void beiscsi_process_all_cqs(struct work_struct *work);
+static inline bool beiscsi_error(struct beiscsi_hba *phba)
+{
+ return phba->ue_detected || phba->fw_timeout;
+}
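+
+/*
+ * Typical guard (a sketch of an assumed call site, not part of this
+ * patch): mailbox paths can bail out early once a UE or an FW timeout
+ * has been latched.
+ *
+ *	if (beiscsi_error(phba))
+ *		return -EIO;
+ */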
+
struct pdu_nop_out {
u32 dw[12];
};
@@ -728,6 +814,7 @@ struct iscsi_target_context_update_wrb {
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
*/
+#define BE_TGT_CTX_UPDT_CMD 0x07
struct amap_iscsi_target_context_update_wrb {
u8 lun[14]; /* DWORD 0 */
u8 lt; /* DWORD 0 */
@@ -773,6 +860,47 @@ struct amap_iscsi_target_context_update_wrb {
} __packed;
+#define BEISCSI_MAX_RECV_DATASEG_LEN (64 * 1024)
+#define BEISCSI_MAX_CXNS 1
+struct amap_iscsi_target_context_update_wrb_v2 {
+ u8 max_burst_length[24]; /* DWORD 0 */
+ u8 rsvd0[3]; /* DWORD 0 */
+ u8 type[5]; /* DWORD 0 */
+ u8 ptr2nextwrb[8]; /* DWORD 1 */
+ u8 wrb_idx[8]; /* DWORD 1 */
+ u8 rsvd1[16]; /* DWORD 1 */
+ u8 max_send_data_segment_length[24]; /* DWORD 2 */
+ u8 rsvd2[8]; /* DWORD 2 */
+ u8 first_burst_length[24]; /* DWORD 3 */
+ u8 rsvd3[8]; /* DWORD 3 */
+ u8 max_r2t[16]; /* DWORD 4 */
+ u8 rsvd4[10]; /* DWORD 4 */
+ u8 hde; /* DWORD 4 */
+ u8 dde; /* DWORD 4 */
+ u8 erl[2]; /* DWORD 4 */
+ u8 imd; /* DWORD 4 */
+ u8 ir2t; /* DWORD 4 */
+ u8 stat_sn[32]; /* DWORD 5 */
+ u8 rsvd5[32]; /* DWORD 6 */
+ u8 rsvd6[32]; /* DWORD 7 */
+ u8 max_recv_dataseg_len[24]; /* DWORD 8 */
+ u8 rsvd7[8]; /* DWORD 8 */
+ u8 rsvd8[32]; /* DWORD 9 */
+ u8 rsvd9[32]; /* DWORD 10 */
+ u8 max_cxns[16]; /* DWORD 11 */
+ u8 rsvd10[11]; /* DWORD 11 */
+ u8 invld; /* DWORD 11 */
+ u8 rsvd11; /* DWORD 11 */
+ u8 dmsg; /* DWORD 11 */
+ u8 data_seq_inorder; /* DWORD 11 */
+ u8 pdu_seq_inorder; /* DWORD 11 */
+ u8 rsvd12[32]; /* DWORD 12 */
+ u8 rsvd13[32]; /* DWORD 13 */
+ u8 rsvd14[32]; /* DWORD 14 */
+ u8 rsvd15[32]; /* DWORD 15 */
+} __packed;
+
struct be_ring {
u32 pages; /* queue size in pages */
u32 id; /* queue id assigned by beklib */
@@ -837,7 +965,7 @@ struct hwi_context_memory {
u16 max_eqd; /* in usecs */
u16 cur_eqd; /* in usecs */
struct be_eq_obj be_eq[MAX_CPUS];
- struct be_queue_info be_cq[MAX_CPUS];
+ struct be_queue_info be_cq[MAX_CPUS - 1];
struct be_queue_info be_def_hdrq;
struct be_queue_info be_def_dataq;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index aab5dd359e2c..a6c2fe4b4d65 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -22,6 +22,138 @@
#include <scsi/scsi_bsg_iscsi.h>
#include "be_mgmt.h"
#include "be_iscsi.h"
+#include "be_main.h"
+
+/* UE Status Low CSR */
+static const char * const desc_ue_status_low[] = {
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
+};
+
+/* UE Status High CSR */
+static const char * const desc_ue_status_hi[] = {
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+/**
+ * beiscsi_ue_detect()- Detect Unrecoverable Error on adapter
+ * @phba: Driver priv structure
+ *
+ * Read registers linked to UE and check for the UE status
+ **/
+void beiscsi_ue_detect(struct beiscsi_hba *phba)
+{
+ uint32_t ue_hi = 0, ue_lo = 0;
+ uint32_t ue_mask_hi = 0, ue_mask_lo = 0;
+ uint8_t i = 0;
+
+ if (phba->ue_detected)
+ return;
+
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_LOW, &ue_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_LOW,
+ &ue_mask_lo);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_HIGH,
+ &ue_hi);
+ pci_read_config_dword(phba->pcidev,
+ PCICFG_UE_STATUS_MASK_HI,
+ &ue_mask_hi);
+
+ ue_lo = (ue_lo & ~ue_mask_lo);
+ ue_hi = (ue_hi & ~ue_mask_hi);
+
+ if (ue_lo || ue_hi) {
+ phba->ue_detected = true;
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
+ "BG_%d : Error detected on the adapter\n");
+ }
+
+ if (ue_lo) {
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_LOW %s bit set\n",
+ desc_ue_status_low[i]);
+ }
+ }
+
+ if (ue_hi) {
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ beiscsi_log(phba, KERN_ERR,
+ BEISCSI_LOG_CONFIG,
+ "BG_%d : UE_HIGH %s bit set\n",
+ desc_ue_status_hi[i]);
+ }
+ }
+}
/**
* mgmt_reopen_session()- Reopen a session based on reopen_type
@@ -575,13 +707,20 @@ unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba)
return status;
}
+/**
+ * mgmt_exec_nonemb_cmd()- Execute Non Embedded MBX Cmd
+ * @phba: Driver priv structure
+ * @nonemb_cmd: Address of the MBX command issued
+ * @resp_buf: Buffer to copy the MBX cmd response
+ * @resp_buf_len: response length to be copied
+ *
+ **/
static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
struct be_dma_mem *nonemb_cmd, void *resp_buf,
int resp_buf_len)
{
struct be_ctrl_info *ctrl = &phba->ctrl;
struct be_mcc_wrb *wrb = wrb_from_mccq(phba);
- unsigned short status, extd_status;
struct be_sge *sge;
unsigned int tag;
int rc = 0;
@@ -599,31 +738,25 @@ static int mgmt_exec_nonemb_cmd(struct beiscsi_hba *phba,
be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1);
sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
- sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
+ sge->pa_lo = cpu_to_le32(lower_32_bits(nonemb_cmd->dma));
sge->len = cpu_to_le32(nonemb_cmd->size);
be_mcc_notify(phba);
spin_unlock(&ctrl->mbox_lock);
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, NULL, nonemb_cmd->va);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
- "BG_%d : mgmt_exec_nonemb_cmd Failed status = %d"
- "extd_status = %d\n", status, extd_status);
+ "BG_%d : mgmt_exec_nonemb_cmd Failed status\n");
+
rc = -EIO;
- goto free_tag;
+ goto free_cmd;
}
if (resp_buf)
memcpy(resp_buf, nonemb_cmd->va, resp_buf_len);
-free_tag:
- free_mcc_tag(&phba->ctrl, tag);
free_cmd:
pci_free_consistent(ctrl->pdev, nonemb_cmd->size,
nonemb_cmd->va, nonemb_cmd->dma);
@@ -1009,10 +1142,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
{
struct be_cmd_get_boot_target_resp *boot_resp;
struct be_mcc_wrb *wrb;
- unsigned int tag, wrb_num;
+ unsigned int tag;
uint8_t boot_retry = 3;
- unsigned short status, extd_status;
- struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
+ int rc;
do {
/* Get the Boot Target Session Handle and Count*/
@@ -1022,24 +1154,16 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
"BG_%d : Getting Boot Target Info Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_get_boot_target Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
+ "BG_%d : MBX CMD get_boot_target Failed\n");
return -EBUSY;
}
- wrb = queue_get_wrb(mccq, wrb_num);
- free_mcc_tag(&phba->ctrl, tag);
+
boot_resp = embedded_payload(wrb);
/* Check if the there are any Boot targets configured */
@@ -1064,24 +1188,15 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
"BG_%d : mgmt_reopen_session Failed\n");
return -EAGAIN;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
- if (status || extd_status) {
+ }
+
+ rc = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
BEISCSI_LOG_INIT | BEISCSI_LOG_CONFIG,
- "BG_%d : mgmt_reopen_session Failed"
- " status = %d extd_status = %d\n",
- status, extd_status);
- free_mcc_tag(&phba->ctrl, tag);
- return -EBUSY;
+ "BG_%d : mgmt_reopen_session Failed");
+ return rc;
}
- free_mcc_tag(&phba->ctrl, tag);
-
} while (--boot_retry);
/* Couldn't log into the boot target */
@@ -1106,8 +1221,9 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
int mgmt_set_vlan(struct beiscsi_hba *phba,
uint16_t vlan_tag)
{
- unsigned int tag, wrb_num;
- unsigned short status, extd_status;
+ int rc;
+ unsigned int tag;
+ struct be_mcc_wrb *wrb = NULL;
tag = be_cmd_set_vlan(phba, vlan_tag);
if (!tag) {
@@ -1115,24 +1231,208 @@ int mgmt_set_vlan(struct beiscsi_hba *phba,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
"BG_%d : VLAN Setting Failed\n");
return -EBUSY;
- } else
- wait_event_interruptible(phba->ctrl.mcc_wait[tag],
- phba->ctrl.mcc_numtag[tag]);
-
- wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
- extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
- status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
+ }
- if (status || extd_status) {
+ rc = beiscsi_mccq_compl(phba, tag, &wrb, NULL);
+ if (rc) {
beiscsi_log(phba, KERN_ERR,
(BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX),
- "BS_%d : status : %d extd_status : %d\n",
- status, extd_status);
+ "BS_%d : VLAN MBX Cmd Failed\n");
+ return rc;
+ }
+ return rc;
+}
- free_mcc_tag(&phba->ctrl, tag);
- return -EAGAIN;
+/**
+ * beiscsi_drvr_ver_disp()- Display the driver Name and Version
+ * @dev: ptr to device not used.
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text driver name and version
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_drvr_ver_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, BE_NAME "\n");
+}
+
+/**
+ * beiscsi_adap_family_disp()- Display adapter family.
+ * @dev: ptr to device to get priv structure
+ * @attr: device attribute, not used.
+ * @buf: contains formatted text of the adapter family name
+ *
+ * return
+ * size of the formatted string
+ **/
+ssize_t
+beiscsi_adap_family_disp(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ uint16_t dev_id = 0;
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct beiscsi_hba *phba = iscsi_host_priv(shost);
+
+ dev_id = phba->pcidev->device;
+ switch (dev_id) {
+ case BE_DEVICE_ID1:
+ case OC_DEVICE_ID1:
+ case OC_DEVICE_ID2:
+ return snprintf(buf, PAGE_SIZE, "BE2 Adapter Family\n");
+ break;
+ case BE_DEVICE_ID2:
+ case OC_DEVICE_ID3:
+ return snprintf(buf, PAGE_SIZE, "BE3-R Adapter Family\n");
+ break;
+ case OC_SKH_ID1:
+ return snprintf(buf, PAGE_SIZE, "Skyhawk-R Adapter Family\n");
+ break;
+ default:
+ return snprintf(buf, PAGE_SIZE,
+ "Unkown Adapter Family: 0x%x\n", dev_id);
+ break;
}
+}
- free_mcc_tag(&phba->ctrl, tag);
- return 0;
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ first_burst_length,
+ pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ session_state, pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
+ pwrb, 1);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
+ pwrb, 0);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
+ 0);
+
+ mem_descr += ISCSI_MEM_GLOBAL_HEADER;
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_hi, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_hi);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
+ pad_buffer_addr_lo, pwrb,
+ mem_descr->mem_array[0].bus_address.u.a32.address_lo);
+}
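+
+/*
+ * Note on the dw[] indexing in these offload helpers: the pseudo amap
+ * structs declare each bit of the hardware layout as one byte, so
+ * offsetof(...) / 32 yields the DWORD index of a field in params->dw[].
+ * For example (illustrative only):
+ *
+ *	u32 dword = params->dw[offsetof(struct amap_beiscsi_offload_params,
+ *				max_r2t) / 32];
+ *	u32 max_r2t = (dword & OFFLD_PARAMS_MAX_R2T) >> 8;
+ */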
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle)
+{
+ struct iscsi_wrb *pwrb = pwrb_handle->pwrb;
+
+ memset(pwrb, 0, sizeof(*pwrb));
+
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_burst_length, pwrb, params->dw[offsetof
+ (struct amap_beiscsi_offload_params,
+ max_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ type, pwrb,
+ BE_TGT_CTX_UPDT_CMD);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ptr2nextwrb,
+ pwrb, pwrb_handle->nxt_wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, wrb_idx,
+ pwrb, pwrb_handle->wrb_index);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_send_data_segment_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_send_data_segment_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ first_burst_length, pwrb,
+ params->dw[offsetof(struct amap_beiscsi_offload_params,
+ first_burst_length) / 32]);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_recv_dataseg_len, pwrb, BEISCSI_MAX_RECV_DATASEG_LEN);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ max_cxns, pwrb, BEISCSI_MAX_CXNS);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, erl, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ erl) / 32] & OFFLD_PARAMS_ERL));
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, dde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, hde, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ ir2t, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, imd, pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ data_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ data_seq_inorder) / 32] &
+ OFFLD_PARAMS_DATA_SEQ_INORDER) >> 6);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2,
+ pdu_seq_inorder,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ pdu_seq_inorder) / 32] &
+ OFFLD_PARAMS_PDU_SEQ_INORDER) >> 7);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, max_r2t,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ max_r2t) / 32] &
+ OFFLD_PARAMS_MAX_R2T) >> 8);
+ AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb_v2, stat_sn,
+ pwrb,
+ (params->dw[offsetof(struct amap_beiscsi_offload_params,
+ exp_statsn) / 32] + 1));
}
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index c50cef6fec0d..2e4968add799 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
/**
- * Copyright (C) 2005 - 2011 Emulex
+ * Copyright (C) 2005 - 2012 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -30,6 +30,12 @@
#define IP_V6_LEN 16
#define IP_V4_LEN 4
+/* UE Status and Mask register */
+#define PCICFG_UE_STATUS_LOW 0xA0
+#define PCICFG_UE_STATUS_HIGH 0xA4
+#define PCICFG_UE_STATUS_MASK_LOW 0xA8
+#define PCICFG_UE_STATUS_MASK_HI 0xAC
+
/**
* Pseudo amap definition in which each bit of the actual structure is defined
* as a byte: used to calculate offset/shift/mask of each field
@@ -301,4 +307,19 @@ int be_mgmt_get_boot_shandle(struct beiscsi_hba *phba,
unsigned int mgmt_get_all_if_id(struct beiscsi_hba *phba);
int mgmt_set_vlan(struct beiscsi_hba *phba, uint16_t vlan_tag);
+
+ssize_t beiscsi_drvr_ver_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+ssize_t beiscsi_adap_family_disp(struct device *dev,
+ struct device_attribute *attr, char *buf);
+
+void beiscsi_offload_cxn_v0(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle,
+ struct be_mem_descriptor *mem_descr);
+
+void beiscsi_offload_cxn_v2(struct beiscsi_offload_params *params,
+ struct wrb_handle *pwrb_handle);
+void beiscsi_ue_detect(struct beiscsi_hba *phba);
+
#endif
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 895b0e516e07..a5f7690e819e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
sizeof(driver_info.host_os_patch) - 1);
strncpy(driver_info.os_device_name, bfad->pci_name,
- sizeof(driver_info.os_device_name - 1));
+ sizeof(driver_info.os_device_name) - 1);
/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -1739,7 +1739,7 @@ static struct pci_driver bfad_pci_driver = {
.name = BFAD_DRIVER_NAME,
.id_table = bfad_id_table,
.probe = bfad_pci_probe,
- .remove = __devexit_p(bfad_pci_remove),
+ .remove = bfad_pci_remove,
.err_handler = &bfad_err_handler,
};
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index 3486845ba301..50fcd018d14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"
#define BNX2FC_NAME "bnx2fc"
-#define BNX2FC_VERSION "1.0.12"
+#define BNX2FC_VERSION "1.0.13"
#define PFX "bnx2fc: "
@@ -156,6 +156,18 @@
#define BNX2FC_RELOGIN_WAIT_TIME 200
#define BNX2FC_RELOGIN_WAIT_CNT 10
+#define BNX2FC_STATS(hba, stat, cnt) \
+ do { \
+ u32 val; \
+ \
+ val = fw_stats->stat.cnt; \
+ if (hba->prev_stats.stat.cnt <= val) \
+ val -= hba->prev_stats.stat.cnt; \
+ else \
+ val += (0xfffffff - hba->prev_stats.stat.cnt); \
+ hba->bfw_stats.cnt += val; \
+ } while (0)
+
/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;
@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
spinlock_t fp_work_lock;
};
+struct bnx2fc_fw_stats {
+ u64 fc_crc_cnt;
+ u64 fcoe_tx_pkt_cnt;
+ u64 fcoe_rx_pkt_cnt;
+ u64 fcoe_tx_byte_cnt;
+ u64 fcoe_rx_byte_cnt;
+};
+
struct bnx2fc_hba {
struct list_head list;
struct cnic_dev *cnic;
@@ -207,6 +227,8 @@ struct bnx2fc_hba {
struct bnx2fc_rport **tgt_ofld_list;
/* statistics */
+ struct bnx2fc_fw_stats bfw_stats;
+ struct fcoe_statistics_params prev_stats;
struct fcoe_statistics_params *stats_buffer;
dma_addr_t stats_buf_dma;
struct completion stat_req_done;
@@ -280,6 +302,7 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
#define BNX2FC_FLAG_EXPL_LOGO 0x8
#define BNX2FC_FLAG_DISABLE_FAILED 0x9
+#define BNX2FC_FLAG_ENABLED 0xa
u8 src_addr[ETH_ALEN];
u32 max_sqes;
@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+ struct bnx2fc_rport *tgt);
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt);
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
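
The BNX2FC_STATS() macro added earlier in this header accumulates wrapping firmware counters into the wider u64 fields of struct bnx2fc_fw_stats: if the current sample is below the previous one, the counter is assumed to have wrapped at 0xfffffff (28 bits, as the macro's mask implies). A standalone sketch mirroring the macro's arithmetic (names invented; not driver code):

#include <stdio.h>
#include <stdint.h>

/* Mirror of the macro's delta logic for a 28-bit wrapping counter */
static uint64_t accumulate(uint64_t total, uint32_t prev, uint32_t cur)
{
	uint32_t val = cur;

	if (prev <= val)
		val -= prev;			/* no wrap since last poll */
	else
		val += (0xfffffff - prev);	/* counter wrapped once */
	return total + val;
}

int main(void)
{
	/* Sample wrapped: prev = 0xffffffe, cur = 5; prints 6 */
	printf("%llu\n",
	       (unsigned long long)accumulate(0, 0xffffffe, 5));
	return 0;
}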
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 4d5c72159346..2daf4b0da434 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,10 +22,10 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
-#define DRV_MODULE_RELDATE "Jun 04, 2012"
+#define DRV_MODULE_RELDATE "Dec 21, 2012"
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II FCoE Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -652,11 +652,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
return bnx2fc_stats;
}
- bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
- bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
- bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
- bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
- bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
+ BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
+ bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
+ bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
+ BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
+ bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
+ bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
+ BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
+ bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);
bnx2fc_stats->dumped_frames = 0;
bnx2fc_stats->lip_count = 0;
@@ -665,6 +670,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
bnx2fc_stats->loss_of_signal_count = 0;
bnx2fc_stats->prim_seq_protocol_err_count = 0;
+ memcpy(&hba->prev_stats, hba->stats_buffer,
+ sizeof(struct fcoe_statistics_params));
return bnx2fc_stats;
}
@@ -2702,7 +2709,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.can_queue = BNX2FC_CAN_QUEUE,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
- .max_sectors = 512,
+ .max_sectors = 1024,
};
static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index 6d6eee42ac7d..85ea98a80f40 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -296,7 +296,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
ofld_req3.flags |= (interface->vlan_enabled <<
FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
- /* C2_VALID and ACK flags are not set as they are not suppported */
+ /* C2_VALID and ACK flags are not set as they are not supported */
/* Initialize offload request 4 structure */
@@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
* @port: port structure pointer
* @tgt: bnx2fc_rport structure pointer
*/
-static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
+int bnx2fc_send_session_enable_req(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
struct kwqe *kwqe_arr[2];
@@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
xid);
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
if (!test_bit(BNX2FC_FLAG_SRR_SENT,
@@ -847,8 +845,6 @@ ret_err_rqe:
goto ret_warn_rqe;
}
- memset(&io_req->err_entry, 0,
- sizeof(struct fcoe_err_report_entry));
memcpy(&io_req->err_entry, err_entry,
sizeof(struct fcoe_err_report_entry));
@@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
struct bnx2fc_interface *interface;
u32 conn_id;
u32 context_id;
- int rc;
conn_id = ofld_kcqe->fcoe_conn_id;
context_id = ofld_kcqe->fcoe_conn_context_id;
@@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
"resources\n");
set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
}
- goto ofld_cmpl_err;
} else {
-
- /* now enable the session */
- rc = bnx2fc_send_session_enable_req(port, tgt);
- if (rc) {
- printk(KERN_ERR PFX "enable session failed\n");
- goto ofld_cmpl_err;
- }
+ /* FW offload request successfully completed */
+ set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
}
- return;
ofld_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
@@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
goto enbl_cmpl_err;
}
- if (ofld_kcqe->completion_status)
- goto enbl_cmpl_err;
- else {
+ if (!ofld_kcqe->completion_status)
/* enable successful - rport ready for issuing IOs */
- set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
- set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
- wake_up_interruptible(&tgt->ofld_wait);
- }
- return;
+ set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
enbl_cmpl_err:
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
@@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
/* disable successful */
BNX2FC_TGT_DBG(tgt, "disable successful\n");
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 8d4626c07a12..60798e829de6 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
&mp_req->mp_resp_bd_dma,
GFP_ATOMIC);
- if (!mp_req->mp_req_bd) {
+ if (!mp_req->mp_resp_bd) {
printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
bnx2fc_free_mp_resc(io_req);
return FAILED;
@@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
{
struct fc_lport *lport;
- struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
- struct fc_rport_libfc_priv *rp = rport->dd_data;
+ struct fc_rport *rport;
+ struct fc_rport_libfc_priv *rp;
struct fcoe_port *port;
struct bnx2fc_interface *interface;
struct bnx2fc_rport *tgt;
@@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
unsigned long start = jiffies;
lport = shost_priv(host);
+ rport = starget_to_rport(scsi_target(sc_cmd->device));
port = lport_priv(lport);
interface = port->priv;
@@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
rc = FAILED;
goto tmf_err;
}
+ rp = rport->dd_data;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
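
The reordering in the hunks above matters because the original code dereferenced rport->dd_data in the declaration, before the !rport check could run. A minimal illustration of the corrected ordering (not driver code):

#include <stdio.h>

struct rport { void *dd_data; };

static int use_rport(struct rport *rport)
{
	void *rp;

	if (!rport)			/* validate first ... */
		return -1;
	rp = rport->dd_data;		/* ... then dereference */
	(void)rp;
	return 0;
}

int main(void)
{
	printf("%d\n", use_rport(NULL));	/* safely prints -1 */
	return 0;
}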
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index b9d0d9cb17f9..c57a3bb8a9fb 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
wake_up_interruptible(&tgt->upld_wait);
}
@@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
* resources are freed up in bnx2fc_offload_session
*/
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
+ clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
wake_up_interruptible(&tgt->ofld_wait);
}
+static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+
+ wait_event_interruptible(tgt->ofld_wait,
+ (test_bit(
+ BNX2FC_FLAG_OFLD_REQ_CMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->ofld_timer);
+}
+
static void bnx2fc_offload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt,
struct fc_rport_priv *rdata)
@@ -103,17 +119,7 @@ retry_ofld:
* wait until the session is offloaded and enabled. 3 secs
* should be ample time for this process to complete.
*/
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
- mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->ofld_wait,
- (test_bit(
- BNX2FC_FLAG_OFLD_REQ_CMPL,
- &tgt->flags)));
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->ofld_timer);
+ bnx2fc_ofld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
@@ -131,14 +137,23 @@ retry_ofld:
}
if (bnx2fc_map_doorbell(tgt)) {
printk(KERN_ERR PFX "map doorbell failed - no mem\n");
- /* upload will take care of cleaning up sess resc */
- lport->tt.rport_logoff(rdata);
+ goto ofld_err;
}
+ clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
+ rval = bnx2fc_send_session_enable_req(port, tgt);
+ if (rval) {
+ pr_err(PFX "enable session failed\n");
+ goto ofld_err;
+ }
+ bnx2fc_ofld_wait(tgt);
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
+ goto ofld_err;
return;
ofld_err:
/* couldn't offload the session. log off from this rport */
BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
+ clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
/* Free session resources */
bnx2fc_free_session_resc(hba, tgt);
tgt_init_err:
@@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
spin_unlock_bh(&tgt->tgt_lock);
}
+static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
+{
+ setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
+ wait_event_interruptible(tgt->upld_wait,
+ (test_bit(
+ BNX2FC_FLAG_UPLD_REQ_COMPL,
+ &tgt->flags)));
+ if (signal_pending(current))
+ flush_signals(current);
+ del_timer_sync(&tgt->upld_timer);
+}
+
static void bnx2fc_upload_session(struct fcoe_port *port,
struct bnx2fc_rport *tgt)
{
@@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
* wait for upload to complete. 3 Secs
* should be sufficient time for this process to complete.
*/
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
-
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
+ bnx2fc_upld_wait(tgt);
/*
* traverse thru the active_q and tmf_q and cleanup
@@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
bnx2fc_send_session_destroy_req(hba, tgt);
/* wait for destroy to complete */
- setup_timer(&tgt->upld_timer,
- bnx2fc_upld_timer, (unsigned long)tgt);
- mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
-
- wait_event_interruptible(tgt->upld_wait,
- (test_bit(
- BNX2FC_FLAG_UPLD_REQ_COMPL,
- &tgt->flags)));
+ bnx2fc_upld_wait(tgt);
if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
tgt->flags);
- if (signal_pending(current))
- flush_signals(current);
-
- del_timer_sync(&tgt->upld_timer);
} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
@@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
tgt->rq_cons_idx = 0;
atomic_set(&tgt->num_active_ios, 0);
- if (rdata->flags & FC_RP_FLAGS_RETRY) {
+ if (rdata->flags & FC_RP_FLAGS_RETRY &&
+ rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
+ !(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
tgt->dev_type = TYPE_TAPE;
tgt->io_timeout = 0; /* use default ULP timeout */
} else {
@@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
tgt = (struct bnx2fc_rport *)&rp[1];
/* This can happen when ADISC finds the same target */
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
BNX2FC_TGT_DBG(tgt, "already offloaded\n");
mutex_unlock(&hba->hba_mutex);
return;
@@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
hba->num_ofld_sess);
- if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
- /*
- * Session is offloaded and enabled. Map
- * doorbell register for this target
- */
+ if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
+ /* Session is offloaded and enabled. */
BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
/* This counter is protected with hba mutex */
hba->num_ofld_sess++;
@@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
*/
tgt = (struct bnx2fc_rport *)&rp[1];
- if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
+ if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
mutex_unlock(&hba->hba_mutex);
break;
}
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
index 3f9e7061258e..b44d04e41b0d 100644
--- a/drivers/scsi/bnx2i/bnx2i.h
+++ b/drivers/scsi/bnx2i/bnx2i.h
@@ -800,7 +800,7 @@ extern struct device_attribute *bnx2i_dev_attributes[];
/*
* Function Prototypes
*/
-extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev);
extern void bnx2i_ulp_init(struct cnic_dev *dev);
extern void bnx2i_ulp_exit(struct cnic_dev *dev);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 91eec60252ee..a28b03e5a5f6 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
if (error_mask1) {
iscsi_init2.error_bit_map[0] = error_mask1;
- mask64 &= (u32)(~mask64);
+ mask64 ^= (u32)(mask64);
mask64 |= error_mask1;
} else
iscsi_init2.error_bit_map[0] = (u32) mask64;
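
The one-line change above is subtler than it looks. In mask64 &= (u32)(~mask64), the truncated complement is zero-extended back to 64 bits, so the AND cleared the upper 32 bits as well, zeroing the whole mask before error_mask1 was ORed in; XORing with the zero-extended low word clears only bits 0..31. A standalone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask64 = 0x1122334455667788ULL;
	uint64_t old = mask64, fixed = mask64;

	old &= (uint32_t)(~old);	/* buggy: whole mask becomes 0 */
	assert(old == 0);

	fixed ^= (uint32_t)(fixed);	/* clears only the low 32 bits */
	assert(fixed == 0x1122334400000000ULL);
	return 0;
}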
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index b17637aab9a7..50fef6963a81 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -21,7 +21,7 @@ static u32 adapter_count;
#define DRV_MODULE_VERSION "2.7.2.2"
#define DRV_MODULE_RELDATE "Apr 25, 2012"
-static char version[] __devinitdata =
+static char version[] =
"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
" v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -79,42 +79,33 @@ static struct notifier_block bnx2i_cpu_notifier = {
/**
* bnx2i_identify_device - identifies NetXtreme II device type
* @hba: Adapter structure pointer
+ * @dev: Corresponding cnic device
*
* This function identifies the NX2 device type and sets the appropriate
* queue mailbox register access method; the 5709 requires the driver to
* access MBOX regs using *bin* mode
*/
-void bnx2i_identify_device(struct bnx2i_hba *hba)
+void bnx2i_identify_device(struct bnx2i_hba *hba, struct cnic_dev *dev)
{
hba->cnic_dev_type = 0;
- if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
- set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
- set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
- else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
- (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
- set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
- hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
- } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57711E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57712E ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57800_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57810_VF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840 ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_MF ||
- hba->pci_did == PCI_DEVICE_ID_NX2_57840_VF)
+ if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
+ if (hba->pci_did == PCI_DEVICE_ID_NX2_5706 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5706S) {
+ set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5708 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5708S) {
+ set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+ } else if (hba->pci_did == PCI_DEVICE_ID_NX2_5709 ||
+ hba->pci_did == PCI_DEVICE_ID_NX2_5709S) {
+ set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+ hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+ }
+ } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
- else
+ } else {
printk(KERN_ALERT "bnx2i: unknown device, 0x%x\n",
hba->pci_did);
+ }
}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 3b34c13e2f02..0056e47bd56e 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -808,7 +808,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
- bnx2i_identify_device(hba);
+ bnx2i_identify_device(hba, cnic);
bnx2i_setup_host_queue_size(hba, shost);
hba->reg_base = pci_resource_start(hba->pcidev, 0);
diff --git a/drivers/scsi/bvme6000_scsi.c b/drivers/scsi/bvme6000_scsi.c
index d40ea2f5be10..1e3f96adf9da 100644
--- a/drivers/scsi/bvme6000_scsi.c
+++ b/drivers/scsi/bvme6000_scsi.c
@@ -34,7 +34,7 @@ static struct scsi_host_template bvme6000_scsi_driver_template = {
static struct platform_device *bvme6000_scsi_device;
-static __devinit int
+static int
bvme6000_probe(struct platform_device *dev)
{
struct Scsi_Host *host;
@@ -88,7 +88,7 @@ bvme6000_probe(struct platform_device *dev)
return -ENODEV;
}
-static __devexit int
+static int
bvme6000_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
@@ -108,7 +108,7 @@ static struct platform_driver bvme6000_scsi_driver = {
.owner = THIS_MODULE,
},
.probe = bvme6000_probe,
- .remove = __devexit_p(bvme6000_device_remove),
+ .remove = bvme6000_device_remove,
};
static int __init bvme6000_scsi_init(void)
diff --git a/drivers/scsi/csiostor/Kconfig b/drivers/scsi/csiostor/Kconfig
new file mode 100644
index 000000000000..4d03b032aa10
--- /dev/null
+++ b/drivers/scsi/csiostor/Kconfig
@@ -0,0 +1,19 @@
+config SCSI_CHELSIO_FCOE
+ tristate "Chelsio Communications FCoE support"
+ depends on PCI && SCSI
+ select SCSI_FC_ATTRS
+ select FW_LOADER
+ help
+ This driver supports FCoE Offload functionality over
+ Chelsio T4-based 10Gb Converged Network Adapters.
+
+ For general information about Chelsio and our products, visit
+ our website at <http://www.chelsio.com>.
+
+ For customer support, please visit our customer support page at
+ <http://www.chelsio.com/support.html>.
+
+ Please send feedback to <linux-bugs@chelsio.com>.
+
+ To compile this driver as a module choose M here; the module
+ will be called csiostor.
diff --git a/drivers/scsi/csiostor/Makefile b/drivers/scsi/csiostor/Makefile
new file mode 100644
index 000000000000..b581966c88f9
--- /dev/null
+++ b/drivers/scsi/csiostor/Makefile
@@ -0,0 +1,11 @@
+#
+# Chelsio FCoE driver
+#
+#
+
+ccflags-y += -I$(srctree)/drivers/net/ethernet/chelsio/cxgb4
+
+obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor.o
+
+csiostor-objs := csio_attr.o csio_init.o csio_lnode.o csio_scsi.o \
+ csio_hw.o csio_isr.o csio_mb.o csio_rnode.o csio_wr.o
diff --git a/drivers/scsi/csiostor/csio_attr.c b/drivers/scsi/csiostor/csio_attr.c
new file mode 100644
index 000000000000..065a87ace623
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_attr.c
@@ -0,0 +1,796 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <linux/jiffies.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_init.h"
+
+static void
+csio_vport_set_state(struct csio_lnode *ln);
+
+/*
+ * csio_reg_rnode - Register a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_add() to register this remote port with FC transport.
+ * If the remote port is an initiator, a target, or both, change the role accordingly.
+ *
+ */
+void
+csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct fc_rport_identifiers ids;
+ struct fc_rport *rport;
+ struct csio_service_parms *sp;
+
+ ids.node_name = wwn_to_u64(csio_rn_wwnn(rn));
+ ids.port_name = wwn_to_u64(csio_rn_wwpn(rn));
+ ids.port_id = rn->nport_id;
+ ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
+ if (rn->role & CSIO_RNFR_INITIATOR || rn->role & CSIO_RNFR_TARGET) {
+ rport = rn->rport;
+ CSIO_ASSERT(rport != NULL);
+ goto update_role;
+ }
+
+ rn->rport = fc_remote_port_add(shost, 0, &ids);
+ if (!rn->rport) {
+ csio_ln_err(ln, "Failed to register rport = 0x%x.\n",
+ rn->nport_id);
+ return;
+ }
+
+ ln->num_reg_rnodes++;
+ rport = rn->rport;
+ spin_lock_irq(shost->host_lock);
+ *((struct csio_rnode **)rport->dd_data) = rn;
+ spin_unlock_irq(shost->host_lock);
+
+ sp = &rn->rn_sparm;
+ rport->maxframe_size = ntohs(sp->csp.sp_bb_data);
+ if (ntohs(sp->clsp[2].cp_class) & FC_CPC_VALID)
+ rport->supported_classes = FC_COS_CLASS3;
+ else
+ rport->supported_classes = FC_COS_UNSPECIFIED;
+update_role:
+ if (rn->role & CSIO_RNFR_INITIATOR)
+ ids.roles |= FC_RPORT_ROLE_FCP_INITIATOR;
+ if (rn->role & CSIO_RNFR_TARGET)
+ ids.roles |= FC_RPORT_ROLE_FCP_TARGET;
+
+ if (ids.roles != FC_RPORT_ROLE_UNKNOWN)
+ fc_remote_port_rolechg(rport, ids.roles);
+
+ rn->scsi_id = rport->scsi_target_id;
+
+ csio_ln_dbg(ln, "Remote port x%x role 0x%x registered\n",
+ rn->nport_id, ids.roles);
+}
+
+/*
+ * csio_unreg_rnode - Unregister a remote port with FC transport.
+ * @rn: Rnode representing remote port.
+ *
+ * Call fc_remote_port_delete() to unregister this remote port with FC
+ * transport.
+ *
+ */
+void
+csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct fc_rport *rport = rn->rport;
+
+ rn->role &= ~(CSIO_RNFR_INITIATOR | CSIO_RNFR_TARGET);
+ fc_remote_port_delete(rport);
+ ln->num_reg_rnodes--;
+
+ csio_ln_dbg(ln, "Remote port x%x un-registered\n", rn->nport_id);
+}
+
+/*
+ * csio_lnode_async_event - Async events from local port.
+ * @ln: lnode representing local port.
+ *
+ * Async events from the local node that the FC transport/SCSI ML
+ * should be made aware of (e.g. RSCN).
+ */
+void
+csio_lnode_async_event(struct csio_lnode *ln, enum csio_ln_fc_evt fc_evt)
+{
+ switch (fc_evt) {
+ case CSIO_LN_FC_RSCN:
+ /* Get payload of rscn from ln */
+ /* For each RSCN entry */
+ /*
+ * fc_host_post_event(shost,
+ * fc_get_event_number(),
+ * FCH_EVT_RSCN,
+ * rscn_entry);
+ */
+ break;
+ case CSIO_LN_FC_LINKUP:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_LINKDOWN:
+ /* send fc_host_post_event */
+ /* set vport state */
+ if (csio_is_npiv_ln(ln))
+ csio_vport_set_state(ln);
+
+ break;
+ case CSIO_LN_FC_ATTRIB_UPDATE:
+ csio_fchost_attr_init(ln);
+ break;
+ default:
+ break;
+ }
+}
+
+/*
+ * csio_fchost_attr_init - Initialize FC transport attributes
+ * @ln: Lnode.
+ *
+ */
+void
+csio_fchost_attr_init(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ fc_host_node_name(shost) = wwn_to_u64(csio_ln_wwnn(ln));
+ fc_host_port_name(shost) = wwn_to_u64(csio_ln_wwpn(ln));
+
+ fc_host_supported_classes(shost) = FC_COS_CLASS3;
+ fc_host_max_npiv_vports(shost) =
+ (csio_lnode_to_hw(ln))->fres_info.max_vnps;
+ fc_host_supported_speeds(shost) = FC_PORTSPEED_10GBIT |
+ FC_PORTSPEED_1GBIT;
+
+ fc_host_maxframe_size(shost) = ntohs(ln->ln_sparm.csp.sp_bb_data);
+ memset(fc_host_supported_fc4s(shost), 0,
+ sizeof(fc_host_supported_fc4s(shost)));
+ fc_host_supported_fc4s(shost)[7] = 1;
+
+ memset(fc_host_active_fc4s(shost), 0,
+ sizeof(fc_host_active_fc4s(shost)));
+ fc_host_active_fc4s(shost)[7] = 1;
+}
+
+/*
+ * csio_get_host_port_id - Populate/cache the nport_id for the host's
+ * sysfs port_id entry.
+ */
+static void
+csio_get_host_port_id(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ fc_host_port_id(shost) = ln->nport_id;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_type - Return FC local port type.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_type(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ if (csio_is_npiv_ln(ln))
+ fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
+ else
+ fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_port_state - Return FC local port state.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_port_state(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ char state[16];
+
+ spin_lock_irq(&hw->lock);
+
+ csio_lnode_state_to_str(ln, state);
+ if (!strcmp(state, "READY"))
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else if (!strcmp(state, "OFFLINE"))
+ fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
+
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_speed - Return link speed to FC transport.
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_speed(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ switch (hw->pport[ln->portid].link_speed) {
+ case FW_PORT_CAP_SPEED_1G:
+ fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
+ break;
+ case FW_PORT_CAP_SPEED_10G:
+ fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+ break;
+ default:
+ fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
+ break;
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_host_fabric_name - Return fabric name
+ * @shost: scsi host.
+ *
+ */
+static void
+csio_get_host_fabric_name(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_rnode *rn = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+ rn = csio_rnode_lookup_portid(ln, FC_FID_FLOGI);
+ if (rn)
+ fc_host_fabric_name(shost) = wwn_to_u64(csio_rn_wwnn(rn));
+ else
+ fc_host_fabric_name(shost) = 0;
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_get_stats - Return FC transport statistics.
+ * @shost: scsi host.
+ *
+ */
+static struct fc_host_statistics *
+csio_get_stats(struct Scsi_Host *shost)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct fc_host_statistics *fhs = &ln->fch_stats;
+ struct fw_fcoe_port_stats fcoe_port_stats;
+ uint64_t seconds;
+
+ memset(&fcoe_port_stats, 0, sizeof(struct fw_fcoe_port_stats));
+ csio_get_phy_port_stats(hw, ln->portid, &fcoe_port_stats);
+
+ fhs->tx_frames += (be64_to_cpu(fcoe_port_stats.tx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_frames) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_frames));
+ fhs->tx_words += (be64_to_cpu(fcoe_port_stats.tx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_ucast_bytes) +
+ be64_to_cpu(fcoe_port_stats.tx_offload_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->rx_frames += (be64_to_cpu(fcoe_port_stats.rx_bcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_frames) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_frames));
+ fhs->rx_words += (be64_to_cpu(fcoe_port_stats.rx_bcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_mcast_bytes) +
+ be64_to_cpu(fcoe_port_stats.rx_ucast_bytes)) /
+ CSIO_WORD_TO_BYTE;
+ fhs->error_frames += be64_to_cpu(fcoe_port_stats.rx_err_frames);
+ fhs->fcp_input_requests += ln->stats.n_input_requests;
+ fhs->fcp_output_requests += ln->stats.n_output_requests;
+ fhs->fcp_control_requests += ln->stats.n_control_requests;
+ fhs->fcp_input_megabytes += ln->stats.n_input_bytes >> 20;
+ fhs->fcp_output_megabytes += ln->stats.n_output_bytes >> 20;
+ fhs->link_failure_count = ln->stats.n_link_down;
+	/* Seconds elapsed since the last device reset */
+ seconds = jiffies_to_msecs(jiffies) - hw->stats.n_reset_start;
+ do_div(seconds, 1000);
+ fhs->seconds_since_last_reset = seconds;
+
+ return fhs;
+}
+
+/*
+ * csio_set_rport_loss_tmo - Set the rport dev loss timeout
+ * @rport: fc rport.
+ * @timeout: new value for dev loss tmo.
+ *
+ * If timeout is non zero set the dev_loss_tmo to timeout, else set
+ * dev_loss_tmo to one.
+ */
+static void
+csio_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
+{
+ if (timeout)
+ rport->dev_loss_tmo = timeout;
+ else
+ rport->dev_loss_tmo = 1;
+}
+
+static void
+csio_vport_set_state(struct csio_lnode *ln)
+{
+ struct fc_vport *fc_vport = ln->fc_vport;
+ struct csio_lnode *pln = ln->pln;
+ char state[16];
+
+	/* Set fc vport state based on physical lnode */
+ csio_lnode_state_to_str(pln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+
+ if (!(pln->flags & CSIO_LNF_NPIVSUPP)) {
+ fc_vport_set_state(fc_vport, FC_VPORT_NO_FABRIC_SUPP);
+ return;
+ }
+
+ /* Set fc vport state based on virtual lnode */
+ csio_lnode_state_to_str(ln, state);
+ if (strcmp(state, "READY")) {
+ fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
+ return;
+ }
+ fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE);
+}
+
+static int
+csio_fcoe_alloc_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to alloc vport */
+ /* Allocate Mbox request */
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+ ln->fcf_flowid = pln->fcf_flowid;
+ ln->portid = pln->portid;
+
+ csio_fcoe_vnp_alloc_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ pln->fcf_flowid, pln->vnp_flowid, 0,
+ csio_ln_wwnn(ln), csio_ln_wwpn(ln), NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+		/* FW is expected to complete the vnp cmd in immediate mode
+		 * without much delay.
+		 * Otherwise, there will be an increase in I/O latency, since
+		 * the HW lock is held until the vnp mbox cmd completes.
+ */
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP ALLOC cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ goto out_free;
+ }
+
+ ln->vnp_flowid = FW_FCOE_VNP_CMD_VNPI_GET(
+ ntohl(rsp->gen_wwn_to_vnpi));
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ csio_ln_dbg(ln, "FCOE VNPI: 0x%x\n", ln->vnp_flowid);
+ csio_ln_dbg(ln, "\tWWNN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwnn[0], ln->ln_sparm.wwnn[1],
+ ln->ln_sparm.wwnn[2], ln->ln_sparm.wwnn[3],
+ ln->ln_sparm.wwnn[4], ln->ln_sparm.wwnn[5],
+ ln->ln_sparm.wwnn[6], ln->ln_sparm.wwnn[7]);
+ csio_ln_dbg(ln, "\tWWPN: %x%x%x%x%x%x%x%x\n",
+ ln->ln_sparm.wwpn[0], ln->ln_sparm.wwpn[1],
+ ln->ln_sparm.wwpn[2], ln->ln_sparm.wwpn[3],
+ ln->ln_sparm.wwpn[4], ln->ln_sparm.wwpn[5],
+ ln->ln_sparm.wwpn[6], ln->ln_sparm.wwpn[7]);
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_fcoe_free_vnp(struct csio_hw *hw, struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+ struct csio_mb *mbp;
+ struct fw_fcoe_vnp_cmd *rsp;
+ int ret = 0;
+ int retry = 0;
+
+ /* Issue VNP cmd to free vport */
+ /* Allocate Mbox request */
+
+ spin_lock_irq(&hw->lock);
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pln = ln->pln;
+
+ csio_fcoe_vnp_free_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid, ln->vnp_flowid,
+ NULL);
+
+ for (retry = 0; retry < 3; retry++) {
+ ret = csio_mb_issue(hw, mbp);
+ if (ret != -EBUSY)
+ break;
+
+ /* Retry if mbox returns busy */
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ if (ret) {
+ csio_ln_err(ln, "Failed to issue mbox FCoE VNP command\n");
+ goto out_free;
+ }
+
+ /* Process Mbox response of VNP command */
+ rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ if (FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)) != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE VNP FREE cmd returned 0x%x!\n",
+ FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16)));
+ ret = -EINVAL;
+ }
+
+out_free:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ spin_unlock_irq(&hw->lock);
+ return ret;
+}
+
+static int
+csio_vport_create(struct fc_vport *fc_vport, bool disable)
+{
+ struct Scsi_Host *shost = fc_vport->shost;
+ struct csio_lnode *pln = shost_priv(shost);
+ struct csio_lnode *ln = NULL;
+ struct csio_hw *hw = csio_lnode_to_hw(pln);
+ uint8_t wwn[8];
+ int ret = -1;
+
+ ln = csio_shost_init(hw, &fc_vport->dev, false, pln);
+ if (!ln)
+ goto error;
+
+ if (fc_vport->node_name != 0) {
+ u64_to_wwn(fc_vport->node_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwnn\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwnn(ln), wwn, 8);
+ }
+
+ if (fc_vport->port_name != 0) {
+ u64_to_wwn(fc_vport->port_name, wwn);
+
+ if (!CSIO_VALID_WWN(wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. Invalid wwpn\n");
+ goto error;
+ }
+
+ if (csio_lnode_lookup_by_wwpn(hw, wwn)) {
+ csio_ln_err(ln,
+ "vport create failed. wwpn already exists\n");
+ goto error;
+ }
+ memcpy(csio_ln_wwpn(ln), wwn, 8);
+ }
+
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+
+ if (csio_fcoe_alloc_vnp(hw, ln))
+ goto error;
+
+ *(struct csio_lnode **)fc_vport->dd_data = ln;
+ ln->fc_vport = fc_vport;
+ if (!fc_vport->node_name)
+ fc_vport->node_name = wwn_to_u64(csio_ln_wwnn(ln));
+ if (!fc_vport->port_name)
+ fc_vport->port_name = wwn_to_u64(csio_ln_wwpn(ln));
+ csio_fchost_attr_init(ln);
+ return 0;
+error:
+ if (ln)
+ csio_shost_exit(ln);
+
+ return ret;
+}
+
+static int
+csio_vport_delete(struct fc_vport *fc_vport)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ int rmv;
+
+ spin_lock_irq(&hw->lock);
+ rmv = csio_is_hw_removing(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (rmv) {
+ csio_shost_exit(ln);
+ return 0;
+ }
+
+ /* Quiesce ios and send remove event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_close(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ if (fc_vport->vport_state != FC_VPORT_DISABLED)
+ csio_fcoe_free_vnp(hw, ln);
+
+ csio_shost_exit(ln);
+ return 0;
+}
+
+static int
+csio_vport_disable(struct fc_vport *fc_vport, bool disable)
+{
+ struct csio_lnode *ln = *(struct csio_lnode **)fc_vport->dd_data;
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* disable vport */
+ if (disable) {
+ /* Quiesce ios and send stop event to lnode */
+ scsi_block_requests(shost);
+ spin_lock_irq(&hw->lock);
+ csio_scsim_cleanup_io_lnode(csio_hw_to_scsim(hw), ln);
+ csio_lnode_stop(ln);
+ spin_unlock_irq(&hw->lock);
+ scsi_unblock_requests(shost);
+
+ /* Free vnp */
+ csio_fcoe_free_vnp(hw, ln);
+ fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
+ csio_ln_err(ln, "vport disabled\n");
+ return 0;
+ } else {
+ /* enable vport */
+ fc_vport_set_state(fc_vport, FC_VPORT_INITIALIZING);
+ if (csio_fcoe_alloc_vnp(hw, ln)) {
+			csio_ln_err(ln, "vport enable failed.\n");
+ return -1;
+ }
+ csio_ln_err(ln, "vport enabled\n");
+ return 0;
+ }
+}
+
+static void
+csio_dev_loss_tmo_callbk(struct fc_rport *rport)
+{
+ struct csio_rnode *rn;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rn = *((struct csio_rnode **)rport->dd_data);
+ ln = csio_rnode_to_lnode(rn);
+ hw = csio_lnode_to_hw(ln);
+
+ spin_lock_irq(&hw->lock);
+
+ /* return if driver is being removed or same rnode comes back online */
+ if (csio_is_hw_removing(hw) || csio_is_rnode_ready(rn))
+ goto out;
+
+ csio_ln_dbg(ln, "devloss timeout on rnode:%p portid:x%x flowid:x%x\n",
+ rn, rn->nport_id, csio_rn_flowid(rn));
+
+ CSIO_INC_STATS(ln, n_dev_loss_tmo);
+
+ /*
+ * enqueue devloss event to event worker thread to serialize all
+ * rnode events.
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn))) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out;
+ }
+
+ if (!(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+
+out:
+ spin_unlock_irq(&hw->lock);
+}
+
+/* FC transport functions template - Physical port */
+struct fc_function_template csio_fc_transport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+
+ .show_host_active_fc4s = 1,
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+ .dd_fcvport_size = sizeof(struct csio_lnode *),
+
+ .vport_create = csio_vport_create,
+ .vport_disable = csio_vport_disable,
+ .vport_delete = csio_vport_delete,
+};
+
+/* FC transport functions template - Virtual port */
+struct fc_function_template csio_fc_transport_vport_funcs = {
+ .show_host_node_name = 1,
+ .show_host_port_name = 1,
+ .show_host_supported_classes = 1,
+ .show_host_supported_fc4s = 1,
+ .show_host_maxframe_size = 1,
+
+ .get_host_port_id = csio_get_host_port_id,
+ .show_host_port_id = 1,
+
+ .get_host_port_type = csio_get_host_port_type,
+ .show_host_port_type = 1,
+
+ .get_host_port_state = csio_get_host_port_state,
+ .show_host_port_state = 1,
+ .show_host_active_fc4s = 1,
+
+ .get_host_speed = csio_get_host_speed,
+ .show_host_speed = 1,
+
+ .get_host_fabric_name = csio_get_host_fabric_name,
+ .show_host_fabric_name = 1,
+
+ .get_fc_host_stats = csio_get_stats,
+
+ .dd_fcrport_size = sizeof(struct csio_rnode *),
+ .show_rport_maxframe_size = 1,
+ .show_rport_supported_classes = 1,
+
+ .set_rport_dev_loss_tmo = csio_set_rport_loss_tmo,
+ .show_rport_dev_loss_tmo = 1,
+
+ .show_starget_port_id = 1,
+ .show_starget_node_name = 1,
+ .show_starget_port_name = 1,
+
+ .dev_loss_tmo_callbk = csio_dev_loss_tmo_callbk,
+
+};
diff --git a/drivers/scsi/csiostor/csio_defs.h b/drivers/scsi/csiostor/csio_defs.h
new file mode 100644
index 000000000000..c38017b4af98
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_defs.h
@@ -0,0 +1,121 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_DEFS_H__
+#define __CSIO_DEFS_H__
+
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/timer.h>
+#include <linux/list.h>
+#include <linux/bug.h>
+#include <linux/pci.h>
+#include <linux/jiffies.h>
+
+#define CSIO_INVALID_IDX 0xFFFFFFFF
+#define CSIO_INC_STATS(elem, val) ((elem)->stats.val++)
+#define CSIO_DEC_STATS(elem, val) ((elem)->stats.val--)
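+/* A WWN is treated as valid if its NAA nibble (top 4 bits) is 5,
+ * i.e. the IEEE Registered format. */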
+#define CSIO_VALID_WWN(__n) ((*__n >> 4) == 0x5 ? true : false)
+#define CSIO_DID_MASK 0xFFFFFF
+#define CSIO_WORD_TO_BYTE 4
+
+#ifndef readq
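+/*
+ * Fallback for 32-bit platforms that lack native 64-bit MMIO accessors:
+ * each access is split into two 32-bit operations (low word first), so
+ * it is not atomic with respect to the device.
+ */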
+static inline u64 readq(void __iomem *addr)
+{
+ return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, void __iomem *addr)
+{
+ writel(val, addr);
+ writel(val >> 32, addr + 4);
+}
+#endif
+
+static inline int
+csio_list_deleted(struct list_head *list)
+{
+ return ((list->next == list) && (list->prev == list));
+}
+
+#define csio_list_next(elem) (((struct list_head *)(elem))->next)
+#define csio_list_prev(elem) (((struct list_head *)(elem))->prev)
+
+/* State machine */
+typedef void (*csio_sm_state_t)(void *, uint32_t);
+
+struct csio_sm {
+ struct list_head sm_list;
+ csio_sm_state_t sm_state;
+};
+
+static inline void
+csio_set_state(void *smp, void *state)
+{
+ ((struct csio_sm *)smp)->sm_state = (csio_sm_state_t)state;
+}
+
+static inline void
+csio_init_state(struct csio_sm *smp, void *state)
+{
+ csio_set_state(smp, state);
+}
+
+static inline void
+csio_post_event(void *smp, uint32_t evt)
+{
+ ((struct csio_sm *)smp)->sm_state(smp, evt);
+}
+
+static inline csio_sm_state_t
+csio_get_state(void *smp)
+{
+ return ((struct csio_sm *)smp)->sm_state;
+}
+
+static inline bool
+csio_match_state(void *smp, void *state)
+{
+ return (csio_get_state(smp) == (csio_sm_state_t)state);
+}
+
+#define CSIO_ASSERT(cond) BUG_ON(!(cond))
+
+#ifdef __CSIO_DEBUG__
+#define CSIO_DB_ASSERT(__c) CSIO_ASSERT((__c))
+#else
+#define CSIO_DB_ASSERT(__c)
+#endif
+
+#endif /* ifndef __CSIO_DEFS_H__ */
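
The csio_sm block above is a minimal function-pointer state machine: the current state is a handler, posting an event invokes that handler, and a transition is just a csio_set_state() call made from inside it. A hypothetical usage sketch (the demo states and event value are invented; not driver code):

#include "csio_defs.h"

struct demo_sm {
	struct csio_sm sm;	/* first member, so the (void *) casts line up */
};

static void demo_open(void *smp, uint32_t evt);

/* Closed state: any event moves the machine to open */
static void demo_closed(void *smp, uint32_t evt)
{
	csio_set_state(smp, demo_open);
}

/* Open state: any event moves the machine back to closed */
static void demo_open(void *smp, uint32_t evt)
{
	csio_set_state(smp, demo_closed);
}

static void demo_run(struct demo_sm *d)
{
	csio_init_state(&d->sm, demo_closed);
	csio_post_event(d, 1);		/* handled by demo_closed() */
	CSIO_ASSERT(csio_match_state(d, demo_open));
}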
diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c
new file mode 100644
index 000000000000..bdd78fb4fc70
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.c
@@ -0,0 +1,4398 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/pci.h>
+#include <linux/pci_regs.h>
+#include <linux/firmware.h>
+#include <linux/stddef.h>
+#include <linux/delay.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_force_master;
+int csio_dbg_level = 0xFEFF;
+unsigned int csio_port_mask = 0xf;
+
+/* Default FW event queue entries. */
+static uint32_t csio_evtq_sz = CSIO_EVTQ_SIZE;
+
+/* Default MSI param level */
+int csio_msi = 2;
+
+/* FCoE function instances */
+static int dev_num;
+
+/* FCoE Adapter types & its description */
+static const struct csio_adap_desc csio_fcoe_adapters[] = {
+ {"T440-Dbg 10G", "Chelsio T440-Dbg 10G [FCoE]"},
+ {"T420-CR 10G", "Chelsio T420-CR 10G [FCoE]"},
+ {"T422-CR 10G/1G", "Chelsio T422-CR 10G/1G [FCoE]"},
+ {"T440-CR 10G", "Chelsio T440-CR 10G [FCoE]"},
+ {"T420-BCH 10G", "Chelsio T420-BCH 10G [FCoE]"},
+ {"T440-BCH 10G", "Chelsio T440-BCH 10G [FCoE]"},
+ {"T440-CH 10G", "Chelsio T440-CH 10G [FCoE]"},
+ {"T420-SO 10G", "Chelsio T420-SO 10G [FCoE]"},
+ {"T420-CX4 10G", "Chelsio T420-CX4 10G [FCoE]"},
+ {"T420-BT 10G", "Chelsio T420-BT 10G [FCoE]"},
+ {"T404-BT 1G", "Chelsio T404-BT 1G [FCoE]"},
+ {"B420-SR 10G", "Chelsio B420-SR 10G [FCoE]"},
+ {"B404-BT 1G", "Chelsio B404-BT 1G [FCoE]"},
+ {"T480-CR 10G", "Chelsio T480-CR 10G [FCoE]"},
+ {"T440-LP-CR 10G", "Chelsio T440-LP-CR 10G [FCoE]"},
+ {"T4 FPGA", "Chelsio T4 FPGA [FCoE]"}
+};
+
+static void csio_mgmtm_cleanup(struct csio_mgmtm *);
+static void csio_hw_mbm_cleanup(struct csio_hw *);
+
+/* State machine forward declarations */
+static void csio_hws_uninit(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_configuring(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_initializing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_ready(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiescing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_quiesced(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_resetting(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_removing(struct csio_hw *, enum csio_hw_ev);
+static void csio_hws_pcierr(struct csio_hw *, enum csio_hw_ev);
+
+static void csio_hw_initialize(struct csio_hw *hw);
+static void csio_evtq_stop(struct csio_hw *hw);
+static void csio_evtq_start(struct csio_hw *hw);
+
+int csio_is_hw_ready(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_ready);
+}
+
+int csio_is_hw_removing(struct csio_hw *hw)
+{
+ return csio_match_state(hw, csio_hws_removing);
+}
+
+
+/*
+ * csio_hw_wait_op_done_val - wait until an operation is completed
+ * @hw: the HW module
+ * @reg: the register to check for completion
+ * @mask: a single-bit field within @reg that indicates completion
+ * @polarity: the value of the field when the operation is completed
+ * @attempts: number of check iterations
+ * @delay: delay in usecs between iterations
+ * @valp: where to store the value of the register at completion time
+ *
+ * Wait until an operation is completed by checking a bit in a register
+ * up to @attempts times. If @valp is not NULL the value of the register
+ * at the time it indicated completion is stored there. Returns 0 if the
+ * operation completes and -EAGAIN otherwise.
+ */
+static int
+csio_hw_wait_op_done_val(struct csio_hw *hw, int reg, uint32_t mask,
+ int polarity, int attempts, int delay, uint32_t *valp)
+{
+ uint32_t val;
+ while (1) {
+ val = csio_rd_reg32(hw, reg);
+
+ if (!!(val & mask) == polarity) {
+ if (valp)
+ *valp = val;
+ return 0;
+ }
+
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ udelay(delay);
+ }
+}
+
+void
+csio_set_reg_field(struct csio_hw *hw, uint32_t reg, uint32_t mask,
+ uint32_t value)
+{
+ uint32_t val = csio_rd_reg32(hw, reg) & ~mask;
+
+ csio_wr_reg32(hw, val | value, reg);
+ /* Flush */
+ csio_rd_reg32(hw, reg);
+
+}
+
+/*
+ * csio_hw_mc_read - read from MC through backdoor accesses
+ * @hw: the hw module
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_mc_read(struct csio_hw *hw, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ if (csio_rd_reg32(hw, MC_BIST_CMD) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, MC_BIST_CMD_ADDR);
+ csio_wr_reg32(hw, 64, MC_BIST_CMD_LEN);
+ csio_wr_reg32(hw, 0xc, MC_BIST_DATA_PATTERN);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | START_BIST | BIST_CMD_GAP(1),
+ MC_BIST_CMD);
+ i = csio_hw_wait_op_done_val(hw, MC_BIST_CMD, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, MC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, MC_DATA(16));
+#undef MC_DATA
+ return 0;
+}
+
+/*
+ * csio_hw_edc_read - read from EDC through backdoor accesses
+ * @hw: the hw module
+ * @idx: which EDC to access
+ * @addr: address of first byte requested
+ * @data: 64 bytes of data containing the requested address
+ * @ecc: where to store the corresponding 64-bit ECC word
+ *
+ * Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ * that covers the requested address @addr. If @ecc is not %NULL it
+ * is assigned the 64-bit ECC word for the read data.
+ */
+int
+csio_hw_edc_read(struct csio_hw *hw, int idx, uint32_t addr, __be32 *data,
+ uint64_t *ecc)
+{
+ int i;
+
+ idx *= EDC_STRIDE;
+ if (csio_rd_reg32(hw, EDC_BIST_CMD + idx) & START_BIST)
+ return -EBUSY;
+ csio_wr_reg32(hw, addr & ~0x3fU, EDC_BIST_CMD_ADDR + idx);
+ csio_wr_reg32(hw, 64, EDC_BIST_CMD_LEN + idx);
+ csio_wr_reg32(hw, 0xc, EDC_BIST_DATA_PATTERN + idx);
+ csio_wr_reg32(hw, BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST,
+ EDC_BIST_CMD + idx);
+ i = csio_hw_wait_op_done_val(hw, EDC_BIST_CMD + idx, START_BIST,
+ 0, 10, 1, NULL);
+ if (i)
+ return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+ for (i = 15; i >= 0; i--)
+ *data++ = htonl(csio_rd_reg32(hw, EDC_DATA(i)));
+ if (ecc)
+ *ecc = csio_rd_reg64(hw, EDC_DATA(16));
+#undef EDC_DATA
+ return 0;
+}
+
+/*
+ * csio_mem_win_rw - read/write memory through PCIE memory window
+ * @hw: the adapter
+ * @addr: address of first byte requested
+ * @data: MEMWIN0_APERTURE bytes of data containing the requested address
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
+ * MEMWIN0_APERTURE-byte-aligned address that covers the requested
+ * address @addr.
+ */
+static int
+csio_mem_win_rw(struct csio_hw *hw, u32 addr, u32 *data, int dir)
+{
+ int i;
+
+ /*
+ * Setup offset into PCIE memory window. Address must be a
+ * MEMWIN0_APERTURE-byte-aligned address. (Read back MA register to
+ * ensure that changes propagate before we attempt to use the new
+ * values.)
+ */
+ csio_wr_reg32(hw, addr & ~(MEMWIN0_APERTURE - 1),
+ PCIE_MEM_ACCESS_OFFSET);
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_OFFSET);
+
+	/* Collect data 4 bytes at a time up to MEMWIN0_APERTURE */
+ for (i = 0; i < MEMWIN0_APERTURE; i = i + sizeof(__be32)) {
+ if (dir)
+ *data++ = csio_rd_reg32(hw, (MEMWIN0_BASE + i));
+ else
+ csio_wr_reg32(hw, *data++, (MEMWIN0_BASE + i));
+ }
+
+ return 0;
+}
+
+/*
+ * csio_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
+ * @hw: the csio_hw
+ * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
+ * @addr: address within indicated memory type
+ * @len: amount of memory to transfer
+ * @buf: host memory buffer
+ * @dir: direction of transfer 1 => read, 0 => write
+ *
+ * Reads/writes an [almost] arbitrary memory region in the firmware: the
+ * firmware memory address, length and host buffer must be aligned on
+ * 32-bit boundaries. The memory is transferred as a raw byte sequence
+ * from/to the firmware's memory. If this memory contains data
+ * structures which contain multi-byte integers, it's the caller's
+ * responsibility to perform appropriate byte order conversions.
+ */
+static int
+csio_memory_rw(struct csio_hw *hw, int mtype, u32 addr, u32 len,
+ uint32_t *buf, int dir)
+{
+ uint32_t pos, start, end, offset, memoffset;
+ int ret;
+ uint32_t *data;
+
+ /*
+ * Argument sanity checks ...
+ */
+ if ((addr & 0x3) || (len & 0x3))
+ return -EINVAL;
+
+ data = kzalloc(MEMWIN0_APERTURE, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ /* Offset into the region of memory which is being accessed
+ * MEM_EDC0 = 0
+ * MEM_EDC1 = 1
+ * MEM_MC = 2
+ */
+ memoffset = (mtype * (5 * 1024 * 1024));
+
+ /* Determine the PCIE_MEM_ACCESS_OFFSET */
+ addr = addr + memoffset;
+
+ /*
+ * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
+ * at a time so we need to round down the start and round up the end.
+ * We'll start copying out of the first line at (addr - start) a word
+ * at a time.
+ */
+ start = addr & ~(MEMWIN0_APERTURE-1);
+ end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
+ offset = (addr - start)/sizeof(__be32);
+
+ for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+ /*
+ * If we're writing, copy the data from the caller's memory
+ * buffer
+ */
+ if (!dir) {
+ /*
+ * If we're doing a partial write, then we need to do
+ * a read-modify-write ...
+ */
+ if (offset || len < MEMWIN0_APERTURE) {
+ ret = csio_mem_win_rw(hw, pos, data, 1);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+ }
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ data[offset++] = *buf++;
+ len -= sizeof(__be32);
+ }
+ }
+
+ /*
+ * Transfer a block of memory and bail if there's an error.
+ */
+ ret = csio_mem_win_rw(hw, pos, data, dir);
+ if (ret) {
+ kfree(data);
+ return ret;
+ }
+
+ /*
+ * If we're reading, copy the data into the caller's memory
+ * buffer.
+ */
+ if (dir)
+ while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
+ len > 0) {
+ *buf++ = data[offset++];
+ len -= sizeof(__be32);
+ }
+ }
+
+ kfree(data);
+
+ return 0;
+}
+
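+/*
+ * Worked example of the windowing arithmetic above (a sketch, assuming a
+ * power-of-two aperture of 0x10000 bytes): for addr = 0x12344, len = 0x40,
+ *
+ * start = addr & ~(MEMWIN0_APERTURE - 1) = 0x10000
+ * end = (addr + len + MEMWIN0_APERTURE - 1) & ~(MEMWIN0_APERTURE - 1)
+ * = 0x20000
+ * offset = (addr - start) / sizeof(__be32) = 0x8d1 words
+ *
+ * so a single aperture-sized block is transferred and, for a write, the
+ * nonzero offset forces the read-modify-write path.
+ */
+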
+static int
+csio_memory_write(struct csio_hw *hw, int mtype, u32 addr, u32 len, u32 *buf)
+{
+ return csio_memory_rw(hw, mtype, addr, len, buf, 0);
+}
+
+/*
+ * EEPROM reads take a few tens of us while writes can take a bit over 5 ms.
+ */
+#define EEPROM_MAX_RD_POLL 40
+#define EEPROM_MAX_WR_POLL 6
+#define EEPROM_STAT_ADDR 0x7bfc
+#define VPD_BASE 0x400
+#define VPD_BASE_OLD 0
+#define VPD_LEN 512
+#define VPD_INFO_FLD_HDR_SIZE 3
+
+/*
+ * csio_hw_seeprom_read - read a serial EEPROM location
+ * @hw: hw to read
+ * @addr: EEPROM virtual address
+ * @data: where to store the read data
+ *
+ * Read a 32-bit word from a location in serial EEPROM using the card's PCI
+ * VPD capability. Note that this function must be called with a virtual
+ * address.
+ */
+static int
+csio_hw_seeprom_read(struct csio_hw *hw, uint32_t addr, uint32_t *data)
+{
+ uint16_t val = 0;
+ int attempts = EEPROM_MAX_RD_POLL;
+ uint32_t base = hw->params.pci.vpd_cap_addr;
+
+ if (addr >= EEPROMVSIZE || (addr & 3))
+ return -EINVAL;
+
+ pci_write_config_word(hw->pdev, base + PCI_VPD_ADDR, (uint16_t)addr);
+
+ do {
+ udelay(10);
+ pci_read_config_word(hw->pdev, base + PCI_VPD_ADDR, &val);
+ } while (!(val & PCI_VPD_ADDR_F) && --attempts);
+
+ if (!(val & PCI_VPD_ADDR_F)) {
+ csio_err(hw, "reading EEPROM address 0x%x failed\n", addr);
+ return -EINVAL;
+ }
+
+ pci_read_config_dword(hw->pdev, base + PCI_VPD_DATA, data);
+ *data = le32_to_cpu(*data);
+
+ return 0;
+}
+
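+/*
+ * Sketch of the PCI VPD read handshake used above (standard PCI VPD
+ * behaviour, stated here as an assumption): the address is written with
+ * PCI_VPD_ADDR_F clear and the flag is polled until the hardware sets it,
+ * signalling that PCI_VPD_DATA holds the requested dword. A write would go
+ * the other way round: write the data, set the flag, poll for it to clear.
+ */
+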
+/*
+ * Partial EEPROM Vital Product Data structure. Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd_hdr {
+ u8 id_tag;
+ u8 id_len[2];
+ u8 id_data[ID_LEN];
+ u8 vpdr_tag;
+ u8 vpdr_len[2];
+};
+
+/*
+ * csio_hw_get_vpd_keyword_val - Locates an information field keyword in
+ * the VPD
+ * @v: Pointer to buffered vpd data structure
+ * @kw: The keyword to search for
+ *
+ * Returns the offset of the keyword's data within the VPD buffer,
+ * or -EINVAL if the keyword is not found.
+ */
+static int
+csio_hw_get_vpd_keyword_val(const struct t4_vpd_hdr *v, const char *kw)
+{
+ int32_t i;
+ int32_t offset, len;
+ const uint8_t *buf = &v->id_tag;
+ const uint8_t *vpdr_len = &v->vpdr_tag;
+ offset = sizeof(struct t4_vpd_hdr);
+ len = (uint16_t)vpdr_len[1] + ((uint16_t)vpdr_len[2] << 8);
+
+ if (len + sizeof(struct t4_vpd_hdr) > VPD_LEN)
+ return -EINVAL;
+
+ for (i = offset; (i + VPD_INFO_FLD_HDR_SIZE) <= (offset + len);) {
+ if (memcmp(buf + i, kw, 2) == 0) {
+ i += VPD_INFO_FLD_HDR_SIZE;
+ return i;
+ }
+
+ i += VPD_INFO_FLD_HDR_SIZE + buf[i+2];
+ }
+
+ return -EINVAL;
+}
+
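+/*
+ * Sketch of the VPD-R information field layout walked by the search above
+ * (standard PCI VPD encoding, stated as an assumption):
+ *
+ * byte 0..1: two-character keyword, e.g. "SN"
+ * byte 2: length of the data that follows
+ * byte 3..: data
+ *
+ * which is why the loop steps by VPD_INFO_FLD_HDR_SIZE + buf[i + 2] and a
+ * hit returns the offset just past the 3-byte header, i.e. of the data.
+ */
+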
+static int
+csio_pci_capability(struct pci_dev *pdev, int cap, int *pos)
+{
+ *pos = pci_find_capability(pdev, cap);
+ if (*pos)
+ return 0;
+
+ return -1;
+}
+
+/*
+ * csio_hw_get_vpd_params - read VPD parameters from VPD EEPROM
+ * @hw: HW module
+ * @p: where to store the parameters
+ *
+ * Reads card parameters stored in VPD EEPROM.
+ */
+static int
+csio_hw_get_vpd_params(struct csio_hw *hw, struct csio_vpd *p)
+{
+ int i, ret, ec, sn, addr;
+ uint8_t *vpd, csum;
+ const struct t4_vpd_hdr *v;
+ /* To get around compilation warning from strstrip */
+ char *s;
+
+ if (csio_is_valid_vpd(hw))
+ return 0;
+
+ ret = csio_pci_capability(hw->pdev, PCI_CAP_ID_VPD,
+ &hw->params.pci.vpd_cap_addr);
+ if (ret)
+ return -EINVAL;
+
+ vpd = kzalloc(VPD_LEN, GFP_ATOMIC);
+ if (vpd == NULL)
+ return -ENOMEM;
+
+ /*
+ * Card information normally starts at VPD_BASE but early cards had
+ * it at 0.
+ */
+ ret = csio_hw_seeprom_read(hw, VPD_BASE, (uint32_t *)(vpd));
+ if (ret) {
+ kfree(vpd);
+ return ret;
+ }
+ addr = *vpd == 0x82 ? VPD_BASE : VPD_BASE_OLD;
+
+ for (i = 0; i < VPD_LEN; i += 4) {
+ ret = csio_hw_seeprom_read(hw, addr + i, (uint32_t *)(vpd + i));
+ if (ret) {
+ kfree(vpd);
+ return ret;
+ }
+ }
+
+ /* Reset the VPD flag! */
+ hw->flags &= (~CSIO_HWF_VPD_VALID);
+
+ v = (const struct t4_vpd_hdr *)vpd;
+
+#define FIND_VPD_KW(var, name) do { \
+ var = csio_hw_get_vpd_keyword_val(v, name); \
+ if (var < 0) { \
+ csio_err(hw, "missing VPD keyword " name "\n"); \
+ kfree(vpd); \
+ return -EINVAL; \
+ } \
+} while (0)
+
+ FIND_VPD_KW(i, "RV");
+ for (csum = 0; i >= 0; i--)
+ csum += vpd[i];
+
+ if (csum) {
+ csio_err(hw, "corrupted VPD EEPROM, actual csum %u\n", csum);
+ kfree(vpd);
+ return -EINVAL;
+ }
+ FIND_VPD_KW(ec, "EC");
+ FIND_VPD_KW(sn, "SN");
+#undef FIND_VPD_KW
+
+ memcpy(p->id, v->id_data, ID_LEN);
+ s = strstrip(p->id);
+ memcpy(p->ec, vpd + ec, EC_LEN);
+ s = strstrip(p->ec);
+ i = vpd[sn - VPD_INFO_FLD_HDR_SIZE + 2];
+ memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
+ s = strstrip(p->sn);
+
+ csio_valid_vpd_copied(hw);
+
+ kfree(vpd);
+ return 0;
+}
+
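+/*
+ * Note on the "RV" check above (standard VPD convention, stated as an
+ * assumption): the RV field's data byte is chosen so that all VPD bytes
+ * from offset 0 up to and including it sum to zero modulo 256, so any
+ * nonzero running csum indicates a corrupted EEPROM.
+ */
+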
+/*
+ * csio_hw_sf1_read - read data from the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to read
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @valp: where to store the read data
+ *
+ * Reads up to 4 bytes of data from the serial flash. The location of
+ * the read needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_read(struct csio_hw *hw, uint32_t byte_cnt, int32_t cont,
+ int32_t lock, uint32_t *valp)
+{
+ int ret;
+
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, lock | cont | BYTECNT(byte_cnt - 1), SF_OP);
+ ret = csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+ if (!ret)
+ *valp = csio_rd_reg32(hw, SF_DATA);
+ return ret;
+}
+
+/*
+ * csio_hw_sf1_write - write data to the serial flash
+ * @hw: the HW module
+ * @byte_cnt: number of bytes to write
+ * @cont: whether another operation will be chained
+ * @lock: whether to lock SF for PL access only
+ * @val: value to write
+ *
+ * Writes up to 4 bytes of data to the serial flash. The location of
+ * the write needs to be specified prior to calling this by issuing the
+ * appropriate commands to the serial flash.
+ */
+static int
+csio_hw_sf1_write(struct csio_hw *hw, uint32_t byte_cnt, uint32_t cont,
+ int32_t lock, uint32_t val)
+{
+ if (!byte_cnt || byte_cnt > 4)
+ return -EINVAL;
+ if (csio_rd_reg32(hw, SF_OP) & SF_BUSY)
+ return -EBUSY;
+
+ cont = cont ? SF_CONT : 0;
+ lock = lock ? SF_LOCK : 0;
+
+ csio_wr_reg32(hw, val, SF_DATA);
+ csio_wr_reg32(hw, cont | BYTECNT(byte_cnt - 1) | OP_WR | lock, SF_OP);
+
+ return csio_hw_wait_op_done_val(hw, SF_OP, SF_BUSY, 0, SF_ATTEMPTS,
+ 10, NULL);
+}
+
+/*
+ * csio_hw_flash_wait_op - wait for a flash operation to complete
+ * @hw: the HW module
+ * @attempts: max number of polls of the status register
+ * @delay: delay between polls in ms
+ *
+ * Wait for a flash operation to complete by polling the status register.
+ */
+static int
+csio_hw_flash_wait_op(struct csio_hw *hw, int32_t attempts, int32_t delay)
+{
+ int ret;
+ uint32_t status;
+
+ while (1) {
+ ret = csio_hw_sf1_write(hw, 1, 1, 1, SF_RD_STATUS);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 0, 1, &status);
+ if (ret != 0)
+ return ret;
+
+ if (!(status & 1))
+ return 0;
+ if (--attempts == 0)
+ return -EAGAIN;
+ if (delay)
+ msleep(delay);
+ }
+}
+
+/*
+ * csio_hw_read_flash - read words from serial flash
+ * @hw: the HW module
+ * @addr: the start address for the read
+ * @nwords: how many 32-bit words to read
+ * @data: where to store the read data
+ * @byte_oriented: whether to store data as bytes or as words
+ *
+ * Read the specified number of 32-bit words from the serial flash.
+ * If @byte_oriented is set the read data is stored as a byte array
+ * (i.e., big-endian), otherwise as 32-bit words in the platform's
+ * natural endianness.
+ */
+static int
+csio_hw_read_flash(struct csio_hw *hw, uint32_t addr, uint32_t nwords,
+ uint32_t *data, int32_t byte_oriented)
+{
+ int ret;
+
+ if (addr + nwords * sizeof(uint32_t) > hw->params.sf_size || (addr & 3))
+ return -EINVAL;
+
+ addr = swab32(addr) | SF_RD_DATA_FAST;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 0, addr);
+ if (ret != 0)
+ return ret;
+
+ ret = csio_hw_sf1_read(hw, 1, 1, 0, data);
+ if (ret != 0)
+ return ret;
+
+ for ( ; nwords; nwords--, data++) {
+ ret = csio_hw_sf1_read(hw, 4, nwords > 1, nwords == 1, data);
+ if (nwords == 1)
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret)
+ return ret;
+ if (byte_oriented)
+ *data = htonl(*data);
+ }
+ return 0;
+}
+
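+/*
+ * Note on the swab32() above (a sketch of the assumed wire format): the
+ * serial flash expects the 24-bit address in big-endian byte order right
+ * after the opcode, so the word sent through SF_DATA is built by
+ * byte-swapping the address and OR-ing in SF_RD_DATA_FAST as the opcode.
+ */
+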
+/*
+ * csio_hw_write_flash - write up to a page of data to the serial flash
+ * @hw: the hw
+ * @addr: the start address to write
+ * @n: length of data to write in bytes
+ * @data: the data to write
+ *
+ * Writes up to a page of data (256 bytes) to the serial flash starting
+ * at the given address. All the data must be written to the same page.
+ */
+static int
+csio_hw_write_flash(struct csio_hw *hw, uint32_t addr,
+ uint32_t n, const uint8_t *data)
+{
+ int ret = -EINVAL;
+ uint32_t buf[64];
+ uint32_t i, c, left, val, offset = addr & 0xff;
+
+ if (addr >= hw->params.sf_size || offset + n > SF_PAGE_SIZE)
+ return -EINVAL;
+
+ val = swab32(addr) | SF_PROG_PAGE;
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto unlock;
+
+ ret = csio_hw_sf1_write(hw, 4, 1, 1, val);
+ if (ret != 0)
+ goto unlock;
+
+ for (left = n; left; left -= c) {
+ c = min(left, 4U);
+ for (val = 0, i = 0; i < c; ++i)
+ val = (val << 8) + *data++;
+
+ ret = csio_hw_sf1_write(hw, c, c != left, 1, val);
+ if (ret)
+ goto unlock;
+ }
+ ret = csio_hw_flash_wait_op(hw, 8, 1);
+ if (ret)
+ goto unlock;
+
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+
+ /* Read the page to verify the write succeeded */
+ ret = csio_hw_read_flash(hw, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+ if (ret)
+ return ret;
+
+ if (memcmp(data - n, (uint8_t *)buf + offset, n)) {
+ csio_err(hw,
+ "failed to correctly write the flash page at %#x\n",
+ addr);
+ return -EINVAL;
+ }
+
+ return 0;
+
+unlock:
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_erase_sectors - erase a range of flash sectors
+ * @hw: the HW module
+ * @start: the first sector to erase
+ * @end: the last sector to erase
+ *
+ * Erases the sectors in the given inclusive range.
+ */
+static int
+csio_hw_flash_erase_sectors(struct csio_hw *hw, int32_t start, int32_t end)
+{
+ int ret = 0;
+
+ while (start <= end) {
+
+ ret = csio_hw_sf1_write(hw, 1, 0, 1, SF_WR_ENABLE);
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_sf1_write(hw, 4, 0, 1,
+ SF_ERASE_SECTOR | (start << 8));
+ if (ret != 0)
+ goto out;
+
+ ret = csio_hw_flash_wait_op(hw, 14, 500);
+ if (ret != 0)
+ goto out;
+
+ start++;
+ }
+out:
+ if (ret)
+ csio_err(hw, "erase of flash sector %d failed, error %d\n",
+ start, ret);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ return ret;
+}
+
+/*
+ * csio_hw_flash_cfg_addr - return the address of the flash
+ * configuration file
+ * @hw: the HW module
+ *
+ * Return the address within the flash where the Firmware Configuration
+ * File is stored.
+ */
+static unsigned int
+csio_hw_flash_cfg_addr(struct csio_hw *hw)
+{
+ if (hw->params.sf_size == 0x100000)
+ return FPGA_FLASH_CFG_OFFSET;
+ else
+ return FLASH_CFG_OFFSET;
+}
+
+static void
+csio_hw_print_fw_version(struct csio_hw *hw, char *str)
+{
+ csio_info(hw, "%s: %u.%u.%u.%u\n", str,
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+}
+
+/*
+ * csio_hw_get_fw_version - read the firmware version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the FW version from flash.
+ */
+static int
+csio_hw_get_fw_version(struct csio_hw *hw, uint32_t *vers)
+{
+ return csio_hw_read_flash(hw, FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_get_tp_version - read the TP microcode version
+ * @hw: HW module
+ * @vers: where to place the version
+ *
+ * Reads the TP microcode version from flash.
+ */
+static int
+csio_hw_get_tp_version(struct csio_hw *hw, u32 *vers)
+{
+ return csio_hw_read_flash(hw, FLASH_FW_START +
+ offsetof(struct fw_hdr, tp_microcode_ver), 1,
+ vers, 0);
+}
+
+/*
+ * csio_hw_check_fw_version - check if the FW is compatible with
+ * this driver
+ * @hw: HW module
+ *
+ * Checks if an adapter's FW is compatible with the driver. Returns 0
+ * if there's an exact match, a negative error if the version could not
+ * be read or there's a major version mismatch.
+ */
+static int
+csio_hw_check_fw_version(struct csio_hw *hw)
+{
+ int ret, major, minor, micro;
+
+ ret = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (!ret)
+ ret = csio_hw_get_tp_version(hw, &hw->tp_vers);
+ if (ret)
+ return ret;
+
+ major = FW_HDR_FW_VER_MAJOR_GET(hw->fwrev);
+ minor = FW_HDR_FW_VER_MINOR_GET(hw->fwrev);
+ micro = FW_HDR_FW_VER_MICRO_GET(hw->fwrev);
+
+ if (major != FW_VERSION_MAJOR) { /* major mismatch - fail */
+ csio_err(hw, "card FW has major version %u, driver wants %u\n",
+ major, FW_VERSION_MAJOR);
+ return -EINVAL;
+ }
+
+ if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+ return 0; /* perfect match */
+
+ /* Minor/micro version mismatch */
+ return -EINVAL;
+}
+
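+/*
+ * Sketch of the version packing assumed by the GET macros above: fw_ver is
+ * treated as four bytes, major.minor.micro.build, so e.g. 0x01020304 would
+ * decode as firmware version 1.2.3.4.
+ */
+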
+/*
+ * csio_hw_fw_dload - download firmware.
+ * @hw: HW module
+ * @fw_data: firmware image to write.
+ * @size: image size
+ *
+ * Write the supplied firmware image to the card's serial flash.
+ */
+static int
+csio_hw_fw_dload(struct csio_hw *hw, uint8_t *fw_data, uint32_t size)
+{
+ uint32_t csum;
+ int32_t addr;
+ int ret;
+ uint32_t i;
+ uint8_t first_page[SF_PAGE_SIZE];
+ const __be32 *p = (const __be32 *)fw_data;
+ struct fw_hdr *hdr = (struct fw_hdr *)fw_data;
+ uint32_t sf_sec_size;
+
+ if ((!hw->params.sf_size) || (!hw->params.sf_nsec)) {
+ csio_err(hw, "Serial Flash data invalid\n");
+ return -EINVAL;
+ }
+
+ if (!size) {
+ csio_err(hw, "FW image has no data\n");
+ return -EINVAL;
+ }
+
+ if (size & 511) {
+ csio_err(hw, "FW image size not multiple of 512 bytes\n");
+ return -EINVAL;
+ }
+
+ if (ntohs(hdr->len512) * 512 != size) {
+ csio_err(hw, "FW image size differs from size in FW header\n");
+ return -EINVAL;
+ }
+
+ if (size > FW_MAX_SIZE) {
+ csio_err(hw, "FW image too large, max is %u bytes\n",
+ FW_MAX_SIZE);
+ return -EINVAL;
+ }
+
+ for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+ csum += ntohl(p[i]);
+
+ if (csum != 0xffffffff) {
+ csio_err(hw, "corrupted firmware image, checksum %#x\n", csum);
+ return -EINVAL;
+ }
+
+ sf_sec_size = hw->params.sf_size / hw->params.sf_nsec;
+ i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
+
+ csio_dbg(hw, "Erasing sectors... start:%d end:%d\n",
+ FW_START_SEC, FW_START_SEC + i - 1);
+
+ ret = csio_hw_flash_erase_sectors(hw, FW_START_SEC,
+ FW_START_SEC + i - 1);
+ if (ret) {
+ csio_err(hw, "Flash Erase failed\n");
+ goto out;
+ }
+
+ /*
+ * We write the correct version at the end so the driver can see a bad
+ * version if the FW write fails. Start by writing a copy of the
+ * first page with a bad version.
+ */
+ memcpy(first_page, fw_data, SF_PAGE_SIZE);
+ ((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+ ret = csio_hw_write_flash(hw, FW_IMG_START, SF_PAGE_SIZE, first_page);
+ if (ret)
+ goto out;
+
+ csio_dbg(hw, "Writing Flash .. start:%d end:%d\n",
+ FW_IMG_START, FW_IMG_START + size);
+
+ addr = FW_IMG_START;
+ for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+ addr += SF_PAGE_SIZE;
+ fw_data += SF_PAGE_SIZE;
+ ret = csio_hw_write_flash(hw, addr, SF_PAGE_SIZE, fw_data);
+ if (ret)
+ goto out;
+ }
+
+ ret = csio_hw_write_flash(hw,
+ FW_IMG_START +
+ offsetof(struct fw_hdr, fw_ver),
+ sizeof(hdr->fw_ver),
+ (const uint8_t *)&hdr->fw_ver);
+
+out:
+ if (ret)
+ csio_err(hw, "firmware download failed, error %d\n", ret);
+ return ret;
+}
+
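+/*
+ * Hypothetical helper (a sketch, not driver code) restating the image
+ * checksum rule enforced in csio_hw_fw_dload(): the big-endian 32-bit
+ * words of a valid image sum to 0xffffffff, so a build tool would store
+ * the complement of the sum of all other words in the checksum field.
+ */
+static inline int
+fw_img_csum_ok(const __be32 *p, uint32_t size)
+{
+ uint32_t csum = 0;
+ uint32_t i;
+
+ for (i = 0; i < size / sizeof(*p); i++)
+ csum += ntohl(p[i]);
+
+ return csum == 0xffffffff;
+}
+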
+static int
+csio_hw_get_flash_params(struct csio_hw *hw)
+{
+ int ret;
+ uint32_t info = 0;
+
+ ret = csio_hw_sf1_write(hw, 1, 1, 0, SF_RD_ID);
+ if (!ret)
+ ret = csio_hw_sf1_read(hw, 3, 0, 1, &info);
+ csio_wr_reg32(hw, 0, SF_OP); /* unlock SF */
+ if (ret != 0)
+ return ret;
+
+ if ((info & 0xff) != 0x20) /* not a Numonix flash */
+ return -EINVAL;
+ info >>= 16; /* log2 of size */
+ if (info >= 0x14 && info < 0x18)
+ hw->params.sf_nsec = 1 << (info - 16);
+ else if (info == 0x18)
+ hw->params.sf_nsec = 64;
+ else
+ return -EINVAL;
+ hw->params.sf_size = 1 << info;
+
+ return 0;
+}
+
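+/*
+ * Worked example of the decode above (sketch): an ID word of 0x00170020
+ * passes the 0x20 manufacturer check and leaves info = 0x17 after the
+ * shift, i.e. a 2^23 = 8 MB part with 1 << (0x17 - 16) = 128 sectors of
+ * 64 KB each.
+ */
+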
+static void
+csio_set_pcie_completion_timeout(struct csio_hw *hw, u8 range)
+{
+ uint16_t val;
+ uint32_t pcie_cap;
+
+ if (!csio_pci_capability(hw->pdev, PCI_CAP_ID_EXP, &pcie_cap)) {
+ pci_read_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, &val);
+ val &= 0xfff0;
+ val |= range;
+ pci_write_config_word(hw->pdev,
+ pcie_cap + PCI_EXP_DEVCTL2, val);
+ }
+}
+
+
+/*
+ * Return the specified PCI-E Configuration Space register from our Physical
+ * Function. We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static uint32_t
+csio_read_pcie_cfg4(struct csio_hw *hw, int reg)
+{
+ u32 val = 0;
+ struct csio_mb *mbp;
+ int rv;
+ struct fw_ldst_cmd *ldst_cmd;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ pci_read_config_dword(hw->pdev, reg, &val);
+ return val;
+ }
+
+ csio_mb_ldst(hw, mbp, CSIO_MB_DEFAULT_TMO, reg);
+
+ rv = csio_mb_issue(hw, mbp);
+
+ /*
+ * If the LDST Command succeeded, extract the returned register
+ * value. Otherwise read it directly ourselves.
+ */
+ if (rv == 0) {
+ ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ val = ntohl(ldst_cmd->u.pcie.data[0]);
+ } else
+ pci_read_config_dword(hw->pdev, reg, &val);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return val;
+} /* csio_read_pcie_cfg4 */
+
+static int
+csio_hw_set_mem_win(struct csio_hw *hw)
+{
+ u32 bar0;
+
+ /*
+ * Truncation intentional: we only read the bottom 32-bits of the
+ * 64-bit BAR0/BAR1 ... We use the hardware backdoor mechanism to
+ * read BAR0 instead of using pci_resource_start() because we could be
+ * operating from within a Virtual Machine which is trapping our
+ * accesses to our Configuration Space and we need to set up the PCI-E
+ * Memory Window decoders with the actual addresses which will be
+ * coming across the PCI-E link.
+ */
+ bar0 = csio_read_pcie_cfg4(hw, PCI_BASE_ADDRESS_0);
+ bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+
+ /*
+ * Set up memory window for accessing adapter memory ranges. (Read
+ * back MA register to ensure that changes propagate before we attempt
+ * to use the new values.)
+ */
+ csio_wr_reg32(hw, (bar0 + MEMWIN0_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN0_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0));
+ csio_wr_reg32(hw, (bar0 + MEMWIN1_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN1_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1));
+ csio_wr_reg32(hw, (bar0 + MEMWIN2_BASE) | BIR(0) |
+ WINDOW(ilog2(MEMWIN2_APERTURE) - 10),
+ PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ csio_rd_reg32(hw, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
+ return 0;
+} /* csio_hw_set_mem_win */
+
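+/*
+ * Sketch of the WINDOW() encoding assumed above: the field stores
+ * log2(aperture) - 10, so e.g. a 64 KB (2^16 byte) aperture is encoded as
+ * 6 and the smallest expressible aperture is 1 KB.
+ */
+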
+
+
+/*****************************************************************************/
+/* HW State machine assists */
+/*****************************************************************************/
+
+static int
+csio_hw_dev_ready(struct csio_hw *hw)
+{
+ uint32_t reg;
+ int cnt = 6;
+
+ while (((reg = csio_rd_reg32(hw, PL_WHOAMI)) == 0xFFFFFFFF) &&
+ (--cnt != 0))
+ mdelay(100);
+
+ if ((cnt == 0) && (((int32_t)(SOURCEPF_GET(reg)) < 0) ||
+ (SOURCEPF_GET(reg) >= CSIO_MAX_PFN))) {
+ csio_err(hw, "PL_WHOAMI returned 0x%x, cnt:%d\n", reg, cnt);
+ return -EIO;
+ }
+
+ hw->pfn = SOURCEPF_GET(reg);
+
+ return 0;
+}
+
+/*
+ * csio_do_hello - Perform the HELLO FW Mailbox command and process response.
+ * @hw: HW module
+ * @state: Device state
+ *
+ * FW_HELLO_CMD has to be polled for completion.
+ */
+static int
+csio_do_hello(struct csio_hw *hw, enum csio_dev_state *state)
+{
+ struct csio_mb *mbp;
+ int rv = 0;
+ enum csio_dev_master master;
+ enum fw_retval retval;
+ uint8_t mpfn;
+ char state_str[16];
+ int retries = FW_CMD_HELLO_RETRIES;
+
+ memset(state_str, 0, sizeof(state_str));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ rv = -ENOMEM;
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto out;
+ }
+
+ master = csio_force_master ? CSIO_MASTER_MUST : CSIO_MASTER_MAY;
+
+retry:
+ csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn,
+ hw->pfn, master, NULL);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv) {
+ csio_err(hw, "failed to issue HELLO cmd. ret:%d.\n", rv);
+ goto out_free_mb;
+ }
+
+ csio_mb_process_hello_rsp(hw, mbp, &retval, state, &mpfn);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "HELLO cmd failed with ret: %d\n", retval);
+ rv = -EINVAL;
+ goto out_free_mb;
+ }
+
+ /* Firmware has designated us to be master */
+ if (hw->pfn == mpfn) {
+ hw->flags |= CSIO_HWF_MASTER;
+ } else if (*state == CSIO_DEV_STATE_UNINIT) {
+ /*
+ * If we're not the Master PF then we need to wait around for
+ * the Master PF Driver to finish setting up the adapter.
+ *
+ * Note that we also do this wait if we're a non-Master-capable
+ * PF and there is no current Master PF; a Master PF may show up
+ * momentarily and we wouldn't want to fail pointlessly. (This
+ * can happen when an OS loads lots of different drivers rapidly
+ * at the same time). In this case, the Master PF returned by
+ * the firmware will be PCIE_FW_MASTER_MASK so the test below
+ * will work ...
+ */
+
+ int waiting = FW_CMD_HELLO_TIMEOUT;
+
+ /*
+ * Wait for the firmware to either indicate an error or
+ * initialized state. If we see either of these we bail out
+ * and report the issue to the caller. If we exhaust the
+ * "hello timeout" and we haven't exhausted our retries, try
+ * again. Otherwise bail with a timeout error.
+ */
+ for (;;) {
+ uint32_t pcie_fw;
+
+ msleep(50);
+ waiting -= 50;
+
+ /*
+ * If neither Error nor Initialized is indicated
+ * by the firmware, keep waiting till we exhaust our
+ * timeout ... and then retry if we haven't exhausted
+ * our retries ...
+ */
+ pcie_fw = csio_rd_reg32(hw, PCIE_FW);
+ if (!(pcie_fw & (PCIE_FW_ERR|PCIE_FW_INIT))) {
+ if (waiting <= 0) {
+ if (retries-- > 0)
+ goto retry;
+
+ rv = -ETIMEDOUT;
+ break;
+ }
+ continue;
+ }
+
+ /*
+ * We either have an Error or Initialized condition
+ * report errors preferentially.
+ */
+ if (state) {
+ if (pcie_fw & PCIE_FW_ERR) {
+ *state = CSIO_DEV_STATE_ERR;
+ rv = -ETIMEDOUT;
+ } else if (pcie_fw & PCIE_FW_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ }
+
+ /*
+ * If we arrived before a Master PF was selected and
+ * there is now a valid Master PF, grab its identity
+ * for our caller.
+ */
+ if (mpfn == PCIE_FW_MASTER_MASK &&
+ (pcie_fw & PCIE_FW_MASTER_VLD))
+ mpfn = PCIE_FW_MASTER_GET(pcie_fw);
+ break;
+ }
+ hw->flags &= ~CSIO_HWF_MASTER;
+ }
+
+ switch (*state) {
+ case CSIO_DEV_STATE_UNINIT:
+ strcpy(state_str, "Initializing");
+ break;
+ case CSIO_DEV_STATE_INIT:
+ strcpy(state_str, "Initialized");
+ break;
+ case CSIO_DEV_STATE_ERR:
+ strcpy(state_str, "Error");
+ break;
+ default:
+ strcpy(state_str, "Unknown");
+ break;
+ }
+
+ if (hw->pfn == mpfn)
+ csio_info(hw, "PF: %d, Coming up as MASTER, HW state: %s\n",
+ hw->pfn, state_str);
+ else
+ csio_info(hw,
+ "PF: %d, Coming up as SLAVE, Master PF: %d, HW state: %s\n",
+ hw->pfn, mpfn, state_str);
+
+out_free_mb:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return rv;
+}
+
+/*
+ * csio_do_bye - Perform the BYE FW Mailbox command and process response.
+ * @hw: HW module
+ *
+ */
+static int
+csio_do_bye(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of BYE command failed\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_do_reset- Perform the device reset.
+ * @hw: HW module
+ * @fw_rst: FW reset
+ *
+ * If fw_rst is set, issues FW reset mbox cmd otherwise
+ * does PIO reset.
+ * Performs reset of the function.
+ */
+static int
+csio_do_reset(struct csio_hw *hw, bool fw_rst)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ if (!fw_rst) {
+ /* PIO reset */
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ return 0;
+ }
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed.n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "RESET cmd failed with ret:0x%x.\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_validate_caps(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct fw_caps_config_cmd *rsp = (struct fw_caps_config_cmd *)mbp->mb;
+ uint16_t caps;
+
+ caps = ntohs(rsp->fcoecaps);
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_INITIATOR)) {
+ csio_err(hw, "No FCoE Initiator capability in the firmware.\n");
+ return -EINVAL;
+ }
+
+ if (!(caps & FW_CAPS_CONFIG_FCOE_CTRL_OFLD)) {
+ csio_err(hw, "No FCoE Control Offload capability\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_hw_fw_halt - issue a reset/halt to FW and put uP into RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @force: force uP into RESET even if FW RESET command fails
+ *
+ * Issues a RESET command to firmware (if desired) with a HALT indication
+ * and then puts the microprocessor into RESET state. The RESET command
+ * will only be issued if a legitimate mailbox is provided (mbox <=
+ * PCIE_FW_MASTER_MASK).
+ *
+ * This is generally used in order for the host to safely manipulate the
+ * adapter without fear of conflicting with whatever the firmware might
+ * be doing. The only way out of this state is to RESTART the firmware
+ * ...
+ */
+static int
+csio_hw_fw_halt(struct csio_hw *hw, uint32_t mbox, int32_t force)
+{
+ enum fw_retval retval = 0;
+
+ /*
+ * If a legitimate mailbox is provided, issue a RESET command
+ * with a HALT indication.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_mb_reset(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ PIORSTMODE | PIORST, FW_RESET_CMD_HALT(1),
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of RESET command failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ /*
+ * Normally we won't complete the operation if the firmware RESET
+ * command fails but if our caller insists we'll go ahead and put the
+ * uP into RESET. This can be useful if the firmware is hung or even
+ * missing ... We'll have to take the risk of putting the uP into
+ * RESET without the cooperation of firmware in that case.
+ *
+ * We also force the firmware's HALT flag to be on in case we bypassed
+ * the firmware RESET command above or we're dealing with old firmware
+ * which doesn't have the HALT capability. This will serve as a flag
+ * for the incoming firmware to know that it's coming out of a HALT
+ * rather than a RESET ... if it's new enough to understand that ...
+ */
+ if (retval == 0 || force) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, UPCRST);
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, PCIE_FW_HALT);
+ }
+
+ /*
+ * And we always return the result of the firmware RESET command
+ * even when we force the uP into RESET ...
+ */
+ return retval ? -EINVAL : 0;
+}
+
+/*
+ * csio_hw_fw_restart - restart the firmware by taking the uP out of RESET
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @reset: if we want to do a RESET to restart things
+ *
+ * Restart firmware previously halted by csio_hw_fw_halt(). On successful
+ * return the previous PF Master remains as the new PF Master and there
+ * is no need to issue a new HELLO command, etc.
+ *
+ * We do this in two ways:
+ *
+ * 1. If we're dealing with newer firmware we'll simply want to take
+ * the chip's microprocessor out of RESET. This will cause the
+ * firmware to start up from its start vector. And then we'll loop
+ * until the firmware indicates it's started again (PCIE_FW.HALT
+ * reset to 0) or we timeout.
+ *
+ * 2. If we're dealing with older firmware then we'll need to RESET
+ * the chip since older firmware won't recognize the PCIE_FW.HALT
+ * flag and automatically RESET itself on startup.
+ */
+static int
+csio_hw_fw_restart(struct csio_hw *hw, uint32_t mbox, int32_t reset)
+{
+ if (reset) {
+ /*
+ * Since we're directing the RESET instead of the firmware
+ * doing it automatically, we need to clear the PCIE_FW.HALT
+ * bit.
+ */
+ csio_set_reg_field(hw, PCIE_FW, PCIE_FW_HALT, 0);
+
+ /*
+ * If we've been given a valid mailbox, first try to get the
+ * firmware to do the RESET. If that works, great and we can
+ * return success. Otherwise, if we haven't been given a
+ * valid mailbox or the RESET command failed, fall back to
+ * hitting the chip with a hammer.
+ */
+ if (mbox <= PCIE_FW_MASTER_MASK) {
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ msleep(100);
+ if (csio_do_reset(hw, true) == 0)
+ return 0;
+ }
+
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ msleep(2000);
+ } else {
+ int ms;
+
+ csio_set_reg_field(hw, CIM_BOOT_CFG, UPCRST, 0);
+ for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
+ if (!(csio_rd_reg32(hw, PCIE_FW) & PCIE_FW_HALT))
+ return 0;
+ msleep(100);
+ ms += 100;
+ }
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+/*
+ * csio_hw_fw_upgrade - perform all of the steps necessary to upgrade FW
+ * @hw: the HW module
+ * @mbox: mailbox to use for the FW RESET command (if desired)
+ * @fw_data: the firmware image to write
+ * @size: image size
+ * @force: force upgrade even if firmware doesn't cooperate
+ *
+ * Perform all of the steps necessary for upgrading an adapter's
+ * firmware image. Normally this requires the cooperation of the
+ * existing firmware in order to halt all existing activities
+ * but if an invalid mailbox token is passed in we skip that step
+ * (though we'll still put the adapter microprocessor into RESET in
+ * that case).
+ *
+ * On successful return the new firmware will have been loaded and
+ * the adapter will have been fully RESET losing all previous setup
+ * state. On unsuccessful return the adapter may be completely hosed ...
+ * positive errno indicates that the adapter is ~probably~ intact, a
+ * negative errno indicates that things are looking bad ...
+ */
+static int
+csio_hw_fw_upgrade(struct csio_hw *hw, uint32_t mbox,
+ const u8 *fw_data, uint32_t size, int32_t force)
+{
+ const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
+ int reset, ret;
+
+ ret = csio_hw_fw_halt(hw, mbox, force);
+ if (ret != 0 && !force)
+ return ret;
+
+ ret = csio_hw_fw_dload(hw, (uint8_t *) fw_data, size);
+ if (ret != 0)
+ return ret;
+
+ /*
+ * Older versions of the firmware don't understand the new
+ * PCIE_FW.HALT flag and so won't know to perform a RESET when they
+ * restart. So for newly loaded older firmware we'll have to do the
+ * RESET for it so it starts up on a clean slate. We can tell if
+ * the newly loaded firmware will handle this right by checking
+ * its header flags to see if it advertises the capability.
+ */
+ reset = ((ntohl(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
+ return csio_hw_fw_restart(hw, mbox, reset);
+}
+
+
+/*
+ * csio_hw_fw_config_file - setup an adapter via a Configuration File
+ * @hw: the HW module
+ * @mtype: the memory type where the Configuration File is located
+ * @maddr: the memory address where the Configuration File is located
+ * @finiver: return value for CF [fini] version
+ * @finicsum: return value for CF [fini] checksum
+ * @cfcsum: return value for CF computed checksum
+ *
+ * Issue a command to get the firmware to process the Configuration
+ * File located at the specified mtype/maddress. If the Configuration
+ * File is processed successfully and return value pointers are
+ * provided, the Configuration File [fini] section version and
+ * checksum values will be returned along with the computed checksum.
+ * It's up to the caller to decide how it wants to respond to the
+ * checksums not matching, but it is recommended that a prominent warning
+ * be emitted in order to help people rapidly identify changed or
+ * corrupted Configuration Files.
+ *
+ * Also note that it's possible to modify things like "niccaps",
+ * "toecaps",etc. between processing the Configuration File and telling
+ * the firmware to use the new configuration. Callers which want to
+ * do this will need to "hand-roll" their own CAPS_CONFIGS commands for
+ * Configuration Files if they want to do this.
+ */
+static int
+csio_hw_fw_config_file(struct csio_hw *hw,
+ unsigned int mtype, unsigned int maddr,
+ uint32_t *finiver, uint32_t *finicsum, uint32_t *cfcsum)
+{
+ struct csio_mb *mbp;
+ struct fw_caps_config_cmd *caps_cmd;
+ int rv = -EINVAL;
+ enum fw_retval ret;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+ /*
+ * Tell the firmware to process the indicated Configuration File.
+ * If there are no errors and the caller has provided return value
+ * pointers for the [fini] section version, checksum and computed
+ * checksum, pass those back to the caller.
+ */
+ caps_cmd = (struct fw_caps_config_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, caps_cmd, CSIO_MB_DEFAULT_TMO, hw, NULL, 1);
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd->cfvalid_to_len16 =
+ htonl(FW_CAPS_CONFIG_CMD_CFVALID |
+ FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
+ FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) |
+ FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ if (finiver)
+ *finiver = ntohl(caps_cmd->finiver);
+ if (finicsum)
+ *finicsum = ntohl(caps_cmd->finicsum);
+ if (cfcsum)
+ *cfcsum = ntohl(caps_cmd->cfcsum);
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp)) {
+ rv = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * And now tell the firmware to use the configuration we just loaded.
+ */
+ caps_cmd->op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE);
+ caps_cmd->cfvalid_to_len16 = htonl(FW_LEN16(*caps_cmd));
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD failed!\n");
+ goto out;
+ }
+
+ ret = csio_mb_fw_retval(mbp);
+ if (ret != FW_SUCCESS) {
+ csio_dbg(hw, "FW_CAPS_CONFIG_CMD returned %d!\n", rv);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+/*
+ * csio_get_device_params - Get device parameters.
+ * @hw: HW module
+ *
+ */
+static int
+csio_get_device_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 param[6];
+ int i, j = 0;
+
+ /* Initialize portids to -1 */
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ hw->pport[i].portid = -1;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get port vec information. */
+ param[0] = FW_PARAM_DEV(PORTVEC);
+
+ /* Get Core clock. */
+ param[1] = FW_PARAM_DEV(CCLK);
+
+ /* Get EQ id start and end. */
+ param[2] = FW_PARAM_PFVF(EQ_START);
+ param[3] = FW_PARAM_PFVF(EQ_END);
+
+ /* Get IQ id start and end. */
+ param[4] = FW_PARAM_PFVF(IQFLINT_START);
+ param[5] = FW_PARAM_PFVF(IQFLINT_END);
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(param), param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(param), param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* cache the information. */
+ hw->port_vec = param[0];
+ hw->vpd.cclk = param[1];
+ wrm->fw_eq_start = param[2];
+ wrm->fw_iq_start = param[4];
+
+ /* Using FW configured max iqs & eqs */
+ if ((hw->flags & CSIO_HWF_USING_SOFT_PARAMS) ||
+ !csio_is_hw_master(hw)) {
+ hw->cfg_niq = param[5] - param[4] + 1;
+ hw->cfg_neq = param[3] - param[2] + 1;
+ csio_dbg(hw, "Using fwconfig max niqs %d neqs %d\n",
+ hw->cfg_niq, hw->cfg_neq);
+ }
+
+ hw->port_vec &= csio_port_mask;
+
+ hw->num_pports = hweight32(hw->port_vec);
+
+ csio_dbg(hw, "Port vector: 0x%x, #ports: %d\n",
+ hw->port_vec, hw->num_pports);
+
+ for (i = 0; i < hw->num_pports; i++) {
+ while ((hw->port_vec & (1 << j)) == 0)
+ j++;
+ hw->pport[i].portid = j++;
+ csio_dbg(hw, "Found Port:%d\n", hw->pport[i].portid);
+ }
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+
+/*
+ * csio_config_device_caps - Get and set device capabilities.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_device_caps(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv = -EINVAL;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, 0, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(r) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(r) returned %d!\n", retval);
+ goto out;
+ }
+
+ /* Validate device capabilities */
+ if (csio_hw_validate_caps(hw, mbp))
+ goto out;
+
+ /* Don't config device capabilities if already configured */
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+ rv = 0;
+ goto out;
+ }
+
+ /* Write back desired device capabilities */
+ csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO, true, true,
+ false, true, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_CAPS_CONFIG_CMD(w) failed!\n");
+ goto out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_CAPS_CONFIG_CMD(w) returned %d!\n", retval);
+ goto out;
+ }
+
+ rv = 0;
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+}
+
+static int
+csio_config_global_rss(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ csio_rss_glb_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+ FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+ FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ |
+ FW_RSS_GLB_CONFIG_CMD_TNLALLLKP,
+ NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_RSS_GLB_CONFIG_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_RSS_GLB_CONFIG_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_config_pfvf - Configure Physical/Virtual functions settings.
+ * @hw: HW module
+ *
+ */
+static int
+csio_config_pfvf(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * For now, allow all PFs to access to all ports using a pmask
+ * value of 0xF (M_FW_PFVF_CMD_PMASK). Once we have VFs, we will
+ * need to provide access based on some rule.
+ */
+ csio_mb_pfvf(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, CSIO_NEQ,
+ CSIO_NETH_CTRL, CSIO_NIQ_FLINT, 0, 0, CSIO_NVI, CSIO_CMASK,
+ CSIO_PMASK, CSIO_NEXACTF, CSIO_R_CAPS, CSIO_WX_CAPS, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PFVF_CMD failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PFVF_CMD returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_enable_ports - Bring up all available ports.
+ * @hw: HW module.
+ *
+ */
+static int
+csio_enable_ports(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hw->num_pports; i++) {
+ portid = hw->pport[i].portid;
+
+ /* Read PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid,
+ false, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(r) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_port_rsp(hw, mbp, &retval,
+ &hw->pport[i].pcap);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(r) port:%d failed: 0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Write back PORT information */
+ csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, true,
+ (PAUSE_RX | PAUSE_TX), hw->pport[i].pcap, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_PORT_CMD(w) port:%d\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PORT_CMD(w) port:%d failed :0x%x\n",
+ portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ } /* For all ports */
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_get_fcoe_resinfo - Read fcoe fw resource info.
+ * @hw: HW module
+ * Issued with lock held.
+ */
+static int
+csio_get_fcoe_resinfo(struct csio_hw *hw)
+{
+ struct csio_fcoe_res_info *res_info = &hw->fres_info;
+ struct fw_fcoe_res_info_cmd *rsp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FW resource information */
+ csio_fcoe_read_res_info_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FW_FCOE_RES_INFO_CMD\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ rsp = (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_FCOE_RES_INFO_CMD failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ res_info->e_d_tov = ntohs(rsp->e_d_tov);
+ res_info->r_a_tov_seq = ntohs(rsp->r_a_tov_seq);
+ res_info->r_a_tov_els = ntohs(rsp->r_a_tov_els);
+ res_info->r_r_tov = ntohs(rsp->r_r_tov);
+ res_info->max_xchgs = ntohl(rsp->max_xchgs);
+ res_info->max_ssns = ntohl(rsp->max_ssns);
+ res_info->used_xchgs = ntohl(rsp->used_xchgs);
+ res_info->used_ssns = ntohl(rsp->used_ssns);
+ res_info->max_fcfs = ntohl(rsp->max_fcfs);
+ res_info->max_vnps = ntohl(rsp->max_vnps);
+ res_info->used_fcfs = ntohl(rsp->used_fcfs);
+ res_info->used_vnps = ntohl(rsp->used_vnps);
+
+ csio_dbg(hw, "max ssns:%d max xchgs:%d\n", res_info->max_ssns,
+ res_info->max_xchgs);
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+static int
+csio_hw_check_fwconfig(struct csio_hw *hw, u32 *param)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ u32 _param[1];
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /*
+ * Find out whether we're dealing with a version of
+ * the firmware which has configuration file support.
+ */
+ _param[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF));
+
+ csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
+ ARRAY_SIZE(_param), _param, NULL, false, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_PARAMS_CMD(read) failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval,
+ ARRAY_SIZE(_param), _param);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_PARAMS_CMD(read) failed with ret:0x%x!\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ *param = _param[0];
+
+ return 0;
+}
+
+static int
+csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
+{
+ int ret = 0;
+ const struct firmware *cf;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+ unsigned int mtype = 0, maddr = 0;
+ uint32_t *cfg_data;
+ int value_to_add = 0;
+
+ ret = request_firmware(&cf, CSIO_CF_FNAME, dev);
+ if (ret < 0) {
+ csio_err(hw, "could not find config file " CSIO_CF_FNAME
+ ", err: %d\n", ret);
+ return -ENOENT;
+ }
+
+ if (cf->size % 4 != 0)
+ value_to_add = 4 - (cf->size % 4);
+
+ cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
+ if (cfg_data == NULL) {
+ ret = -ENOMEM;
+ goto leave;
+ }
+
+ memcpy((void *)cfg_data, (const void *)cf->data, cf->size);
+ if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
+ ret = -EINVAL;
+ goto leave;
+ }
+
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+
+ ret = csio_memory_write(hw, mtype, maddr,
+ cf->size + value_to_add, cfg_data);
+ if (ret == 0) {
+ csio_info(hw, "config file upgraded to " CSIO_CF_FNAME "\n");
+ strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
+ }
+
+leave:
+ kfree(cfg_data);
+ release_firmware(cf);
+ return ret;
+}
+
+/*
+ * HW initialization: contact FW, obtain config, perform basic init.
+ *
+ * If the firmware we're dealing with has Configuration File support, then
+ * we use that to perform all configuration -- either using the configuration
+ * file stored in flash on the adapter or using a filesystem-local file
+ * if available.
+ *
+ * If we don't have configuration file support in the firmware, then we'll
+ * have to set things up the old fashioned way with hard-coded register
+ * writes and firmware commands ...
+ */
+
+/*
+ * Attempt to initialize the HW via a Firmware Configuration File.
+ */
+static int
+csio_hw_use_fwconfig(struct csio_hw *hw, int reset, u32 *fw_cfg_param)
+{
+ unsigned int mtype, maddr;
+ int rv;
+ uint32_t finiver, finicsum, cfcsum;
+ int using_flash;
+ char path[64];
+
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto bye;
+ }
+
+ /*
+ * If we have a configuration file in the host,
+ * then use that. Otherwise, use the configuration file stored
+ * in the HW flash ...
+ */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_config(hw, fw_cfg_param, path);
+ spin_lock_irq(&hw->lock);
+ if (rv != 0) {
+ if (rv == -ENOENT) {
+ /*
+ * config file was not found. Use default
+ * config file from flash.
+ */
+ mtype = FW_MEMTYPE_CF_FLASH;
+ maddr = csio_hw_flash_cfg_addr(hw);
+ using_flash = 1;
+ } else {
+ /*
+ * we fall back to the hard-wired config if
+ * flashing the config file failed.
+ */
+ goto bye;
+ }
+ } else {
+ mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
+ maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;
+ using_flash = 0;
+ }
+
+ hw->cfg_store = (uint8_t)mtype;
+
+ /*
+ * Issue a Capability Configuration command to the firmware to get it
+ * to parse the Configuration File.
+ */
+ rv = csio_hw_fw_config_file(hw, mtype, maddr, &finiver,
+ &finicsum, &cfcsum);
+ if (rv != 0)
+ goto bye;
+
+ hw->cfg_finiver = finiver;
+ hw->cfg_finicsum = finicsum;
+ hw->cfg_cfcsum = cfcsum;
+ hw->cfg_csum_status = true;
+
+ if (finicsum != cfcsum) {
+ csio_warn(hw,
+ "Config File checksum mismatch: csum=%#x, computed=%#x\n",
+ finicsum, cfcsum);
+
+ hw->cfg_csum_status = false;
+ }
+
+ /*
+ * Note that we're operating with parameters
+ * not supplied by the driver, rather than from hard-wired
+ * initialization constants buried in the driver.
+ */
+ hw->flags |= CSIO_HWF_USING_SOFT_PARAMS;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto bye;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /*
+ * And finally tell the firmware to initialize itself using the
+ * parameters from the Configuration File.
+ */
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+ csio_info(hw,
+ "Firmware Configuration File %s, version %#x, computed checksum %#x\n",
+ (using_flash ? "in device FLASH" : path), finiver, cfcsum);
+
+ return 0;
+
+ /*
+ * Something bad happened. Return the error ...
+ */
+bye:
+ hw->flags &= ~CSIO_HWF_USING_SOFT_PARAMS;
+ csio_dbg(hw, "Configuration file error %d\n", rv);
+ return rv;
+}
+
+/*
+ * Attempt to initialize the adapter via hard-coded, driver supplied
+ * parameters ...
+ */
+static int
+csio_hw_no_fwconfig(struct csio_hw *hw, int reset)
+{
+ int rv;
+ /*
+ * Reset device if necessary
+ */
+ if (reset) {
+ rv = csio_do_reset(hw, true);
+ if (rv != 0)
+ goto out;
+ }
+
+ /* Get and set device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Config Global RSS command */
+ rv = csio_config_global_rss(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure PF/VF capabilities of device */
+ rv = csio_config_pfvf(hw);
+ if (rv != 0)
+ goto out;
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+
+out:
+ return rv;
+}
+
+/*
+ * Returns 0 if flashing succeeded, -EINVAL if the attempt to flash the
+ * firmware failed, and -ECANCELED if flashing was not attempted because
+ * the card already had the latest firmware.
+ */
+static int
+csio_hw_flash_fw(struct csio_hw *hw)
+{
+ int ret = -ECANCELED;
+ const struct firmware *fw;
+ const struct fw_hdr *hdr;
+ u32 fw_ver;
+ struct pci_dev *pci_dev = hw->pdev;
+ struct device *dev = &pci_dev->dev;
+
+ if (request_firmware(&fw, CSIO_FW_FNAME, dev) < 0) {
+ csio_err(hw, "could not find firmware image " CSIO_FW_FNAME "\n");
+ return -EINVAL;
+ }
+
+ hdr = (const struct fw_hdr *)fw->data;
+ fw_ver = ntohl(hdr->fw_ver);
+ if (FW_HDR_FW_VER_MAJOR_GET(fw_ver) != FW_VERSION_MAJOR) {
+ /* wrong major version, won't do */
+ release_firmware(fw);
+ return -EINVAL;
+ }
+
+ /*
+ * If the flash FW is unusable or we found something newer, load it.
+ */
+ if (FW_HDR_FW_VER_MAJOR_GET(hw->fwrev) != FW_VERSION_MAJOR ||
+ fw_ver > hw->fwrev) {
+ ret = csio_hw_fw_upgrade(hw, hw->pfn, fw->data, fw->size,
+ /*force=*/false);
+ if (!ret)
+ csio_info(hw, "firmware upgraded to version %pI4 from "
+ CSIO_FW_FNAME "\n", &hdr->fw_ver);
+ else
+ csio_err(hw, "firmware upgrade failed! err=%d\n", ret);
+ }
+
+ release_firmware(fw);
+
+ return ret;
+}
+
+
+/*
+ * csio_hw_configure - Configure HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_configure(struct csio_hw *hw)
+{
+ int reset = 1;
+ int rv;
+ u32 param[1];
+
+ rv = csio_hw_dev_ready(hw);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* HW version */
+ hw->chip_ver = (char)csio_rd_reg32(hw, PL_REV);
+
+ /* Needed for FW download */
+ rv = csio_hw_get_flash_params(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to get serial flash params rv:%d\n", rv);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Set pci completion timeout value to 4 seconds. */
+ csio_set_pcie_completion_timeout(hw, 0xd);
+
+ csio_hw_set_mem_win(hw);
+
+ rv = csio_hw_get_fw_version(hw, &hw->fwrev);
+ if (rv != 0)
+ goto out;
+
+ csio_hw_print_fw_version(hw, "Firmware revision");
+
+ rv = csio_do_hello(hw, &hw->fw_state);
+ if (rv != 0) {
+ CSIO_INC_STATS(hw, n_err_fatal);
+ csio_post_event(&hw->sm, CSIO_HWE_FATAL);
+ goto out;
+ }
+
+ /* Read vpd */
+ rv = csio_hw_get_vpd_params(hw, &hw->vpd);
+ if (rv != 0)
+ goto out;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_hw_check_fw_version(hw);
+ if (rv == -EINVAL) {
+
+ /* Do firmware update */
+ spin_unlock_irq(&hw->lock);
+ rv = csio_hw_flash_fw(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv == 0) {
+ reset = 0;
+ /*
+ * Note that the chip was reset as part of the
+ * firmware upgrade so we don't reset it again
+ * below and grab the new firmware version.
+ */
+ rv = csio_hw_check_fw_version(hw);
+ }
+ }
+ /*
+ * If the firmware doesn't support Configuration
+ * Files, use the old Driver-based, hard-wired
+ * initialization. Otherwise, try using the
+ * Configuration File support and fall back to the
+ * Driver-based initialization if there's no
+ * Configuration File found.
+ */
+ if (csio_hw_check_fwconfig(hw, param) == 0) {
+ rv = csio_hw_use_fwconfig(hw, reset, param);
+ if (rv == -ENOENT)
+ goto out;
+ if (rv != 0) {
+ csio_info(hw,
+ "No Configuration File present "
+ "on adapter. Using hard-wired "
+ "configuration parameters.\n");
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+ } else {
+ rv = csio_hw_no_fwconfig(hw, reset);
+ }
+
+ if (rv != 0)
+ goto out;
+
+ } else {
+ if (hw->fw_state == CSIO_DEV_STATE_INIT) {
+
+ /* device parameters */
+ rv = csio_get_device_params(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Get device capabilities */
+ rv = csio_config_device_caps(hw);
+ if (rv != 0)
+ goto out;
+
+ /* Configure SGE */
+ csio_wr_sge_init(hw);
+
+ /* Post event to notify completion of configuration */
+ csio_post_event(&hw->sm, CSIO_HWE_INIT);
+ goto out;
+ }
+ } /* if not master */
+
+out:
+ return;
+}
+
+/*
+ * csio_hw_initialize - Initialize HW
+ * @hw - HW module
+ *
+ */
+static void
+csio_hw_initialize(struct csio_hw *hw)
+{
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ int rv;
+ int i;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ goto out;
+
+ csio_mb_initialize(hw, mbp, CSIO_MB_DEFAULT_TMO, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FW_INITIALIZE_CMD failed!\n");
+ goto free_and_out;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FW_INITIALIZE_CMD returned 0x%x!\n",
+ retval);
+ goto free_and_out;
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ }
+
+ rv = csio_get_fcoe_resinfo(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to read fcoe resource info: %d\n", rv);
+ goto out;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ rv = csio_config_queues(hw);
+ spin_lock_irq(&hw->lock);
+
+ if (rv != 0) {
+ csio_err(hw, "Config of queues failed!: %d\n", rv);
+ goto out;
+ }
+
+ for (i = 0; i < hw->num_pports; i++)
+ hw->pport[i].mod_type = FW_PORT_MOD_TYPE_NA;
+
+ if (csio_is_hw_master(hw) && hw->fw_state != CSIO_DEV_STATE_INIT) {
+ rv = csio_enable_ports(hw);
+ if (rv != 0) {
+ csio_err(hw, "Failed to enable ports: %d\n", rv);
+ goto out;
+ }
+ }
+
+ csio_post_event(&hw->sm, CSIO_HWE_INIT_DONE);
+ return;
+
+free_and_out:
+ mempool_free(mbp, hw->mb_mempool);
+out:
+ return;
+}
+
+#define PF_INTR_MASK (PFSW | PFCIM)
+
+/*
+ * csio_hw_intr_enable - Enable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Enable interrupts in HW registers.
+ */
+static void
+csio_hw_intr_enable(struct csio_hw *hw)
+{
+ uint16_t vec = (uint16_t)csio_get_mb_intr_idx(csio_hw_to_mbm(hw));
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+ uint32_t pl = csio_rd_reg32(hw, PL_INT_ENABLE);
+
+ /*
+ * Set aivec for MSI/MSIX. PCIE_PF_CFG.INTXType is set up
+ * by FW, so do nothing for INTX.
+ */
+ if (hw->intr_mode == CSIO_IM_MSIX)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), vec);
+ else if (hw->intr_mode == CSIO_IM_MSI)
+ csio_set_reg_field(hw, MYPF_REG(PCIE_PF_CFG),
+ AIVEC(AIVEC_MASK), 0);
+
+ csio_wr_reg32(hw, PF_INTR_MASK, MYPF_REG(PL_PF_INT_ENABLE));
+
+ /* Turn on MB interrupts - this will internally flush PIO as well */
+ csio_mb_intr_enable(hw);
+
+ /* These are common registers - only a master can modify them */
+ if (csio_is_hw_master(hw)) {
+ /*
+ * Disable the Serial FLASH interrupt, if enabled!
+ */
+ pl &= (~SF);
+ csio_wr_reg32(hw, pl, PL_INT_ENABLE);
+
+ csio_wr_reg32(hw, ERR_CPL_EXCEED_IQE_SIZE |
+ EGRESS_SIZE_ERR | ERR_INVALID_CIDX_INC |
+ ERR_CPL_OPCODE_0 | ERR_DROPPED_DB |
+ ERR_DATA_CPL_ON_HIGH_QID1 |
+ ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+ ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+ ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+ ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR,
+ SGE_INT_ENABLE3);
+ csio_set_reg_field(hw, PL_INT_MAP0, 0, 1 << pf);
+ }
+
+ hw->flags |= CSIO_HWF_HW_INTR_ENABLED;
+
+}
+
+/*
+ * csio_hw_intr_disable - Disable HW interrupts
+ * @hw: Pointer to HW module.
+ *
+ * Turn off Mailbox and PCI_PF_CFG interrupts.
+ */
+void
+csio_hw_intr_disable(struct csio_hw *hw)
+{
+ uint32_t pf = SOURCEPF_GET(csio_rd_reg32(hw, PL_WHOAMI));
+
+ if (!(hw->flags & CSIO_HWF_HW_INTR_ENABLED))
+ return;
+
+ hw->flags &= ~CSIO_HWF_HW_INTR_ENABLED;
+
+ csio_wr_reg32(hw, 0, MYPF_REG(PL_PF_INT_ENABLE));
+ if (csio_is_hw_master(hw))
+ csio_set_reg_field(hw, PL_INT_MAP0, 1 << pf, 0);
+
+ /* Turn off MB interrupts */
+ csio_mb_intr_disable(hw);
+
+}
+
+static void
+csio_hw_fatal_err(struct csio_hw *hw)
+{
+ csio_set_reg_field(hw, SGE_CONTROL, GLOBALENABLE, 0);
+ csio_hw_intr_disable(hw);
+
+ /* Do not reset HW, we may need FW state for debugging */
+ csio_fatal(hw, "HW Fatal error encountered!\n");
+}
+
+/*****************************************************************************/
+/* START: HW SM */
+/*****************************************************************************/
+/*
+ * csio_hws_uninit - Uninit state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_uninit(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_CFG:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_configuring - Configuring state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_configuring(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT:
+ csio_set_state(&hw->sm, csio_hws_initializing);
+ csio_hw_initialize(hw);
+ break;
+
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_initializing - Initializing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_initializing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_INIT_DONE:
+ csio_set_state(&hw->sm, csio_hws_ready);
+
+ /* Fan out event to all lnode SMs */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREADY);
+
+ /* Enable interrupts */
+ csio_hw_intr_enable(hw);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_do_bye(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_ready - Ready state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_ready(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ /* Remember the event */
+ hw->evtflag = evt;
+
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ case CSIO_HWE_FW_DLOAD:
+ case CSIO_HWE_SUSPEND:
+ case CSIO_HWE_PCI_REMOVE:
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_quiescing);
+ /* cleanup all outstanding cmds */
+ if (evt == CSIO_HWE_HBA_RESET ||
+ evt == CSIO_HWE_PCIERR_DETECTED)
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), false);
+ else
+ csio_scsim_cleanup_io(csio_hw_to_scsim(hw), true);
+
+ csio_hw_intr_disable(hw);
+ csio_hw_mbm_cleanup(hw);
+ csio_evtq_stop(hw);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWSTOP);
+ csio_evtq_flush(hw);
+ csio_mgmtm_cleanup(csio_hw_to_mgmtm(hw));
+ csio_post_event(&hw->sm, CSIO_HWE_QUIESCED);
+ break;
+
+ case CSIO_HWE_FATAL:
+ csio_set_state(&hw->sm, csio_hws_uninit);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiescing - Quiescing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiescing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_QUIESCED:
+ switch (hw->evtflag) {
+ case CSIO_HWE_FW_DLOAD:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Download firmware */
+ /* Fall through */
+
+ case CSIO_HWE_HBA_RESET:
+ csio_set_state(&hw->sm, csio_hws_resetting);
+ /* Start reset of the HBA */
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWRESET);
+ csio_wr_destroy_queues(hw, false);
+ csio_do_reset(hw, false);
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET_DONE);
+ break;
+
+ case CSIO_HWE_PCI_REMOVE:
+ csio_set_state(&hw->sm, csio_hws_removing);
+ csio_notify_lnodes(hw, CSIO_LN_NOTIFY_HWREMOVE);
+ csio_wr_destroy_queues(hw, true);
+ /* Now send the bye command */
+ csio_do_bye(hw);
+ break;
+
+ case CSIO_HWE_SUSPEND:
+ csio_set_state(&hw->sm, csio_hws_quiesced);
+ break;
+
+ case CSIO_HWE_PCIERR_DETECTED:
+ csio_set_state(&hw->sm, csio_hws_pcierr);
+ csio_wr_destroy_queues(hw, false);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_quiesced - Quiesced state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_quiesced(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_RESUME:
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_resetting - HW Resetting state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_resetting(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET_DONE:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_hws_removing - PCI Hotplug removing state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_removing(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_HBA_RESET:
+ if (!csio_is_hw_master(hw))
+ break;
+ /*
+ * The BYE should have already been issued, so we can't
+ * use the mailbox interface. Hence we use the PL_RST
+ * register directly.
+ */
+ csio_err(hw, "Resetting HW and waiting 2 seconds...\n");
+ csio_wr_reg32(hw, PIORSTMODE | PIORST, PL_RST);
+ mdelay(2000);
+ break;
+
+ /* Should never receive any new events */
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+
+ }
+}
+
+/*
+ * csio_hws_pcierr - PCI Error state
+ * @hw - HW module
+ * @evt - Event
+ *
+ */
+static void
+csio_hws_pcierr(struct csio_hw *hw, enum csio_hw_ev evt)
+{
+ hw->prev_evt = hw->cur_evt;
+ hw->cur_evt = evt;
+ CSIO_INC_STATS(hw, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_HWE_PCIERR_SLOT_RESET:
+ csio_evtq_start(hw);
+ csio_set_state(&hw->sm, csio_hws_configuring);
+ csio_hw_configure(hw);
+ break;
+
+ default:
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: HW SM */
+/*****************************************************************************/
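+
+/*
+ * Rough transition graph implied by the handlers above (a reader's
+ * sketch; see the individual state functions for the full event set):
+ *
+ *  uninit --CFG--> configuring --INIT--> initializing --INIT_DONE--> ready
+ *  ready --HBA_RESET/FW_DLOAD/SUSPEND/PCI_REMOVE/PCIERR_DETECTED--> quiescing
+ *  quiescing --QUIESCED--> resetting/removing/quiesced/pcierr (per evtflag)
+ *  resetting --HBA_RESET_DONE--> configuring
+ *  quiesced --RESUME--> configuring
+ *  pcierr --PCIERR_SLOT_RESET--> configuring
+ *  (FATAL from configuring/initializing/ready returns to uninit)
+ */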
+
+/* Slow path handlers */
+struct intr_info {
+ unsigned int mask; /* bits to check in interrupt status */
+ const char *msg; /* message to print or NULL */
+ short stat_idx; /* stat counter to increment or -1 */
+ unsigned short fatal; /* whether the condition reported is fatal */
+};
+
+/*
+ * csio_handle_intr_status - table driven interrupt handler
+ * @hw: HW instance
+ * @reg: the interrupt status register to process
+ * @acts: table of interrupt actions
+ *
+ * A table driven interrupt handler that applies a set of masks to an
+ * interrupt status word and performs the corresponding actions if the
+ * interrupts described by the mask have occurred. The actions include
+ * optionally emitting a warning or alert message. The table is terminated
+ * by an entry specifying mask 0. Returns the number of fatal interrupt
+ * conditions.
+ */
+static int
+csio_handle_intr_status(struct csio_hw *hw, unsigned int reg,
+ const struct intr_info *acts)
+{
+ int fatal = 0;
+ unsigned int mask = 0;
+ unsigned int status = csio_rd_reg32(hw, reg);
+
+ for ( ; acts->mask; ++acts) {
+ if (!(status & acts->mask))
+ continue;
+ if (acts->fatal) {
+ fatal++;
+ csio_fatal(hw, "Fatal %s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ } else if (acts->msg)
+ csio_info(hw, "%s (0x%x)\n",
+ acts->msg, status & acts->mask);
+ mask |= acts->mask;
+ }
+ status &= mask;
+ if (status) /* clear processed interrupts */
+ csio_wr_reg32(hw, status, reg);
+ return fatal;
+}
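+
+/*
+ * A minimal usage sketch (illustrative only; FOO_INT_CAUSE and FOOPERR
+ * are hypothetical names): build a table terminated by a mask-0 entry
+ * and hand it to the helper along with the module's cause register:
+ *
+ *	static struct intr_info foo_intr_info[] = {
+ *		{ FOOPERR, "FOO parity error", -1, 1 },
+ *		{ 0, NULL, 0, 0 }
+ *	};
+ *
+ *	if (csio_handle_intr_status(hw, FOO_INT_CAUSE, foo_intr_info))
+ *		csio_hw_fatal_err(hw);
+ *
+ * The per-module handlers below all follow this pattern.
+ */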
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void
+csio_pcie_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info sysbus_intr_info[] = {
+ { RNPP, "RXNP array parity error", -1, 1 },
+ { RPCP, "RXPC array parity error", -1, 1 },
+ { RCIP, "RXCIF array parity error", -1, 1 },
+ { RCCP, "Rx completions control array parity error", -1, 1 },
+ { RFTP, "RXFT array parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_port_intr_info[] = {
+ { TPCP, "TXPC array parity error", -1, 1 },
+ { TNPP, "TXNP array parity error", -1, 1 },
+ { TFTP, "TXFT array parity error", -1, 1 },
+ { TCAP, "TXCA array parity error", -1, 1 },
+ { TCIP, "TXCIF array parity error", -1, 1 },
+ { RCAP, "RXCA array parity error", -1, 1 },
+ { OTDD, "outbound request TLP discarded", -1, 1 },
+ { RDPE, "Rx data parity error", -1, 1 },
+ { TDUE, "Tx uncorrectable data error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info pcie_intr_info[] = {
+ { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+ { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+ { MSIDATAPERR, "MSI data parity error", -1, 1 },
+ { MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+ { MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+ { MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+ { MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+ { PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+ { PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+ { TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+ { CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+ { CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+ { CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+ { DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+ { DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+ { DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+ { HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+ { HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+ { HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+ { CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+ { FIDPERR, "PCI FID parity error", -1, 1 },
+ { INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+ { MATAGPERR, "PCI MA tag parity error", -1, 1 },
+ { PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+ { RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+ { RXWRPERR, "PCI Rx write parity error", -1, 1 },
+ { RPLPERR, "PCI replay buffer parity error", -1, 1 },
+ { PCIESINT, "PCI core secondary fault", -1, 1 },
+ { PCIEPINT, "PCI core primary fault", -1, 1 },
+ { UNXSPLCPLERR, "PCI unexpected split completion error", -1,
+ 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+ sysbus_intr_info) +
+ csio_handle_intr_status(hw,
+ PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+ pcie_port_intr_info) +
+ csio_handle_intr_status(hw, PCIE_INT_CAUSE, pcie_intr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void csio_tp_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info tp_intr_info[] = {
+ { 0x3fffffff, "TP parity error", -1, 1 },
+ { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, TP_INT_CAUSE, tp_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void csio_sge_intr_handler(struct csio_hw *hw)
+{
+ uint64_t v;
+
+ static struct intr_info sge_intr_info[] = {
+ { ERR_CPL_EXCEED_IQE_SIZE,
+ "SGE received CPL exceeding IQE size", -1, 1 },
+ { ERR_INVALID_CIDX_INC,
+ "SGE GTS CIDX increment too large", -1, 0 },
+ { ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+ { ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+ { ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+ "SGE IQID > 1023 received CPL for FL", -1, 0 },
+ { ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+ 0 },
+ { ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+ 0 },
+ { ERR_ING_CTXT_PRIO,
+ "SGE too many priority ingress contexts", -1, 0 },
+ { ERR_EGR_CTXT_PRIO,
+ "SGE too many priority egress contexts", -1, 0 },
+ { INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+ { EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+ { 0, NULL, 0, 0 }
+ };
+
+ v = (uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE1) |
+ ((uint64_t)csio_rd_reg32(hw, SGE_INT_CAUSE2) << 32);
+ if (v) {
+ csio_fatal(hw, "SGE parity error (%#llx)\n",
+ (unsigned long long)v);
+ csio_wr_reg32(hw, (uint32_t)(v & 0xFFFFFFFF),
+ SGE_INT_CAUSE1);
+ csio_wr_reg32(hw, (uint32_t)(v >> 32), SGE_INT_CAUSE2);
+ }
+
+ v |= csio_handle_intr_status(hw, SGE_INT_CAUSE3, sge_intr_info);
+
+ if (v != 0)
+ csio_hw_fatal_err(hw);
+}
+
+#define CIM_OBQ_INTR (OBQULP0PARERR | OBQULP1PARERR | OBQULP2PARERR |\
+ OBQULP3PARERR | OBQSGEPARERR | OBQNCSIPARERR)
+#define CIM_IBQ_INTR (IBQTP0PARERR | IBQTP1PARERR | IBQULPPARERR |\
+ IBQSGEHIPARERR | IBQSGELOPARERR | IBQNCSIPARERR)
+
+/*
+ * CIM interrupt handler.
+ */
+static void csio_cim_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cim_intr_info[] = {
+ { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+ { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
+ { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
+ { MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+ { MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+ { TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+ { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info cim_upintr_info[] = {
+ { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+ { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+ { ILLWRINT, "CIM illegal write", -1, 1 },
+ { ILLRDINT, "CIM illegal read", -1, 1 },
+ { ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+ { ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+ { SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+ { SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+ { BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+ { SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+ { SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+ { BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+ { SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+ { SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+ { BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+ { BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+ { SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+ { SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+ { BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+ { BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+ { SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+ { SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+ { BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+ { BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+ { REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+ { RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+ { TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+ { TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, CIM_HOST_INT_CAUSE,
+ cim_intr_info) +
+ csio_handle_intr_status(hw, CIM_HOST_UPACC_INT_CAUSE,
+ cim_upintr_info);
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void csio_ulprx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulprx_intr_info[] = {
+ { 0x1800000, "ULPRX context error", -1, 1 },
+ { 0x7fffff, "ULPRX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_RX_INT_CAUSE, ulprx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void csio_ulptx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ulptx_intr_info[] = {
+ { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+ 0 },
+ { PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+ 0 },
+ { 0xfffffff, "ULPTX parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, ULP_TX_INT_CAUSE, ulptx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void csio_pmtx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmtx_intr_info[] = {
+ { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+ { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+ { ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+ { 0xffffff0, "PMTX framing error", -1, 1 },
+ { OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1,
+ 1 },
+ { ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+ { C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_TX_INT_CAUSE, pmtx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void csio_pmrx_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pmrx_intr_info[] = {
+ { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+ { 0x3ffff0, "PMRX framing error", -1, 1 },
+ { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+ { DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1,
+ 1 },
+ { IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+ { E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PM_RX_INT_CAUSE, pmrx_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void csio_cplsw_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info cplsw_intr_info[] = {
+ { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+ { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+ { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+ { SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+ { CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+ { ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, CPL_INTR_CAUSE, cplsw_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void csio_le_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info le_intr_info[] = {
+ { LIPMISS, "LE LIP miss", -1, 0 },
+ { LIP0, "LE 0 LIP error", -1, 0 },
+ { PARITYERR, "LE parity error", -1, 1 },
+ { UNKNOWNCMD, "LE unknown command", -1, 1 },
+ { REQQPARERR, "LE request queue parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, LE_DB_INT_CAUSE, le_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void csio_mps_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info mps_rx_intr_info[] = {
+ { 0xffffff, "MPS Rx parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_tx_intr_info[] = {
+ { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+ { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+ { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+ { TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+ { BUBBLE, "MPS Tx underflow", -1, 1 },
+ { SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+ { FRMERR, "MPS Tx framing error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_trc_intr_info[] = {
+ { FILTMEM, "MPS TRC filter parity error", -1, 1 },
+ { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+ { MISCPERR, "MPS TRC misc parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_sram_intr_info[] = {
+ { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_tx_intr_info[] = {
+ { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_stat_rx_intr_info[] = {
+ { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+ static struct intr_info mps_cls_intr_info[] = {
+ { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+ { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+ { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ int fat;
+
+ fat = csio_handle_intr_status(hw, MPS_RX_PERR_INT_CAUSE,
+ mps_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TX_INT_CAUSE,
+ mps_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_TRC_INT_CAUSE,
+ mps_trc_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_SRAM,
+ mps_stat_sram_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+ mps_stat_tx_intr_info) +
+ csio_handle_intr_status(hw, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+ mps_stat_rx_intr_info) +
+ csio_handle_intr_status(hw, MPS_CLS_INT_CAUSE,
+ mps_cls_intr_info);
+
+ csio_wr_reg32(hw, 0, MPS_INT_CAUSE);
+ csio_rd_reg32(hw, MPS_INT_CAUSE); /* flush */
+ if (fat)
+ csio_hw_fatal_err(hw);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void csio_mem_intr_handler(struct csio_hw *hw, int idx)
+{
+ static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+ unsigned int addr, cnt_addr, v;
+
+ if (idx <= MEM_EDC1) {
+ addr = EDC_REG(EDC_INT_CAUSE, idx);
+ cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+ } else {
+ addr = MC_INT_CAUSE;
+ cnt_addr = MC_ECC_STATUS;
+ }
+
+ v = csio_rd_reg32(hw, addr) & MEM_INT_MASK;
+ if (v & PERR_INT_CAUSE)
+ csio_fatal(hw, "%s FIFO parity error\n", name[idx]);
+ if (v & ECC_CE_INT_CAUSE) {
+ uint32_t cnt = ECC_CECNT_GET(csio_rd_reg32(hw, cnt_addr));
+
+ csio_wr_reg32(hw, ECC_CECNT_MASK, cnt_addr);
+ csio_warn(hw, "%u %s correctable ECC data error%s\n",
+ cnt, name[idx], cnt > 1 ? "s" : "");
+ }
+ if (v & ECC_UE_INT_CAUSE)
+ csio_fatal(hw, "%s uncorrectable ECC data error\n", name[idx]);
+
+ csio_wr_reg32(hw, v, addr);
+ if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void csio_ma_intr_handler(struct csio_hw *hw)
+{
+ uint32_t v, status = csio_rd_reg32(hw, MA_INT_CAUSE);
+
+ if (status & MEM_PERR_INT_CAUSE)
+ csio_fatal(hw, "MA parity error, parity status %#x\n",
+ csio_rd_reg32(hw, MA_PARITY_ERROR_STATUS));
+ if (status & MEM_WRAP_INT_CAUSE) {
+ v = csio_rd_reg32(hw, MA_INT_WRAP_STATUS);
+ csio_fatal(hw,
+ "MA address wrap-around error by client %u to address %#x\n",
+ MEM_WRAP_CLIENT_NUM_GET(v), MEM_WRAP_ADDRESS_GET(v) << 4);
+ }
+ csio_wr_reg32(hw, status, MA_INT_CAUSE);
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void csio_smb_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info smb_intr_info[] = {
+ { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+ { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+ { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, SMB_INT_CAUSE, smb_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void csio_ncsi_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info ncsi_intr_info[] = {
+ { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+ { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+ { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+ { RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, NCSI_INT_CAUSE, ncsi_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void csio_xgmac_intr_handler(struct csio_hw *hw, int port)
+{
+ uint32_t v = csio_rd_reg32(hw, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+ v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+ if (!v)
+ return;
+
+ if (v & TXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Tx FIFO parity error\n", port);
+ if (v & RXFIFO_PRTY_ERR)
+ csio_fatal(hw, "XGMAC %d Rx FIFO parity error\n", port);
+ csio_wr_reg32(hw, v, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void csio_pl_intr_handler(struct csio_hw *hw)
+{
+ static struct intr_info pl_intr_info[] = {
+ { FATALPERR, "T4 fatal parity error", -1, 1 },
+ { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+ { 0, NULL, 0, 0 }
+ };
+
+ if (csio_handle_intr_status(hw, PL_PL_INT_CAUSE, pl_intr_info))
+ csio_hw_fatal_err(hw);
+}
+
+/*
+ * csio_hw_slow_intr_handler - control path interrupt handler
+ * @hw: HW module
+ *
+ * Interrupt handler for non-data global interrupt events, e.g., errors.
+ * The designation 'slow' is because it involves register reads, while
+ * data interrupts typically don't involve any MMIOs.
+ */
+int
+csio_hw_slow_intr_handler(struct csio_hw *hw)
+{
+ uint32_t cause = csio_rd_reg32(hw, PL_INT_CAUSE);
+
+ if (!(cause & CSIO_GLBL_INTR_MASK)) {
+ CSIO_INC_STATS(hw, n_plint_unexp);
+ return 0;
+ }
+
+ csio_dbg(hw, "Slow interrupt! cause: 0x%x\n", cause);
+
+ CSIO_INC_STATS(hw, n_plint_cnt);
+
+ if (cause & CIM)
+ csio_cim_intr_handler(hw);
+
+ if (cause & MPS)
+ csio_mps_intr_handler(hw);
+
+ if (cause & NCSI)
+ csio_ncsi_intr_handler(hw);
+
+ if (cause & PL)
+ csio_pl_intr_handler(hw);
+
+ if (cause & SMB)
+ csio_smb_intr_handler(hw);
+
+ if (cause & XGMAC0)
+ csio_xgmac_intr_handler(hw, 0);
+
+ if (cause & XGMAC1)
+ csio_xgmac_intr_handler(hw, 1);
+
+ if (cause & XGMAC_KR0)
+ csio_xgmac_intr_handler(hw, 2);
+
+ if (cause & XGMAC_KR1)
+ csio_xgmac_intr_handler(hw, 3);
+
+ if (cause & PCIE)
+ csio_pcie_intr_handler(hw);
+
+ if (cause & MC)
+ csio_mem_intr_handler(hw, MEM_MC);
+
+ if (cause & EDC0)
+ csio_mem_intr_handler(hw, MEM_EDC0);
+
+ if (cause & EDC1)
+ csio_mem_intr_handler(hw, MEM_EDC1);
+
+ if (cause & LE)
+ csio_le_intr_handler(hw);
+
+ if (cause & TP)
+ csio_tp_intr_handler(hw);
+
+ if (cause & MA)
+ csio_ma_intr_handler(hw);
+
+ if (cause & PM_TX)
+ csio_pmtx_intr_handler(hw);
+
+ if (cause & PM_RX)
+ csio_pmrx_intr_handler(hw);
+
+ if (cause & ULP_RX)
+ csio_ulprx_intr_handler(hw);
+
+ if (cause & CPL_SWITCH)
+ csio_cplsw_intr_handler(hw);
+
+ if (cause & SGE)
+ csio_sge_intr_handler(hw);
+
+ if (cause & ULP_TX)
+ csio_ulptx_intr_handler(hw);
+
+ /* Clear the interrupts just processed for which we are the master. */
+ csio_wr_reg32(hw, cause & CSIO_GLBL_INTR_MASK, PL_INT_CAUSE);
+ csio_rd_reg32(hw, PL_INT_CAUSE); /* flush */
+
+ return 1;
+}
+
+/*****************************************************************************
+ * HW <--> mailbox interfacing routines.
+ ****************************************************************************/
+/*
+ * csio_mberr_worker - Worker thread (dpc) for mailbox/error completions
+ *
+ * @data: Private data pointer.
+ *
+ * Called from worker thread context.
+ */
+static void
+csio_mberr_worker(void *data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mbm *mbm = &hw->mbm;
+ LIST_HEAD(cbfn_q);
+ struct csio_mb *mbp_next;
+ int rv;
+
+ del_timer_sync(&mbm->timer);
+
+ spin_lock_irq(&hw->lock);
+ if (list_empty(&mbm->cbfn_q)) {
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ list_splice_tail_init(&mbm->cbfn_q, &cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+
+ /* Try to start waiting mailboxes */
+ if (!list_empty(&mbm->req_q)) {
+ mbp_next = list_first_entry(&mbm->req_q, struct csio_mb, list);
+ list_del_init(&mbp_next->list);
+
+ rv = csio_mb_issue(hw, mbp_next);
+ if (rv != 0)
+ list_add_tail(&mbp_next->list, &mbm->req_q);
+ else
+ CSIO_DEC_STATS(mbm, n_activeq);
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Now callback completions */
+ csio_mb_completions(hw, &cbfn_q);
+}
+
+/*
+ * csio_hw_mb_timer - Top-level Mailbox timeout handler.
+ *
+ * @data: private data pointer
+ *
+ */
+static void
+csio_hw_mb_timer(uintptr_t data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mb *mbp = NULL;
+
+ spin_lock_irq(&hw->lock);
+ mbp = csio_mb_tmo_handler(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Call back the function for the timed-out Mailbox */
+ if (mbp)
+ mbp->mb_cbfn(hw, mbp);
+
+}
+
+/*
+ * csio_hw_mbm_cleanup - Cleanup Mailbox module.
+ * @hw: HW module
+ *
+ * Called with lock held, should exit with lock held.
+ * Cancels outstanding mailboxes (waiting, in-flight) and gathers them
+ * into a local queue. Drops the lock and calls the completions, then
+ * re-acquires the lock before returning.
+ */
+static void
+csio_hw_mbm_cleanup(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+
+ csio_mb_cancel_all(hw, &cbfn_q);
+
+ spin_unlock_irq(&hw->lock);
+ csio_mb_completions(hw, &cbfn_q);
+ spin_lock_irq(&hw->lock);
+}
+
+/*****************************************************************************
+ * Event handling
+ ****************************************************************************/
+int
+csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ return -EINVAL;
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ return -ENOMEM;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+ memcpy((void *)evt_entry->data, evt_msg, len);
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+
+ return 0;
+}
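+
+/*
+ * Caller sketch (illustrative only): the device-loss path is expected
+ * to queue the rnode pointer itself as the payload, which is how
+ * csio_evtq_worker() recovers it for CSIO_EVT_DEV_LOSS below:
+ *
+ *	struct csio_rnode *rn = ...;
+ *	if (csio_enqueue_evt(hw, CSIO_EVT_DEV_LOSS, &rn, sizeof(rn)))
+ *		CSIO_INC_STATS(hw, n_evt_drop);
+ *
+ * csio_enqueue_evt() touches the event queues without taking hw->lock,
+ * so it must be called with the lock held; csio_enqueue_evt_lock()
+ * below is the variant that takes the lock itself.
+ */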
+
+static int
+csio_enqueue_evt_lock(struct csio_hw *hw, enum csio_evt type, void *evt_msg,
+ uint16_t len, bool msg_sg)
+{
+ struct csio_evt_msg *evt_entry = NULL;
+ struct csio_fl_dma_buf *fl_sg;
+ uint32_t off = 0;
+ unsigned long flags;
+ int n, ret = 0;
+
+ if (type >= CSIO_EVT_MAX)
+ return -EINVAL;
+
+ if (len > CSIO_EVT_MSG_SIZE)
+ return -EINVAL;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (list_empty(&hw->evt_free_q)) {
+ csio_err(hw, "Failed to alloc evt entry, msg type %d len %d\n",
+ type, len);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ evt_entry = list_first_entry(&hw->evt_free_q,
+ struct csio_evt_msg, list);
+ list_del_init(&evt_entry->list);
+
+ /* copy event msg and queue the event */
+ evt_entry->type = type;
+
+ /* If Payload in SG list*/
+ if (msg_sg) {
+ fl_sg = (struct csio_fl_dma_buf *) evt_msg;
+ for (n = 0; (n < CSIO_MAX_FLBUF_PER_IQWR && off < len); n++) {
+ memcpy((void *)((uintptr_t)evt_entry->data + off),
+ fl_sg->flbufs[n].vaddr,
+ fl_sg->flbufs[n].len);
+ off += fl_sg->flbufs[n].len;
+ }
+ } else
+ memcpy((void *)evt_entry->data, evt_msg, len);
+
+ list_add_tail(&evt_entry->list, &hw->evt_active_q);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ CSIO_INC_STATS(hw, n_evt_activeq);
+out:
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return ret;
+}
+
+static void
+csio_free_evt(struct csio_hw *hw, struct csio_evt_msg *evt_entry)
+{
+ if (evt_entry) {
+ spin_lock_irq(&hw->lock);
+ list_del_init(&evt_entry->list);
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_DEC_STATS(hw, n_evt_activeq);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ spin_unlock_irq(&hw->lock);
+ }
+}
+
+void
+csio_evtq_flush(struct csio_hw *hw)
+{
+ uint32_t count = 30;
+
+ /* Wait up to 60 seconds (30 * 2s) for pending FW events to drain */
+ while (hw->flags & CSIO_HWF_FWEVT_PENDING && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ CSIO_DB_ASSERT(!(hw->flags & CSIO_HWF_FWEVT_PENDING));
+}
+
+static void
+csio_evtq_stop(struct csio_hw *hw)
+{
+ hw->flags |= CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_start(struct csio_hw *hw)
+{
+ hw->flags &= ~CSIO_HWF_FWEVT_STOP;
+}
+
+static void
+csio_evtq_cleanup(struct csio_hw *hw)
+{
+ struct list_head *evt_entry, *next_entry;
+
+ /* Release outstanding events from activeq to freeq*/
+ if (!list_empty(&hw->evt_active_q))
+ list_splice_tail_init(&hw->evt_active_q, &hw->evt_free_q);
+
+ hw->stats.n_evt_activeq = 0;
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+
+ /* Freeup event entry */
+ list_for_each_safe(evt_entry, next_entry, &hw->evt_free_q) {
+ kfree(evt_entry);
+ CSIO_DEC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->stats.n_evt_freeq = 0;
+}
+
+
+static void
+csio_process_fwevtq_entry(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ __u8 op;
+ __be64 *data;
+ void *msg = NULL;
+ uint32_t msg_len = 0;
+ bool msg_sg = false;
+
+ op = ((struct rss_header *) wr)->opcode;
+ if (op == CPL_FW6_PLD) {
+ CSIO_INC_STATS(hw, n_cpl_fw6_pld);
+ if (!flb || !flb->totlen) {
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ msg = (void *) flb;
+ msg_len = flb->totlen;
+ msg_sg = true;
+
+ data = (__be64 *) msg;
+ } else if (op == CPL_FW6_MSG || op == CPL_FW4_MSG) {
+
+ CSIO_INC_STATS(hw, n_cpl_fw6_msg);
+ /* skip RSS header */
+ msg = (void *)((uintptr_t)wr + sizeof(__be64));
+ msg_len = (op == CPL_FW6_MSG) ? sizeof(struct cpl_fw6_msg) :
+ sizeof(struct cpl_fw4_msg);
+
+ data = (__be64 *) msg;
+ } else {
+ csio_warn(hw, "unexpected CPL %#x on FW event queue\n", op);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ return;
+ }
+
+ /*
+ * Enqueue the event to the EventQ. Event processing happens
+ * in the event worker thread context.
+ */
+ if (csio_enqueue_evt_lock(hw, CSIO_EVT_FW, msg,
+ (uint16_t)msg_len, msg_sg))
+ CSIO_INC_STATS(hw, n_evt_drop);
+}
+
+void
+csio_evtq_worker(struct work_struct *work)
+{
+ struct csio_hw *hw = container_of(work, struct csio_hw, evtq_work);
+ struct list_head *evt_entry, *next_entry;
+ LIST_HEAD(evt_q);
+ struct csio_evt_msg *evt_msg;
+ struct cpl_fw6_msg *msg;
+ struct csio_rnode *rn;
+ int rv = 0;
+ uint8_t evtq_stop = 0;
+
+ csio_dbg(hw, "event worker thread active evts#%d\n",
+ hw->stats.n_evt_activeq);
+
+ spin_lock_irq(&hw->lock);
+ while (!list_empty(&hw->evt_active_q)) {
+ list_splice_tail_init(&hw->evt_active_q, &evt_q);
+ spin_unlock_irq(&hw->lock);
+
+ list_for_each_safe(evt_entry, next_entry, &evt_q) {
+ evt_msg = (struct csio_evt_msg *) evt_entry;
+
+ /* Drop events if queue is STOPPED */
+ spin_lock_irq(&hw->lock);
+ if (hw->flags & CSIO_HWF_FWEVT_STOP)
+ evtq_stop = 1;
+ spin_unlock_irq(&hw->lock);
+ if (evtq_stop) {
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto free_evt;
+ }
+
+ switch (evt_msg->type) {
+ case CSIO_EVT_FW:
+ msg = (struct cpl_fw6_msg *)(evt_msg->data);
+
+ if ((msg->opcode == CPL_FW6_MSG ||
+ msg->opcode == CPL_FW4_MSG) &&
+ !msg->type) {
+ rv = csio_mb_fwevt_handler(hw,
+ msg->data);
+ if (!rv)
+ break;
+ /* Handle any remaining fw events */
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else if (msg->opcode == CPL_FW6_PLD) {
+
+ csio_fcoe_fwevt_handler(hw,
+ msg->opcode, msg->data);
+ } else {
+ csio_warn(hw,
+ "Unhandled FW msg op %x type %x\n",
+ msg->opcode, msg->type);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+ break;
+
+ case CSIO_EVT_MBX:
+ csio_mberr_worker(hw);
+ break;
+
+ case CSIO_EVT_DEV_LOSS:
+ memcpy(&rn, evt_msg->data, sizeof(rn));
+ csio_rnode_devloss_handler(rn);
+ break;
+
+ default:
+ csio_warn(hw, "Unhandled event %x on evtq\n",
+ evt_msg->type);
+ CSIO_INC_STATS(hw, n_evt_unexp);
+ break;
+ }
+free_evt:
+ csio_free_evt(hw, evt_msg);
+ }
+
+ spin_lock_irq(&hw->lock);
+ }
+ hw->flags &= ~CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irq(&hw->lock);
+}
+
+int
+csio_fwevtq_handler(struct csio_hw *hw)
+{
+ int rv;
+
+ if (csio_q_iqid(hw, hw->fwevt_iq_idx) == CSIO_MAX_QID) {
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+
+ rv = csio_wr_process_iq_idx(hw, hw->fwevt_iq_idx,
+ csio_process_fwevtq_entry, NULL);
+ return rv;
+}
+
+/****************************************************************************
+ * Entry points
+ ****************************************************************************/
+
+/* Management module */
+/*
+ * csio_mgmt_req_lookup - Look up the given IO req in the active Q.
+ * @mgmtm - mgmt module
+ * @io_req - io request
+ *
+ * Return - 0: if the given IO req exists in the active Q.
+ * -EINVAL: if the lookup fails.
+ */
+int
+csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req)
+{
+ struct list_head *tmp;
+
+ /* Lookup ioreq in the ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ if (io_req == (struct csio_ioreq *)tmp)
+ return 0;
+ }
+ return -EINVAL;
+}
+
+#define ECM_MIN_TMO 1000 /* Minimum timeout value for req */
+
+/*
+ * csio_mgmt_tmo_handler - MGMT IO timeout handler.
+ * @data - Event data.
+ *
+ * Return - none.
+ */
+static void
+csio_mgmt_tmo_handler(uintptr_t data)
+{
+ struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data;
+ struct list_head *tmp;
+ struct csio_ioreq *io_req;
+
+ csio_dbg(mgmtm->hw, "Mgmt timer invoked!\n");
+
+ spin_lock_irq(&mgmtm->hw->lock);
+
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ io_req->tmo -= min_t(uint32_t, io_req->tmo, ECM_MIN_TMO);
+
+ if (!io_req->tmo) {
+ /* Dequeue the request from the active Q. */
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ } else {
+ CSIO_DB_ASSERT(0);
+ }
+ }
+ }
+
+ /* If the active queue is not empty, re-arm the timer */
+ if (!list_empty(&mgmtm->active_q))
+ mod_timer(&mgmtm->mgmt_timer,
+ jiffies + msecs_to_jiffies(ECM_MIN_TMO));
+ spin_unlock_irq(&mgmtm->hw->lock);
+}
+
+static void
+csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm)
+{
+ struct csio_hw *hw = mgmtm->hw;
+ struct csio_ioreq *io_req;
+ struct list_head *tmp;
+ uint32_t count;
+
+ count = 30;
+ /* Wait for all outstanding reqs to complete gracefully */
+ while ((!list_empty(&mgmtm->active_q)) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(2000);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* release outstanding reqs from ACTIVEQ */
+ list_for_each(tmp, &mgmtm->active_q) {
+ io_req = (struct csio_ioreq *) tmp;
+ tmp = csio_list_prev(tmp);
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ if (io_req->io_cbfn) {
+ /* io_req will be freed by completion handler */
+ io_req->wr_status = -ETIMEDOUT;
+ io_req->io_cbfn(mgmtm->hw, io_req);
+ }
+ }
+}
+
+/*
+ * csio_mgmtm_init - Mgmt module init entry point
+ * @mgmtm - mgmt module
+ * @hw - HW module
+ *
+ * Initialize mgmt timer, resource wait queue, active queue,
+ * completion q. Allocate Egress and Ingress
+ * WR queues and save off the queue index returned by the WR
+ * module for future use. Allocate and save off mgmt reqs in the
+ * mgmt_req_freelist for future use. Make sure their SM is initialized
+ * to uninit state.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw)
+{
+ struct timer_list *timer = &mgmtm->mgmt_timer;
+
+ init_timer(timer);
+ timer->function = csio_mgmt_tmo_handler;
+ timer->data = (unsigned long)mgmtm;
+
+ INIT_LIST_HEAD(&mgmtm->active_q);
+ INIT_LIST_HEAD(&mgmtm->cbfn_q);
+
+ mgmtm->hw = hw;
+ /*mgmtm->iq_idx = hw->fwevt_iq_idx;*/
+
+ return 0;
+}
+
+/*
+ * csio_mgmtm_exit - MGMT module exit entry point
+ * @mgmtm - mgmt module
+ *
+ * This function is called during MGMT module uninit.
+ * Stops timers and frees allocated ioreqs.
+ * Returns: None
+ *
+ */
+static void
+csio_mgmtm_exit(struct csio_mgmtm *mgmtm)
+{
+ del_timer_sync(&mgmtm->mgmt_timer);
+}
+
+
+/**
+ * csio_hw_start - Kicks off the HW State machine
+ * @hw: Pointer to HW module.
+ *
+ * It is assumed that the initialization is a synchronous operation.
+ * So when we return after posting the event, the HW SM should be in
+ * the ready state, if there were no errors during init.
+ */
+int
+csio_hw_start(struct csio_hw *hw)
+{
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_CFG);
+ spin_unlock_irq(&hw->lock);
+
+ if (csio_is_hw_ready(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+int
+csio_hw_stop(struct csio_hw *hw)
+{
+ csio_post_event(&hw->sm, CSIO_HWE_PCI_REMOVE);
+
+ if (csio_is_hw_removing(hw))
+ return 0;
+ else
+ return -EINVAL;
+}
+
+/* Max reset retries */
+#define CSIO_MAX_RESET_RETRIES 3
+
+/**
+ * csio_hw_reset - Reset the hardware
+ * @hw: HW module.
+ *
+ * Caller should hold lock across this function.
+ */
+int
+csio_hw_reset(struct csio_hw *hw)
+{
+ if (!csio_is_hw_master(hw))
+ return -EPERM;
+
+ if (hw->rst_retries >= CSIO_MAX_RESET_RETRIES) {
+ csio_dbg(hw, "Max hw reset attempts reached..");
+ return -EINVAL;
+ }
+
+ hw->rst_retries++;
+ csio_post_event(&hw->sm, CSIO_HWE_HBA_RESET);
+
+ if (csio_is_hw_ready(hw)) {
+ hw->rst_retries = 0;
+ hw->stats.n_reset_start = jiffies_to_msecs(jiffies);
+ return 0;
+ } else
+ return -EINVAL;
+}
+
+/*
+ * csio_hw_get_device_id - Caches the Adapter's vendor & device id.
+ * @hw: HW module.
+ */
+static void
+csio_hw_get_device_id(struct csio_hw *hw)
+{
+ /* Is the adapter device id cached already? */
+ if (csio_is_dev_id_cached(hw))
+ return;
+
+ /* Get the PCI vendor & device id */
+ pci_read_config_word(hw->pdev, PCI_VENDOR_ID,
+ &hw->params.pci.vendor_id);
+ pci_read_config_word(hw->pdev, PCI_DEVICE_ID,
+ &hw->params.pci.device_id);
+
+ csio_dev_id_cached(hw);
+
+} /* csio_hw_get_device_id */
+
+/*
+ * csio_hw_set_description - Set the model, description of the hw.
+ * @hw: HW module.
+ * @ven_id: PCI Vendor ID
+ * @dev_id: PCI Device ID
+ */
+static void
+csio_hw_set_description(struct csio_hw *hw, uint16_t ven_id, uint16_t dev_id)
+{
+ uint32_t adap_type, prot_type;
+
+ if (ven_id == CSIO_VENDOR_ID) {
+ prot_type = (dev_id & CSIO_ASIC_DEVID_PROTO_MASK);
+ adap_type = (dev_id & CSIO_ASIC_DEVID_TYPE_MASK);
+
+ if (prot_type == CSIO_FPGA) {
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[13].description, 32);
+ } else if (prot_type == CSIO_T4_FCOE_ASIC) {
+ memcpy(hw->hw_ver,
+ csio_fcoe_adapters[adap_type].model_no, 16);
+ memcpy(hw->model_desc,
+ csio_fcoe_adapters[adap_type].description, 32);
+ } else {
+ char temp_name[32] = "Chelsio FCoE Controller";
+ memcpy(hw->model_desc, temp_name, 32);
+
+ CSIO_DB_ASSERT(0);
+ }
+ }
+} /* csio_hw_set_description */
+
+/**
+ * csio_hw_init - Initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ * Initialize the members of the HW module.
+ */
+int
+csio_hw_init(struct csio_hw *hw)
+{
+ int rv = -EINVAL;
+ uint32_t i;
+ uint16_t ven_id, dev_id;
+ struct csio_evt_msg *evt_entry;
+
+ INIT_LIST_HEAD(&hw->sm.sm_list);
+ csio_init_state(&hw->sm, csio_hws_uninit);
+ spin_lock_init(&hw->lock);
+ INIT_LIST_HEAD(&hw->sln_head);
+
+ /* Get the PCI vendor & device id */
+ csio_hw_get_device_id(hw);
+
+ strcpy(hw->name, CSIO_HW_NAME);
+
+ /* Set the model & its description */
+
+ ven_id = hw->params.pci.vendor_id;
+ dev_id = hw->params.pci.device_id;
+
+ csio_hw_set_description(hw, ven_id, dev_id);
+
+ /* Initialize default log level */
+ hw->params.log_level = (uint32_t) csio_dbg_level;
+
+ csio_set_fwevt_intr_idx(hw, -1);
+ csio_set_nondata_intr_idx(hw, -1);
+
+ /* Init all the modules: Mailbox, WorkRequest and Transport */
+ if (csio_mbm_init(csio_hw_to_mbm(hw), hw, csio_hw_mb_timer))
+ goto err;
+
+ rv = csio_wrm_init(csio_hw_to_wrm(hw), hw);
+ if (rv)
+ goto err_mbm_exit;
+
+ rv = csio_scsim_init(csio_hw_to_scsim(hw), hw);
+ if (rv)
+ goto err_wrm_exit;
+
+ rv = csio_mgmtm_init(csio_hw_to_mgmtm(hw), hw);
+ if (rv)
+ goto err_scsim_exit;
+ /* Pre-allocate evtq and initialize them */
+ INIT_LIST_HEAD(&hw->evt_active_q);
+ INIT_LIST_HEAD(&hw->evt_free_q);
+ for (i = 0; i < csio_evtq_sz; i++) {
+
+ evt_entry = kzalloc(sizeof(struct csio_evt_msg), GFP_KERNEL);
+ if (!evt_entry) {
+ csio_err(hw, "Failed to initialize eventq");
+ goto err_evtq_cleanup;
+ }
+
+ list_add_tail(&evt_entry->list, &hw->evt_free_q);
+ CSIO_INC_STATS(hw, n_evt_freeq);
+ }
+
+ hw->dev_num = dev_num;
+ dev_num++;
+
+ return 0;
+
+err_evtq_cleanup:
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+err_scsim_exit:
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+err_wrm_exit:
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+err_mbm_exit:
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+err:
+ return rv;
+}
+
+/**
+ * csio_hw_exit - Un-initialize HW module.
+ * @hw: Pointer to HW module.
+ *
+ */
+void
+csio_hw_exit(struct csio_hw *hw)
+{
+ csio_evtq_cleanup(hw);
+ csio_mgmtm_exit(csio_hw_to_mgmtm(hw));
+ csio_scsim_exit(csio_hw_to_scsim(hw));
+ csio_wrm_exit(csio_hw_to_wrm(hw), hw);
+ csio_mbm_exit(csio_hw_to_mbm(hw));
+}
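+
+/*
+ * Lifecycle sketch (illustrative only; the actual probe/remove code
+ * lives in csio_init.c): the entry points above are expected to pair
+ * up roughly as follows:
+ *
+ *	if (csio_hw_init(hw))		(allocate sub-modules and evtq)
+ *		goto err;
+ *	if (csio_hw_start(hw))		(post CSIO_HWE_CFG; SM -> ready)
+ *		goto err_hw_exit;
+ *	...
+ *	csio_hw_stop(hw);		(post CSIO_HWE_PCI_REMOVE)
+ *	csio_hw_exit(hw);		(tear down sub-modules)
+ */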
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
new file mode 100644
index 000000000000..9edcca4c71af
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -0,0 +1,665 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_HW_H__
+#define __CSIO_HW_H__
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/compiler.h>
+#include <linux/cdev.h>
+#include <linux/list.h>
+#include <linux/mempool.h>
+#include <linux/io.h>
+#include <linux/spinlock_types.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_scsi.h"
+#include "csio_defs.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+
+/*
+ * An error value used by host. Should not clash with FW defined return values.
+ */
+#define FW_HOSTERROR 255
+
+#define CSIO_FW_FNAME "cxgb4/t4fw.bin"
+#define CSIO_CF_FNAME "cxgb4/t4-config.txt"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 2
+#define FW_VERSION_MICRO 8
+
+#define CSIO_HW_NAME "Chelsio FCoE Adapter"
+#define CSIO_MAX_PFN 8
+#define CSIO_MAX_PPORTS 4
+
+#define CSIO_MAX_LUN 0xFFFF
+#define CSIO_MAX_QUEUE 2048
+#define CSIO_MAX_CMD_PER_LUN 32
+#define CSIO_MAX_DDP_BUF_SIZE (1024 * 1024)
+#define CSIO_MAX_SECTOR_SIZE 128
+
+/* Interrupts */
+#define CSIO_EXTRA_MSI_IQS 2 /* Extra iqs for INTX/MSI mode
+ * (Forward intr iq + fw iq) */
+#define CSIO_EXTRA_VECS 2 /* non-data + FW evt */
+#define CSIO_MAX_SCSI_CPU 128
+#define CSIO_MAX_SCSI_QSETS (CSIO_MAX_SCSI_CPU * CSIO_MAX_PPORTS)
+#define CSIO_MAX_MSIX_VECS (CSIO_MAX_SCSI_QSETS + CSIO_EXTRA_VECS)
+
+/* Queues */
+enum {
+ CSIO_INTR_WRSIZE = 128,
+ CSIO_INTR_IQSIZE = ((CSIO_MAX_MSIX_VECS + 1) * CSIO_INTR_WRSIZE),
+ CSIO_FWEVT_WRSIZE = 128,
+ CSIO_FWEVT_IQLEN = 128,
+ CSIO_FWEVT_FLBUFS = 64,
+ CSIO_FWEVT_IQSIZE = (CSIO_FWEVT_WRSIZE * CSIO_FWEVT_IQLEN),
+ CSIO_HW_NIQ = 1,
+ CSIO_HW_NFLQ = 1,
+ CSIO_HW_NEQ = 1,
+ CSIO_HW_NINTXQ = 1,
+};
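+
+/*
+ * Worked sizing example for the constants above: CSIO_MAX_SCSI_QSETS =
+ * 128 CPUs * 4 ports = 512, so CSIO_MAX_MSIX_VECS = 512 + 2 = 514 and
+ * CSIO_INTR_IQSIZE = (514 + 1) * 128 = 65920 bytes. Similarly,
+ * CSIO_FWEVT_IQSIZE = 128 * 128 = 16KB.
+ */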
+
+struct csio_msix_entries {
+ unsigned short vector; /* Vector assigned by pci_enable_msix */
+ void *dev_id; /* Priv object associated w/ this msix*/
+ char desc[24]; /* Description of this vector */
+};
+
+struct csio_scsi_qset {
+ int iq_idx; /* Ingress index */
+ int eq_idx; /* Egress index */
+ uint32_t intr_idx; /* MSIX Vector index */
+};
+
+struct csio_scsi_cpu_info {
+ int16_t max_cpus;
+};
+
+extern int csio_dbg_level;
+extern int csio_force_master;
+extern unsigned int csio_port_mask;
+extern int csio_msi;
+
+#define CSIO_VENDOR_ID 0x1425
+#define CSIO_ASIC_DEVID_PROTO_MASK 0xFF00
+#define CSIO_ASIC_DEVID_TYPE_MASK 0x00FF
+#define CSIO_FPGA 0xA000
+#define CSIO_T4_FCOE_ASIC 0x4600
+
+#define CSIO_GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+ EDC1 | LE | TP | MA | PM_TX | PM_RX | \
+ ULP_RX | CPL_SWITCH | SGE | \
+ ULP_TX | SF)
+
+/*
+ * Hard parameters used to initialize the card in the absence of a
+ * configuration file.
+ */
+enum {
+ /* General */
+ CSIO_SGE_DBFIFO_INT_THRESH = 10,
+
+ CSIO_SGE_RX_DMA_OFFSET = 2,
+
+ CSIO_SGE_FLBUF_SIZE1 = 65536,
+ CSIO_SGE_FLBUF_SIZE2 = 1536,
+ CSIO_SGE_FLBUF_SIZE3 = 9024,
+ CSIO_SGE_FLBUF_SIZE4 = 9216,
+ CSIO_SGE_FLBUF_SIZE5 = 2048,
+ CSIO_SGE_FLBUF_SIZE6 = 128,
+ CSIO_SGE_FLBUF_SIZE7 = 8192,
+ CSIO_SGE_FLBUF_SIZE8 = 16384,
+
+ CSIO_SGE_TIMER_VAL_0 = 5,
+ CSIO_SGE_TIMER_VAL_1 = 10,
+ CSIO_SGE_TIMER_VAL_2 = 20,
+ CSIO_SGE_TIMER_VAL_3 = 50,
+ CSIO_SGE_TIMER_VAL_4 = 100,
+ CSIO_SGE_TIMER_VAL_5 = 200,
+
+ CSIO_SGE_INT_CNT_VAL_0 = 1,
+ CSIO_SGE_INT_CNT_VAL_1 = 4,
+ CSIO_SGE_INT_CNT_VAL_2 = 8,
+ CSIO_SGE_INT_CNT_VAL_3 = 16,
+
+ /* Storage specific - used by FW_PFVF_CMD */
+ CSIO_WX_CAPS = FW_CMD_CAP_PF, /* w/x all */
+ CSIO_R_CAPS = FW_CMD_CAP_PF, /* r all */
+ CSIO_NVI = 4,
+ CSIO_NIQ_FLINT = 34,
+ CSIO_NETH_CTRL = 32,
+ CSIO_NEQ = 66,
+ CSIO_NEXACTF = 32,
+ CSIO_CMASK = FW_PFVF_CMD_CMASK_MASK,
+ CSIO_PMASK = FW_PFVF_CMD_PMASK_MASK,
+};
+
+/* Slowpath events */
+enum csio_evt {
+ CSIO_EVT_FW = 0, /* FW event */
+ CSIO_EVT_MBX, /* MBX event */
+ CSIO_EVT_SCN, /* State change notification */
+ CSIO_EVT_DEV_LOSS, /* Device loss event */
+ CSIO_EVT_MAX, /* Max supported event */
+};
+
+#define CSIO_EVT_MSG_SIZE 512
+#define CSIO_EVTQ_SIZE 512
+
+/* Event msg */
+struct csio_evt_msg {
+ struct list_head list; /* evt queue*/
+ enum csio_evt type;
+ uint8_t data[CSIO_EVT_MSG_SIZE];
+};
+
+enum {
+ EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */
+ SERNUM_LEN = 16, /* Serial # length */
+ EC_LEN = 16, /* E/C length */
+ ID_LEN = 16, /* ID length */
+ TRACE_LEN = 112, /* length of trace data and mask */
+};
+
+enum {
+ SF_PAGE_SIZE = 256, /* serial flash page size */
+ SF_SEC_SIZE = 64 * 1024, /* serial flash sector size */
+ SF_SIZE = SF_SEC_SIZE * 16, /* serial flash size */
+};
+
+enum { MEM_EDC0, MEM_EDC1, MEM_MC };
+
+enum {
+ MEMWIN0_APERTURE = 2048,
+ MEMWIN0_BASE = 0x1b800,
+ MEMWIN1_APERTURE = 32768,
+ MEMWIN1_BASE = 0x28000,
+ MEMWIN2_APERTURE = 65536,
+ MEMWIN2_BASE = 0x30000,
+};
+
+/* serial flash and firmware constants */
+enum {
+ SF_ATTEMPTS = 10, /* max retries for SF operations */
+
+ /* flash command opcodes */
+ SF_PROG_PAGE = 2, /* program page */
+ SF_WR_DISABLE = 4, /* disable writes */
+ SF_RD_STATUS = 5, /* read status register */
+ SF_WR_ENABLE = 6, /* enable writes */
+ SF_RD_DATA_FAST = 0xb, /* read flash */
+ SF_RD_ID = 0x9f, /* read ID */
+ SF_ERASE_SECTOR = 0xd8, /* erase sector */
+
+ FW_START_SEC = 8, /* first flash sector for FW */
+ FW_END_SEC = 15, /* last flash sector for FW */
+ FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+ FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+
+ FLASH_CFG_MAX_SIZE = 0x10000, /* max size of the flash config file */
+ FLASH_CFG_OFFSET = 0x1f0000,
+ FLASH_CFG_START_SEC = FLASH_CFG_OFFSET / SF_SEC_SIZE,
+ FPGA_FLASH_CFG_OFFSET = 0xf0000, /* if FPGA mode, then cfg file is
+ * at 1MB - 64KB */
+ FPGA_FLASH_CFG_START_SEC = FPGA_FLASH_CFG_OFFSET / SF_SEC_SIZE,
+};
+
+/*
+ * Flash layout.
+ */
+#define FLASH_START(start) ((start) * SF_SEC_SIZE)
+#define FLASH_MAX_SIZE(nsecs) ((nsecs) * SF_SEC_SIZE)
+
+enum {
+ /*
+ * Location of firmware image in FLASH.
+ */
+ FLASH_FW_START_SEC = 8,
+ FLASH_FW_NSECS = 8,
+ FLASH_FW_START = FLASH_START(FLASH_FW_START_SEC),
+ FLASH_FW_MAX_SIZE = FLASH_MAX_SIZE(FLASH_FW_NSECS),
+
+};
+
+#undef FLASH_START
+#undef FLASH_MAX_SIZE
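+
+/*
+ * Worked example of the layout above: with SF_SEC_SIZE = 64KB,
+ * FLASH_FW_START = FLASH_START(8) = 0x80000 and FLASH_FW_MAX_SIZE =
+ * FLASH_MAX_SIZE(8) = 512KB, i.e. the firmware image occupies flash
+ * sectors 8-15, matching FW_START_SEC/FW_END_SEC above.
+ */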
+
+/* Management module */
+enum {
+ CSIO_MGMT_EQ_WRSIZE = 512,
+ CSIO_MGMT_IQ_WRSIZE = 128,
+ CSIO_MGMT_EQLEN = 64,
+ CSIO_MGMT_IQLEN = 64,
+};
+
+#define CSIO_MGMT_EQSIZE (CSIO_MGMT_EQLEN * CSIO_MGMT_EQ_WRSIZE)
+#define CSIO_MGMT_IQSIZE (CSIO_MGMT_IQLEN * CSIO_MGMT_IQ_WRSIZE)
+
+/* mgmt module stats */
+struct csio_mgmtm_stats {
+ uint32_t n_abort_req; /* Total abort requests */
+ uint32_t n_abort_rsp; /* Total abort responses */
+ uint32_t n_close_req; /* Total close requests */
+ uint32_t n_close_rsp; /* Total close responses */
+ uint32_t n_err; /* Total errors */
+ uint32_t n_drop; /* Total requests dropped */
+ uint32_t n_active; /* Count of active_q */
+ uint32_t n_cbfn; /* Count of cbfn_q */
+};
+
+/* MGMT module */
+struct csio_mgmtm {
+ struct csio_hw *hw; /* Pointer to HW module */
+ int eq_idx; /* Egress queue index */
+ int iq_idx; /* Ingress queue index */
+ int msi_vec; /* MSI vector */
+ struct list_head active_q; /* Outstanding ELS/CT */
+ struct list_head abort_q; /* Outstanding abort req */
+ struct list_head cbfn_q; /* Completion queue */
+ struct list_head mgmt_req_freelist; /* Free pool of reqs */
+ /* ELSCT request freelist */
+ struct timer_list mgmt_timer; /* MGMT timer */
+ struct csio_mgmtm_stats stats; /* ELS/CT stats */
+};
+
+struct csio_adap_desc {
+ char model_no[16];
+ char description[32];
+};
+
+struct pci_params {
+ uint16_t vendor_id;
+ uint16_t device_id;
+ uint32_t vpd_cap_addr;
+ uint16_t speed;
+ uint8_t width;
+};
+
+/* User configurable hw parameters */
+struct csio_hw_params {
+ uint32_t sf_size; /* serial flash
+ * size in bytes
+ */
+ uint32_t sf_nsec; /* # of flash sectors */
+ struct pci_params pci;
+ uint32_t log_level; /* Module log level for debugging */
+};
+
+struct csio_vpd {
+ uint32_t cclk;
+ uint8_t ec[EC_LEN + 1];
+ uint8_t sn[SERNUM_LEN + 1];
+ uint8_t id[ID_LEN + 1];
+};
+
+struct csio_pport {
+ uint16_t pcap;
+ uint8_t portid;
+ uint8_t link_status;
+ uint16_t link_speed;
+ uint8_t mac[6];
+ uint8_t mod_type;
+ uint8_t rsvd1;
+ uint8_t rsvd2;
+ uint8_t rsvd3;
+};
+
+/* fcoe resource information */
+struct csio_fcoe_res_info {
+ uint16_t e_d_tov;
+ uint16_t r_a_tov_seq;
+ uint16_t r_a_tov_els;
+ uint16_t r_r_tov;
+ uint32_t max_xchgs;
+ uint32_t max_ssns;
+ uint32_t used_xchgs;
+ uint32_t used_ssns;
+ uint32_t max_fcfs;
+ uint32_t max_vnps;
+ uint32_t used_fcfs;
+ uint32_t used_vnps;
+};
+
+/* HW State machine Events */
+enum csio_hw_ev {
+ CSIO_HWE_CFG = (uint32_t)1, /* Starts off the State machine */
+ CSIO_HWE_INIT, /* Config done, start Init */
+ CSIO_HWE_INIT_DONE, /* Init Mailboxes sent, HW ready */
+ CSIO_HWE_FATAL, /* Fatal error during initialization */
+ CSIO_HWE_PCIERR_DETECTED, /* PCI error detected */
+ CSIO_HWE_PCIERR_SLOT_RESET, /* Slot reset after PCI recovery */
+ CSIO_HWE_PCIERR_RESUME, /* Resume after PCI error recovery */
+ CSIO_HWE_QUIESCED, /* HBA quiesced */
+ CSIO_HWE_HBA_RESET, /* HBA reset requested */
+ CSIO_HWE_HBA_RESET_DONE, /* HBA reset completed */
+ CSIO_HWE_FW_DLOAD, /* FW download requested */
+ CSIO_HWE_PCI_REMOVE, /* PCI de-instantiation */
+ CSIO_HWE_SUSPEND, /* HW suspend for online (hot) replacement */
+ CSIO_HWE_RESUME, /* HW resume for online (hot) replacement */
+ CSIO_HWE_MAX, /* Max HW event */
+};
+
+/* hw stats */
+struct csio_hw_stats {
+ uint32_t n_evt_activeq; /* Number of events in active Q */
+ uint32_t n_evt_freeq; /* Number of events in free Q */
+ uint32_t n_evt_drop; /* Number of events dropped */
+ uint32_t n_evt_unexp; /* Number of unexpected events */
+ uint32_t n_pcich_offline;/* Number of PCI channel offline events */
+ uint32_t n_lnlkup_miss; /* Number of lnode lookup misses */
+ uint32_t n_cpl_fw6_msg; /* Number of CPL fw6 messages */
+ uint32_t n_cpl_fw6_pld; /* Number of CPL fw6 payloads */
+ uint32_t n_cpl_unexp; /* Number of unexpected CPLs */
+ uint32_t n_mbint_unexp; /* Number of unexpected mbox interrupts */
+ uint32_t n_plint_unexp; /* Number of unexpected PL interrupts */
+ uint32_t n_plint_cnt; /* Number of PL interrupts */
+ uint32_t n_int_stray; /* Number of stray interrupts */
+ uint32_t n_err; /* Number of HW errors */
+ uint32_t n_err_fatal; /* Number of fatal errors */
+ uint32_t n_err_nomem; /* Number of memory alloc failures */
+ uint32_t n_err_io; /* Number of I/O failures */
+ enum csio_hw_ev n_evt_sm[CSIO_HWE_MAX]; /* Number of sm events */
+ uint64_t n_reset_start; /* Start time after the reset */
+ uint32_t rsvd1;
+};
+
+/* Defines for hw->flags */
+#define CSIO_HWF_MASTER 0x00000001 /* This is the Master
+ * function for the
+ * card.
+ */
+#define CSIO_HWF_HW_INTR_ENABLED 0x00000002 /* Is the HW interrupt
+ * enable bit set?
+ */
+#define CSIO_HWF_FWEVT_PENDING 0x00000004 /* FW events pending */
+#define CSIO_HWF_Q_MEM_ALLOCED 0x00000008 /* Queues have been
+ * allocated memory.
+ */
+#define CSIO_HWF_Q_FW_ALLOCED 0x00000010 /* Queues have been
+ * allocated in FW.
+ */
+#define CSIO_HWF_VPD_VALID 0x00000020 /* Valid VPD copied */
+#define CSIO_HWF_DEVID_CACHED 0x00000040 /* PCI vendor & device
+ * id cached */
+#define CSIO_HWF_FWEVT_STOP 0x00000080 /* Stop processing
+ * FW events
+ */
+#define CSIO_HWF_USING_SOFT_PARAMS 0x00000100 /* Using FW config
+ * params
+ */
+#define CSIO_HWF_HOST_INTR_ENABLED 0x00000200 /* Are host interrupts
+ * enabled?
+ */
+
+#define csio_is_hw_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HW_INTR_ENABLED)
+#define csio_is_host_intr_enabled(__hw) \
+ ((__hw)->flags & CSIO_HWF_HOST_INTR_ENABLED)
+#define csio_is_hw_master(__hw) ((__hw)->flags & CSIO_HWF_MASTER)
+#define csio_is_valid_vpd(__hw) ((__hw)->flags & CSIO_HWF_VPD_VALID)
+#define csio_is_dev_id_cached(__hw) ((__hw)->flags & CSIO_HWF_DEVID_CACHED)
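+/* Unlike the csio_is_*() tests above, the next two macros set the flag */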
+#define csio_valid_vpd_copied(__hw) ((__hw)->flags |= CSIO_HWF_VPD_VALID)
+#define csio_dev_id_cached(__hw) ((__hw)->flags |= CSIO_HWF_DEVID_CACHED)
+
+/* Defines for intr_mode */
+enum csio_intr_mode {
+ CSIO_IM_NONE = 0,
+ CSIO_IM_INTX = 1,
+ CSIO_IM_MSI = 2,
+ CSIO_IM_MSIX = 3,
+};
+
+/* Master HW structure: One per function */
+struct csio_hw {
+ struct csio_sm sm; /* State machine: should
+ * be the 1st member.
+ */
+ spinlock_t lock; /* Lock for hw */
+
+ struct csio_scsim scsim; /* SCSI module*/
+ struct csio_wrm wrm; /* Work request module*/
+ struct pci_dev *pdev; /* PCI device */
+
+ void __iomem *regstart; /* Virtual address of
+ * register map
+ */
+ /* SCSI queue sets */
+ uint32_t num_sqsets; /* Number of SCSI
+ * queue sets */
+ uint32_t num_scsi_msix_cpus; /* Number of CPUs that
+ * will be used
+ * for ingress
+ * processing.
+ */
+
+ struct csio_scsi_qset sqset[CSIO_MAX_PPORTS][CSIO_MAX_SCSI_CPU];
+ struct csio_scsi_cpu_info scsi_cpu_info[CSIO_MAX_PPORTS];
+
+ uint32_t evtflag; /* Event flag */
+ uint32_t flags; /* HW flags */
+
+ struct csio_mgmtm mgmtm; /* management module */
+ struct csio_mbm mbm; /* Mailbox module */
+
+ /* Lnodes */
+ uint32_t num_lns; /* Number of lnodes */
+ struct csio_lnode *rln; /* Root lnode */
+ struct list_head sln_head; /* Sibling lnode list */
+ int intr_iq_idx; /* Forward interrupt
+ * queue.
+ */
+ int fwevt_iq_idx; /* FW evt queue */
+ struct work_struct evtq_work; /* Worker thread for
+ * HW events.
+ */
+ struct list_head evt_free_q; /* freelist of evt
+ * elements
+ */
+ struct list_head evt_active_q; /* active evt queue*/
+
+ /* board related info */
+ char name[32];
+ char hw_ver[16];
+ char model_desc[32];
+ char drv_version[32];
+ char fwrev_str[32];
+ uint32_t optrom_ver;
+ uint32_t fwrev;
+ uint32_t tp_vers;
+ char chip_ver;
+ uint32_t cfg_finiver;
+ uint32_t cfg_finicsum;
+ uint32_t cfg_cfcsum;
+ uint8_t cfg_csum_status;
+ uint8_t cfg_store;
+ enum csio_dev_state fw_state;
+ struct csio_vpd vpd;
+
+ uint8_t pfn; /* Physical Function
+ * number
+ */
+ uint32_t port_vec; /* Port vector */
+ uint8_t num_pports; /* Number of physical
+ * ports.
+ */
+ uint8_t rst_retries; /* Reset retries */
+ uint8_t cur_evt; /* current s/m evt */
+ uint8_t prev_evt; /* Previous s/m evt */
+ uint32_t dev_num; /* device number */
+ struct csio_pport pport[CSIO_MAX_PPORTS]; /* Ports (XGMACs) */
+ struct csio_hw_params params; /* Hw parameters */
+
+ struct pci_pool *scsi_pci_pool; /* PCI pool for SCSI */
+ mempool_t *mb_mempool; /* Mailbox memory pool*/
+ mempool_t *rnode_mempool; /* rnode memory pool */
+
+ /* Interrupt */
+ enum csio_intr_mode intr_mode; /* INTx, MSI, MSIX */
+ uint32_t fwevt_intr_idx; /* FW evt MSIX/interrupt
+ * index
+ */
+ uint32_t nondata_intr_idx; /* nondata MSIX/intr
+ * idx
+ */
+
+ uint8_t cfg_neq; /* FW configured no of
+ * egress queues
+ */
+ uint8_t cfg_niq; /* FW configured no of
+ * iq queues.
+ */
+
+ struct csio_fcoe_res_info fres_info; /* Fcoe resource info */
+
+ /* MSIX vectors */
+ struct csio_msix_entries msix_entries[CSIO_MAX_MSIX_VECS];
+
+ struct dentry *debugfs_root; /* Debug FS */
+ struct csio_hw_stats stats; /* Hw statistics */
+};
+
+/* Register access macros */
+#define csio_reg(_b, _r) ((_b) + (_r))
+
+#define csio_rd_reg8(_h, _r) readb(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg16(_h, _r) readw(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg32(_h, _r) readl(csio_reg((_h)->regstart, (_r)))
+#define csio_rd_reg64(_h, _r) readq(csio_reg((_h)->regstart, (_r)))
+
+#define csio_wr_reg8(_h, _v, _r) writeb((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg16(_h, _v, _r) writew((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg32(_h, _v, _r) writel((_v), \
+ csio_reg((_h)->regstart, (_r)))
+#define csio_wr_reg64(_h, _v, _r) writeq((_v), \
+ csio_reg((_h)->regstart, (_r)))
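+/* Example usage (see csio_fcoe_isr() in csio_isr.c):
+ * csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+ */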
+
+void csio_set_reg_field(struct csio_hw *, uint32_t, uint32_t, uint32_t);
+
+/* Core clocks <==> uSecs */
+static inline uint32_t
+csio_core_ticks_to_us(struct csio_hw *hw, uint32_t ticks)
+{
+ /* add Core Clock / 2 to round ticks to the nearest microsecond */
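+ /* e.g. cclk = 200000 kHz: 300 ticks ->
+ * (300 * 1000 + 100000) / 200000 = 2 us (1.5 us rounded up)
+ */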
+ return (ticks * 1000 + hw->vpd.cclk/2) / hw->vpd.cclk;
+}
+
+static inline uint32_t
+csio_us_to_core_ticks(struct csio_hw *hw, uint32_t us)
+{
+ return (us * hw->vpd.cclk) / 1000;
+}
+
+/* Easy access macros */
+#define csio_hw_to_wrm(hw) ((struct csio_wrm *)(&(hw)->wrm))
+#define csio_hw_to_mbm(hw) ((struct csio_mbm *)(&(hw)->mbm))
+#define csio_hw_to_scsim(hw) ((struct csio_scsim *)(&(hw)->scsim))
+#define csio_hw_to_mgmtm(hw) ((struct csio_mgmtm *)(&(hw)->mgmtm))
+
+#define CSIO_PCI_BUS(hw) ((hw)->pdev->bus->number)
+#define CSIO_PCI_DEV(hw) (PCI_SLOT((hw)->pdev->devfn))
+#define CSIO_PCI_FUNC(hw) (PCI_FUNC((hw)->pdev->devfn))
+
+#define csio_set_fwevt_intr_idx(_h, _i) ((_h)->fwevt_intr_idx = (_i))
+#define csio_get_fwevt_intr_idx(_h) ((_h)->fwevt_intr_idx)
+#define csio_set_nondata_intr_idx(_h, _i) ((_h)->nondata_intr_idx = (_i))
+#define csio_get_nondata_intr_idx(_h) ((_h)->nondata_intr_idx)
+
+/* Printing/logging */
+#define CSIO_DEVID(__dev) ((__dev)->dev_num)
+#define CSIO_DEVID_LO(__dev) (CSIO_DEVID((__dev)) & 0xFFFF)
+#define CSIO_DEVID_HI(__dev) ((CSIO_DEVID((__dev)) >> 16) & 0xFFFF)
+
+#define csio_info(__hw, __fmt, ...) \
+ dev_info(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_fatal(__hw, __fmt, ...) \
+ dev_crit(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_err(__hw, __fmt, ...) \
+ dev_err(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#define csio_warn(__hw, __fmt, ...) \
+ dev_warn(&(__hw)->pdev->dev, __fmt, ##__VA_ARGS__)
+
+#ifdef __CSIO_DEBUG__
+#define csio_dbg(__hw, __fmt, ...) \
+ csio_info((__hw), __fmt, ##__VA_ARGS__)
+#else
+#define csio_dbg(__hw, __fmt, ...)
+#endif
+
+int csio_mgmt_req_lookup(struct csio_mgmtm *, struct csio_ioreq *);
+void csio_hw_intr_disable(struct csio_hw *);
+int csio_hw_slow_intr_handler(struct csio_hw *hw);
+int csio_hw_start(struct csio_hw *);
+int csio_hw_stop(struct csio_hw *);
+int csio_hw_reset(struct csio_hw *);
+int csio_is_hw_ready(struct csio_hw *);
+int csio_is_hw_removing(struct csio_hw *);
+
+int csio_fwevtq_handler(struct csio_hw *);
+void csio_evtq_worker(struct work_struct *);
+int csio_enqueue_evt(struct csio_hw *hw, enum csio_evt type,
+ void *evt_msg, uint16_t len);
+void csio_evtq_flush(struct csio_hw *hw);
+
+int csio_request_irqs(struct csio_hw *);
+void csio_intr_enable(struct csio_hw *);
+void csio_intr_disable(struct csio_hw *, bool);
+
+struct csio_lnode *csio_lnode_alloc(struct csio_hw *);
+int csio_config_queues(struct csio_hw *);
+
+int csio_hw_mc_read(struct csio_hw *, uint32_t, __be32 *, uint64_t *);
+int csio_hw_edc_read(struct csio_hw *, int, uint32_t, __be32 *, uint64_t *);
+int csio_hw_init(struct csio_hw *);
+void csio_hw_exit(struct csio_hw *);
+#endif /* ifndef __CSIO_HW_H__ */
diff --git a/drivers/scsi/csiostor/csio_init.c b/drivers/scsi/csiostor/csio_init.c
new file mode 100644
index 000000000000..d81b03c5165b
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.c
@@ -0,0 +1,1262 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/mm.h>
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/export.h>
+
+#include "csio_init.h"
+#include "csio_defs.h"
+
+#define CSIO_MIN_MEMPOOL_SZ 64
+
+static struct dentry *csio_debugfs_root;
+
+static struct scsi_transport_template *csio_fcoe_transport;
+static struct scsi_transport_template *csio_fcoe_transport_vport;
+
+/*
+ * debugfs support
+ */
+static ssize_t
+csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
+{
+ loff_t pos = *ppos;
+ loff_t avail = file->f_path.dentry->d_inode->i_size;
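+ /* The low two bits of private_data carry the memory type
+ * (MEM_EDC0/MEM_EDC1/MEM_MC): csio_add_debugfs_mem() stores
+ * (void *)hw + idx, so masking them off recovers hw.
+ */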
+ unsigned int mem = (uintptr_t)file->private_data & 3;
+ struct csio_hw *hw = file->private_data - mem;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= avail)
+ return 0;
+ if (count > avail - pos)
+ count = avail - pos;
+
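+ /* The HW read helpers return the aligned 64-byte block containing
+ * pos; ofst/len below carve out the span the caller asked for.
+ */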
+ while (count) {
+ size_t len;
+ int ret, ofst;
+ __be32 data[16];
+
+ if (mem == MEM_MC)
+ ret = csio_hw_mc_read(hw, pos, data, NULL);
+ else
+ ret = csio_hw_edc_read(hw, mem, pos, data, NULL);
+ if (ret)
+ return ret;
+
+ ofst = pos % sizeof(data);
+ len = min(count, sizeof(data) - ofst);
+ if (copy_to_user(buf, (u8 *)data + ofst, len))
+ return -EFAULT;
+
+ buf += len;
+ pos += len;
+ count -= len;
+ }
+ count = pos - *ppos;
+ *ppos = pos;
+ return count;
+}
+
+static const struct file_operations csio_mem_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = csio_mem_read,
+ .llseek = default_llseek,
+};
+
+static void csio_add_debugfs_mem(struct csio_hw *hw, const char *name,
+ unsigned int idx, unsigned int size_mb)
+{
+ struct dentry *de;
+
+ de = debugfs_create_file(name, S_IRUSR, hw->debugfs_root,
+ (void *)hw + idx, &csio_mem_debugfs_fops);
+ if (de && de->d_inode)
+ de->d_inode->i_size = size_mb << 20;
+}
+
+static int csio_setup_debugfs(struct csio_hw *hw)
+{
+ int i;
+
+ if (IS_ERR_OR_NULL(hw->debugfs_root))
+ return -1;
+
+ i = csio_rd_reg32(hw, MA_TARGET_MEM_ENABLE);
+ if (i & EDRAM0_ENABLE)
+ csio_add_debugfs_mem(hw, "edc0", MEM_EDC0, 5);
+ if (i & EDRAM1_ENABLE)
+ csio_add_debugfs_mem(hw, "edc1", MEM_EDC1, 5);
+ if (i & EXT_MEM_ENABLE)
+ csio_add_debugfs_mem(hw, "mc", MEM_MC,
+ EXT_MEM_SIZE_GET(csio_rd_reg32(hw, MA_EXT_MEMORY_BAR)));
+ return 0;
+}
+
+/*
+ * csio_dfs_create - Creates and sets up per-hw debugfs.
+ *
+ */
+static int
+csio_dfs_create(struct csio_hw *hw)
+{
+ if (csio_debugfs_root) {
+ hw->debugfs_root = debugfs_create_dir(pci_name(hw->pdev),
+ csio_debugfs_root);
+ csio_setup_debugfs(hw);
+ }
+
+ return 0;
+}
+
+/*
+ * csio_dfs_destroy - Destroys per-hw debugfs.
+ */
+static int
+csio_dfs_destroy(struct csio_hw *hw)
+{
+ if (hw->debugfs_root)
+ debugfs_remove_recursive(hw->debugfs_root);
+
+ return 0;
+}
+
+/*
+ * csio_dfs_init - Debug filesystem initialization for the module.
+ *
+ */
+static int
+csio_dfs_init(void)
+{
+ csio_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+ if (!csio_debugfs_root)
+ pr_warn("Could not create debugfs entry, continuing\n");
+
+ return 0;
+}
+
+/*
+ * csio_dfs_exit - debugfs cleanup for the module.
+ */
+static void
+csio_dfs_exit(void)
+{
+ debugfs_remove(csio_debugfs_root);
+}
+
+/*
+ * csio_pci_init - PCI initialization.
+ * @pdev: PCI device.
+ * @bars: Bitmask of bars to be requested.
+ *
+ * Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ */
+static int
+csio_pci_init(struct pci_dev *pdev, int *bars)
+{
+ int rv = -ENODEV;
+
+ *bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ if (pci_enable_device_mem(pdev))
+ goto err;
+
+ if (pci_request_selected_regions(pdev, *bars, KBUILD_MODNAME))
+ goto err_disable_device;
+
+ pci_set_master(pdev);
+ pci_try_set_mwi(pdev);
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+ } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ } else {
+ dev_err(&pdev->dev, "No suitable DMA available.\n");
+ goto err_release_regions;
+ }
+
+ return 0;
+
+err_release_regions:
+ pci_release_selected_regions(pdev, *bars);
+err_disable_device:
+ pci_disable_device(pdev);
+err:
+ return rv;
+
+}
+
+/*
+ * csio_pci_exit - PCI uninitialization.
+ * @pdev: PCI device.
+ * @bars: Bars to be released.
+ *
+ */
+static void
+csio_pci_exit(struct pci_dev *pdev, int *bars)
+{
+ pci_release_selected_regions(pdev, *bars);
+ pci_disable_device(pdev);
+}
+
+/*
+ * csio_hw_init_workers - Initialize the HW module's worker threads.
+ * @hw: HW module.
+ *
+ */
+static void
+csio_hw_init_workers(struct csio_hw *hw)
+{
+ INIT_WORK(&hw->evtq_work, csio_evtq_worker);
+}
+
+static void
+csio_hw_exit_workers(struct csio_hw *hw)
+{
+ cancel_work_sync(&hw->evtq_work);
+ flush_scheduled_work();
+}
+
+static int
+csio_create_queues(struct csio_hw *hw)
+{
+ int i, j;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_FW_ALLOCED)
+ return 0;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = csio_wr_iq_create(hw, NULL, hw->intr_iq_idx,
+ 0, hw->pport[0].portid, false, NULL);
+ if (rv != 0) {
+ csio_err(hw, " Forward Interrupt IQ failed!: %d\n", rv);
+ return rv;
+ }
+ }
+
+ /* FW event queue */
+ rv = csio_wr_iq_create(hw, NULL, hw->fwevt_iq_idx,
+ csio_get_fwevt_intr_idx(hw),
+ hw->pport[0].portid, true, NULL);
+ if (rv != 0) {
+ csio_err(hw, "FW event IQ config failed!: %d\n", rv);
+ return rv;
+ }
+
+ /* Create mgmt queue */
+ rv = csio_wr_eq_create(hw, NULL, mgmtm->eq_idx,
+ mgmtm->iq_idx, hw->pport[0].portid, NULL);
+
+ if (rv != 0) {
+ csio_err(hw, "Mgmt EQ create failed!: %d\n", rv);
+ goto err;
+ }
+
+ /* Create SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < info->max_cpus; j++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+
+ rv = csio_wr_iq_create(hw, NULL, sqset->iq_idx,
+ sqset->intr_idx, i, false, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module IQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ rv = csio_wr_eq_create(hw, NULL, sqset->eq_idx,
+ sqset->iq_idx, i, NULL);
+ if (rv != 0) {
+ csio_err(hw,
+ "SCSI module EQ config failed [%d][%d]:%d\n",
+ i, j, rv);
+ goto err;
+ }
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_FW_ALLOCED;
+ return 0;
+err:
+ csio_wr_destroy_queues(hw, true);
+ return -EINVAL;
+}
+
+/*
+ * csio_config_queues - Configure the DMA queues.
+ * @hw: HW module.
+ *
+ * Allocates memory for queues and registers them with FW.
+ */
+int
+csio_config_queues(struct csio_hw *hw)
+{
+ int i, j, idx, k = 0;
+ int rv;
+ struct csio_scsi_qset *sqset;
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_scsi_qset *orig;
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->flags & CSIO_HWF_Q_MEM_ALLOCED)
+ return csio_create_queues(hw);
+
+ /* Calculate number of SCSI queues for MSIX we would like */
+ hw->num_scsi_msix_cpus = num_online_cpus();
+ hw->num_sqsets = num_online_cpus() * hw->num_pports;
+
+ if (hw->num_sqsets > CSIO_MAX_SCSI_QSETS) {
+ hw->num_sqsets = CSIO_MAX_SCSI_QSETS;
+ hw->num_scsi_msix_cpus = CSIO_MAX_SCSI_CPU;
+ }
+
+ /* Initialize max_cpus, may get reduced during msix allocations */
+ for (i = 0; i < hw->num_pports; i++)
+ hw->scsi_cpu_info[i].max_cpus = hw->num_scsi_msix_cpus;
+
+ csio_dbg(hw, "nsqsets:%d scpus:%d\n",
+ hw->num_sqsets, hw->num_scsi_msix_cpus);
+
+ csio_intr_enable(hw);
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+
+ /* Allocate Forward interrupt iq. */
+ hw->intr_iq_idx = csio_wr_alloc_q(hw, CSIO_INTR_IQSIZE,
+ CSIO_INTR_WRSIZE, CSIO_INGRESS,
+ (void *)hw, 0, 0, NULL);
+ if (hw->intr_iq_idx == -1) {
+ csio_err(hw,
+ "Forward interrupt queue creation failed\n");
+ goto intr_disable;
+ }
+ }
+
+ /* Allocate the FW evt queue */
+ hw->fwevt_iq_idx = csio_wr_alloc_q(hw, CSIO_FWEVT_IQSIZE,
+ CSIO_FWEVT_WRSIZE,
+ CSIO_INGRESS, (void *)hw,
+ CSIO_FWEVT_FLBUFS, 0,
+ csio_fwevt_intx_handler);
+ if (hw->fwevt_iq_idx == -1) {
+ csio_err(hw, "FW evt queue creation failed\n");
+ goto intr_disable;
+ }
+
+ /* Allocate the mgmt queue */
+ mgmtm->eq_idx = csio_wr_alloc_q(hw, CSIO_MGMT_EQSIZE,
+ CSIO_MGMT_EQ_WRSIZE,
+ CSIO_EGRESS, (void *)hw, 0, 0, NULL);
+ if (mgmtm->eq_idx == -1) {
+ csio_err(hw, "Failed to alloc egress queue for mgmt module\n");
+ goto intr_disable;
+ }
+
+ /* Use FW IQ for MGMT req completion */
+ mgmtm->iq_idx = hw->fwevt_iq_idx;
+
+ /* Allocate SCSI queues */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ sqset = &hw->sqset[i][j];
+
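+ /* Fewer interrupt vectors than online CPUs were granted:
+ * wrap around and share the queue sets of the first
+ * max_cpus CPUs.
+ */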
+ if (j >= info->max_cpus) {
+ k = j % info->max_cpus;
+ orig = &hw->sqset[i][k];
+ sqset->eq_idx = orig->eq_idx;
+ sqset->iq_idx = orig->iq_idx;
+ continue;
+ }
+
+ idx = csio_wr_alloc_q(hw, csio_scsi_eqsize, 0,
+ CSIO_EGRESS, (void *)hw, 0, 0,
+ NULL);
+ if (idx == -1) {
+ csio_err(hw, "EQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+
+ sqset->eq_idx = idx;
+
+ idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE,
+ CSIO_SCSI_IQ_WRSZ, CSIO_INGRESS,
+ (void *)hw, 0, 0,
+ csio_scsi_intx_handler);
+ if (idx == -1) {
+ csio_err(hw, "IQ creation failed for idx:%d\n",
+ idx);
+ goto intr_disable;
+ }
+ sqset->iq_idx = idx;
+ } /* for all CPUs */
+ } /* For all ports */
+
+ hw->flags |= CSIO_HWF_Q_MEM_ALLOCED;
+
+ rv = csio_create_queues(hw);
+ if (rv != 0)
+ goto intr_disable;
+
+ /*
+ * Now request IRQs for the vectors. In the event of a failure,
+ * cleanup is handled internally by this function.
+ */
+ rv = csio_request_irqs(hw);
+ if (rv != 0)
+ return -EINVAL;
+
+ return 0;
+
+intr_disable:
+ csio_intr_disable(hw, false);
+
+ return -EINVAL;
+}
+
+static int
+csio_resource_alloc(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv = -ENOMEM;
+
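+ /* One IQ and one EQ per SCSI qset, plus the fixed HW ingress,
+ * egress, freelist and INTx forwarding queues.
+ */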
+ wrm->num_q = ((CSIO_MAX_SCSI_QSETS * 2) + CSIO_HW_NIQ +
+ CSIO_HW_NEQ + CSIO_HW_NFLQ + CSIO_HW_NINTXQ);
+
+ hw->mb_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_mb));
+ if (!hw->mb_mempool)
+ goto err;
+
+ hw->rnode_mempool = mempool_create_kmalloc_pool(CSIO_MIN_MEMPOOL_SZ,
+ sizeof(struct csio_rnode));
+ if (!hw->rnode_mempool)
+ goto err_free_mb_mempool;
+
+ hw->scsi_pci_pool = pci_pool_create("csio_scsi_pci_pool", hw->pdev,
+ CSIO_SCSI_RSP_LEN, 8, 0);
+ if (!hw->scsi_pci_pool)
+ goto err_free_rn_pool;
+
+ return 0;
+
+err_free_rn_pool:
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+err_free_mb_mempool:
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+err:
+ return rv;
+}
+
+static void
+csio_resource_free(struct csio_hw *hw)
+{
+ pci_pool_destroy(hw->scsi_pci_pool);
+ hw->scsi_pci_pool = NULL;
+ mempool_destroy(hw->rnode_mempool);
+ hw->rnode_mempool = NULL;
+ mempool_destroy(hw->mb_mempool);
+ hw->mb_mempool = NULL;
+}
+
+/*
+ * csio_hw_alloc - Allocate and initialize the HW module.
+ * @pdev: PCI device.
+ *
+ * Allocates HW structure, DMA, memory resources, maps BARs to
+ * host memory and initializes the HW module.
+ */
+static struct csio_hw *csio_hw_alloc(struct pci_dev *pdev)
+{
+ struct csio_hw *hw;
+
+ hw = kzalloc(sizeof(struct csio_hw), GFP_KERNEL);
+ if (!hw)
+ goto err;
+
+ hw->pdev = pdev;
+ strncpy(hw->drv_version, CSIO_DRV_VERSION, 32);
+
+ /* memory pool/DMA pool allocation */
+ if (csio_resource_alloc(hw))
+ goto err_free_hw;
+
+ /* Get the start address of registers from BAR 0 */
+ hw->regstart = ioremap_nocache(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ if (!hw->regstart) {
+ csio_err(hw, "Could not map BAR 0, regstart = %p\n",
+ hw->regstart);
+ goto err_resource_free;
+ }
+
+ csio_hw_init_workers(hw);
+
+ if (csio_hw_init(hw))
+ goto err_unmap_bar;
+
+ csio_dfs_create(hw);
+
+ csio_dbg(hw, "hw:%p\n", hw);
+
+ return hw;
+
+err_unmap_bar:
+ csio_hw_exit_workers(hw);
+ iounmap(hw->regstart);
+err_resource_free:
+ csio_resource_free(hw);
+err_free_hw:
+ kfree(hw);
+err:
+ return NULL;
+}
+
+/*
+ * csio_hw_free - Uninitialize and free the HW module.
+ * @hw: The HW module
+ *
+ * Disable interrupts, uninit the HW module, free resources, free hw.
+ */
+static void
+csio_hw_free(struct csio_hw *hw)
+{
+ csio_intr_disable(hw, true);
+ csio_hw_exit_workers(hw);
+ csio_hw_exit(hw);
+ iounmap(hw->regstart);
+ csio_dfs_destroy(hw);
+ csio_resource_free(hw);
+ kfree(hw);
+}
+
+/**
+ * csio_shost_init - Create and initialize the lnode module.
+ * @hw: The HW module.
+ * @dev: The device associated with this invocation.
+ * @probe: Called from probe context or not?
+ * @pln: Parent lnode if any.
+ *
+ * Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes the lnode module and registers with SCSI ML
+ * via scsi_host_add. This function is shared between physical and
+ * virtual node ports.
+ */
+struct csio_lnode *
+csio_shost_init(struct csio_hw *hw, struct device *dev,
+ bool probe, struct csio_lnode *pln)
+{
+ struct Scsi_Host *shost = NULL;
+ struct csio_lnode *ln;
+
+ csio_fcoe_shost_template.cmd_per_lun = csio_lun_qdepth;
+ csio_fcoe_shost_vport_template.cmd_per_lun = csio_lun_qdepth;
+
+ /*
+ * hw->pdev is the physical port's PCI dev structure,
+ * which will be different from the NPIV dev structure.
+ */
+ if (dev == &hw->pdev->dev)
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_template,
+ sizeof(struct csio_lnode));
+ else
+ shost = scsi_host_alloc(
+ &csio_fcoe_shost_vport_template,
+ sizeof(struct csio_lnode));
+
+ if (!shost)
+ goto err;
+
+ ln = shost_priv(shost);
+ memset(ln, 0, sizeof(struct csio_lnode));
+
+ /* Store the SCSI host number in the upper 16 bits of dev_num
+ * (see CSIO_DEVID_HI/CSIO_DEVID_LO).
+ */
+ ln->dev_num = (shost->host_no << 16);
+
+ shost->can_queue = CSIO_MAX_QUEUE;
+ shost->this_id = -1;
+ shost->unique_id = shost->host_no;
+ shost->max_cmd_len = 16; /* Max CDB length supported */
+ shost->max_id = min_t(uint32_t, csio_fcoe_rnodes,
+ hw->fres_info.max_ssns);
+ shost->max_lun = CSIO_MAX_LUN;
+ if (dev == &hw->pdev->dev)
+ shost->transportt = csio_fcoe_transport;
+ else
+ shost->transportt = csio_fcoe_transport_vport;
+
+ /* root lnode */
+ if (!hw->rln)
+ hw->rln = ln;
+
+ /* Other initialization here: Common, Transport specific */
+ if (csio_lnode_init(ln, hw, pln))
+ goto err_shost_put;
+
+ if (scsi_add_host(shost, dev))
+ goto err_lnode_exit;
+
+ return ln;
+
+err_lnode_exit:
+ csio_lnode_exit(ln);
+err_shost_put:
+ scsi_host_put(shost);
+err:
+ return NULL;
+}
+
+/**
+ * csio_shost_exit - De-instantiate the shost.
+ * @ln: The lnode module corresponding to the shost.
+ *
+ */
+void
+csio_shost_exit(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ /* Inform transport */
+ fc_remove_host(shost);
+
+ /* Inform SCSI ML */
+ scsi_remove_host(shost);
+
+ /* Flush all the events, so that any rnode removal events
+ * already queued are all handled, before we remove the lnode.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_evtq_flush(hw);
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_exit(ln);
+ scsi_host_put(shost);
+}
+
+struct csio_lnode *
+csio_lnode_alloc(struct csio_hw *hw)
+{
+ return csio_shost_init(hw, &hw->pdev->dev, false, NULL);
+}
+
+void
+csio_lnodes_block_request(struct csio_hw *hw)
+{
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
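+ /* Snapshot all lnodes under the HW lock, then block requests
+ * on each shost after dropping it.
+ */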
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_request(struct csio_hw *hw)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_block_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Blocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_block_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_unblock_by_port(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln;
+ struct Scsi_Host *shost;
+ struct csio_lnode *sln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "Failed to allocate lnodes_list");
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ if (sln->portid != portid)
+ continue;
+ lnode_list[cur_cnt++] = sln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "unblocking IOs on lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ shost = csio_ln_to_shost(ln);
+ scsi_unblock_requests(shost);
+ }
+ kfree(lnode_list);
+}
+
+void
+csio_lnodes_exit(struct csio_hw *hw, bool npiv)
+{
+ struct csio_lnode *sln;
+ struct csio_lnode *ln;
+ struct list_head *cur_ln, *cur_cln;
+ struct csio_lnode **lnode_list;
+ int cur_cnt = 0, ii;
+
+ lnode_list = kzalloc((sizeof(struct csio_lnode *) * hw->num_lns),
+ GFP_KERNEL);
+ if (!lnode_list) {
+ csio_err(hw, "lnodes_exit: Failed to allocate lnodes_list.\n");
+ return;
+ }
+
+ /* Get all child lnodes (NPIV ports) */
+ spin_lock_irq(&hw->lock);
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+
+ /* Traverse children lnodes */
+ list_for_each(cur_cln, &sln->cln_head)
+ lnode_list[cur_cnt++] = (struct csio_lnode *) cur_cln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete NPIV lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting child lnode: %p\n", lnode_list[ii]);
+ ln = lnode_list[ii];
+ fc_vport_terminate(ln->fc_vport);
+ }
+
+ /* If only NPIV lnodes are to be deleted, we are done */
+ if (npiv)
+ goto free_lnodes;
+
+ cur_cnt = 0;
+ /* Get all physical lnodes */
+ spin_lock_irq(&hw->lock);
+ /* Traverse sibling lnodes */
+ list_for_each(cur_ln, &hw->sln_head) {
+ sln = (struct csio_lnode *) cur_ln;
+ lnode_list[cur_cnt++] = sln;
+ }
+ spin_unlock_irq(&hw->lock);
+
+ /* Delete physical lnodes */
+ for (ii = 0; ii < cur_cnt; ii++) {
+ csio_dbg(hw, "Deleting parent lnode: %p\n", lnode_list[ii]);
+ csio_shost_exit(lnode_list[ii]);
+ }
+
+free_lnodes:
+ kfree(lnode_list);
+}
+
+/*
+ * csio_lnode_init_post: Set lnode attributes after starting HW.
+ * @ln: lnode.
+ *
+ */
+static void
+csio_lnode_init_post(struct csio_lnode *ln)
+{
+ struct Scsi_Host *shost = csio_ln_to_shost(ln);
+
+ csio_fchost_attr_init(ln);
+
+ scsi_scan_host(shost);
+}
+
+/*
+ * csio_probe_one - Instantiate this function.
+ * @pdev: PCI device
+ * @id: Device ID
+ *
+ * This is the .probe() callback of the driver. This function:
+ * - Initializes the PCI function by enabling MMIO, setting bus
+ * mastership and setting DMA mask.
+ * - Allocates HW structure, DMA, memory resources, maps BARs to
+ * host memory and initializes the HW module.
+ * - Allocates lnode structure via scsi_host_alloc, initializes
+ * shost, initializes the lnode module and registers with SCSI ML
+ * via scsi_host_add.
+ * - Enables interrupts, and starts the chip by kicking off the
+ * HW state machine.
+ * - Once hardware is ready, initiates a scan of the host via
+ * scsi_scan_host.
+ */
+static int csio_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ int rv;
+ int bars;
+ int i;
+ struct csio_hw *hw;
+ struct csio_lnode *ln;
+
+ rv = csio_pci_init(pdev, &bars);
+ if (rv)
+ goto err;
+
+ hw = csio_hw_alloc(pdev);
+ if (!hw) {
+ rv = -ENODEV;
+ goto err_pci_exit;
+ }
+
+ pci_set_drvdata(pdev, hw);
+
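+ /* If FW fails to start, stay bound to the device (return success)
+ * so that its registers remain reachable for debugging.
+ */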
+ if (csio_hw_start(hw) != 0) {
+ dev_err(&pdev->dev,
+ "Failed to start FW, continuing in debug mode.\n");
+ return 0;
+ }
+
+ sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+ FW_HDR_FW_VER_MAJOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MINOR_GET(hw->fwrev),
+ FW_HDR_FW_VER_MICRO_GET(hw->fwrev),
+ FW_HDR_FW_VER_BUILD_GET(hw->fwrev));
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_lnode_exit;
+
+ return 0;
+
+err_lnode_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ pci_set_drvdata(hw->pdev, NULL);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+err_pci_exit:
+ csio_pci_exit(pdev, &bars);
+err:
+ dev_err(&pdev->dev, "probe of device failed: %d\n", rv);
+ return rv;
+}
+
+/*
+ * csio_remove_one - Remove one instance of the driver at this PCI function.
+ * @pdev: PCI device
+ *
+ * Used during hotplug operation.
+ */
+static void csio_remove_one(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Stops lnode, Rnode s/m
+ * Quiesce IOs.
+ * All sessions with remote ports are unregistered.
+ */
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ pci_set_drvdata(pdev, NULL);
+ csio_pci_exit(pdev, &bars);
+}
+
+/*
+ * csio_pci_error_detected - PCI error was detected
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+
+ /* Post PCI error detected evt to HW s/m
+ * HW s/m handles this evt by quiescing IOs, unregisters rports
+ * and finally takes the device offline.
+ */
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_DETECTED);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_intr_disable(hw, true);
+ pci_disable_device(pdev);
+ return state == pci_channel_io_perm_failure ?
+ PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
+}
+
+/*
+ * csio_pci_slot_reset - PCI slot has been reset.
+ * @pdev: PCI device
+ *
+ */
+static pci_ers_result_t
+csio_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ int ready;
+
+ if (pci_enable_device(pdev)) {
+ dev_err(&pdev->dev, "cannot re-enable device in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+ pci_save_state(pdev);
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+
+ /* Bring HW s/m to ready state,
+ * but don't resume IOs.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_post_event(&hw->sm, CSIO_HWE_PCIERR_SLOT_RESET);
+ ready = csio_is_hw_ready(hw);
+ spin_unlock_irq(&hw->lock);
+
+ if (ready) {
+ return PCI_ERS_RESULT_RECOVERED;
+ } else {
+ dev_err(&pdev->dev, "Can't initialize HW when in slot reset\n");
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+}
+
+/*
+ * csio_pci_resume - Resume normal operations
+ * @pdev: PCI device
+ *
+ */
+static void
+csio_pci_resume(struct pci_dev *pdev)
+{
+ struct csio_hw *hw = pci_get_drvdata(pdev);
+ struct csio_lnode *ln;
+ int rv = 0;
+ int i;
+
+ /* Bring the LINK UP and Resume IO */
+
+ for (i = 0; i < hw->num_pports; i++) {
+ ln = csio_shost_init(hw, &pdev->dev, true, NULL);
+ if (!ln) {
+ rv = -ENODEV;
+ break;
+ }
+ /* Initialize portid */
+ ln->portid = hw->pport[i].portid;
+
+ spin_lock_irq(&hw->lock);
+ if (csio_lnode_start(ln) != 0)
+ rv = -ENODEV;
+ spin_unlock_irq(&hw->lock);
+
+ if (rv)
+ break;
+
+ csio_lnode_init_post(ln);
+ }
+
+ if (rv)
+ goto err_resume_exit;
+
+ return;
+
+err_resume_exit:
+ csio_lnodes_block_request(hw);
+ spin_lock_irq(&hw->lock);
+ csio_hw_stop(hw);
+ spin_unlock_irq(&hw->lock);
+ csio_lnodes_unblock_request(hw);
+ csio_lnodes_exit(hw, 0);
+ csio_hw_free(hw);
+ dev_err(&pdev->dev, "resume of device failed: %d\n", rv);
+}
+
+static struct pci_error_handlers csio_err_handler = {
+ .error_detected = csio_pci_error_detected,
+ .slot_reset = csio_pci_slot_reset,
+ .resume = csio_pci_resume,
+};
+
+static DEFINE_PCI_DEVICE_TABLE(csio_pci_tbl) = {
+ CSIO_DEVICE(CSIO_DEVID_T440DBG_FCOE, 0), /* T440DBG FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CR_FCOE, 0), /* T420CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T422CR_FCOE, 0), /* T422CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CR_FCOE, 0), /* T440CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BCH_FCOE, 0), /* T420BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440BCH_FCOE, 0), /* T440BCH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440CH_FCOE, 0), /* T440CH FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420SO_FCOE, 0), /* T420SO FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420CX_FCOE, 0), /* T420CX FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T420BT_FCOE, 0), /* T420BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T404BT_FCOE, 0), /* T404BT FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B420_FCOE, 0), /* B420 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_B404_FCOE, 0), /* B404 FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T480CR_FCOE, 0), /* T480 CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_T440LPCR_FCOE, 0), /* T440 LP-CR FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K, 0), /* PE10K FCOE */
+ CSIO_DEVICE(CSIO_DEVID_PE10K_PF1, 0), /* PE10K FCOE on PF1 */
+ { 0, 0, 0, 0, 0, 0, 0 }
+};
+
+
+static struct pci_driver csio_pci_driver = {
+ .name = KBUILD_MODNAME,
+ .driver = {
+ .owner = THIS_MODULE,
+ },
+ .id_table = csio_pci_tbl,
+ .probe = csio_probe_one,
+ .remove = csio_remove_one,
+ .err_handler = &csio_err_handler,
+};
+
+/*
+ * csio_init - Chelsio storage driver initialization function.
+ *
+ */
+static int __init
+csio_init(void)
+{
+ int rv = -ENOMEM;
+
+ pr_info("%s %s\n", CSIO_DRV_DESC, CSIO_DRV_VERSION);
+
+ csio_dfs_init();
+
+ csio_fcoe_transport = fc_attach_transport(&csio_fc_transport_funcs);
+ if (!csio_fcoe_transport)
+ goto err;
+
+ csio_fcoe_transport_vport =
+ fc_attach_transport(&csio_fc_transport_vport_funcs);
+ if (!csio_fcoe_transport_vport)
+ goto err_vport;
+
+ rv = pci_register_driver(&csio_pci_driver);
+ if (rv)
+ goto err_pci;
+
+ return 0;
+
+err_pci:
+ fc_release_transport(csio_fcoe_transport_vport);
+err_vport:
+ fc_release_transport(csio_fcoe_transport);
+err:
+ csio_dfs_exit();
+ return rv;
+}
+
+/*
+ * csio_exit - Chelsio storage driver uninitialization.
+ *
+ * Function that gets called in the unload path.
+ */
+static void __exit
+csio_exit(void)
+{
+ pci_unregister_driver(&csio_pci_driver);
+ csio_dfs_exit();
+ fc_release_transport(csio_fcoe_transport_vport);
+ fc_release_transport(csio_fcoe_transport);
+}
+
+module_init(csio_init);
+module_exit(csio_exit);
+MODULE_AUTHOR(CSIO_DRV_AUTHOR);
+MODULE_DESCRIPTION(CSIO_DRV_DESC);
+MODULE_LICENSE(CSIO_DRV_LICENSE);
+MODULE_DEVICE_TABLE(pci, csio_pci_tbl);
+MODULE_VERSION(CSIO_DRV_VERSION);
+MODULE_FIRMWARE(CSIO_FW_FNAME);
diff --git a/drivers/scsi/csiostor/csio_init.h b/drivers/scsi/csiostor/csio_init.h
new file mode 100644
index 000000000000..0838fd7ec9c7
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_init.h
@@ -0,0 +1,158 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_INIT_H__
+#define __CSIO_INIT_H__
+
+#include <linux/pci.h>
+#include <linux/if_ether.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_scsi.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_hw.h"
+
+#define CSIO_DRV_AUTHOR "Chelsio Communications"
+#define CSIO_DRV_LICENSE "Dual BSD/GPL"
+#define CSIO_DRV_DESC "Chelsio FCoE driver"
+#define CSIO_DRV_VERSION "1.0.0"
+
+#define CSIO_DEVICE(devid, idx) \
+{ PCI_VENDOR_ID_CHELSIO, (devid), PCI_ANY_ID, PCI_ANY_ID, 0, 0, (idx) }
+
+#define CSIO_IS_T4_FPGA(_dev) (((_dev) == CSIO_DEVID_PE10K) ||\
+ ((_dev) == CSIO_DEVID_PE10K_PF1))
+
+/* FCoE device IDs */
+#define CSIO_DEVID_PE10K 0xA000
+#define CSIO_DEVID_PE10K_PF1 0xA001
+#define CSIO_DEVID_T440DBG_FCOE 0x4600
+#define CSIO_DEVID_T420CR_FCOE 0x4601
+#define CSIO_DEVID_T422CR_FCOE 0x4602
+#define CSIO_DEVID_T440CR_FCOE 0x4603
+#define CSIO_DEVID_T420BCH_FCOE 0x4604
+#define CSIO_DEVID_T440BCH_FCOE 0x4605
+#define CSIO_DEVID_T440CH_FCOE 0x4606
+#define CSIO_DEVID_T420SO_FCOE 0x4607
+#define CSIO_DEVID_T420CX_FCOE 0x4608
+#define CSIO_DEVID_T420BT_FCOE 0x4609
+#define CSIO_DEVID_T404BT_FCOE 0x460A
+#define CSIO_DEVID_B420_FCOE 0x460B
+#define CSIO_DEVID_B404_FCOE 0x460C
+#define CSIO_DEVID_T480CR_FCOE 0x460D
+#define CSIO_DEVID_T440LPCR_FCOE 0x460E
+
+extern struct fc_function_template csio_fc_transport_funcs;
+extern struct fc_function_template csio_fc_transport_vport_funcs;
+
+void csio_fchost_attr_init(struct csio_lnode *);
+
+/* INTx handlers */
+void csio_scsi_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+void csio_fwevt_intx_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+/* Common os lnode APIs */
+void csio_lnodes_block_request(struct csio_hw *);
+void csio_lnodes_unblock_request(struct csio_hw *);
+void csio_lnodes_block_by_port(struct csio_hw *, uint8_t);
+void csio_lnodes_unblock_by_port(struct csio_hw *, uint8_t);
+
+struct csio_lnode *csio_shost_init(struct csio_hw *, struct device *, bool,
+ struct csio_lnode *);
+void csio_shost_exit(struct csio_lnode *);
+void csio_lnodes_exit(struct csio_hw *, bool);
+
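+/* The lnode is allocated as shost private data (see csio_shost_init()),
+ * so container_of() on hostdata[0] recovers the enclosing Scsi_Host.
+ */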
+static inline struct Scsi_Host *
+csio_ln_to_shost(struct csio_lnode *ln)
+{
+ return container_of((void *)ln, struct Scsi_Host, hostdata[0]);
+}
+
+/* SCSI -- locking version of get/put ioreqs */
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim)
+{
+ struct csio_ioreq *ioreq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ ioreq = csio_get_scsi_ioreq(scsim);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+
+ return ioreq;
+}
+
+static inline void
+csio_put_scsi_ioreq_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct csio_ioreq *ioreq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq(scsim, ioreq);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
+static inline void
+csio_put_scsi_ioreq_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&scsim->freelist_lock, flags);
+ csio_put_scsi_ioreq_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&scsim->freelist_lock, flags);
+}
+
+/* Called in interrupt context */
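+/* Note: the DDP list is protected by the HW lock, not the freelist lock */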
+static inline void
+csio_put_scsi_ddp_list_lock(struct csio_hw *hw, struct csio_scsim *scsim,
+ struct list_head *reqlist, int n)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_put_scsi_ddp_list(scsim, reqlist, n);
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+#endif /* ifndef __CSIO_INIT_H__ */
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
new file mode 100644
index 000000000000..7ee9777ae2c5
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/string.h>
+
+#include "csio_init.h"
+#include "csio_hw.h"
+
+static irqreturn_t
+csio_nondata_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ int rv;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_hw_slow_intr_handler(hw);
+ rv = csio_mb_isr_handler(hw);
+
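+ /* CSIO_HWF_FWEVT_PENDING gates the event worker: schedule
+ * evtq_work only if it is not already pending, so it is never
+ * queued twice.
+ */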
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_handler - Common FW event handler routine.
+ * @hw: HW module.
+ *
+ * This is the ISR for FW events. It is shared between the MSIX
+ * and INTx handlers.
+ */
+static void
+csio_fwevt_handler(struct csio_hw *hw)
+{
+ int rv;
+ unsigned long flags;
+
+ rv = csio_fwevtq_handler(hw);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+} /* csio_fwevt_handler */
+
+/*
+ * csio_fwevt_isr() - FW events MSIX ISR
+ * @irq:
+ * @dev_id:
+ *
+ * Process WRs on the FW event queue.
+ *
+ */
+static irqreturn_t
+csio_fwevt_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_fwevt_handler(hw);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
+ * @irq:
+ * @dev_id:
+ */
+void
+csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ csio_fwevt_handler(hw);
+} /* csio_fwevt_intx_handler */
+
+/*
+ * csio_process_scsi_cmpl - Process a SCSI WR completion.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @cbfn_q: Completion queue to which completed ioreqs are added.
+ *
+ */
+static void
+csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *cbfn_q)
+{
+ struct csio_ioreq *ioreq;
+ uint8_t *scsiwr;
+ uint8_t subop;
+ void *cmnd;
+ unsigned long flags;
+
+ ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
+ if (likely(ioreq)) {
+ if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
+ subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
+ ((struct fw_scsi_abrt_cls_wr *)
+ scsiwr)->sub_opcode_to_chk_all_io);
+
+ csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
+ subop ? "Close" : "Abort",
+ ioreq, ioreq->wr_status);
+
+ spin_lock_irqsave(&hw->lock, flags);
+ if (subop)
+ csio_scsi_closed(ioreq,
+ (struct list_head *)cbfn_q);
+ else
+ csio_scsi_aborted(ioreq,
+ (struct list_head *)cbfn_q);
+ /*
+ * We call scsi_done for I/Os that driver thinks aborts
+ * have timed out. If there is a race caused by FW
+ * completing abort at the exact same time that the
+ * driver has detected the abort timeout, the following
+ * check prevents calling of scsi_done twice for the
+ * same command: once from the eh_abort_handler, another
+ * from csio_scsi_isr_handler(). This also avoids the
+ * need to check if csio_scsi_cmnd(req) is NULL in the
+ * fast path.
+ */
+ cmnd = csio_scsi_cmnd(ioreq);
+ if (unlikely(cmnd == NULL))
+ list_del_init(&ioreq->sm.sm_list);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (unlikely(cmnd == NULL))
+ csio_put_scsi_ioreq_lock(hw,
+ csio_hw_to_scsim(hw), ioreq);
+ } else {
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+ }
+ }
+}
+
+/*
+ * csio_scsi_isr_handler() - Common SCSI ISR handler.
+ * @iq: Ingress queue pointer.
+ *
+ * Processes SCSI completions on the given SCSI IQ by calling
+ * csio_wr_process_iq(). If there are completions on the
+ * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
+ * Once done, add these completions onto the freelist.
+ * This routine is shared between the MSIX and INTx handlers.
+ */
+static inline irqreturn_t
+csio_scsi_isr_handler(struct csio_q *iq)
+{
+ struct csio_hw *hw = (struct csio_hw *)iq->owner;
+ LIST_HEAD(cbfn_q);
+ struct list_head *tmp;
+ struct csio_scsim *scm;
+ struct csio_ioreq *ioreq;
+ int isr_completions = 0;
+
+ scm = csio_hw_to_scsim(hw);
+
+ if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
+ &cbfn_q) != 0))
+ return IRQ_NONE;
+
+ /* Call back the completion routines */
+ list_for_each(tmp, &cbfn_q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ isr_completions++;
+ ioreq->io_cbfn(hw, ioreq);
+ /* Release ddp buffer if used for this req */
+ if (unlikely(ioreq->dcopy))
+ csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
+ ioreq->nsge);
+ }
+
+ if (isr_completions) {
+ /* Return the ioreqs back to ioreq->freelist */
+ csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
+ isr_completions);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_isr() - SCSI MSIX handler
+ * @irq:
+ * @dev_id:
+ *
+ * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+static irqreturn_t
+csio_scsi_isr(int irq, void *dev_id)
+{
+ struct csio_q *iq = (struct csio_q *) dev_id;
+ struct csio_hw *hw;
+
+ if (unlikely(!iq))
+ return IRQ_NONE;
+
+ hw = (struct csio_hw *)iq->owner;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ csio_scsi_isr_handler(iq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * csio_scsi_intx_handler() - SCSI INTx handler
+ * @irq:
+ * @dev_id:
+ *
+ * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
+ * for handling SCSI completions.
+ */
+void
+csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv)
+{
+ struct csio_q *iq = priv;
+
+ csio_scsi_isr_handler(iq);
+
+} /* csio_scsi_intx_handler */
+
+/*
+ * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
+ * @irq: Interrupt number.
+ * @dev_id: Pointer to the HW module.
+ *
+ * Handles slow path interrupts, the forwarded-interrupt ingress queue
+ * and mailbox completions when running in INTx/MSI mode.
+ */
+static irqreturn_t
+csio_fcoe_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = (struct csio_hw *) dev_id;
+ struct csio_q *intx_q = NULL;
+ int rv;
+ irqreturn_t ret = IRQ_NONE;
+ unsigned long flags;
+
+ if (unlikely(!hw))
+ return IRQ_NONE;
+
+ if (unlikely(pci_channel_offline(hw->pdev))) {
+ CSIO_INC_STATS(hw, n_pcich_offline);
+ return IRQ_NONE;
+ }
+
+ /* Disable the interrupt for this PCI function. */
+ if (hw->intr_mode == CSIO_IM_INTX)
+ csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI));
+
+ /*
+ * The read in the following function will flush the
+ * above write.
+ */
+ if (csio_hw_slow_intr_handler(hw))
+ ret = IRQ_HANDLED;
+
+ /* Get the INTx Forward interrupt IQ. */
+ intx_q = csio_get_q(hw, hw->intr_iq_idx);
+
+ CSIO_DB_ASSERT(intx_q);
+
+ /* IQ handler is not possible for intx_q, hence pass in NULL */
+ if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
+ ret = IRQ_HANDLED;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ rv = csio_mb_isr_handler(hw);
+ if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
+ hw->flags |= CSIO_HWF_FWEVT_PENDING;
+ spin_unlock_irqrestore(&hw->lock, flags);
+ schedule_work(&hw->evtq_work);
+ return IRQ_HANDLED;
+ }
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ return ret;
+}
+
+static void
+csio_add_msix_desc(struct csio_hw *hw)
+{
+ int i;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ int k = CSIO_EXTRA_VECS;
+ int len = sizeof(entryp->desc) - 1;
+ int cnt = hw->num_sqsets + k;
+
+ /* Non-data vector */
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+
+ entryp++;
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
+ entryp++;
+
+ /* Name SCSI vecs */
+ for (i = k; i < cnt; i++, entryp++) {
+ memset(entryp->desc, 0, len + 1);
+ snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
+ CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
+ CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
+ }
+}
+
+int
+csio_request_irqs(struct csio_hw *hw)
+{
+ int rv, i, j, k = 0;
+ struct csio_msix_entries *entryp = &hw->msix_entries[0];
+ struct csio_scsi_cpu_info *info;
+
+ if (hw->intr_mode != CSIO_IM_MSIX) {
+ rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
+ (hw->intr_mode == CSIO_IM_MSI) ?
+ 0 : IRQF_SHARED,
+ KBUILD_MODNAME, hw);
+ if (rv) {
+ if (hw->intr_mode == CSIO_IM_MSI)
+ pci_disable_msi(hw->pdev);
+ csio_err(hw, "Failed to allocate interrupt line.\n");
+ return -EINVAL;
+ }
+
+ goto out;
+ }
+
+ /* Add the MSIX vector descriptions */
+ csio_add_msix_desc(hw);
+
+ rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
+ entryp[k].desc, hw);
+ if (rv) {
+ csio_err(hw, "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k++].dev_id = (void *)hw;
+
+ /* Allocate IRQs for SCSI */
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ for (j = 0; j < info->max_cpus; j++, k++) {
+ struct csio_scsi_qset *sqset = &hw->sqset[i][j];
+ struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];
+
+ rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
+ entryp[k].desc, q);
+ if (rv) {
+ csio_err(hw,
+ "IRQ request failed for vec %d err:%d\n",
+ entryp[k].vector, rv);
+ goto err;
+ }
+
+ entryp[k].dev_id = (void *)q;
+
+ } /* for all scsi cpus */
+ } /* for all ports */
+
+out:
+ hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
+
+ return 0;
+
+err:
+ for (i = 0; i < k; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ pci_disable_msix(hw->pdev);
+
+ return -EINVAL;
+}
+
+static void
+csio_disable_msix(struct csio_hw *hw, bool free)
+{
+ int i;
+ struct csio_msix_entries *entryp;
+ int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;
+
+ if (free) {
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ free_irq(entryp->vector, entryp->dev_id);
+ }
+ }
+ pci_disable_msix(hw->pdev);
+}
+
+/* Reduce per-port max possible CPUs */
+static void
+csio_reduce_sqsets(struct csio_hw *hw, int cnt)
+{
+ int i;
+ struct csio_scsi_cpu_info *info;
+
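+	/*
+	 * Trim one queue set at a time, cycling across the ports, so the
+	 * remaining queue sets stay evenly distributed per port.
+	 */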
+ while (cnt < hw->num_sqsets) {
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+ if (info->max_cpus > 1) {
+ info->max_cpus--;
+ hw->num_sqsets--;
+ if (hw->num_sqsets <= cnt)
+ break;
+ }
+ }
+ }
+
+ csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
+}
+
+static int
+csio_enable_msix(struct csio_hw *hw)
+{
+ int rv, i, j, k, n, min, cnt;
+ struct csio_msix_entries *entryp;
+ struct msix_entry *entries;
+ int extra = CSIO_EXTRA_VECS;
+ struct csio_scsi_cpu_info *info;
+
+ min = hw->num_pports + extra;
+ cnt = hw->num_sqsets + extra;
+
+ /* Max vectors required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
+ cnt = min_t(uint8_t, hw->cfg_niq, cnt);
+
+	entries = kcalloc(cnt, sizeof(struct msix_entry), GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ for (i = 0; i < cnt; i++)
+ entries[i].entry = (uint16_t)i;
+
+ csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
+
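+	/*
+	 * pci_enable_msix() returns 0 on success, a negative errno on
+	 * failure, or a positive count of vectors that could have been
+	 * allocated. Retry with that reduced count as long as it still
+	 * meets our minimum requirement.
+	 */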
+ while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
+ cnt = rv;
+ if (!rv) {
+ if (cnt < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+ csio_reduce_sqsets(hw, cnt - extra);
+ }
+ } else {
+ if (rv > 0) {
+ pci_disable_msix(hw->pdev);
+ csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
+ }
+
+ kfree(entries);
+ return -ENOMEM;
+ }
+
+ /* Save off vectors */
+ for (i = 0; i < cnt; i++) {
+ entryp = &hw->msix_entries[i];
+ entryp->vector = entries[i].vector;
+ }
+
+ /* Distribute vectors */
+ k = 0;
+ csio_set_nondata_intr_idx(hw, entries[k].entry);
+ csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
+ csio_set_fwevt_intr_idx(hw, entries[k++].entry);
+
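+	/*
+	 * Hand the remaining vectors to the SCSI queue sets, one per CPU
+	 * per port. If a port was granted fewer vectors than CPUs, the
+	 * modulo wrap shares its vectors round-robin among the CPUs.
+	 */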
+ for (i = 0; i < hw->num_pports; i++) {
+ info = &hw->scsi_cpu_info[i];
+
+ for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
+ n = (j % info->max_cpus) + k;
+ hw->sqset[i][j].intr_idx = entries[n].entry;
+ }
+
+ k += info->max_cpus;
+ }
+
+ kfree(entries);
+ return 0;
+}
+
+void
+csio_intr_enable(struct csio_hw *hw)
+{
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+
+ /* Try MSIX, then MSI or fall back to INTx */
+ if ((csio_msi == 2) && !csio_enable_msix(hw))
+ hw->intr_mode = CSIO_IM_MSIX;
+ else {
+ /* Max iqs required based on #niqs configured in fw */
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
+ !csio_is_hw_master(hw)) {
+ int extra = CSIO_EXTRA_MSI_IQS;
+
+ if (hw->cfg_niq < (hw->num_sqsets + extra)) {
+ csio_dbg(hw, "Reducing sqsets to %d\n",
+ hw->cfg_niq - extra);
+ csio_reduce_sqsets(hw, hw->cfg_niq - extra);
+ }
+ }
+
+ if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
+ hw->intr_mode = CSIO_IM_MSI;
+ else
+ hw->intr_mode = CSIO_IM_INTX;
+ }
+
+ csio_dbg(hw, "Using %s interrupt mode.\n",
+ (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
+ ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
+}
+
+void
+csio_intr_disable(struct csio_hw *hw, bool free)
+{
+ csio_hw_intr_disable(hw);
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_MSIX:
+ csio_disable_msix(hw, free);
+ break;
+ case CSIO_IM_MSI:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ pci_disable_msi(hw->pdev);
+ break;
+ case CSIO_IM_INTX:
+ if (free)
+ free_irq(hw->pdev->irq, hw);
+ break;
+ default:
+ break;
+ }
+ hw->intr_mode = CSIO_IM_NONE;
+ hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.c b/drivers/scsi/csiostor/csio_lnode.c
new file mode 100644
index 000000000000..ffe9be04dc39
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.c
@@ -0,0 +1,2135 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/utsname.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <asm/unaligned.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_gs.h>
+#include <scsi/fc/fc_ms.h>
+
+#include "csio_hw.h"
+#include "csio_mb.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+int csio_fcoe_rnodes = 1024;
+int csio_fdmi_enable = 1;
+
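+/*
+ * A 24-bit FC port id occupies the three low-order bytes of a __be32;
+ * PORT_ID_PTR skips the leading byte so the id can be copied directly.
+ */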
+#define PORT_ID_PTR(_x) ((uint8_t *)(&_x) + 1)
+
+/* Lnode SM declarations */
+static void csio_lns_uninit(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_online(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_ready(struct csio_lnode *, enum csio_ln_ev);
+static void csio_lns_offline(struct csio_lnode *, enum csio_ln_ev);
+
+static int csio_ln_mgmt_submit_req(struct csio_ioreq *,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type, struct csio_dma_buf *, uint32_t);
+
+/* LN event mapping */
+static enum csio_ln_ev fwevt_to_lnevt[] = {
+ CSIO_LNE_NONE, /* None */
+ CSIO_LNE_NONE, /* PLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PLOGI_RCVD */
+ CSIO_LNE_NONE, /* PLOGO_RCVD */
+ CSIO_LNE_NONE, /* PRLI_ACC_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_LNE_NONE, /* PRLI_RCVD */
+ CSIO_LNE_NONE, /* PRLO_RCVD */
+ CSIO_LNE_NONE, /* NPORT_ID_CHGD */
+ CSIO_LNE_LOGO, /* FLOGO_RCVD */
+ CSIO_LNE_LOGO, /* CLR_VIRT_LNK_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FLOGI_ACC_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_LNE_FAB_INIT_DONE,/* FDISC_ACC_RCVD */
+ CSIO_LNE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_LNE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_LNE_NONE, /* PRLI_TMO */
+ CSIO_LNE_NONE, /* ADISC_TMO */
+ CSIO_LNE_NONE, /* RSCN_DEV_LOST */
+ CSIO_LNE_NONE, /* SCR_ACC_RCVD */
+ CSIO_LNE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_LNE_NONE, /* LOGO_SNT */
+ CSIO_LNE_NONE, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_LNE(_evt) ((_evt > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_LNE_NONE : \
+ fwevt_to_lnevt[_evt])
+
+#define csio_ct_rsp(cp) (((struct fc_ct_hdr *)cp)->ct_cmd)
+#define csio_ct_reason(cp) (((struct fc_ct_hdr *)cp)->ct_reason)
+#define csio_ct_expl(cp) (((struct fc_ct_hdr *)cp)->ct_explan)
+#define csio_ct_get_pld(cp) ((void *)(((uint8_t *)cp) + FC_CT_HDR_LEN))
+
+/*
+ * csio_ln_lookup_by_portid - Lookup lnode using the given portid.
+ * @hw: HW module
+ * @portid: port-id.
+ *
+ * If found, returns lnode matching given portid otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_portid(struct csio_hw *hw, uint8_t portid)
+{
+ struct csio_lnode *ln = hw->rln;
+ struct list_head *tmp;
+
+ /* Match siblings lnode with portid */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid == portid)
+ return ln;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_ln_lookup_by_vnpi - Lookup lnode using given vnp id.
+ * @hw - HW module
+ * @vnp_id - vnp index.
+ * Returns - If found, returns the lnode matching the given vnp id,
+ * otherwise returns NULL.
+ */
+static struct csio_lnode *
+csio_ln_lookup_by_vnpi(struct csio_hw *hw, uint32_t vnp_id)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (sln->vnp_flowid == vnp_id)
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (cln->vnp_flowid == vnp_id)
+ return cln;
+ }
+ }
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+}
+
+/**
+ * csio_lnode_lookup_by_wwpn - Lookup lnode using given wwpn.
+ * @hw: HW module.
+ * @wwpn: WWPN.
+ *
+ * If found, returns lnode matching given wwpn, returns NULL otherwise.
+ */
+struct csio_lnode *
+csio_lnode_lookup_by_wwpn(struct csio_hw *hw, uint8_t *wwpn)
+{
+ struct list_head *tmp1, *tmp2;
+ struct csio_lnode *sln = NULL, *cln = NULL;
+
+ if (list_empty(&hw->sln_head)) {
+ CSIO_INC_STATS(hw, n_lnlkup_miss);
+ return NULL;
+ }
+ /* Traverse sibling lnodes */
+ list_for_each(tmp1, &hw->sln_head) {
+ sln = (struct csio_lnode *) tmp1;
+
+ /* Match sibling lnode */
+ if (!memcmp(csio_ln_wwpn(sln), wwpn, 8))
+ return sln;
+
+ if (list_empty(&sln->cln_head))
+ continue;
+
+ /* Traverse children lnodes */
+ list_for_each(tmp2, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp2;
+
+ if (!memcmp(csio_ln_wwpn(cln), wwpn, 8))
+ return cln;
+ }
+ }
+ return NULL;
+}
+
+/* FDMI */
+static void
+csio_fill_ct_iu(void *buf, uint8_t type, uint8_t sub_type, uint16_t op)
+{
+ struct fc_ct_hdr *cmd = (struct fc_ct_hdr *)buf;
+ cmd->ct_rev = FC_CT_REV;
+ cmd->ct_fs_type = type;
+ cmd->ct_fs_subtype = sub_type;
+ cmd->ct_cmd = htons(op);
+}
+
+static int
+csio_hostname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s", init_utsname()->nodename) > 0)
+ return 0;
+ return -1;
+}
+
+static int
+csio_osname(uint8_t *buf, size_t buf_len)
+{
+ if (snprintf(buf, buf_len, "%s %s %s",
+ init_utsname()->sysname,
+ init_utsname()->release,
+ init_utsname()->version) > 0)
+ return 0;
+
+ return -1;
+}
+
+static inline void
+csio_append_attrib(uint8_t **ptr, uint16_t type, uint8_t *val, uint16_t len)
+{
+	struct fc_fdmi_attr_entry *ae = (struct fc_fdmi_attr_entry *)*ptr;
+	uint16_t copy_len = len;
+
+	ae->type = htons(type);
+	len += 4;		/* includes attribute type and length */
+	len = (len + 3) & ~3;	/* should be a multiple of 4 bytes */
+	ae->len = htons(len);
+	/* Copy only the caller's bytes and zero the round-up padding,
+	 * rather than reading past the end of val.
+	 */
+	memset(ae->value, 0, len - 4);
+	memcpy(ae->value, val, copy_len);
+ *ptr += len;
+}
+
+/*
+ * csio_ln_fdmi_done - FDMI registration completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_done(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ struct csio_lnode *ln = fdmi_req->lnode;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rpa cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rpa cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+}
+
+/*
+ * csio_ln_fdmi_rhba_cbfn - RHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_rhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ __be32 val;
+ __be16 mfs;
+ uint32_t numattrs = 0;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fs_fdmi_attrs *attrib_blk;
+ struct fc_fdmi_port_name *port_name;
+ uint8_t buf[64];
+ uint8_t *fc4_type;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi rhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi rhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+
+ /* Prepare CT hdr for RPA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RPA);
+
+ /* Prepare RPA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ port_name = (struct fc_fdmi_port_name *)pld;
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*port_name);
+
+ /* Start appending Port attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
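+	/*
+	 * Build the supported FC-4 types bitmap: the bits set below mark
+	 * type 0x08 (SCSI-FCP) and type 0x20 (CT) as the protocols this
+	 * port speaks.
+	 */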
+ fc4_type = &buf[0];
+ memset(fc4_type, 0, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ fc4_type[2] = 1;
+ fc4_type[7] = 1;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_FC4TYPES,
+ fc4_type, FC_FDMI_PORT_ATTR_FC4TYPES_LEN);
+ numattrs++;
+ val = htonl(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_SUPPORTEDSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_SUPPORTEDSPEED_LEN);
+ numattrs++;
+
+ if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_1G)
+ val = htonl(FC_PORTSPEED_1GBIT);
+ else if (hw->pport[ln->portid].link_speed == FW_PORT_CAP_SPEED_10G)
+ val = htonl(FC_PORTSPEED_10GBIT);
+ else
+ val = htonl(CSIO_HBA_PORTSPEED_UNKNOWN);
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_CURRENTPORTSPEED,
+ (uint8_t *)&val,
+ FC_FDMI_PORT_ATTR_CURRENTPORTSPEED_LEN);
+ numattrs++;
+
+ mfs = ln->ln_sparm.csp.sp_bb_data;
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_MAXFRAMESIZE,
+ (uint8_t *)&mfs, FC_FDMI_PORT_ATTR_MAXFRAMESIZE_LEN);
+ numattrs++;
+
+ strcpy(buf, "csiostor");
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_OSDEVICENAME, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+
+ if (!csio_hostname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_PORT_ATTR_HOSTNAME,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+ attrib_blk->numattrs = htonl(numattrs);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+
+ /* Submit FDMI RPA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_done,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rpa req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dprt_cbfn - DPRT completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dprt_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ void *cmd;
+ uint8_t *pld;
+ uint32_t len = 0;
+ uint32_t numattrs = 0;
+ __be32 maxpayload = htonl(65536);
+ struct fc_fdmi_hba_identifier *hbaid;
+ struct csio_lnode *ln = fdmi_req->lnode;
+ struct fc_fdmi_rpl *reg_pl;
+ struct fs_fdmi_attrs *attrib_blk;
+ uint8_t buf[64];
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dprt cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dprt cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Prepare CT hdr for RHBA cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_RHBA);
+ len = FC_CT_HDR_LEN;
+
+ /* Prepare RHBA payload */
+ pld = (uint8_t *)csio_ct_get_pld(cmd);
+ hbaid = (struct fc_fdmi_hba_identifier *)pld;
+	memcpy(&hbaid->id, csio_ln_wwpn(ln), 8); /* HBA identifier */
+ pld += sizeof(*hbaid);
+
+ /* Register one port per hba */
+ reg_pl = (struct fc_fdmi_rpl *)pld;
+ reg_pl->numport = htonl(1);
+ memcpy(&reg_pl->port[0].portname, csio_ln_wwpn(ln), 8);
+ pld += sizeof(*reg_pl);
+
+	/* Start appending HBA attributes */
+ attrib_blk = (struct fs_fdmi_attrs *)pld;
+ attrib_blk->numattrs = 0;
+ len += sizeof(attrib_blk->numattrs);
+ pld += sizeof(attrib_blk->numattrs);
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_NODENAME, csio_ln_wwnn(ln),
+ FC_FDMI_HBA_ATTR_NODENAME_LEN);
+ numattrs++;
+
+ memset(buf, 0, sizeof(buf));
+
+ strcpy(buf, "Chelsio Communications");
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MANUFACTURER, buf,
+ (uint16_t)strlen(buf));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_SERIALNUMBER,
+ hw->vpd.sn, (uint16_t)sizeof(hw->vpd.sn));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODEL, hw->vpd.id,
+ (uint16_t)sizeof(hw->vpd.id));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MODELDESCRIPTION,
+ hw->model_desc, (uint16_t)strlen(hw->model_desc));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_HARDWAREVERSION,
+ hw->hw_ver, (uint16_t)sizeof(hw->hw_ver));
+ numattrs++;
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_FIRMWAREVERSION,
+ hw->fwrev_str, (uint16_t)strlen(hw->fwrev_str));
+ numattrs++;
+
+ if (!csio_osname(buf, sizeof(buf))) {
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_OSNAMEVERSION,
+ buf, (uint16_t)strlen(buf));
+ numattrs++;
+ }
+
+ csio_append_attrib(&pld, FC_FDMI_HBA_ATTR_MAXCTPAYLOAD,
+ (uint8_t *)&maxpayload,
+ FC_FDMI_HBA_ATTR_MAXCTPAYLOAD_LEN);
+ len = (uint32_t)(pld - (uint8_t *)cmd);
+ numattrs++;
+ attrib_blk->numattrs = htonl(numattrs);
+
+ /* Submit FDMI RHBA request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_rhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi rhba req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/*
+ * csio_ln_fdmi_dhba_cbfn - DHBA completion
+ * @hw: HW context
+ * @fdmi_req: fdmi request
+ */
+static void
+csio_ln_fdmi_dhba_cbfn(struct csio_hw *hw, struct csio_ioreq *fdmi_req)
+{
+ struct csio_lnode *ln = fdmi_req->lnode;
+ void *cmd;
+ struct fc_fdmi_port_name *port_name;
+ uint32_t len;
+
+ if (fdmi_req->wr_status != FW_SUCCESS) {
+ csio_ln_dbg(ln, "WR error:%x in processing fdmi dhba cmd\n",
+ fdmi_req->wr_status);
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ }
+
+ if (!csio_is_rnode_ready(fdmi_req->rnode)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ return;
+ }
+ cmd = fdmi_req->dma_buf.vaddr;
+ if (ntohs(csio_ct_rsp(cmd)) != FC_FS_ACC) {
+ csio_ln_dbg(ln, "fdmi dhba cmd rejected reason %x expl %x\n",
+ csio_ct_reason(cmd), csio_ct_expl(cmd));
+ }
+
+ /* Send FDMI cmd to de-register any Port attributes if registered
+ * before
+ */
+
+ /* Prepare FDMI DPRT cmd */
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DPRT);
+ len = FC_CT_HDR_LEN;
+ port_name = (struct fc_fdmi_port_name *)csio_ct_get_pld(cmd);
+ memcpy(&port_name->portname, csio_ln_wwpn(ln), 8);
+ len += sizeof(*port_name);
+
+ /* Submit FDMI request */
+ spin_lock_irq(&hw->lock);
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dprt_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dprt req\n");
+ }
+ spin_unlock_irq(&hw->lock);
+}
+
+/**
+ * csio_ln_fdmi_start - Start an FDMI request.
+ * @ln: lnode
+ * @context: session context
+ *
+ * Issued with lock held.
+ */
+int
+csio_ln_fdmi_start(struct csio_lnode *ln, void *context)
+{
+ struct csio_ioreq *fdmi_req;
+ struct csio_rnode *fdmi_rn = (struct csio_rnode *)context;
+ void *cmd;
+ struct fc_fdmi_hba_identifier *hbaid;
+ uint32_t len;
+
+ if (!(ln->flags & CSIO_LNF_FDMI_ENABLE))
+ return -EPROTONOSUPPORT;
+
+ if (!csio_is_rnode_ready(fdmi_rn))
+ CSIO_INC_STATS(ln, n_fdmi_err);
+
+ /* Send FDMI cmd to de-register any HBA attributes if registered
+ * before
+ */
+
+ fdmi_req = ln->mgmt_req;
+ fdmi_req->lnode = ln;
+ fdmi_req->rnode = fdmi_rn;
+
+ /* Prepare FDMI DHBA cmd */
+ cmd = fdmi_req->dma_buf.vaddr;
+ memset(cmd, 0, FC_CT_HDR_LEN);
+ csio_fill_ct_iu(cmd, FC_FST_MGMT, FC_FDMI_SUBTYPE, FC_FDMI_DHBA);
+ len = FC_CT_HDR_LEN;
+
+ hbaid = (struct fc_fdmi_hba_identifier *)csio_ct_get_pld(cmd);
+ memcpy(&hbaid->id, csio_ln_wwpn(ln), 8);
+ len += sizeof(*hbaid);
+
+ /* Submit FDMI request */
+ if (csio_ln_mgmt_submit_req(fdmi_req, csio_ln_fdmi_dhba_cbfn,
+ FCOE_CT, &fdmi_req->dma_buf, len)) {
+ CSIO_INC_STATS(ln, n_fdmi_err);
+ csio_ln_dbg(ln, "Failed to issue fdmi dhba req\n");
+ }
+
+ return 0;
+}
+
+/*
+ * csio_ln_vnp_read_cbfn - vnp read completion handler.
+ * @hw: HW module.
+ * @mbp: Mailbox command.
+ *
+ * Reads vnp response and updates ln parameters.
+ */
+static void
+csio_ln_vnp_read_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = ((struct csio_lnode *)mbp->priv);
+ struct fw_fcoe_vnp_cmd *rsp = (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+ struct fc_els_csp *csp;
+ struct fc_els_cssp *clsp;
+ enum fw_retval retval;
+ __be32 nport_id;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "FCOE VNP read cmd returned error:0x%x\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+
+ memcpy(ln->mac, rsp->vnport_mac, sizeof(ln->mac));
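+	/*
+	 * With fabric-provided MAC addressing, the low three bytes of the
+	 * VN-Port MAC carry the 24-bit N_Port ID: copy them into the top
+	 * of a __be32, byte-swap, then shift out the stale low byte.
+	 */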
+ memcpy(&nport_id, &rsp->vnport_mac[3], sizeof(uint8_t)*3);
+ ln->nport_id = ntohl(nport_id);
+ ln->nport_id = ln->nport_id >> 8;
+
+ /* Update WWNs */
+ /*
+ * This may look like a duplication of what csio_fcoe_enable_link()
+ * does, but is absolutely necessary if the vnpi changes between
+ * a FCOE LINK UP and FCOE LINK DOWN.
+ */
+ memcpy(csio_ln_wwnn(ln), rsp->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), rsp->vnport_wwpn, 8);
+
+ /* Copy common sparam */
+ csp = (struct fc_els_csp *)rsp->cmn_srv_parms;
+ ln->ln_sparm.csp.sp_hi_ver = csp->sp_hi_ver;
+ ln->ln_sparm.csp.sp_lo_ver = csp->sp_lo_ver;
+ ln->ln_sparm.csp.sp_bb_cred = csp->sp_bb_cred;
+ ln->ln_sparm.csp.sp_features = csp->sp_features;
+ ln->ln_sparm.csp.sp_bb_data = csp->sp_bb_data;
+ ln->ln_sparm.csp.sp_r_a_tov = csp->sp_r_a_tov;
+ ln->ln_sparm.csp.sp_e_d_tov = csp->sp_e_d_tov;
+
+ /* Copy word 0 & word 1 of class sparam */
+ clsp = (struct fc_els_cssp *)rsp->clsp_word_0_1;
+ ln->ln_sparm.clsp[2].cp_class = clsp->cp_class;
+ ln->ln_sparm.clsp[2].cp_init = clsp->cp_init;
+ ln->ln_sparm.clsp[2].cp_recip = clsp->cp_recip;
+ ln->ln_sparm.clsp[2].cp_rdfs = clsp->cp_rdfs;
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ /* Send an event to update local attribs */
+ csio_lnode_async_event(ln, CSIO_LN_FC_ATTRIB_UPDATE);
+}
+
+/*
+ * csio_ln_vnp_read - Read vnp params.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_vnp_read(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ /* Allocate Mbox request */
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Prepare VNP Command */
+ csio_fcoe_vnp_read_init_mb(ln, mbp,
+ CSIO_MB_DEFAULT_TMO,
+ ln->fcf_flowid,
+ ln->vnp_flowid,
+ cbfn);
+
+ /* Issue MBOX cmd */
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Failed to issue mbox FCoE VNP command\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_fcoe_enable_link - Enable fcoe link.
+ * @ln: lnode
+ * @enable: enable/disable
+ * Issued with lock held.
+ * Issues mbox cmd to bring up FCOE link on port associated with given ln.
+ */
+static int
+csio_fcoe_enable_link(struct csio_lnode *ln, bool enable)
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+ enum fw_retval retval;
+ uint8_t portid;
+ uint8_t sub_op;
+ struct fw_fcoe_link_cmd *lcmd;
+ int i;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ portid = ln->portid;
+ sub_op = enable ? FCOE_LINK_UP : FCOE_LINK_DOWN;
+
+ csio_dbg(hw, "bringing FCOE LINK %s on Port:%d\n",
+ sub_op ? "UP" : "DOWN", portid);
+
+ csio_write_fcoe_link_cond_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ portid, sub_op, 0, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE LINK cmd on port[%d]\n",
+ portid);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ retval = csio_mb_fw_retval(mbp);
+ if (retval != FW_SUCCESS) {
+ csio_err(hw,
+ "FCOE LINK %s cmd on port[%d] failed with "
+ "ret:x%x\n", sub_op ? "UP" : "DOWN", portid, retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (!enable)
+ goto out;
+
+ lcmd = (struct fw_fcoe_link_cmd *)mbp->mb;
+
+ memcpy(csio_ln_wwnn(ln), lcmd->vnport_wwnn, 8);
+ memcpy(csio_ln_wwpn(ln), lcmd->vnport_wwpn, 8);
+
+ for (i = 0; i < CSIO_MAX_PPORTS; i++)
+ if (hw->pport[i].portid == portid)
+ memcpy(hw->pport[i].mac, lcmd->phy_mac, 6);
+
+out:
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_read_fcf_cbfn - Read fcf parameters.
+ * @hw: HW module.
+ * @mbp: Mailbox command.
+ *
+ * Reads the fcf response and updates ln fcf information.
+ */
+static void
+csio_ln_read_fcf_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ struct csio_lnode *ln = (struct csio_lnode *)mbp->priv;
+ struct csio_fcf_info *fcf_info;
+ struct fw_fcoe_fcf_cmd *rsp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+ enum fw_retval retval;
+
+ retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+ if (retval != FW_SUCCESS) {
+ csio_ln_err(ln, "FCOE FCF cmd failed with ret x%x\n",
+ retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return;
+ }
+
+ spin_lock_irq(&hw->lock);
+ fcf_info = ln->fcfinfo;
+ fcf_info->priority = FW_FCOE_FCF_CMD_PRIORITY_GET(
+ ntohs(rsp->priority_pkd));
+ fcf_info->vf_id = ntohs(rsp->vf_id);
+ fcf_info->vlan_id = rsp->vlan_id;
+ fcf_info->max_fcoe_size = ntohs(rsp->max_fcoe_size);
+ fcf_info->fka_adv = be32_to_cpu(rsp->fka_adv);
+ fcf_info->fcfi = FW_FCOE_FCF_CMD_FCFI_GET(ntohl(rsp->op_to_fcfi));
+ fcf_info->fpma = FW_FCOE_FCF_CMD_FPMA_GET(rsp->fpma_to_portid);
+ fcf_info->spma = FW_FCOE_FCF_CMD_SPMA_GET(rsp->fpma_to_portid);
+ fcf_info->login = FW_FCOE_FCF_CMD_LOGIN_GET(rsp->fpma_to_portid);
+ fcf_info->portid = FW_FCOE_FCF_CMD_PORTID_GET(rsp->fpma_to_portid);
+ memcpy(fcf_info->fc_map, rsp->fc_map, sizeof(fcf_info->fc_map));
+ memcpy(fcf_info->mac, rsp->mac, sizeof(fcf_info->mac));
+ memcpy(fcf_info->name_id, rsp->name_id, sizeof(fcf_info->name_id));
+ memcpy(fcf_info->fabric, rsp->fabric, sizeof(fcf_info->fabric));
+ memcpy(fcf_info->spma_mac, rsp->spma_mac, sizeof(fcf_info->spma_mac));
+
+ spin_unlock_irq(&hw->lock);
+
+ mempool_free(mbp, hw->mb_mempool);
+}
+
+/*
+ * csio_ln_read_fcf_entry - Read fcf entry.
+ * @ln: lnode
+ * @cbfn: Completion handler.
+ *
+ * Issued with lock held.
+ */
+static int
+csio_ln_read_fcf_entry(struct csio_lnode *ln,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_hw *hw = ln->hwp;
+ struct csio_mb *mbp;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Get FCoE FCF information */
+ csio_fcoe_read_fcf_init_mb(ln, mbp, CSIO_MB_DEFAULT_TMO,
+ ln->portid, ln->fcf_flowid, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "failed to issue FCOE FCF cmd\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * csio_handle_link_up - Logical Linkup event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none.
+ *
+ * This event is received from the FW when a virtual link is established
+ * between the physical port [ENode] and the FCF. If it is a new vnpi, a
+ * local node object is created on this FCF and set to [ONLINE] state.
+ * The lnode then waits for a FW_RDEV_CMD event indicating that fabric
+ * login has completed, and moves to [READY] state.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_handle_link_up(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_lnode *ln = NULL;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ /* Pick lnode based on portid */
+ ln = csio_ln_lookup_by_portid(hw, portid);
+ if (!ln) {
+ csio_err(hw, "failed to lookup fcoe lnode on port:%d\n",
+ portid);
+ CSIO_DB_ASSERT(0);
+ return;
+ }
+
+		/* Check if lnode has valid vnp flowid */
+		if (ln->vnp_flowid != CSIO_INVALID_IDX) {
+			/* New VN-Port */
+			spin_unlock_irq(&hw->lock);
+			ln = csio_lnode_alloc(hw);
+			spin_lock_irq(&hw->lock);
+			if (!ln) {
+				csio_err(hw,
+					"failed to allocate fcoe lnode "
+					"for port:%d vnpi:x%x\n",
+					portid, vnpi);
+				CSIO_DB_ASSERT(0);
+				return;
+			}
+			ln->portid = portid;
+		}
+ ln->vnp_flowid = vnpi;
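+		/* Keep the VN-Port index in the low 16 bits of dev_num */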
+ ln->dev_num &= ~0xFFFF;
+ ln->dev_num |= vnpi;
+ }
+
+	/* Initialize fcfi */
+ ln->fcf_flowid = fcfi;
+
+ csio_info(hw, "Port:%d - FCOE LINK UP\n", portid);
+
+ CSIO_INC_STATS(ln, n_link_up);
+
+ /* Send LINKUP event to SM */
+ csio_post_event(&ln->sm, CSIO_LNE_LINKUP);
+}
+
+/*
+ * csio_post_event_rns
+ * @ln - FCOE lnode
+ * @evt - Given rnode event
+ * Returns - none
+ *
+ * Posts given rnode event to all FCOE rnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_post_event_rns(struct csio_lnode *ln, enum csio_rn_ev evt)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_post_event(&rn->sm, evt);
+ }
+}
+
+/*
+ * csio_cleanup_rns
+ * @ln - FCOE lnode
+ * Returns - none
+ *
+ * Frees all FCOE rnodes connected with given Lnode.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_cleanup_rns(struct csio_lnode *ln)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp, *next_rn;
+ struct csio_rnode *rn;
+
+ list_for_each_safe(tmp, next_rn, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ csio_put_rnode(ln, rn);
+ }
+}
+
+/*
+ * csio_post_event_lns
+ * @ln - FCOE lnode
+ * @evt - Given lnode event
+ * Returns - none
+ *
+ * Posts given lnode event to all FCOE lnodes connected with given Lnode.
+ * This routine is invoked when lnode receives LINK_DOWN/DOWN_LINK/CLOSE
+ * event.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_post_event_lns(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct list_head *tmp;
+ struct csio_lnode *cln, *sln;
+
+ /* If NPIV lnode, send evt only to that and return */
+ if (csio_is_npiv_ln(ln)) {
+ csio_post_event(&ln->sm, evt);
+ return;
+ }
+
+ sln = ln;
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &sln->cln_head) {
+ cln = (struct csio_lnode *) tmp;
+ csio_post_event(&cln->sm, evt);
+ }
+
+ /* Send evt to parent lnode */
+ csio_post_event(&ln->sm, evt);
+}
+
+/*
+ * csio_ln_down - Local nport is down
+ * @ln - FCOE Lnode
+ * Returns - none
+ *
+ * Sends LINK_DOWN events to the lnode and its associated NPIV lnodes.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_ln_down(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_LINK_DOWN);
+}
+
+/*
+ * csio_handle_link_down - Logical Linkdown event.
+ * @hw - HW module.
+ * @portid - Physical port number
+ * @fcfi - FCF index.
+ * @vnpi - VNP index.
+ * Returns - none
+ *
+ * This event is received from the FW when the virtual link goes down
+ * between the physical port [ENode] and the FCF. The lnode and its
+ * associated NPIV lnodes hosted on this vnpi [VN-Port] are
+ * de-instantiated.
+ *
+ * Called with the hw lock held.
+ */
+static void
+csio_handle_link_down(struct csio_hw *hw, uint8_t portid, uint32_t fcfi,
+ uint32_t vnpi)
+{
+ struct csio_fcf_info *fp;
+ struct csio_lnode *ln;
+
+ /* Lookup lnode based on vnpi */
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (ln) {
+ fp = ln->fcfinfo;
+ CSIO_INC_STATS(ln, n_link_down);
+
+		/* Warn if linkdown is received while lnode is not in ready state */
+ if (!csio_is_lnode_ready(ln)) {
+ csio_ln_warn(ln,
+ "warn: FCOE link is already in offline "
+ "Ignoring Fcoe linkdown event on portid %d\n",
+ portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* Verify portid */
+ if (fp->portid != portid) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid port %d\n", portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ /* verify fcfi */
+ if (ln->fcf_flowid != fcfi) {
+ csio_ln_warn(ln,
+ "warn: FCOE linkdown recv with "
+ "invalid fcfi x%x\n", fcfi);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ return;
+ }
+
+ csio_info(hw, "Port:%d - FCOE LINK DOWN\n", portid);
+
+ /* Send LINK_DOWN event to lnode s/m */
+ csio_ln_down(ln);
+
+ return;
+ } else {
+ csio_warn(hw,
+ "warn: FCOE linkdown recv with invalid vnpi x%x\n",
+ vnpi);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ }
+}
+
+/*
+ * csio_is_lnode_ready - Checks FCOE lnode is in ready state.
+ * @ln: Lnode module
+ *
+ * Returns True if FCOE lnode is in ready state.
+ */
+int
+csio_is_lnode_ready(struct csio_lnode *ln)
+{
+ return (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready));
+}
+
+/*****************************************************************************/
+/* START: Lnode SM */
+/*****************************************************************************/
+/*
+ * csio_lns_uninit - The lnode in uninit state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Processes the given event for an lnode currently in "uninit" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_uninit(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_online - The lnode in online state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Processes the given event for an lnode currently in "online" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_online(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_ln_warn(ln,
+ "warn: FCOE link is up already "
+ "Ignoring linkup on port:%d\n", ln->portid);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_set_state(&ln->sm, csio_lns_ready);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKUP);
+ spin_lock_irq(&hw->lock);
+
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ /* Fall through */
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_ready - The lnode in ready state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Processes the given event for an lnode currently in "ready" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_ready(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_FAB_INIT_DONE:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[ready].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_DOWN_LINK:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+
+		/* The host needs to issue aborts in case the FW has not
+		 * returned WRs with status "ABORTED".
+		 */
+ spin_unlock_irq(&hw->lock);
+ csio_lnode_async_event(ln, CSIO_LN_FC_LINKDOWN);
+ spin_lock_irq(&hw->lock);
+
+ if (csio_is_phys_ln(ln)) {
+ /* Remove FCF entry */
+ list_del_init(&ln->fcfinfo->list);
+ }
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ case CSIO_LNE_LOGO:
+ csio_set_state(&ln->sm, csio_lns_offline);
+ csio_post_event_rns(ln, CSIO_RNFE_DOWN);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[uninit].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*
+ * csio_lns_offline - The lnode in offline state.
+ * @ln - FCOE lnode.
+ * @evt - Event to be processed.
+ *
+ * Processes the given event for an lnode currently in "offline" state.
+ * Invoked with HW lock held.
+ * Return - none.
+ */
+static void
+csio_lns_offline(struct csio_lnode *ln, enum csio_ln_ev evt)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_lnode *rln = hw->rln;
+ int rv;
+
+ CSIO_INC_STATS(ln, n_evt_sm[evt]);
+ switch (evt) {
+ case CSIO_LNE_LINKUP:
+ csio_set_state(&ln->sm, csio_lns_online);
+ /* Read FCF only for physical lnode */
+ if (csio_is_phys_ln(ln)) {
+ rv = csio_ln_read_fcf_entry(ln,
+ csio_ln_read_fcf_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ break;
+ }
+
+ /* Add FCF record */
+ list_add_tail(&ln->fcfinfo->list, &rln->fcf_lsthead);
+ }
+
+ rv = csio_ln_vnp_read(ln, csio_ln_vnp_read_cbfn);
+ if (rv != 0) {
+ /* TODO: Send HW RESET event */
+ CSIO_INC_STATS(ln, n_err);
+ }
+ break;
+
+ case CSIO_LNE_LINK_DOWN:
+ case CSIO_LNE_DOWN_LINK:
+ case CSIO_LNE_LOGO:
+ csio_ln_dbg(ln,
+ "ignoring event %d recv from did x%x"
+ "in ln state[offline].\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_drop);
+ break;
+
+ case CSIO_LNE_CLOSE:
+ csio_set_state(&ln->sm, csio_lns_uninit);
+ csio_post_event_rns(ln, CSIO_RNFE_CLOSE);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "unexp ln event %d recv from did:x%x in "
+ "ln state[offline]\n", evt, ln->nport_id);
+ CSIO_INC_STATS(ln, n_evt_unexp);
+ CSIO_DB_ASSERT(0);
+ break;
+ } /* switch event */
+}
+
+/*****************************************************************************/
+/* END: Lnode SM */
+/*****************************************************************************/
+
+static void
+csio_free_fcfinfo(struct kref *kref)
+{
+ struct csio_fcf_info *fcfinfo = container_of(kref,
+ struct csio_fcf_info, kref);
+ kfree(fcfinfo);
+}
+
+/* Helper routines for attributes */
+/*
+ * csio_lnode_state_to_str - Get current state of FCOE lnode.
+ * @ln - lnode
+ * @str - state of lnode.
+ *
+ * Copies a string form of the lnode state into @str.
+ */
+void
+csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str)
+{
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_uninit)) {
+ strcpy(str, "UNINIT");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_ready)) {
+ strcpy(str, "READY");
+ return;
+ }
+ if (csio_get_state(ln) == ((csio_sm_state_t)csio_lns_offline)) {
+ strcpy(str, "OFFLINE");
+ return;
+ }
+ strcpy(str, "UNKNOWN");
+} /* csio_lnode_state_to_str */
+
+
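+/*
+ * csio_get_phy_port_stats - Read FCoE statistics for a physical port.
+ * @hw: HW module
+ * @portid: Physical port id
+ * @port_stats: Buffer to hold the returned firmware port statistics.
+ */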
+int
+csio_get_phy_port_stats(struct csio_hw *hw, uint8_t portid,
+ struct fw_fcoe_port_stats *port_stats)
+{
+ struct csio_mb *mbp;
+ struct fw_fcoe_port_cmd_params portparams;
+ enum fw_retval retval;
+ int idx;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "FCoE FCF PARAMS command out of memory!\n");
+ return -EINVAL;
+ }
+ portparams.portid = portid;
+
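+	/*
+	 * The 16 port stats registers are read in three chunks of
+	 * 6 + 6 + 4, up to six registers per mailbox command.
+	 */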
+ for (idx = 1; idx <= 3; idx++) {
+ portparams.idx = (idx-1)*6 + 1;
+ portparams.nstats = 6;
+ if (idx == 3)
+ portparams.nstats = 4;
+ csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
+ &portparams, NULL);
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of FCoE port params failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_mb_process_portparams_rsp(hw, mbp, &retval,
+ &portparams, port_stats);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+ return 0;
+}
+
+/*
+ * csio_ln_mgmt_wr_handler - Mgmt Work Request handler.
+ * @hw - HW module.
+ * @wr - WR.
+ * @len - WR len.
+ *
+ * This handler is invoked when an outstanding mgmt WR is completed.
+ * It is invoked in the context of the FW event worker thread for every
+ * mgmt event received.
+ * Return - none.
+ */
+
+static void
+csio_ln_mgmt_wr_handler(struct csio_hw *hw, void *wr, uint32_t len)
+{
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ struct csio_ioreq *io_req = NULL;
+ struct fw_fcoe_els_ct_wr *wr_cmd;
+
+ wr_cmd = (struct fw_fcoe_els_ct_wr *) wr;
+
+ if (len < sizeof(struct fw_fcoe_els_ct_wr)) {
+ csio_err(mgmtm->hw,
+ "Invalid ELS CT WR length recvd, len:%x\n", len);
+ mgmtm->stats.n_err++;
+ return;
+ }
+
+ io_req = (struct csio_ioreq *) ((uintptr_t) wr_cmd->cookie);
+ io_req->wr_status = csio_wr_status(wr_cmd);
+
+	/* Look up whether the ioreq exists in our active queue */
+ spin_lock_irq(&hw->lock);
+ if (csio_mgmt_req_lookup(mgmtm, io_req) != 0) {
+ csio_err(mgmtm->hw,
+ "Error- Invalid IO handle recv in WR. handle: %p\n",
+ io_req);
+ mgmtm->stats.n_err++;
+ spin_unlock_irq(&hw->lock);
+ return;
+ }
+
+ /* Dequeue from active queue */
+ list_del_init(&io_req->sm.sm_list);
+ mgmtm->stats.n_active--;
+ spin_unlock_irq(&hw->lock);
+
+ /* io_req will be freed by completion handler */
+ if (io_req->io_cbfn)
+ io_req->io_cbfn(hw, io_req);
+}
+
+/**
+ * csio_fcoe_fwevt_handler - Event handler for Firmware FCoE events.
+ * @hw: HW module
+ * @cpl_op: CPL opcode
+ * @cmd: FW cmd/WR.
+ *
+ * Process received FCoE cmd/WR event from FW.
+ */
+void
+csio_fcoe_fwevt_handler(struct csio_hw *hw, __u8 cpl_op, __be64 *cmd)
+{
+ struct csio_lnode *ln;
+ struct csio_rnode *rn;
+ uint8_t portid, opcode = *(uint8_t *)cmd;
+ struct fw_fcoe_link_cmd *lcmd;
+ struct fw_wr_hdr *wr;
+ struct fw_rdev_wr *rdev_wr;
+ enum fw_fcoe_link_status lstatus;
+ uint32_t fcfi, rdev_flowid, vnpi;
+ enum csio_ln_ev evt;
+
+ if (cpl_op == CPL_FW6_MSG && opcode == FW_FCOE_LINK_CMD) {
+
+ lcmd = (struct fw_fcoe_link_cmd *)cmd;
+ lstatus = lcmd->lstatus;
+ portid = FW_FCOE_LINK_CMD_PORTID_GET(
+ ntohl(lcmd->op_to_portid));
+ fcfi = FW_FCOE_LINK_CMD_FCFI_GET(ntohl(lcmd->sub_opcode_fcfi));
+ vnpi = FW_FCOE_LINK_CMD_VNPI_GET(ntohl(lcmd->vnpi_pkd));
+
+ if (lstatus == FCOE_LINKUP) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_up(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+			/* HW unlock here */
+
+ } else if (lstatus == FCOE_LINKDOWN) {
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ csio_handle_link_down(hw, portid, fcfi, vnpi);
+ spin_unlock_irq(&hw->lock);
+			/* HW unlock here */
+ } else {
+ csio_warn(hw, "Unexpected FCOE LINK status:0x%x\n",
+ lcmd->lstatus);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_PLD) {
+ wr = (struct fw_wr_hdr *) (cmd + 4);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi))
+ == FW_RDEV_WR) {
+
+ rdev_wr = (struct fw_rdev_wr *) (cmd + 4);
+
+ rdev_flowid = FW_RDEV_WR_FLOWID_GET(
+ ntohl(rdev_wr->alloc_to_len16));
+ vnpi = FW_RDEV_WR_ASSOC_FLOWID_GET(
+ ntohl(rdev_wr->flags_to_assoc_flowid));
+
+ csio_dbg(hw,
+ "FW_RDEV_WR: flowid:x%x ev_cause:x%x "
+ "vnpi:0x%x\n", rdev_flowid,
+ rdev_wr->event_cause, vnpi);
+
+ if (rdev_wr->protocol != PROT_FCOE) {
+ csio_err(hw,
+ "FW_RDEV_WR: invalid proto:x%x "
+ "received with flowid:x%x\n",
+ rdev_wr->protocol,
+ rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ return;
+ }
+
+ /* HW lock here */
+ spin_lock_irq(&hw->lock);
+ ln = csio_ln_lookup_by_vnpi(hw, vnpi);
+ if (!ln) {
+ csio_err(hw,
+ "FW_DEV_WR: invalid vnpi:x%x received "
+ "with flowid:x%x\n", vnpi, rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ rn = csio_confirm_rnode(ln, rdev_flowid,
+ &rdev_wr->u.fcoe_rdev);
+ if (!rn) {
+ csio_ln_dbg(ln,
+ "Failed to confirm rnode "
+ "for flowid:x%x\n", rdev_flowid);
+ CSIO_INC_STATS(hw, n_evt_drop);
+ goto out_pld;
+ }
+
+ /* save previous event for debugging */
+ ln->prev_evt = ln->cur_evt;
+ ln->cur_evt = rdev_wr->event_cause;
+ CSIO_INC_STATS(ln, n_evt_fw[rdev_wr->event_cause]);
+
+ /* Translate all the fabric events to lnode SM events */
+ evt = CSIO_FWE_TO_LNE(rdev_wr->event_cause);
+ if (evt) {
+ csio_ln_dbg(ln,
+ "Posting event to lnode event:%d "
+ "cause:%d flowid:x%x\n", evt,
+ rdev_wr->event_cause, rdev_flowid);
+ csio_post_event(&ln->sm, evt);
+ }
+
+ /* Handover event to rn SM here. */
+ csio_rnode_fwevt_handler(rn, rdev_wr->event_cause);
+out_pld:
+ spin_unlock_irq(&hw->lock);
+ return;
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else if (cpl_op == CPL_FW6_MSG) {
+ wr = (struct fw_wr_hdr *) (cmd);
+ if (FW_WR_OP_GET(be32_to_cpu(wr->hi)) == FW_FCOE_ELS_CT_WR) {
+ csio_ln_mgmt_wr_handler(hw, wr,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ } else {
+ csio_warn(hw, "unexpected WR op(0x%x) recv\n",
+ FW_WR_OP_GET(be32_to_cpu((wr->hi))));
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+ } else {
+ csio_warn(hw, "unexpected CPL op(0x%x) recv\n", opcode);
+ CSIO_INC_STATS(hw, n_cpl_unexp);
+ }
+}
+
+/**
+ * csio_lnode_start - Kickstart lnode discovery.
+ * @ln: lnode
+ *
+ * This routine kickstarts the discovery by issuing an FCOE_LINK (up) command.
+ */
+int
+csio_lnode_start(struct csio_lnode *ln)
+{
+ int rv = 0;
+ if (csio_is_phys_ln(ln) && !(ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ rv = csio_fcoe_enable_link(ln, 1);
+ ln->flags |= CSIO_LNF_LINK_ENABLE;
+ }
+
+ return rv;
+}
+
+/**
+ * csio_lnode_stop - Stop the lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to stop lnode and its associated NPIV
+ * lnodes.
+ */
+void
+csio_lnode_stop(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_DOWN_LINK);
+ if (csio_is_phys_ln(ln) && (ln->flags & CSIO_LNF_LINK_ENABLE)) {
+ csio_fcoe_enable_link(ln, 0);
+ ln->flags &= ~CSIO_LNF_LINK_ENABLE;
+ }
+ csio_ln_dbg(ln, "stopping ln :%p\n", ln);
+}
+
+/**
+ * csio_lnode_close - Close an lnode.
+ * @ln: lnode
+ *
+ * This routine is invoked by HW module to close an lnode and its
+ * associated NPIV lnodes. Lnode and its associated NPIV lnodes are
+ * set to uninitialized state.
+ */
+void
+csio_lnode_close(struct csio_lnode *ln)
+{
+ csio_post_event_lns(ln, CSIO_LNE_CLOSE);
+ if (csio_is_phys_ln(ln))
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+
+ csio_ln_dbg(ln, "closed ln :%p\n", ln);
+}
+
+/*
+ * csio_ln_prep_ecwr - Prepare ELS/CT WR.
+ * @io_req - IO request.
+ * @wr_len - WR len
+ * @immd_len - WR immediate data
+ * @sub_op - Sub opcode
+ * @sid - source portid.
+ * @did - destination portid
+ * @flow_id - flowid
+ * @fw_wr - ELS/CT WR to be prepared.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_prep_ecwr(struct csio_ioreq *io_req, uint32_t wr_len,
+ uint32_t immd_len, uint8_t sub_op, uint32_t sid,
+ uint32_t did, uint32_t flow_id, uint8_t *fw_wr)
+{
+ struct fw_fcoe_els_ct_wr *wr;
+ __be32 port_id;
+
+ wr = (struct fw_fcoe_els_ct_wr *)fw_wr;
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_FCOE_ELS_CT_WR) |
+ FW_FCOE_ELS_CT_WR_IMMDLEN(immd_len));
+
+ wr_len = DIV_ROUND_UP(wr_len, 16);
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(flow_id) |
+ FW_WR_LEN16(wr_len));
+ wr->els_ct_type = sub_op;
+ wr->ctl_pri = 0;
+ wr->cp_en_class = 0;
+ wr->cookie = io_req->fw_handle;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(
+ io_req->lnode->hwp, io_req->iq_idx));
+ wr->fl_to_sp = FW_FCOE_ELS_CT_WR_SP(1);
+ wr->tmo_val = (uint8_t) io_req->tmo;
+ port_id = htonl(sid);
+ memcpy(wr->l_id, PORT_ID_PTR(port_id), 3);
+ port_id = htonl(did);
+ memcpy(wr->r_id, PORT_ID_PTR(port_id), 3);
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(io_req->dma_buf.len);
+ wr->rsp_dmaaddr = cpu_to_be64(io_req->dma_buf.paddr);
+ return 0;
+}
+
+/*
+ * csio_ln_mgmt_submit_wr - Post elsct work request.
+ * @mgmtm - mgmtm
+ * @io_req - io request.
+ * @sub_op - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ * Prepares an ELS/CT work request and sends it to the FW.
+ * Returns: 0 - on success
+ */
+static int
+csio_ln_mgmt_submit_wr(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req,
+ uint8_t sub_op, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_wr_pair wrp;
+ struct csio_lnode *ln = io_req->lnode;
+ struct csio_rnode *rn = io_req->rnode;
+ struct csio_hw *hw = mgmtm->hw;
+ uint8_t fw_wr[64];
+ struct ulptx_sgl dsgl;
+ uint32_t wr_size = 0;
+ uint8_t im_len = 0;
+ uint32_t wr_off = 0;
+
+ int ret = 0;
+
+ /* Calculate WR Size for this ELS REQ */
+ wr_size = sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Send as immediate data if pld < 256 */
+ if (pld_len < 256) {
+ wr_size += ALIGN(pld_len, 8);
+ im_len = (uint8_t)pld_len;
+ } else
+ wr_size += sizeof(struct ulptx_sgl);
+
+ /* Roundup WR size in units of 16 bytes */
+ wr_size = ALIGN(wr_size, 16);
+
+ /* Get WR to send ELS REQ */
+ ret = csio_wr_get(hw, mgmtm->eq_idx, wr_size, &wrp);
+ if (ret != 0) {
+ csio_err(hw, "Failed to get WR for ec_req %p ret:%d\n",
+ io_req, ret);
+ return ret;
+ }
+
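+	/*
+	 * The WR slot returned in wrp may wrap around the end of the
+	 * egress queue, so all writes below go through
+	 * csio_wr_copy_to_wrp(), which copies across any split, rather
+	 * than a plain memcpy.
+	 */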
+ /* Prepare Generic WR used by all ELS/CT cmd */
+ csio_ln_prep_ecwr(io_req, wr_size, im_len, sub_op,
+ ln->nport_id, rn->nport_id,
+ csio_rn_flowid(rn),
+ &fw_wr[0]);
+
+ /* Copy ELS/CT WR CMD */
+ csio_wr_copy_to_wrp(&fw_wr[0], &wrp, wr_off,
+ sizeof(struct fw_fcoe_els_ct_wr));
+ wr_off += sizeof(struct fw_fcoe_els_ct_wr);
+
+ /* Copy payload to Immediate section of WR */
+ if (im_len)
+ csio_wr_copy_to_wrp(pld->vaddr, &wrp, wr_off, im_len);
+ else {
+ /* Program DSGL to dma payload */
+ dsgl.cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) |
+ ULPTX_MORE | ULPTX_NSGE(1));
+ dsgl.len0 = cpu_to_be32(pld_len);
+ dsgl.addr0 = cpu_to_be64(pld->paddr);
+ csio_wr_copy_to_wrp(&dsgl, &wrp, ALIGN(wr_off, 8),
+ sizeof(struct ulptx_sgl));
+ }
+
+ /* Issue work request to xmit ELS/CT req to FW */
+ csio_wr_issue(mgmtm->hw, mgmtm->eq_idx, false);
+ return ret;
+}
+
+/*
+ * csio_ln_mgmt_submit_req - Submit FCOE Mgmt request.
+ * @io_req - IO Request
+ * @io_cbfn - Completion handler.
+ * @req_type - ELS or CT request type
+ * @pld - Dma Payload buffer
+ * @pld_len - Payload len
+ *
+ * This API is used to submit a management ELS/CT request.
+ * Called with the hw lock held.
+ * Returns: 0 - on success
+ * -ENOMEM - on error.
+ */
+static int
+csio_ln_mgmt_submit_req(struct csio_ioreq *io_req,
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *),
+ enum fcoe_cmn_type req_type, struct csio_dma_buf *pld,
+ uint32_t pld_len)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(io_req->lnode);
+ struct csio_mgmtm *mgmtm = csio_hw_to_mgmtm(hw);
+ int rv;
+
+ io_req->io_cbfn = io_cbfn; /* Upper layer callback handler */
+ io_req->fw_handle = (uintptr_t) (io_req);
+ io_req->eq_idx = mgmtm->eq_idx;
+ io_req->iq_idx = mgmtm->iq_idx;
+
+ rv = csio_ln_mgmt_submit_wr(mgmtm, io_req, req_type, pld, pld_len);
+ if (rv == 0) {
+ list_add_tail(&io_req->sm.sm_list, &mgmtm->active_q);
+ mgmtm->stats.n_active++;
+ }
+ return rv;
+}
+
+/*
+ * csio_ln_fdmi_init - FDMI Init entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_init(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_dma_buf *dma_buf;
+
+ /* Allocate MGMT request required for FDMI */
+ ln->mgmt_req = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ln->mgmt_req) {
+ csio_ln_err(ln, "Failed to alloc ioreq for FDMI\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ return -ENOMEM;
+ }
+
+ /* Allocate Dma buffers for FDMI response Payload */
+ dma_buf = &ln->mgmt_req->dma_buf;
+ dma_buf->len = 2048;
+ dma_buf->vaddr = pci_alloc_consistent(hw->pdev, dma_buf->len,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw, "Failed to alloc DMA buffer for FDMI!\n");
+ kfree(ln->mgmt_req);
+ ln->mgmt_req = NULL;
+ return -ENOMEM;
+ }
+
+ ln->flags |= CSIO_LNF_FDMI_ENABLE;
+ return 0;
+}
+
+/*
+ * csio_ln_fdmi_exit - FDMI exit entry point.
+ * @ln: lnode
+ */
+static int
+csio_ln_fdmi_exit(struct csio_lnode *ln)
+{
+ struct csio_dma_buf *dma_buf;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (!ln->mgmt_req)
+ return 0;
+
+ dma_buf = &ln->mgmt_req->dma_buf;
+ if (dma_buf->vaddr)
+ pci_free_consistent(hw->pdev, dma_buf->len, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ln->mgmt_req);
+ return 0;
+}
+
+int
+csio_scan_done(struct csio_lnode *ln, unsigned long ticks,
+ unsigned long time, unsigned long max_scan_ticks,
+ unsigned long delta_scan_ticks)
+{
+ int rv = 0;
+
+ if (time >= max_scan_ticks)
+ return 1;
+
+ if (!ln->tgt_scan_tick)
+ ln->tgt_scan_tick = ticks;
+
+ if ((ticks - ln->tgt_scan_tick) >= delta_scan_ticks) {
+ if (!ln->last_scan_ntgts)
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ else {
+ if (ln->last_scan_ntgts == ln->n_scsi_tgts)
+ return 1;
+
+ ln->last_scan_ntgts = ln->n_scsi_tgts;
+ }
+ ln->tgt_scan_tick = ticks;
+ }
+ return rv;
+}
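+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver): how a
+ * periodic worker might drive csio_scan_done() above. The 60s cap and
+ * 5s stability window are assumed values, not driver defaults;
+ * 'scan_start' is the jiffies timestamp recorded when the scan began.
+ */
+static int __maybe_unused
+csio_example_scan_poll(struct csio_lnode *ln, unsigned long scan_start)
+{
+ unsigned long now = jiffies;
+
+ /* Returns 1 once the scan has run 60s, or once the discovered
+ * target count stays unchanged across two 5s windows.
+ */
+ return csio_scan_done(ln, now, now - scan_start,
+ msecs_to_jiffies(60000),
+ msecs_to_jiffies(5000));
+}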
+
+/*
+ * csio_notify_lnodes:
+ * @hw: HW module
+ * @note: Notification
+ *
+ * Called from the HW SM to fan out notifications to the
+ * Lnode SM. Since the HW SM is entered with lock held,
+ * there is no need to hold locks here.
+ *
+ */
+void
+csio_notify_lnodes(struct csio_hw *hw, enum csio_ln_notify note)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying all nodes of event %d\n", note);
+
+ /* Traverse children lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+
+ switch (note) {
+ case CSIO_LN_NOTIFY_HWREADY:
+ csio_lnode_start(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWRESET:
+ case CSIO_LN_NOTIFY_HWREMOVE:
+ csio_lnode_close(ln);
+ break;
+
+ case CSIO_LN_NOTIFY_HWSTOP:
+ csio_lnode_stop(ln);
+ break;
+
+ default:
+ break;
+
+ }
+ }
+}
+
+/*
+ * csio_disable_lnodes:
+ * @hw: HW module
+ * @portid: port id
+ * @disable: disable/enable flag.
+ * If disable=1, disables all lnodes hosted on the given physical port,
+ * otherwise enables all lnodes on the given physical port.
+ * This routine needs to be called with the hw lock held.
+ */
+void
+csio_disable_lnodes(struct csio_hw *hw, uint8_t portid, bool disable)
+{
+ struct list_head *tmp;
+ struct csio_lnode *ln;
+
+ csio_dbg(hw, "Notifying event to all nodes of port:%d\n", portid);
+
+ /* Traverse sibling lnodes list and send evt */
+ list_for_each(tmp, &hw->sln_head) {
+ ln = (struct csio_lnode *) tmp;
+ if (ln->portid != portid)
+ continue;
+
+ if (disable)
+ csio_lnode_stop(ln);
+ else
+ csio_lnode_start(ln);
+ }
+}
+
+/*
+ * csio_ln_init - Initialize an lnode.
+ * @ln: lnode
+ *
+ */
+static int
+csio_ln_init(struct csio_lnode *ln)
+{
+ int rv = -EINVAL;
+ struct csio_lnode *rln, *pln;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_init_state(&ln->sm, csio_lns_uninit);
+ ln->vnp_flowid = CSIO_INVALID_IDX;
+ ln->fcf_flowid = CSIO_INVALID_IDX;
+
+ if (csio_is_root_ln(ln)) {
+
+ /* This is the lnode used during initialization */
+
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info), GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF record\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ INIT_LIST_HEAD(&ln->fcf_lsthead);
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+
+ } else { /* Either a non-root physical or a virtual lnode */
+
+ /*
+ * The rest is common for non-root physical and NPIV lnodes.
+ * Just get references to all other modules
+ */
+ rln = csio_root_lnode(ln);
+
+ if (csio_is_npiv_ln(ln)) {
+ /* NPIV */
+ pln = csio_parent_lnode(ln);
+ kref_get(&pln->fcfinfo->kref);
+ ln->fcfinfo = pln->fcfinfo;
+ } else {
+ /* Another non-root physical lnode (FCF) */
+ ln->fcfinfo = kzalloc(sizeof(struct csio_fcf_info),
+ GFP_KERNEL);
+ if (!ln->fcfinfo) {
+ csio_ln_err(ln, "Failed to alloc FCF info\n");
+ CSIO_INC_STATS(hw, n_err_nomem);
+ goto err;
+ }
+
+ kref_init(&ln->fcfinfo->kref);
+
+ if (csio_fdmi_enable && csio_ln_fdmi_init(ln))
+ goto err;
+ }
+
+ } /* if (!csio_is_root_ln(ln)) */
+
+ return 0;
+err:
+ return rv;
+}
+
+static void
+csio_ln_exit(struct csio_lnode *ln)
+{
+ struct csio_lnode *pln;
+
+ csio_cleanup_rns(ln);
+ if (csio_is_npiv_ln(ln)) {
+ pln = csio_parent_lnode(ln);
+ kref_put(&pln->fcfinfo->kref, csio_free_fcfinfo);
+ } else {
+ kref_put(&ln->fcfinfo->kref, csio_free_fcfinfo);
+ if (csio_fdmi_enable)
+ csio_ln_fdmi_exit(ln);
+ }
+ ln->fcfinfo = NULL;
+}
+
+/**
+ * csio_lnode_init - Initialize the members of an lnode.
+ * @ln: lnode
+ *
+ */
+int
+csio_lnode_init(struct csio_lnode *ln, struct csio_hw *hw,
+ struct csio_lnode *pln)
+{
+ int rv = -EINVAL;
+
+ /* Link this lnode to hw */
+ csio_lnode_to_hw(ln) = hw;
+
+ /* Link child to parent if child lnode */
+ if (pln)
+ ln->pln = pln;
+ else
+ ln->pln = NULL;
+
+ /* Initialize scsi_tgt and timers to zero */
+ ln->n_scsi_tgts = 0;
+ ln->last_scan_ntgts = 0;
+ ln->tgt_scan_tick = 0;
+
+ /* Initialize rnode list */
+ INIT_LIST_HEAD(&ln->rnhead);
+ INIT_LIST_HEAD(&ln->cln_head);
+
+ /* Initialize log level for debug */
+ ln->params.log_level = hw->params.log_level;
+
+ if (csio_ln_init(ln))
+ goto err;
+
+ /* Add lnode to list of sibling or children lnodes */
+ spin_lock_irq(&hw->lock);
+ list_add_tail(&ln->sm.sm_list, pln ? &pln->cln_head : &hw->sln_head);
+ if (pln)
+ pln->num_vports++;
+ spin_unlock_irq(&hw->lock);
+
+ hw->num_lns++;
+
+ return 0;
+err:
+ csio_lnode_to_hw(ln) = NULL;
+ return rv;
+}
+
+/**
+ * csio_lnode_exit - De-instantiate an lnode.
+ * @ln: lnode
+ *
+ */
+void
+csio_lnode_exit(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ csio_ln_exit(ln);
+
+ /* Remove this lnode from hw->sln_head */
+ spin_lock_irq(&hw->lock);
+
+ list_del_init(&ln->sm.sm_list);
+
+ /* If it is children lnode, decrement the
+ * counter in its parent lnode
+ */
+ if (ln->pln)
+ ln->pln->num_vports--;
+
+ /* Update root lnode pointer */
+ if (list_empty(&hw->sln_head))
+ hw->rln = NULL;
+ else
+ hw->rln = (struct csio_lnode *)csio_list_next(&hw->sln_head);
+
+ spin_unlock_irq(&hw->lock);
+
+ csio_lnode_to_hw(ln) = NULL;
+ hw->num_lns--;
+}
diff --git a/drivers/scsi/csiostor/csio_lnode.h b/drivers/scsi/csiostor/csio_lnode.h
new file mode 100644
index 000000000000..8d84988ab06d
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_lnode.h
@@ -0,0 +1,255 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_LNODE_H__
+#define __CSIO_LNODE_H__
+
+#include <linux/kref.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <scsi/fc/fc_els.h>
+
+
+#include "csio_defs.h"
+#include "csio_hw.h"
+
+#define CSIO_FCOE_MAX_NPIV 128
+#define CSIO_FCOE_MAX_RNODES 2048
+
+/* FDMI port attribute unknown speed */
+#define CSIO_HBA_PORTSPEED_UNKNOWN 0x8000
+
+extern int csio_fcoe_rnodes;
+extern int csio_fdmi_enable;
+
+/* State machine events */
+enum csio_ln_ev {
+ CSIO_LNE_NONE = (uint32_t)0,
+ CSIO_LNE_LINKUP,
+ CSIO_LNE_FAB_INIT_DONE,
+ CSIO_LNE_LINK_DOWN,
+ CSIO_LNE_DOWN_LINK,
+ CSIO_LNE_LOGO,
+ CSIO_LNE_CLOSE,
+ CSIO_LNE_MAX_EVENT,
+};
+
+
+struct csio_fcf_info {
+ struct list_head list;
+ uint8_t priority;
+ uint8_t mac[6];
+ uint8_t name_id[8];
+ uint8_t fabric[8];
+ uint16_t vf_id;
+ uint8_t vlan_id;
+ uint16_t max_fcoe_size;
+ uint8_t fc_map[3];
+ uint32_t fka_adv;
+ uint32_t fcfi;
+ uint8_t get_next:1;
+ uint8_t link_aff:1;
+ uint8_t fpma:1;
+ uint8_t spma:1;
+ uint8_t login:1;
+ uint8_t portid;
+ uint8_t spma_mac[6];
+ struct kref kref;
+};
+
+/* Defines for flags */
+#define CSIO_LNF_FIPSUPP 0x00000001 /* Fip Supported */
+#define CSIO_LNF_NPIVSUPP 0x00000002 /* NPIV supported */
+#define CSIO_LNF_LINK_ENABLE 0x00000004 /* Link enabled */
+#define CSIO_LNF_FDMI_ENABLE 0x00000008 /* FDMI support */
+
+/* Transport events */
+enum csio_ln_fc_evt {
+ CSIO_LN_FC_LINKUP = 1,
+ CSIO_LN_FC_LINKDOWN,
+ CSIO_LN_FC_RSCN,
+ CSIO_LN_FC_ATTRIB_UPDATE,
+};
+
+/* Lnode stats */
+struct csio_lnode_stats {
+ uint32_t n_link_up; /* Link up */
+ uint32_t n_link_down; /* Link down */
+ uint32_t n_err; /* error */
+ uint32_t n_err_nomem; /* memory not available */
+ uint32_t n_inval_parm; /* Invalid parameters */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_rnode_match; /* matched rnode */
+ uint32_t n_dev_loss_tmo; /* Device loss timeout */
+ uint32_t n_fdmi_err; /* fdmi err */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_ln_ev n_evt_sm[CSIO_LNE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_rnode_alloc; /* rnode allocated */
+ uint32_t n_rnode_free; /* rnode freed */
+ uint32_t n_rnode_nomem; /* rnode alloc failure */
+ uint32_t n_input_requests; /* Input Requests */
+ uint32_t n_output_requests; /* Output Requests */
+ uint32_t n_control_requests; /* Control Requests */
+ uint32_t n_input_bytes; /* Input Bytes */
+ uint32_t n_output_bytes; /* Output Bytes */
+ uint32_t rsvd1;
+};
+
+/* Common Lnode params */
+struct csio_lnode_params {
+ uint32_t ra_tov;
+ uint32_t fcfi;
+ uint32_t log_level; /* Module level for debugging */
+};
+
+struct csio_service_parms {
+ struct fc_els_csp csp; /* Common service parms */
+ uint8_t wwpn[8]; /* WWPN */
+ uint8_t wwnn[8]; /* WWNN */
+ struct fc_els_cssp clsp[4]; /* Class service params */
+ uint8_t vvl[16]; /* Vendor version level */
+};
+
+/* Lnode */
+struct csio_lnode {
+ struct csio_sm sm; /* State machine + sibling
+ * lnode list.
+ */
+ struct csio_hw *hwp; /* Pointer to the HW module */
+ uint8_t portid; /* Port ID */
+ uint8_t rsvd1;
+ uint16_t rsvd2;
+ uint32_t dev_num; /* Device number */
+ uint32_t flags; /* Flags */
+ struct list_head fcf_lsthead; /* FCF entries */
+ struct csio_fcf_info *fcfinfo; /* FCF in use */
+ struct csio_ioreq *mgmt_req; /* MGMT request */
+
+ /* FCoE identifiers */
+ uint8_t mac[6];
+ uint32_t nport_id;
+ struct csio_service_parms ln_sparm; /* Service parms */
+
+ /* Firmware identifiers */
+ uint32_t fcf_flowid; /*fcf flowid */
+ uint32_t vnp_flowid;
+ uint16_t ssn_cnt; /* Registered Session */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+
+ /* Children */
+ struct list_head cln_head; /* Head of the children lnode
+ * list.
+ */
+ uint32_t num_vports; /* Total NPIV/children LNodes*/
+ struct csio_lnode *pln; /* Parent lnode of child
+ * lnodes.
+ */
+ struct list_head cmpl_q; /* Pending I/Os on this lnode */
+
+ /* Remote node information */
+ struct list_head rnhead; /* Head of rnode list */
+ uint32_t num_reg_rnodes; /* Number of rnodes registered
+ * with the host.
+ */
+ uint32_t n_scsi_tgts; /* Number of scsi targets
+ * found
+ */
+ uint32_t last_scan_ntgts;/* Number of scsi targets
+ * found per last scan.
+ */
+ uint32_t tgt_scan_tick; /* timer started after
+ * new tgt found
+ */
+ /* FC transport data */
+ struct fc_vport *fc_vport;
+ struct fc_host_statistics fch_stats;
+
+ struct csio_lnode_stats stats; /* Common lnode stats */
+ struct csio_lnode_params params; /* Common lnode params */
+};
+
+#define csio_lnode_to_hw(ln) ((ln)->hwp)
+#define csio_root_lnode(ln) (csio_lnode_to_hw((ln))->rln)
+#define csio_parent_lnode(ln) ((ln)->pln)
+#define csio_ln_flowid(ln) ((ln)->vnp_flowid)
+#define csio_ln_wwpn(ln) ((ln)->ln_sparm.wwpn)
+#define csio_ln_wwnn(ln) ((ln)->ln_sparm.wwnn)
+
+#define csio_is_root_ln(ln) (((ln) == csio_root_lnode((ln))) ? 1 : 0)
+#define csio_is_phys_ln(ln) (((ln)->pln == NULL) ? 1 : 0)
+#define csio_is_npiv_ln(ln) (((ln)->pln != NULL) ? 1 : 0)
+
+
+#define csio_ln_dbg(_ln, _fmt, ...) \
+ csio_dbg(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_err(_ln, _fmt, ...) \
+ csio_err(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+#define csio_ln_warn(_ln, _fmt, ...) \
+ csio_warn(_ln->hwp, "%x:%x "_fmt, CSIO_DEVID_HI(_ln), \
+ CSIO_DEVID_LO(_ln), ##__VA_ARGS__);
+
+/* HW->Lnode notifications */
+enum csio_ln_notify {
+ CSIO_LN_NOTIFY_HWREADY = 1,
+ CSIO_LN_NOTIFY_HWSTOP,
+ CSIO_LN_NOTIFY_HWREMOVE,
+ CSIO_LN_NOTIFY_HWRESET,
+};
+
+void csio_fcoe_fwevt_handler(struct csio_hw *, __u8 cpl_op, __be64 *);
+int csio_is_lnode_ready(struct csio_lnode *);
+void csio_lnode_state_to_str(struct csio_lnode *ln, int8_t *str);
+struct csio_lnode *csio_lnode_lookup_by_wwpn(struct csio_hw *, uint8_t *);
+int csio_get_phy_port_stats(struct csio_hw *, uint8_t,
+ struct fw_fcoe_port_stats *);
+int csio_scan_done(struct csio_lnode *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+void csio_notify_lnodes(struct csio_hw *, enum csio_ln_notify);
+void csio_disable_lnodes(struct csio_hw *, uint8_t, bool);
+void csio_lnode_async_event(struct csio_lnode *, enum csio_ln_fc_evt);
+int csio_ln_fdmi_start(struct csio_lnode *, void *);
+int csio_lnode_start(struct csio_lnode *);
+void csio_lnode_stop(struct csio_lnode *);
+void csio_lnode_close(struct csio_lnode *);
+int csio_lnode_init(struct csio_lnode *, struct csio_hw *,
+ struct csio_lnode *);
+void csio_lnode_exit(struct csio_lnode *);
+
+#endif /* ifndef __CSIO_LNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c
new file mode 100644
index 000000000000..5b27c48f6836
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.c
@@ -0,0 +1,1750 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/delay.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_mb.h"
+#include "csio_wr.h"
+
+#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
+
+/* MB Command/Response Helpers */
+/*
+ * csio_mb_fw_retval - FW return value from a mailbox response.
+ * @mbp: Mailbox structure
+ *
+ */
+enum fw_retval
+csio_mb_fw_retval(struct csio_mb *mbp)
+{
+ struct fw_cmd_hdr *hdr;
+
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
+}
+
+/*
+ * csio_mb_hello - FW HELLO command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @m_mbox: Master mailbox number, if any.
+ * @a_mbox: Mailbox number for async notifications.
+ * @master: Device mastership.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->err_to_clearinit = htonl(
+ FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
+ FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
+ FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
+ m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
+ FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
+ FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
+ FW_HELLO_CMD_CLEARINIT);
+
+}
+
+/*
+ * csio_mb_process_hello_rsp - FW HELLO response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @state: State that the function is in.
+ * @mpfn: Master pfn
+ *
+ */
+void
+csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, enum csio_dev_state *state,
+ uint8_t *mpfn)
+{
+ struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
+ uint32_t value;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS) {
+ hw->fwrev = ntohl(rsp->fwrev);
+
+ value = ntohl(rsp->err_to_clearinit);
+ *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
+
+ if (value & FW_HELLO_CMD_INIT)
+ *state = CSIO_DEV_STATE_INIT;
+ else if (value & FW_HELLO_CMD_ERR)
+ *state = CSIO_DEV_STATE_ERR;
+ else
+ *state = CSIO_DEV_STATE_UNINIT;
+ }
+}
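+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver): a
+ * synchronous HELLO round trip using the two helpers above.
+ * CSIO_MASTER_MAY is assumed to be a valid enum csio_dev_master value
+ * and the 1000ms timeout is arbitrary; the hw lock must be held across
+ * csio_mb_issue().
+ */
+static int __maybe_unused
+csio_example_hello(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ enum fw_retval retval;
+ enum csio_dev_state state;
+ uint8_t mpfn;
+
+ /* Use our own PF number for both master and async mailboxes */
+ csio_mb_hello(hw, mbp, 1000, hw->pfn, hw->pfn, CSIO_MASTER_MAY, NULL);
+
+ /* NULL callback => csio_mb_issue() polls for the response */
+ if (csio_mb_issue(hw, mbp))
+ return -EINVAL;
+
+ csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
+
+ return (retval == FW_SUCCESS) ? 0 : -EINVAL;
+}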
+
+/*
+ * csio_mb_bye - FW BYE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_reset - FW RESET command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @reset: Type of reset.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reset, int halt,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->val = htonl(reset);
+ cmdp->halt_pkd = htonl(halt);
+
+}
+
+/*
+ * csio_mb_params - FW PARAMS command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout.
+ * @pf: PF number.
+ * @vf: VF number.
+ * @nparams: Number of parameters
+ * @params: Parameter mnemonic array.
+ * @val: Parameter value array.
+ * @wr: Write/Read PARAMS.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int nparams,
+ const u32 *params, u32 *val, bool wr,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ uint32_t i;
+ uint32_t temp_params = 0, temp_val = 0;
+ struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
+ __be32 *p = &cmdp->param[0].mnem;
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ) |
+ FW_PARAMS_CMD_PFN(pf) |
+ FW_PARAMS_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Write Params */
+ if (wr) {
+ while (nparams--) {
+ temp_params = *params++;
+ temp_val = *val++;
+
+ *p++ = htonl(temp_params);
+ *p++ = htonl(temp_val);
+ }
+ } else {
+ for (i = 0; i < nparams; i++, p += 2) {
+ temp_params = *params++;
+ *p = htonl(temp_params);
+ }
+ }
+
+}
+
+/*
+ * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @nparams: Number of parameters
+ * @val: Parameter value array.
+ *
+ */
+void
+csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, unsigned int nparams,
+ u32 *val)
+{
+ struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
+ uint32_t i;
+ __be32 *p = &rsp->param[0].val;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
+
+ if (*retval == FW_SUCCESS)
+ for (i = 0; i < nparams; i++, p += 2)
+ *val++ = ntohl(*p);
+}
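+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver):
+ * reading a single FW parameter with the PARAMS helpers above. The
+ * parameter mnemonic is supplied by the caller and the 1000ms timeout
+ * is an assumed value; the hw lock must be held across csio_mb_issue().
+ */
+static int __maybe_unused
+csio_example_read_param(struct csio_hw *hw, struct csio_mb *mbp,
+ u32 param, u32 *val)
+{
+ enum fw_retval retval;
+
+ /* wr=false => READ; NULL callback => polled completion */
+ csio_mb_params(hw, mbp, 1000, hw->pfn, 0, 1, &param, val, false, NULL);
+
+ if (csio_mb_issue(hw, mbp))
+ return -EINVAL;
+
+ csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, val);
+
+ return (retval == FW_SUCCESS) ? 0 : -EINVAL;
+}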
+
+/*
+ * csio_mb_ldst - FW LDST command
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: timeout
+ * @reg: register
+ *
+ */
+void
+csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
+{
+ struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
+ CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
+
+ /*
+ * Construct and send the Firmware LDST Command to retrieve the
+ * specified PCI-E Configuration Space register.
+ */
+ ldst_cmd->op_to_addrspace =
+ htonl(FW_CMD_OP(FW_LDST_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+ ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
+ ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+ ldst_cmd->u.pcie.ctrl_to_fn =
+ (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
+ ldst_cmd->u.pcie.r = (uint8_t)reg;
+}
+
+/*
+ *
+ * csio_mb_caps_config - FW Read/Write Capabilities command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @wr: Write if 1, Read if 0
+ * @init: Turn on initiator mode.
+ * @tgt: Turn on target mode.
+ * @cofld: If 1, Control Offload for FCoE
+ * @cbfn: Callback, if any.
+ *
+ * This helper assumes that cmdp has MB payload from a previous CAPS
+ * read command.
+ */
+void
+csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ bool wr, bool init, bool tgt, bool cofld,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_caps_config_cmd *cmdp =
+ (struct fw_caps_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_WRITE : FW_CMD_READ));
+ cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ /* Read config */
+ if (!wr)
+ return;
+
+ /* Write config */
+ cmdp->fcoecaps = 0;
+
+ if (cofld)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
+ if (init)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
+ if (tgt)
+ cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
+}
+
+void
+csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t tmo, uint8_t mode, unsigned int flags,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ struct fw_rss_glb_config_cmd *cmdp =
+ (struct fw_rss_glb_config_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+ cmdp->u.manual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+ cmdp->u.basicvirtual.mode_pkd =
+ htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+ cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+ }
+}
+
+
+/*
+ * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @pf: PF number.
+ * @vf: VF number.
+ * @txq:
+ * @txq_eth_ctrl:
+ * @rxqi:
+ * @rxq:
+ * @tc:
+ * @vi:
+ * @pmask:
+ * @rcaps:
+ * @wxcaps:
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ unsigned int pf, unsigned int vf, unsigned int txq,
+ unsigned int txq_eth_ctrl, unsigned int rxqi,
+ unsigned int rxq, unsigned int tc, unsigned int vi,
+ unsigned int cmask, unsigned int pmask, unsigned int nexactf,
+ unsigned int rcaps, unsigned int wxcaps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_PFVF_CMD_PFN(pf) |
+ FW_PFVF_CMD_VFN(vf));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+ FW_PFVF_CMD_NIQ(rxq));
+
+ cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
+ FW_PFVF_CMD_CMASK(cmask) |
+ FW_PFVF_CMD_PMASK(pmask) |
+ FW_PFVF_CMD_NEQ(txq));
+ cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
+ FW_PFVF_CMD_NVI(vi) |
+ FW_PFVF_CMD_NEXACTF(nexactf));
+ cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+ FW_PFVF_CMD_WX_CAPS(wxcaps) |
+ FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+}
+
+#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/*
+ * csio_mb_port- FW PORT command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @portid: Port ID to get/set info
+ * @wr: Write/Read PORT information.
+ * @fc: Flow control
+ * @caps: Port capabilities to set.
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
+ unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+ FW_CMD_REQUEST |
+ (wr ? FW_CMD_EXEC : FW_CMD_READ) |
+ FW_PORT_CMD_PORTID(portid));
+ if (!wr) {
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ return;
+ }
+
+ /* Set port */
+ cmdp->action_to_len16 = htonl(
+ FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ if (fc & PAUSE_RX)
+ lfc |= FW_PORT_CAP_FC_RX;
+ if (fc & PAUSE_TX)
+ lfc |= FW_PORT_CAP_FC_TX;
+
+ if (!(caps & FW_PORT_CAP_ANEG))
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
+ else
+ cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
+ lfc | mdi);
+}
+
+/*
+ * csio_mb_process_read_port_rsp - FW PORT command response processing helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @retval: Mailbox return value from Firmware
+ * @caps: port capabilities
+ *
+ */
+void
+csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval, uint16_t *caps)
+{
+ struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
+
+ if (*retval == FW_SUCCESS)
+ *caps = ntohs(rsp->u.info.pcap);
+}
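+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver): a
+ * GET_PORT_INFO round trip with the PORT helpers above. The 1000ms
+ * timeout is an assumed value; the hw lock must be held across
+ * csio_mb_issue().
+ */
+static int __maybe_unused
+csio_example_read_port_caps(struct csio_hw *hw, struct csio_mb *mbp,
+ uint8_t portid, uint16_t *caps)
+{
+ enum fw_retval retval;
+
+ /* wr=false selects FW_PORT_ACTION_GET_PORT_INFO */
+ csio_mb_port(hw, mbp, 1000, portid, false, 0, 0, NULL);
+
+ if (csio_mb_issue(hw, mbp))
+ return -EINVAL;
+
+ csio_mb_process_read_port_rsp(hw, mbp, &retval, caps);
+
+ return (retval == FW_SUCCESS) ? 0 : -EINVAL;
+}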
+
+/*
+ * csio_mb_initialize - FW INITIALIZE command helper
+ * @hw: The HW structure
+ * @mbp: Mailbox structure
+ * @tmo: Command timeout
+ * @cbfn: Callback, if any.
+ *
+ */
+void
+csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
+
+ cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE);
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+}
+
+/*
+ * csio_mb_iq_alloc - Initializes the mailbox to allocate an
+ * Ingress DMA queue in the firmware.
+ *
+ * @hw: The hw structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->type_to_iqandstindex = htonl(
+ FW_IQ_CMD_VIID(iq_params->viid) |
+ FW_IQ_CMD_TYPE(iq_params->type) |
+ FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
+
+ cmdp->fl0size = htons(iq_params->fl0size);
+ cmdp->fl1size = htons(iq_params->fl1size);
+
+} /* csio_mb_iq_alloc */
+
+/*
+ * csio_mb_iq_write - Initializes the mailbox for writing into an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private object
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an iq-alloc request.
+ * @iq_params: Ingress queue params needed for writing.
+ * @cbfn: The call-back function
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this IQ write request can be cascaded with a previous
+ * IQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ uint32_t iq_start_stop = (iq_params->iq_start) ?
+ FW_IQ_CMD_IQSTART(1) :
+ FW_IQ_CMD_IQSTOP(1);
+
+ /*
+ * If this IQ write is cascaded with IQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(iq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->iqid |= htons(iq_params->iqid);
+ cmdp->fl0id |= htons(iq_params->fl0id);
+ cmdp->fl1id |= htons(iq_params->fl1id);
+ cmdp->type_to_iqandstindex |= htonl(
+ FW_IQ_CMD_IQANDST(iq_params->iqandst) |
+ FW_IQ_CMD_IQANUS(iq_params->iqanus) |
+ FW_IQ_CMD_IQANUD(iq_params->iqanud) |
+ FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
+ cmdp->iqdroprss_to_iqesize |= htons(
+ FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
+ FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
+ FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
+ FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
+ FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
+ FW_IQ_CMD_IQESIZE(iq_params->iqesize));
+
+ cmdp->iqsize |= htons(iq_params->iqsize);
+ cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
+
+ if (iq_params->type == 0) {
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
+ FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
+ }
+
+ if (iq_params->fl0size && iq_params->fl0addr &&
+ (iq_params->fl0id != 0xFFFF)) {
+
+ cmdp->iqns_to_fl0congen |= htonl(
+ FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
+ FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
+ FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
+ FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
+ cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
+ FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
+ FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
+ FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
+ FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
+ FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
+ cmdp->fl0size |= htons(iq_params->fl0size);
+ cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
+ }
+} /* csio_mb_iq_write */
+
+/*
+ * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
+ * Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Ingress queue params needed for allocation & writing.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
+ csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
+} /* csio_mb_iq_alloc_write */
+
+/*
+ * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
+ * of ingress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @iq_params: Ingress queue parameters, after allocation and write.
+ *
+ */
+void
+csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *ret_val,
+ struct csio_iq_params *iq_params)
+{
+ struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+ if (*ret_val == FW_SUCCESS) {
+ iq_params->physiqid = ntohs(rsp->physiqid);
+ iq_params->iqid = ntohs(rsp->iqid);
+ iq_params->fl0id = ntohs(rsp->fl0id);
+ iq_params->fl1id = ntohs(rsp->fl1id);
+ } else {
+ iq_params->physiqid = iq_params->iqid =
+ iq_params->fl0id = iq_params->fl1id = 0;
+ }
+} /* csio_mb_iq_alloc_write_rsp */
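+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver): a
+ * cascaded IQ ALLOC+WRITE round trip. iq_params must already carry the
+ * queue geometry and DMA addresses; the 5000ms timeout is an assumed
+ * value and the hw lock must be held across csio_mb_issue().
+ */
+static int __maybe_unused
+csio_example_alloc_iq(struct csio_hw *hw, struct csio_mb *mbp,
+ struct csio_iq_params *iq_params)
+{
+ enum fw_retval retval;
+
+ /* One mailbox carries both the ALLOC and the WRITE command */
+ csio_mb_iq_alloc_write(hw, mbp, hw, 5000, iq_params, NULL);
+
+ if (csio_mb_issue(hw, mbp))
+ return -EINVAL;
+
+ /* On FW_SUCCESS this fills in the firmware-assigned queue ids */
+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, iq_params);
+
+ return (retval == FW_SUCCESS) ? 0 : -EINVAL;
+}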
+
+/*
+ * csio_mb_iq_free - Initializes the mailbox for freeing a
+ * specified Ingress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @iq_params: Parameters of ingress queue, that is to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_iq_params *iq_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_IQ_CMD_PFN(iq_params->pfn) |
+ FW_IQ_CMD_VFN(iq_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
+
+ cmdp->iqid = htons(iq_params->iqid);
+ cmdp->fl0id = htons(iq_params->fl0id);
+ cmdp->fl1id = htons(iq_params->fl1id);
+
+} /* csio_mb_iq_free */
+
+/*
+ * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
+ * an offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+static void
+csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_mb_eq_ofld_alloc */
+
+/*
+ * csio_mb_eq_ofld_write - Initializes the mailbox for writing into an
+ * allocated offload-egress queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cascaded_req: TRUE - if this request is cascaded with an EQ-alloc request.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
+ * because this EQ write request can be cascaded with a previous
+ * EQ alloc request, and we don't want to overwrite the bits set by
+ * that request. This logic will work even in a non-cascaded case, since the
+ * cmdp structure is zeroed out by CSIO_INIT_MBP.
+ */
+static void
+csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, bool cascaded_req,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
+ FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
+
+ /*
+ * If this EQ write is cascaded with EQ alloc request, do not
+ * re-initialize with 0's.
+ *
+ */
+ if (!cascaded_req)
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_WRITE |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 |= htonl(eq_start_stop |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+ cmdp->fetchszm_to_iqid |= htonl(
+ FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
+ FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
+ FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
+ FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
+
+ cmdp->dcaen_to_eqsize |= htonl(
+ FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
+ FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
+ FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
+ FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
+ FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
+ FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
+
+ cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
+
+} /* csio_mb_eq_ofld_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
+ * writing into an Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
+ void *priv, uint32_t mb_tmo,
+ struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
+ csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
+ eq_ofld_params, cbfn);
+} /* csio_mb_eq_ofld_alloc_write */
+
+/*
+ * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
+ * & write egress DMA queue mailbox's response.
+ *
+ * @hw: The HW structure.
+ * @mbp: Mailbox structure to initialize.
+ * @retval: Firmware return value.
+ * @eq_ofld_params: (Offload) Egress queue parameters.
+ *
+ */
+void
+csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp, enum fw_retval *ret_val,
+ struct csio_eq_params *eq_ofld_params)
+{
+ struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
+
+ if (*ret_val == FW_SUCCESS) {
+ eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
+ ntohl(rsp->eqid_pkd));
+ eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
+ ntohl(rsp->physeqid_pkd));
+ } else
+ eq_ofld_params->eqid = 0;
+
+} /* csio_mb_eq_ofld_alloc_write_rsp */
+
+/*
+ * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
+ * specified Egress DMA Queue.
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @priv: Private data area.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @eq_ofld_params: (Offload) Egress queue parameters, for the queue to be freed.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
+ uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
+
+ cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
+ FW_CMD_REQUEST | FW_CMD_EXEC |
+ FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
+ FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
+ cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
+
+} /* csio_mb_eq_ofld_free */
+
+/*
+ * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
+ * condition.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @port_id: Physical port ID.
+ * @sub_opcode: FCoE link command sub-opcode.
+ * @cos: Class of service.
+ * @link_status: Link status (up/down).
+ * @fcfi: FCF flow id.
+ * @cbfn: The call back function.
+ *
+ *
+ */
+void
+csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
+ uint8_t cos, bool link_status, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_link_cmd *cmdp =
+ (struct fw_fcoe_link_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_portid = htonl((
+ FW_CMD_OP(FW_FCOE_LINK_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_WRITE |
+ FW_FCOE_LINK_CMD_PORTID(port_id)));
+ cmdp->sub_opcode_fcfi = htonl(
+ FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
+ FW_FCOE_LINK_CMD_FCFI(fcfi));
+ cmdp->lstatus = link_status;
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_write_fcoe_link_cond_init_mb */
+
+/*
+ * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
+ * resource information (FW_FCOE_RES_INFO_CMD).
+ *
+ * @hw: The HW structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_res_info_cmd *cmdp =
+ (struct fw_fcoe_res_info_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+
+ cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ));
+
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_res_info_init_mb */
+
+/*
+ * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
+ * in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @iqid: iqid
+ * @vnport_wwnn: vnport WWNN
+ * @vnport_wwpn: vnport WWPN
+ * @cbfn: The call-back function.
+ *
+ *
+ */
+void
+csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
+ uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi)));
+
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+
+ cmdp->iqid = htons(iqid);
+
+ if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
+ cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
+
+ if (vnport_wwnn)
+ memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
+ if (vnport_wwpn)
+ memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
+
+} /* csio_fcoe_vnp_alloc_init_mb */
+
+/*
+ * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF Index.
+ * @vnpi: vnpi
+ * @cbfn: The call-back handler.
+ */
+void
+csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
+ * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
+ *
+ * @ln: The Lnode structure.
+ * @mbp: Mailbox structure to initialize.
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @fcfi: FCF flow id
+ * @vnpi: VNP flow id
+ * @cbfn: The call-back function.
+ * Return: None
+ */
+void
+csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_vnp_cmd *cmdp =
+ (struct fw_fcoe_vnp_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_EXEC |
+ FW_FCOE_VNP_CMD_FCFI(fcfi));
+ cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
+ FW_CMD_LEN16(sizeof(*cmdp) / 16));
+ cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
+}
+
+/*
+ * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
+ * FCF records.
+ *
+ * @ln: The Lnode structure
+ * @mbp: Mailbox structure to initialize
+ * @mb_tmo: Mailbox time-out period (in ms).
+ * @portid: Physical port ID.
+ * @fcfi: FCF flow id.
+ * @cbfn: The call-back function
+ *
+ *
+ */
+void
+csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
+ uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct fw_fcoe_fcf_cmd *cmdp =
+ (struct fw_fcoe_fcf_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
+
+ cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ |
+ FW_FCOE_FCF_CMD_FCFI(fcfi));
+ cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
+
+} /* csio_fcoe_read_fcf_init_mb */
+
+void
+csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
+ uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *,
+ struct csio_mb *))
+{
+ struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
+ mbp->mb_size = 64;
+
+ cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
+ FW_CMD_REQUEST | FW_CMD_READ);
+ cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
+
+ cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
+ FW_FCOE_STATS_CMD_PORT(portparams->portid);
+
+ cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
+ FW_FCOE_STATS_CMD_PORT_VALID;
+
+} /* csio_fcoe_read_portparams_init_mb */
+
+void
+csio_mb_process_portparams_rsp(struct csio_hw *hw,
+ struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats)
+{
+ struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
+ struct fw_fcoe_port_stats stats;
+ uint8_t *src;
+ uint8_t *dst;
+
+ *retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));
+
+ memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));
+
+ if (*retval == FW_SUCCESS) {
+ dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
+ src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
+ memcpy(dst, src, (portparams->nstats * 8));
+ if (portparams->idx == 1) {
+ /* Get the first 6 flits from the Mailbox */
+ portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
+ portstats->tx_bcast_frames = stats.tx_bcast_frames;
+ portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
+ portstats->tx_mcast_frames = stats.tx_mcast_frames;
+ portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
+ portstats->tx_ucast_frames = stats.tx_ucast_frames;
+ }
+ if (portparams->idx == 7) {
+ /* Get the second 6 flits from the Mailbox */
+ portstats->tx_drop_frames = stats.tx_drop_frames;
+ portstats->tx_offload_bytes = stats.tx_offload_bytes;
+ portstats->tx_offload_frames = stats.tx_offload_frames;
+#if 0
+ portstats->rx_pf_bytes = stats.rx_pf_bytes;
+ portstats->rx_pf_frames = stats.rx_pf_frames;
+#endif
+ portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
+ portstats->rx_bcast_frames = stats.rx_bcast_frames;
+ portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
+ }
+ if (portparams->idx == 13) {
+ /* Get the last 4 flits from the Mailbox */
+ portstats->rx_mcast_frames = stats.rx_mcast_frames;
+ portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
+ portstats->rx_ucast_frames = stats.rx_ucast_frames;
+ portstats->rx_err_frames = stats.rx_err_frames;
+ }
+ }
+}
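+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver):
+ * gathering the full set of port stats by walking the index in the same
+ * 1/7/13 steps the response handler above decodes (6 + 6 + 4 flits).
+ * The 1000ms timeout is an assumed value; the hw lock must be held
+ * across csio_mb_issue().
+ */
+static int __maybe_unused
+csio_example_get_port_stats(struct csio_hw *hw, struct csio_mb *mbp,
+ uint8_t portid, struct fw_fcoe_port_stats *stats)
+{
+ struct fw_fcoe_port_cmd_params portparams;
+ enum fw_retval retval;
+ int idx;
+
+ memset(stats, 0, sizeof(*stats));
+ portparams.portid = portid;
+
+ for (idx = 1; idx <= 13; idx += 6) {
+ portparams.idx = idx;
+ portparams.nstats = (idx == 13) ? 4 : 6;
+
+ csio_fcoe_read_portparams_init_mb(hw, mbp, 1000,
+ &portparams, NULL);
+ if (csio_mb_issue(hw, mbp))
+ return -EINVAL;
+
+ csio_mb_process_portparams_rsp(hw, mbp, &retval,
+ &portparams, stats);
+ if (retval != FW_SUCCESS)
+ return -EINVAL;
+ }
+
+ return 0;
+}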
+
+/* Entry points/APIs for MB module */
+/*
+ * csio_mb_intr_enable - Enable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
+ */
+void
+csio_mb_intr_enable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+/*
+ * csio_mb_intr_disable - Disable Interrupts from mailboxes.
+ * @hw: The HW structure
+ *
+ * Disable bit in HostInterruptEnable CIM register.
+ */
+void
+csio_mb_intr_disable(struct csio_hw *hw)
+{
+ csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+ csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
+}
+
+static void
+csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
+{
+ struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
+
+ if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
+ csio_info(hw, "FW print message:\n");
+ csio_info(hw, "\tdebug->dprtstridx = %d\n",
+ ntohs(dbg->u.prt.dprtstridx));
+ csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam0));
+ csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam1));
+ csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam2));
+ csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
+ ntohl(dbg->u.prt.dprtstrparam3));
+ } else {
+ /* This is a FW assertion */
+ csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+ dbg->u.assert.filename_0_7,
+ ntohl(dbg->u.assert.line),
+ ntohl(dbg->u.assert.x),
+ ntohl(dbg->u.assert.y));
+ }
+}
+
+static void
+csio_mb_debug_cmd_handler(struct csio_hw *hw)
+{
+ int i;
+ __be64 cmd[CSIO_MB_MAX_REGS];
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = sizeof(struct fw_debug_cmd);
+
+ /* Copy mailbox data */
+ for (i = 0; i < size; i += 8)
+ cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
+
+ csio_mb_dump_fw_dbg(hw, cmd);
+
+ /* Notify FW of mailbox by setting owner as UP */
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+}
+
+/*
+ * csio_mb_issue - generic routine for issuing Mailbox commands.
+ * @hw: The HW structure
+ * @mbp: Mailbox command to issue
+ *
+ * Caller should hold hw lock across this call.
+ */
+int
+csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ uint32_t owner, ctl;
+ int i;
+ uint32_t ii;
+ __be64 *cmd = mbp->mb;
+ __be64 hdr;
+ struct csio_mbm *mbm = &hw->mbm;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size = mbp->mb_size;
+ int rv = -EINVAL;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /* Determine mode */
+ if (mbp->mb_cbfn == NULL) {
+ /* Need to issue/get results in the same context */
+ if (mbp->tmo < CSIO_MB_POLL_FREQ) {
+ csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
+ goto error_out;
+ }
+ } else if (!csio_is_host_intr_enabled(hw) ||
+ !csio_is_hw_intr_enabled(hw)) {
+ csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
+ *((uint8_t *)mbp->mb));
+ goto error_out;
+ }
+
+ if (mbm->mcurrent != NULL) {
+ /* Queue mbox cmd, if another mbox cmd is active */
+ if (mbp->mb_cbfn == NULL) {
+ rv = -EBUSY;
+ csio_dbg(hw, "Couldnt own Mailbox %x op:0x%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb));
+
+ goto error_out;
+ } else {
+ list_add_tail(&mbp->list, &mbm->req_q);
+ CSIO_INC_STATS(mbm, n_activeq);
+
+ return 0;
+ }
+ }
+
+ /* Now get ownership of mailbox */
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+
+ if (!csio_mb_is_host_owner(owner)) {
+
+ for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
+ owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
+ /*
+ * Mailbox unavailable. In immediate mode, fail the command.
+ * In other modes, enqueue the request.
+ */
+ if (!csio_mb_is_host_owner(owner)) {
+ if (mbp->mb_cbfn == NULL) {
+ rv = owner ? -EBUSY : -ETIMEDOUT;
+
+ csio_dbg(hw,
+ "Couldnt own Mailbox %x op:0x%x "
+ "owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb), owner);
+ goto error_out;
+ } else {
+ if (mbm->mcurrent == NULL) {
+ csio_err(hw,
+ "Couldnt own Mailbox %x "
+ "op:0x%x owner:%x\n",
+ hw->pfn, *((uint8_t *)mbp->mb),
+ owner);
+ csio_err(hw,
+ "No outstanding driver"
+ " mailbox as well\n");
+ goto error_out;
+ }
+ }
+ }
+ }
+
+ /* Mailbox is available, copy mailbox data into it */
+ for (i = 0; i < size; i += 8) {
+ csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
+ cmd++;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ /* Start completion timers in non-immediate modes and notify FW */
+ if (mbp->mb_cbfn != NULL) {
+ mbm->mcurrent = mbp;
+ mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
+ csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
+ MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
+ } else
+ csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
+ ctl_reg);
+
+ /* Flush posted writes */
+ csio_rd_reg32(hw, ctl_reg);
+ wmb();
+
+ CSIO_INC_STATS(mbm, n_req);
+
+ if (mbp->mb_cbfn)
+ return 0;
+
+ /* Poll for completion in immediate mode */
+ cmd = mbp->mb;
+
+ for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
+ mdelay(CSIO_MB_POLL_FREQ);
+
+ /* Check for response */
+ ctl = csio_rd_reg32(hw, ctl_reg);
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_wr_reg32(hw, 0, ctl_reg);
+ continue;
+ }
+
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ continue;
+ }
+
+ /* Copy response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+ csio_wr_reg32(hw, 0, ctl_reg);
+
+ if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ CSIO_INC_STATS(mbm, n_err);
+
+ CSIO_INC_STATS(mbm, n_rsp);
+ return 0;
+ }
+ }
+
+ CSIO_INC_STATS(mbm, n_tmo);
+
+ csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
+ hw->pfn, *((uint8_t *)cmd));
+
+ return -ETIMEDOUT;
+
+error_out:
+ CSIO_INC_STATS(mbm, n_err);
+ return rv;
+}
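+
+/*
+ * Illustrative usage sketch (not called anywhere in this driver):
+ * issuing a mailbox in event-driven mode. With a non-NULL callback
+ * csio_mb_issue() returns as soon as the command is posted (or queued),
+ * and the callback later runs from csio_mb_completions() below. The
+ * 1000ms timeout is an assumed value.
+ */
+static void __maybe_unused
+csio_example_bye_done(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
+ csio_err(hw, "example: FW BYE failed\n");
+}
+
+static int __maybe_unused
+csio_example_send_bye(struct csio_hw *hw, struct csio_mb *mbp)
+{
+ csio_mb_bye(hw, mbp, 1000, csio_example_bye_done);
+
+ /* Returns 0 once posted; completion arrives via interrupt */
+ return csio_mb_issue(hw, mbp);
+}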
+
+/*
+ * csio_mb_completions - Completion handler for Mailbox commands
+ * @hw: The HW structure
+ * @cbfn_q: Completion queue.
+ *
+ */
+void
+csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ enum fw_retval rv;
+
+ while (!list_empty(cbfn_q)) {
+ mbp = list_first_entry(cbfn_q, struct csio_mb, list);
+ list_del_init(&mbp->list);
+
+ rv = csio_mb_fw_retval(mbp);
+ if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
+ CSIO_INC_STATS(mbm, n_err);
+ else if (rv != FW_HOSTERROR)
+ CSIO_INC_STATS(mbm, n_rsp);
+
+ if (mbp->mb_cbfn)
+ mbp->mb_cbfn(hw, mbp);
+ }
+}
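+
+/*
+ * Illustrative worker-side pairing (a sketch, not driver code): the
+ * ISR parks completed mailboxes on mbm->cbfn_q; a worker splices them
+ * onto a private list under the HW lock and runs the callbacks via
+ * csio_mb_completions() with the lock dropped.
+ */
+static void example_drain_mb_completions(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ list_splice_tail_init(&hw->mbm.cbfn_q, &cbfn_q);
+ hw->mbm.stats.n_cbfnq = 0;
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ csio_mb_completions(hw, &cbfn_q);
+}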
+
+static void
+csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
+{
+ static char *mod_str[] = {
+ NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
+ };
+
+ struct csio_pport *port = &hw->pport[port_id];
+
+ if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
+ csio_info(hw, "Port:%d - port module unplugged\n", port_id);
+ else if (port->mod_type < ARRAY_SIZE(mod_str))
+ csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
+ mod_str[port->mod_type]);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ csio_info(hw,
+ "Port:%d - unsupported optical port module "
+ "inserted\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ csio_info(hw,
+ "Port:%d - unknown port module inserted, forcing "
+ "TWINAX\n", port_id);
+ else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ csio_info(hw, "Port:%d - transceiver module error\n", port_id);
+ else
+ csio_info(hw, "Port:%d - unknown module type %d inserted\n",
+ port_id, port->mod_type);
+}
+
+int
+csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
+{
+ uint8_t opcode = *(uint8_t *)cmd;
+ struct fw_port_cmd *pcmd;
+ uint8_t port_id;
+ uint32_t link_status;
+ uint16_t action;
+ uint8_t mod_type;
+
+ if (opcode == FW_PORT_CMD) {
+ pcmd = (struct fw_port_cmd *)cmd;
+ port_id = FW_PORT_CMD_PORTID_GET(
+ ntohl(pcmd->op_to_portid));
+ action = FW_PORT_CMD_ACTION_GET(
+ ntohl(pcmd->action_to_len16));
+ if (action != FW_PORT_ACTION_GET_PORT_INFO) {
+ csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
+ action);
+ return -EINVAL;
+ }
+
+ link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
+ mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
+
+ hw->pport[port_id].link_status =
+ FW_PORT_CMD_LSTATUS_GET(link_status);
+ hw->pport[port_id].link_speed =
+ FW_PORT_CMD_LSPEED_GET(link_status);
+
+ csio_info(hw, "Port:%x - LINK %s\n", port_id,
+ FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
+
+ if (mod_type != hw->pport[port_id].mod_type) {
+ hw->pport[port_id].mod_type = mod_type;
+ csio_mb_portmod_changed(hw, port_id);
+ }
+ } else if (opcode == FW_DEBUG_CMD) {
+ csio_mb_dump_fw_dbg(hw, cmd);
+ } else {
+ csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
+ return -EINVAL;
+ }
+
+ return 0;
+}
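+
+/*
+ * Sketch of an event-queue consumer (illustrative): each 64-bit
+ * aligned FW message is handed to csio_mb_fwevt_handler(); opcodes it
+ * does not recognize come back as -EINVAL and are simply dropped.
+ */
+static void example_process_fwevt(struct csio_hw *hw, __be64 *msg)
+{
+ if (csio_mb_fwevt_handler(hw, msg))
+ csio_dbg(hw, "dropping unhandled fwevt op:0x%x\n",
+ *((uint8_t *)msg));
+}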
+
+/*
+ * csio_mb_isr_handler - Handle mailboxes related interrupts.
+ * @hw: The HW structure
+ *
+ * Called from the ISR to handle Mailbox related interrupts.
+ * HW Lock should be held across this call.
+ */
+int
+csio_mb_isr_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ __be64 *cmd;
+ uint32_t ctl, cim_cause, pl_cause;
+ int i;
+ uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
+ uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
+ int size;
+ __be64 hdr;
+ struct fw_cmd_hdr *fw_hdr;
+
+ pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
+ cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+
+ if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
+ CSIO_INC_STATS(hw, n_mbint_unexp);
+ return -EINVAL;
+ }
+
+ /*
+ * The cause registers below HAVE to be cleared in this order:
+ * the low-level (CIM) cause register first, followed by the
+ * upper-level (PL) cause register.
+ */
+ csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
+ csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
+
+ ctl = csio_rd_reg32(hw, ctl_reg);
+
+ if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
+ CSIO_DUMP_MB(hw, hw->pfn, data_reg);
+
+ if (!(ctl & MBMSGVALID)) {
+ csio_warn(hw,
+ "Stray mailbox interrupt recvd,"
+ " mailbox data not valid\n");
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+ return -EINVAL;
+ }
+
+ hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
+ fw_hdr = (struct fw_cmd_hdr *)&hdr;
+
+ switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
+ case FW_DEBUG_CMD:
+ csio_mb_debug_cmd_handler(hw);
+ return -EINVAL;
+#if 0
+ case FW_ERROR_CMD:
+ case FW_INITIALIZE_CMD: /* When we are not master */
+#endif
+ }
+
+ CSIO_ASSERT(mbp != NULL);
+
+ cmd = mbp->mb;
+ size = mbp->mb_size;
+ /* Get response */
+ for (i = 0; i < size; i += 8)
+ *cmd++ = cpu_to_be64(csio_rd_reg64
+ (hw, data_reg + i));
+
+ csio_wr_reg32(hw, 0, ctl_reg);
+ /* Flush */
+ csio_rd_reg32(hw, ctl_reg);
+
+ mbm->mcurrent = NULL;
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, &mbm->cbfn_q);
+ CSIO_INC_STATS(mbm, n_cbfnq);
+
+ /*
+ * Enqueue event to EventQ. Events processing happens
+ * in Event worker thread context
+ */
+ if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
+ CSIO_INC_STATS(hw, n_evt_drop);
+
+ return 0;
+
+ } else {
+ /*
+ * We can get here if mailbox MSIX vector is shared,
+ * or in INTx case. Or a stray interrupt.
+ */
+ csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
+ CSIO_INC_STATS(hw, n_int_stray);
+ return -EINVAL;
+ }
+}
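+
+/*
+ * Interrupt-path sketch (illustrative, assumes <linux/interrupt.h>):
+ * csio_mb_isr_handler() must run under the HW lock; a zero return
+ * means a completion was queued on cbfn_q for later processing.
+ */
+static irqreturn_t example_mb_isr(int irq, void *dev_id)
+{
+ struct csio_hw *hw = dev_id;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&hw->lock);
+ if (!csio_mb_isr_handler(hw))
+ ret = IRQ_HANDLED;
+ spin_unlock(&hw->lock);
+
+ return ret;
+}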
+
+/*
+ * csio_mb_tmo_handler - Timeout handler
+ * @hw: The HW structure
+ *
+ */
+struct csio_mb *
+csio_mb_tmo_handler(struct csio_hw *hw)
+{
+ struct csio_mbm *mbm = &hw->mbm;
+ struct csio_mb *mbp = mbm->mcurrent;
+ struct fw_cmd_hdr *fw_hdr;
+
+ /*
+ * Could be a race b/w the completion handler and the timer
+ * and the completion handler won that race.
+ */
+ if (mbp == NULL) {
+ CSIO_DB_ASSERT(0);
+ return NULL;
+ }
+
+ fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
+ FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
+
+ mbm->mcurrent = NULL;
+ CSIO_INC_STATS(mbm, n_tmo);
+ fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
+
+ return mbp;
+}
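+
+/*
+ * Timer-callback sketch (illustrative): the function registered via
+ * csio_mbm_init() would fetch the timed-out mailbox, whose retval has
+ * already been forced to FW_ETIMEDOUT, and run its completion.
+ */
+static void example_mb_timer_fn(uintptr_t data)
+{
+ struct csio_hw *hw = (struct csio_hw *)data;
+ struct csio_mb *mbp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ mbp = csio_mb_tmo_handler(hw);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (mbp && mbp->mb_cbfn)
+ mbp->mb_cbfn(hw, mbp);
+}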
+
+/*
+ * csio_mb_cancel_all - Cancel all waiting commands.
+ * @hw: The HW structure
+ * @cbfn_q: The callback queue.
+ *
+ * Caller should hold hw lock across this call.
+ */
+void
+csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
+{
+ struct csio_mb *mbp;
+ struct csio_mbm *mbm = &hw->mbm;
+ struct fw_cmd_hdr *hdr;
+ struct list_head *tmp;
+
+ if (mbm->mcurrent) {
+ mbp = mbm->mcurrent;
+
+ /* Stop mailbox completion timer */
+ del_timer_sync(&mbm->timer);
+
+ /* Add completion to tail of cbfn queue */
+ list_add_tail(&mbp->list, cbfn_q);
+ mbm->mcurrent = NULL;
+ }
+
+ if (!list_empty(&mbm->req_q)) {
+ list_splice_tail_init(&mbm->req_q, cbfn_q);
+ mbm->stats.n_activeq = 0;
+ }
+
+ if (!list_empty(&mbm->cbfn_q)) {
+ list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
+ mbm->stats.n_cbfnq = 0;
+ }
+
+ if (list_empty(cbfn_q))
+ return;
+
+ list_for_each(tmp, cbfn_q) {
+ mbp = (struct csio_mb *)tmp;
+ hdr = (struct fw_cmd_hdr *)(mbp->mb);
+
+ csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
+ hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
+
+ CSIO_INC_STATS(mbm, n_cancel);
+ hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
+ }
+}
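+
+/*
+ * Teardown-ordering sketch (illustrative): cancel under the HW lock,
+ * run the (now FW_HOSTERROR-stamped) completions with the lock
+ * dropped, then tear down the module.
+ */
+static void example_mb_teardown(struct csio_hw *hw)
+{
+ LIST_HEAD(cbfn_q);
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ csio_mb_cancel_all(hw, &cbfn_q);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ csio_mb_completions(hw, &cbfn_q);
+ csio_mbm_exit(&hw->mbm);
+}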
+
+/*
+ * csio_mbm_init - Initialize Mailbox module
+ * @mbm: Mailbox module
+ * @hw: The HW structure
+ * @timer_fn: Timer callback invoked on mailbox timeout
+ *
+ * Initialize timer and the request/response queues.
+ */
+int
+csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
+ void (*timer_fn)(uintptr_t))
+{
+ struct timer_list *timer = &mbm->timer;
+
+ init_timer(timer);
+ timer->function = timer_fn;
+ timer->data = (unsigned long)hw;
+
+ INIT_LIST_HEAD(&mbm->req_q);
+ INIT_LIST_HEAD(&mbm->cbfn_q);
+ csio_set_mb_intr_idx(mbm, -1);
+
+ return 0;
+}
+
+/*
+ * csio_mbm_exit - Uninitialize mailbox module
+ * @mbm: Mailbox module
+ *
+ * Stop timer.
+ */
+void
+csio_mbm_exit(struct csio_mbm *mbm)
+{
+ del_timer_sync(&mbm->timer);
+
+ CSIO_DB_ASSERT(mbm->mcurrent == NULL);
+ CSIO_DB_ASSERT(list_empty(&mbm->req_q));
+ CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
+}
diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h
new file mode 100644
index 000000000000..1788ea506f39
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_mb.h
@@ -0,0 +1,278 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_MB_H__
+#define __CSIO_MB_H__
+
+#include <linux/timer.h>
+#include <linux/completion.h>
+
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+#include "csio_defs.h"
+
+#define CSIO_STATS_OFFSET (2)
+#define CSIO_NUM_STATS_PER_MB (6)
+
+struct fw_fcoe_port_cmd_params {
+ uint8_t portid;
+ uint8_t idx;
+ uint8_t nstats;
+};
+
+#define CSIO_DUMP_MB(__hw, __num, __mb) \
+ csio_dbg(__hw, "\t%llx %llx %llx %llx %llx %llx %llx %llx\n", \
+ (unsigned long long)csio_rd_reg64(__hw, __mb), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 8), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 16), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 24), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 32), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 40), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 48), \
+ (unsigned long long)csio_rd_reg64(__hw, __mb + 56))
+
+#define CSIO_MB_MAX_REGS 8
+#define CSIO_MAX_MB_SIZE 64
+#define CSIO_MB_POLL_FREQ 5 /* 5 ms */
+#define CSIO_MB_DEFAULT_TMO FW_CMD_MAX_TIMEOUT
+
+/* Device master in HELLO command */
+enum csio_dev_master { CSIO_MASTER_CANT, CSIO_MASTER_MAY, CSIO_MASTER_MUST };
+
+enum csio_mb_owner { CSIO_MBOWNER_NONE, CSIO_MBOWNER_FW, CSIO_MBOWNER_PL };
+
+enum csio_dev_state {
+ CSIO_DEV_STATE_UNINIT,
+ CSIO_DEV_STATE_INIT,
+ CSIO_DEV_STATE_ERR
+};
+
+#define FW_PARAM_DEV(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+ (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+ FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)| \
+ FW_PARAMS_PARAM_Y(0) | \
+ FW_PARAMS_PARAM_Z(0))
+
+enum {
+ PAUSE_RX = 1 << 0,
+ PAUSE_TX = 1 << 1,
+ PAUSE_AUTONEG = 1 << 2
+};
+
+#define CSIO_INIT_MBP(__mbp, __cp, __tmo, __priv, __fn, __clear) \
+do { \
+ if (__clear) \
+ memset((__cp), 0, \
+ CSIO_MB_MAX_REGS * sizeof(__be64)); \
+ INIT_LIST_HEAD(&(__mbp)->list); \
+ (__mbp)->tmo = (__tmo); \
+ (__mbp)->priv = (void *)(__priv); \
+ (__mbp)->mb_cbfn = (__fn); \
+ (__mbp)->mb_size = sizeof(*(__cp)); \
+} while (0)
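+
+/*
+ * Usage sketch for CSIO_INIT_MBP() (illustrative helper, not driver
+ * code): the command pointer aliases the raw mailbox registers, and
+ * __clear zeroes all CSIO_MB_MAX_REGS before the fields are set.
+ */
+static inline void example_init_hello(struct csio_mb *mbp, void *priv,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
+
+ CSIO_INIT_MBP(mbp, cmdp, CSIO_MB_DEFAULT_TMO, priv, cbfn, 1);
+ /* ... command-specific fields of *cmdp are filled in here ... */
+}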
+
+struct csio_mbm_stats {
+ uint32_t n_req; /* number of mbox req */
+ uint32_t n_rsp; /* number of mbox rsp */
+ uint32_t n_activeq; /* number of mbox req active Q */
+ uint32_t n_cbfnq; /* number of mbox req cbfn Q */
+ uint32_t n_tmo; /* number of mbox timeout */
+ uint32_t n_cancel; /* number of mbox cancel */
+ uint32_t n_err; /* number of mbox error */
+};
+
+/* Driver version of Mailbox */
+struct csio_mb {
+ struct list_head list; /* for req/resp */
+ /* queue in driver */
+ __be64 mb[CSIO_MB_MAX_REGS]; /* MB in HW format */
+ int mb_size; /* Size of this
+ * mailbox.
+ */
+ uint32_t tmo; /* Timeout */
+ struct completion cmplobj; /* MB Completion
+ * object
+ */
+ void (*mb_cbfn) (struct csio_hw *, struct csio_mb *);
+ /* Callback fn */
+ void *priv; /* Owner private ptr */
+};
+
+struct csio_mbm {
+ uint32_t a_mbox; /* Async mbox num */
+ uint32_t intr_idx; /* Interrupt index */
+ struct timer_list timer; /* Mbox timer */
+ struct list_head req_q; /* Mbox request queue */
+ struct list_head cbfn_q; /* Mbox completion q */
+ struct csio_mb *mcurrent; /* Current mailbox */
+ uint32_t req_q_cnt; /* Outstanding mbox
+ * cmds
+ */
+ struct csio_mbm_stats stats; /* Statistics */
+};
+
+#define csio_set_mb_intr_idx(_m, _i) ((_m)->intr_idx = (_i))
+#define csio_get_mb_intr_idx(_m) ((_m)->intr_idx)
+
+struct csio_iq_params;
+struct csio_eq_params;
+
+enum fw_retval csio_mb_fw_retval(struct csio_mb *);
+
+/* MB helpers */
+void csio_mb_hello(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint32_t, uint32_t, enum csio_dev_master,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_hello_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, enum csio_dev_state *,
+ uint8_t *);
+
+void csio_mb_bye(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_reset(struct csio_hw *, struct csio_mb *, uint32_t, int, int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_params(struct csio_hw *, struct csio_mb *, uint32_t, unsigned int,
+ unsigned int, unsigned int, const u32 *, u32 *, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_params_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, unsigned int , u32 *);
+
+void csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
+ int reg);
+
+void csio_mb_caps_config(struct csio_hw *, struct csio_mb *, uint32_t,
+ bool, bool, bool, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_rss_glb_config(struct csio_hw *, struct csio_mb *,
+ uint32_t, uint8_t, unsigned int,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_pfvf(struct csio_hw *, struct csio_mb *, uint32_t,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, unsigned int, unsigned int,
+ unsigned int, void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_port(struct csio_hw *, struct csio_mb *, uint32_t,
+ uint8_t, bool, uint32_t, uint16_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_read_port_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, uint16_t *);
+
+void csio_mb_initialize(struct csio_hw *, struct csio_mb *, uint32_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_iq_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_iq_params *);
+
+void csio_mb_iq_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_iq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t, struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *, struct csio_mb *,
+ enum fw_retval *, struct csio_eq_params *);
+
+void csio_mb_eq_ofld_free(struct csio_hw *, struct csio_mb *, void *,
+ uint32_t , struct csio_eq_params *,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_res_info_init_mb(struct csio_hw *, struct csio_mb *,
+ uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_write_fcoe_link_cond_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint8_t, uint32_t, uint8_t, bool, uint32_t,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t , uint16_t,
+ uint8_t [8], uint8_t [8],
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_read_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t , uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_vnp_free_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t , uint32_t, uint32_t ,
+ void (*) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_fcf_init_mb(struct csio_lnode *, struct csio_mb *,
+ uint32_t, uint32_t, uint32_t,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *));
+
+void csio_fcoe_read_portparams_init_mb(struct csio_hw *hw,
+ struct csio_mb *mbp, uint32_t mb_tmo,
+ struct fw_fcoe_port_cmd_params *portparams,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *));
+
+void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp,
+ enum fw_retval *retval,
+ struct fw_fcoe_port_cmd_params *portparams,
+ struct fw_fcoe_port_stats *portstats);
+
+/* MB module functions */
+int csio_mbm_init(struct csio_mbm *, struct csio_hw *,
+ void (*)(uintptr_t));
+void csio_mbm_exit(struct csio_mbm *);
+void csio_mb_intr_enable(struct csio_hw *);
+void csio_mb_intr_disable(struct csio_hw *);
+
+int csio_mb_issue(struct csio_hw *, struct csio_mb *);
+void csio_mb_completions(struct csio_hw *, struct list_head *);
+int csio_mb_fwevt_handler(struct csio_hw *, __be64 *);
+int csio_mb_isr_handler(struct csio_hw *);
+struct csio_mb *csio_mb_tmo_handler(struct csio_hw *);
+void csio_mb_cancel_all(struct csio_hw *, struct list_head *);
+
+#endif /* ifndef __CSIO_MB_H__ */
diff --git a/drivers/scsi/csiostor/csio_rnode.c b/drivers/scsi/csiostor/csio_rnode.c
new file mode 100644
index 000000000000..51c6a388de2b
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.c
@@ -0,0 +1,913 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/string.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_els.h>
+#include <scsi/fc/fc_fs.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+
+static int csio_rnode_init(struct csio_rnode *, struct csio_lnode *);
+static void csio_rnode_exit(struct csio_rnode *);
+
+/* State machine forward declarations */
+static void csio_rns_uninit(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_ready(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_offline(struct csio_rnode *, enum csio_rn_ev);
+static void csio_rns_disappeared(struct csio_rnode *, enum csio_rn_ev);
+
+/* RNF event mapping */
+static enum csio_rn_ev fwevt_to_rnevt[] = {
+ CSIO_RNFE_NONE, /* None */
+ CSIO_RNFE_LOGGED_IN, /* PLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PLOGI_RJT_RCVD */
+ CSIO_RNFE_PLOGI_RECV, /* PLOGI_RCVD */
+ CSIO_RNFE_LOGO_RECV, /* PLOGO_RCVD */
+ CSIO_RNFE_PRLI_DONE, /* PRLI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* PRLI_RJT_RCVD */
+ CSIO_RNFE_PRLI_RECV, /* PRLI_RCVD */
+ CSIO_RNFE_PRLO_RECV, /* PRLO_RCVD */
+ CSIO_RNFE_NONE, /* NPORT_ID_CHGD */
+ CSIO_RNFE_LOGO_RECV, /* FLOGO_RCVD */
+ CSIO_RNFE_NONE, /* CLR_VIRT_LNK_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FLOGI_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_RJT_RCVD */
+ CSIO_RNFE_LOGGED_IN, /* FDISC_ACC_RCVD */
+ CSIO_RNFE_NONE, /* FDISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* FLOGI_TMO_MAX_RETRY */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_ACC */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_RJT */
+ CSIO_RNFE_NONE, /* IMPL_LOGO_ADISC_CNFLT */
+ CSIO_RNFE_NONE, /* PRLI_TMO */
+ CSIO_RNFE_NONE, /* ADISC_TMO */
+ CSIO_RNFE_NAME_MISSING, /* RSCN_DEV_LOST */
+ CSIO_RNFE_NONE, /* SCR_ACC_RCVD */
+ CSIO_RNFE_NONE, /* ADISC_RJT_RCVD */
+ CSIO_RNFE_NONE, /* LOGO_SNT */
+ CSIO_RNFE_LOGO_RECV, /* PROTO_ERR_IMPL_LOGO */
+};
+
+#define CSIO_FWE_TO_RNFE(_evt) (((_evt) > PROTO_ERR_IMPL_LOGO) ? \
+ CSIO_RNFE_NONE : \
+ fwevt_to_rnevt[(_evt)])
+
+int
+csio_is_rnode_ready(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_ready);
+}
+
+static int
+csio_is_rnode_uninit(struct csio_rnode *rn)
+{
+ return csio_match_state(rn, csio_rns_uninit);
+}
+
+static int
+csio_is_rnode_wka(uint8_t rport_type)
+{
+ if ((rport_type == FLOGI_VFPORT) ||
+ (rport_type == FDISC_VFPORT) ||
+ (rport_type == NS_VNPORT) ||
+ (rport_type == FDMI_VNPORT))
+ return 1;
+
+ return 0;
+}
+
+/*
+ * csio_rn_lookup - Finds the rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flowid.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching entry
+ * is found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->flowid == flowid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+/*
+ * csio_rn_lookup_wwpn - Finds the rnode with the given wwpn
+ * @ln: lnode
+ * @wwpn: wwpn
+ *
+ * Does the rnode lookup on the given lnode and wwpn. If no matching entry
+ * found, NULL is returned.
+ */
+static struct csio_rnode *
+csio_rn_lookup_wwpn(struct csio_lnode *ln, uint8_t *wwpn)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (!memcmp(csio_rn_wwpn(rn), wwpn, 8))
+ return rn;
+ }
+
+ return NULL;
+}
+
+/**
+ * csio_rnode_lookup_portid - Finds the rnode with the given portid
+ * @ln: lnode
+ * @portid: port id
+ *
+ * Lookup the rnode list for a given portid. If no matching entry
+ * found, NULL is returned.
+ */
+struct csio_rnode *
+csio_rnode_lookup_portid(struct csio_lnode *ln, uint32_t portid)
+{
+ struct csio_rnode *rnhead = (struct csio_rnode *) &ln->rnhead;
+ struct list_head *tmp;
+ struct csio_rnode *rn;
+
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+ rn = (struct csio_rnode *) tmp;
+ if (rn->nport_id == portid)
+ return rn;
+ }
+
+ return NULL;
+}
+
+static int
+csio_rn_dup_flowid(struct csio_lnode *ln, uint32_t rdev_flowid,
+ uint32_t *vnp_flowid)
+{
+ struct csio_rnode *rnhead;
+ struct list_head *tmp, *tmp1;
+ struct csio_rnode *rn;
+ struct csio_lnode *ln_tmp;
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ list_for_each(tmp1, &hw->sln_head) {
+ ln_tmp = (struct csio_lnode *) tmp1;
+ if (ln_tmp == ln)
+ continue;
+
+ rnhead = (struct csio_rnode *)&ln_tmp->rnhead;
+ list_for_each(tmp, &rnhead->sm.sm_list) {
+
+ rn = (struct csio_rnode *) tmp;
+ if (csio_is_rnode_ready(rn)) {
+ if (rn->flowid == rdev_flowid) {
+ *vnp_flowid = csio_ln_flowid(ln_tmp);
+ return 1;
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct csio_rnode *
+csio_alloc_rnode(struct csio_lnode *ln)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ struct csio_rnode *rn = mempool_alloc(hw->rnode_mempool, GFP_ATOMIC);
+ if (!rn)
+ goto err;
+
+ memset(rn, 0, sizeof(struct csio_rnode));
+ if (csio_rnode_init(rn, ln))
+ goto err_free;
+
+ CSIO_INC_STATS(ln, n_rnode_alloc);
+
+ return rn;
+
+err_free:
+ mempool_free(rn, hw->rnode_mempool);
+err:
+ CSIO_INC_STATS(ln, n_rnode_nomem);
+ return NULL;
+}
+
+static void
+csio_free_rnode(struct csio_rnode *rn)
+{
+ struct csio_hw *hw = csio_lnode_to_hw(csio_rnode_to_lnode(rn));
+
+ csio_rnode_exit(rn);
+ CSIO_INC_STATS(rn->lnp, n_rnode_free);
+ mempool_free(rn, hw->rnode_mempool);
+}
+
+/*
+ * csio_get_rnode - Gets rnode with the given flowid
+ * @ln - lnode
+ * @flowid - flow id.
+ *
+ * Does the rnode lookup on the given lnode and flowid. If no matching
+ * rnode is found, a new rnode with the given flowid is allocated and returned.
+ */
+static struct csio_rnode *
+csio_get_rnode(struct csio_lnode *ln, uint32_t flowid)
+{
+ struct csio_rnode *rn;
+
+ rn = csio_rn_lookup(ln, flowid);
+ if (!rn) {
+ rn = csio_alloc_rnode(ln);
+ if (!rn)
+ return NULL;
+
+ rn->flowid = flowid;
+ }
+
+ return rn;
+}
+
+/*
+ * csio_put_rnode - Frees the given rnode
+ * @ln - lnode
+ * @rn - rnode to be freed
+ *
+ * Returns the given rnode, which must be in the uninit state, to the mempool.
+ */
+void
+csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn)
+{
+ CSIO_DB_ASSERT(csio_is_rnode_uninit(rn) != 0);
+ csio_free_rnode(rn);
+}
+
+/*
+ * csio_confirm_rnode - confirms rnode based on wwpn.
+ * @ln: lnode
+ * @rdev_flowid: remote device flowid
+ * @rdevp: remote device params
+ *
+ * This routine searches the rnode list for an entry with the same wwpn as
+ * the new rnode. If a match is found, the matched rnode is returned;
+ * otherwise a new rnode is allocated and returned.
+ */
+struct csio_rnode *
+csio_confirm_rnode(struct csio_lnode *ln, uint32_t rdev_flowid,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t rport_type;
+ struct csio_rnode *rn, *match_rn;
+ uint32_t vnp_flowid;
+ __be32 *port_id;
+
+ port_id = (__be32 *)&rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+
+ /* Drop rdev event for cntrl port */
+ if (rport_type == FAB_CTLR_VNPORT) {
+ csio_ln_dbg(ln,
+ "Unhandled rport_type:%d recv in rdev evt "
+ "ssni:x%x\n", rport_type, rdev_flowid);
+ return NULL;
+ }
+
+ /* Lookup on flowid */
+ rn = csio_rn_lookup(ln, rdev_flowid);
+ if (!rn) {
+
+ /* Drop events with duplicate flowid */
+ if (csio_rn_dup_flowid(ln, rdev_flowid, &vnp_flowid)) {
+ csio_ln_warn(ln,
+ "ssni:%x already active on vnpi:%x",
+ rdev_flowid, vnp_flowid);
+ return NULL;
+ }
+
+ /* Lookup on wwpn for NPORTs */
+ rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (!rn)
+ goto alloc_rnode;
+
+ } else {
+ /* Lookup well-known ports with nport id */
+ if (csio_is_rnode_wka(rport_type)) {
+ match_rn = csio_rnode_lookup_portid(ln,
+ ((ntohl(*port_id) >> 8) & CSIO_DID_MASK));
+ if (match_rn == NULL) {
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /*
+ * Now compare the wwpn to confirm that the
+ * same port re-logged in. If so, update the
+ * matched rn; else allocate a new rnode.
+ */
+ if (!memcmp(csio_rn_wwpn(match_rn), rdevp->wwpn, 8)) {
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already"
+ "active ssni:x%x\n",
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+
+ /* Update rn */
+ goto found_rnode;
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+
+ /* wwpn match */
+ if (!memcmp(csio_rn_wwpn(rn), rdevp->wwpn, 8))
+ goto found_rnode;
+
+ /* Search for rnode that have same wwpn */
+ match_rn = csio_rn_lookup_wwpn(ln, rdevp->wwpn);
+ if (match_rn != NULL) {
+ csio_ln_dbg(ln,
+ "ssni:x%x changed for rport name(wwpn):%llx "
+ "did:x%x\n", rdev_flowid,
+ wwn_to_u64(rdevp->wwpn),
+ match_rn->nport_id);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ rn = match_rn;
+ } else {
+ csio_ln_dbg(ln,
+ "rnode wwpn mismatch found ssni:x%x "
+ "name(wwpn):%llx\n",
+ rdev_flowid,
+ wwn_to_u64(csio_rn_wwpn(rn)));
+ if (csio_is_rnode_ready(rn)) {
+ csio_ln_warn(ln,
+ "rnode is already active "
+ "wwpn:%llx ssni:x%x\n",
+ wwn_to_u64(csio_rn_wwpn(rn)),
+ rdev_flowid);
+ CSIO_ASSERT(0);
+ }
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+ goto alloc_rnode;
+ }
+ }
+
+found_rnode:
+ csio_ln_dbg(ln, "found rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* Update flowid */
+ csio_rn_flowid(rn) = rdev_flowid;
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ CSIO_INC_STATS(ln, n_rnode_match);
+ return rn;
+
+alloc_rnode:
+ rn = csio_get_rnode(ln, rdev_flowid);
+ if (!rn)
+ return NULL;
+
+ csio_ln_dbg(ln, "alloc rnode:%p ssni:x%x name(wwpn):%llx\n",
+ rn, rdev_flowid, wwn_to_u64(rdevp->wwpn));
+
+ /* update rdev entry */
+ rn->rdev_entry = rdevp;
+ return rn;
+}
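+
+/*
+ * Caller sketch for the rdev-event path (illustrative): confirm or
+ * allocate the rnode for the FW flowid, then feed the FW event cause
+ * into its state machine.
+ */
+static void example_handle_rdev_evt(struct csio_lnode *ln,
+ struct fcoe_rdev_entry *rdevp,
+ uint32_t flowid)
+{
+ struct csio_rnode *rn;
+
+ rn = csio_confirm_rnode(ln, flowid, rdevp);
+ if (!rn)
+ return; /* dropped, duplicate, or no memory */
+
+ csio_rnode_fwevt_handler(rn, rdevp->event_cause);
+}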
+
+/*
+ * csio_rn_verify_rparams - verify rparams.
+ * @ln: lnode
+ * @rn: rnode
+ * @rdevp: remote device params
+ * returns success if rparams are verified.
+ */
+static int
+csio_rn_verify_rparams(struct csio_lnode *ln, struct csio_rnode *rn,
+ struct fcoe_rdev_entry *rdevp)
+{
+ uint8_t null[8];
+ uint8_t rport_type;
+ uint8_t fc_class;
+ __be32 *did;
+
+ did = (__be32 *) &rdevp->r_id[0];
+ rport_type =
+ FW_RDEV_WR_RPORT_TYPE_GET(rdevp->rd_xfer_rdy_to_rport_type);
+ switch (rport_type) {
+ case FLOGI_VFPORT:
+ rn->role = CSIO_RNFR_FABRIC;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_FLOGI) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ /* NPIV support */
+ if (FW_RDEV_WR_NPIV_GET(rdevp->vft_to_qos))
+ ln->flags |= CSIO_LNF_NPIVSUPP;
+
+ break;
+
+ case NS_VNPORT:
+ rn->role = CSIO_RNFR_NS;
+ if (((ntohl(*did) >> 8) & CSIO_DID_MASK) != FC_FID_DIR_SERV) {
+ csio_ln_err(ln, "ssni:x%x invalid fabric portid\n",
+ csio_rn_flowid(rn));
+ return -EINVAL;
+ }
+ break;
+
+ case REG_FC4_VNPORT:
+ case REG_VNPORT:
+ rn->role = CSIO_RNFR_NPORT;
+ if (rdevp->event_cause == PRLI_ACC_RCVD ||
+ rdevp->event_cause == PRLI_RCVD) {
+ if (FW_RDEV_WR_TASK_RETRY_ID_GET(
+ rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_OVLY_ALLOW;
+
+ if (FW_RDEV_WR_RETRY_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_RETRY;
+
+ if (FW_RDEV_WR_CONF_CMPL_GET(rdevp->enh_disc_to_tgt))
+ rn->fcp_flags |= FCP_SPPF_CONF_COMPL;
+
+ if (FW_RDEV_WR_TGT_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_TARGET;
+
+ if (FW_RDEV_WR_INI_GET(rdevp->enh_disc_to_tgt))
+ rn->role |= CSIO_RNFR_INITIATOR;
+ }
+
+ break;
+
+ case FDMI_VNPORT:
+ case FAB_CTLR_VNPORT:
+ rn->role = 0;
+ break;
+
+ default:
+ csio_ln_err(ln, "ssni:x%x invalid rport type recv x%x\n",
+ csio_rn_flowid(rn), rport_type);
+ return -EINVAL;
+ }
+
+ /* validate wwpn/wwnn for Name server/remote port */
+ if (rport_type == REG_VNPORT || rport_type == NS_VNPORT) {
+ memset(null, 0, 8);
+ if (!memcmp(rdevp->wwnn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwnn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ if (!memcmp(rdevp->wwpn, null, 8)) {
+ csio_ln_err(ln,
+ "ssni:x%x invalid wwpn received from"
+ " rport did:x%x\n",
+ csio_rn_flowid(rn),
+ (ntohl(*did) & CSIO_DID_MASK));
+ return -EINVAL;
+ }
+
+ }
+
+ /* Copy wwnn, wwpn and nport id */
+ rn->nport_id = (ntohl(*did) >> 8) & CSIO_DID_MASK;
+ memcpy(csio_rn_wwnn(rn), rdevp->wwnn, 8);
+ memcpy(csio_rn_wwpn(rn), rdevp->wwpn, 8);
+ rn->rn_sparm.csp.sp_bb_data = rdevp->rcv_fr_sz;
+ fc_class = FW_RDEV_WR_CLASS_GET(rdevp->vft_to_qos);
+ rn->rn_sparm.clsp[fc_class - 1].cp_class = htons(FC_CPC_VALID);
+
+ return 0;
+}
+
+static void
+__csio_reg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ spin_unlock_irq(&hw->lock);
+ csio_reg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ if (rn->role & CSIO_RNFR_TARGET)
+ ln->n_scsi_tgts++;
+
+ if (rn->nport_id == FC_FID_MGMT_SERV)
+ csio_ln_fdmi_start(ln, (void *) rn);
+}
+
+static void
+__csio_unreg_rnode(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ LIST_HEAD(tmp_q);
+ int cmpl = 0;
+
+ if (!list_empty(&rn->host_cmpl_q)) {
+ csio_dbg(hw, "Returning completion queue I/Os\n");
+ list_splice_tail_init(&rn->host_cmpl_q, &tmp_q);
+ cmpl = 1;
+ }
+
+ if (rn->role & CSIO_RNFR_TARGET) {
+ ln->n_scsi_tgts--;
+ ln->last_scan_ntgts--;
+ }
+
+ spin_unlock_irq(&hw->lock);
+ csio_unreg_rnode(rn);
+ spin_lock_irq(&hw->lock);
+
+ /* Cleanup I/Os that were waiting for rnode to unregister */
+ if (cmpl)
+ csio_scsi_cleanup_io_q(csio_hw_to_scsim(hw), &tmp_q);
+
+
+/*****************************************************************************/
+/* START: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rns_uninit -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_uninit(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ }
+ break;
+ case CSIO_RNFE_LOGO_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_ready -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_ready(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[ready]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_PRLI_DONE:
+ case CSIO_RNFE_PRLI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret)
+ __csio_reg_rnode(rn);
+ else
+ CSIO_INC_STATS(rn, n_err_inval);
+
+ break;
+ case CSIO_RNFE_DOWN:
+ csio_set_state(&rn->sm, csio_rns_offline);
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_LOGO_RECV:
+ csio_set_state(&rn->sm, csio_rns_offline);
+
+ __csio_unreg_rnode(rn);
+
+ /* FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /*
+ * Each rnode receives CLOSE event when driver is removed or
+ * device is reset
+ * Note: All outstanding IOs on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ __csio_unreg_rnode(rn);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ __csio_unreg_rnode(rn);
+
+ /*
+ * FW is expected to internally abort outstanding SCSI WRs
+ * and return all SCSI WRs to the host with status "ABORTED".
+ */
+
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[uninit]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_offline -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_offline(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_DOWN:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_drop);
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives CLOSE event when driver is removed or
+ * device is reset
+ * Note: All outstanding IOs on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_NAME_MISSING:
+ csio_set_state(&rn->sm, csio_rns_disappeared);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did:x%x "
+ "in rn state[offline]\n", csio_rn_flowid(rn), evt,
+ rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*
+ * csio_rns_disappeared -
+ * @rn - rnode
+ * @evt - SM event.
+ *
+ */
+static void
+csio_rns_disappeared(struct csio_rnode *rn, enum csio_rn_ev evt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ int ret = 0;
+
+ CSIO_INC_STATS(rn, n_evt_sm[evt]);
+
+ switch (evt) {
+ case CSIO_RNFE_LOGGED_IN:
+ case CSIO_RNFE_PLOGI_RECV:
+ ret = csio_rn_verify_rparams(ln, rn, rn->rdev_entry);
+ if (!ret) {
+ csio_set_state(&rn->sm, csio_rns_ready);
+ __csio_reg_rnode(rn);
+ } else {
+ CSIO_INC_STATS(rn, n_err_inval);
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+ }
+ break;
+
+ case CSIO_RNFE_CLOSE:
+ /* Each rnode receives CLOSE event when driver is removed or
+ * device is reset.
+ * Note: All outstanding IOs on the remote port need to be
+ * returned to the upper layer with an appropriate error
+ * before sending the CLOSE event.
+ */
+ csio_set_state(&rn->sm, csio_rns_uninit);
+ break;
+
+ case CSIO_RNFE_DOWN:
+ case CSIO_RNFE_NAME_MISSING:
+ csio_ln_dbg(ln,
+ "ssni:x%x Ignoring event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ break;
+
+ default:
+ csio_ln_dbg(ln,
+ "ssni:x%x unexp event %d recv from did x%x"
+ "in rn state[disappeared]\n", csio_rn_flowid(rn),
+ evt, rn->nport_id);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ break;
+ }
+}
+
+/*****************************************************************************/
+/* END: Rnode SM */
+/*****************************************************************************/
+
+/*
+ * csio_rnode_devloss_handler - Device loss event handler
+ * @rn: rnode
+ *
+ * Post event to close rnode SM and free rnode.
+ */
+void
+csio_rnode_devloss_handler(struct csio_rnode *rn)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+
+ /* ignore if the same rnode came back online */
+ if (csio_is_rnode_ready(rn))
+ return;
+
+ csio_post_event(&rn->sm, CSIO_RNFE_CLOSE);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/**
+ * csio_rnode_fwevt_handler - Event handler for firmware rnode events.
+ * @rn: rnode
+ * @fwevt: firmware event
+ *
+ */
+void
+csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt)
+{
+ struct csio_lnode *ln = csio_rnode_to_lnode(rn);
+ enum csio_rn_ev evt;
+
+ evt = CSIO_FWE_TO_RNFE(fwevt);
+ if (!evt) {
+ csio_ln_err(ln, "ssni:x%x Unhandled FW Rdev event: %d\n",
+ csio_rn_flowid(rn), fwevt);
+ CSIO_INC_STATS(rn, n_evt_unexp);
+ return;
+ }
+ CSIO_INC_STATS(rn, n_evt_fw[fwevt]);
+
+ /* Track previous & current events for debugging */
+ rn->prev_evt = rn->cur_evt;
+ rn->cur_evt = fwevt;
+
+ /* Post event to rnode SM */
+ csio_post_event(&rn->sm, evt);
+
+ /* Free rn if in uninit state */
+ if (csio_is_rnode_uninit(rn))
+ csio_put_rnode(ln, rn);
+}
+
+/*
+ * csio_rnode_init - Initialize rnode.
+ * @rn: RNode
+ * @ln: Associated lnode
+ *
+ * Caller is responsible for holding the lock. The lock is required
+ * to be held for inserting the rnode in ln->rnhead list.
+ */
+static int
+csio_rnode_init(struct csio_rnode *rn, struct csio_lnode *ln)
+{
+ csio_rnode_to_lnode(rn) = ln;
+ csio_init_state(&rn->sm, csio_rns_uninit);
+ INIT_LIST_HEAD(&rn->host_cmpl_q);
+ csio_rn_flowid(rn) = CSIO_INVALID_IDX;
+
+ /* Add rnode to list of lnodes->rnhead */
+ list_add_tail(&rn->sm.sm_list, &ln->rnhead);
+
+ return 0;
+}
+
+static void
+csio_rnode_exit(struct csio_rnode *rn)
+{
+ list_del_init(&rn->sm.sm_list);
+ CSIO_DB_ASSERT(list_empty(&rn->host_cmpl_q));
+}
diff --git a/drivers/scsi/csiostor/csio_rnode.h b/drivers/scsi/csiostor/csio_rnode.h
new file mode 100644
index 000000000000..a3b434c801da
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_rnode.h
@@ -0,0 +1,141 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_RNODE_H__
+#define __CSIO_RNODE_H__
+
+#include "csio_defs.h"
+
+/* State machine events */
+enum csio_rn_ev {
+ CSIO_RNFE_NONE = (uint32_t)0, /* None */
+ CSIO_RNFE_LOGGED_IN, /* [N/F]Port login
+ * complete.
+ */
+ CSIO_RNFE_PRLI_DONE, /* PRLI completed */
+ CSIO_RNFE_PLOGI_RECV, /* Received PLOGI */
+ CSIO_RNFE_PRLI_RECV, /* Received PRLI */
+ CSIO_RNFE_LOGO_RECV, /* Received LOGO */
+ CSIO_RNFE_PRLO_RECV, /* Received PRLO */
+ CSIO_RNFE_DOWN, /* Rnode is down */
+ CSIO_RNFE_CLOSE, /* Close rnode */
+ CSIO_RNFE_NAME_MISSING, /* Rnode name missing
+ * in name server.
+ */
+ CSIO_RNFE_MAX_EVENT,
+};
+
+/* rnode stats */
+struct csio_rnode_stats {
+ uint32_t n_err; /* error */
+ uint32_t n_err_inval; /* invalid parameter */
+ uint32_t n_err_nomem; /* error nomem */
+ uint32_t n_evt_unexp; /* unexpected event */
+ uint32_t n_evt_drop; /* dropped event */
+ uint32_t n_evt_fw[RSCN_DEV_LOST]; /* fw events */
+ enum csio_rn_ev n_evt_sm[CSIO_RNFE_MAX_EVENT]; /* State m/c events */
+ uint32_t n_lun_rst; /* Number of resets
+ * of LUNs under this
+ * target
+ */
+ uint32_t n_lun_rst_fail; /* Number of LUN reset
+ * failures.
+ */
+ uint32_t n_tgt_rst; /* Number of target resets */
+ uint32_t n_tgt_rst_fail; /* Number of target reset
+ * failures.
+ */
+};
+
+/* Defines for rnode role */
+#define CSIO_RNFR_INITIATOR 0x1
+#define CSIO_RNFR_TARGET 0x2
+#define CSIO_RNFR_FABRIC 0x4
+#define CSIO_RNFR_NS 0x8
+#define CSIO_RNFR_NPORT 0x10
+
+struct csio_rnode {
+ struct csio_sm sm; /* State machine -
+ * should be the
+ * 1st member
+ */
+ struct csio_lnode *lnp; /* Pointer to owning
+ * Lnode */
+ uint32_t flowid; /* Firmware ID */
+ struct list_head host_cmpl_q; /* SCSI IOs
+ * pending completion
+ * to the mid-layer.
+ */
+ /* FC identifiers for remote node */
+ uint32_t nport_id;
+ uint16_t fcp_flags; /* FCP Flags */
+ uint8_t cur_evt; /* Current event */
+ uint8_t prev_evt; /* Previous event */
+ uint32_t role; /* Fabric/Target/
+ * Initiator/NS
+ */
+ struct fcoe_rdev_entry *rdev_entry; /* Rdev entry */
+ struct csio_service_parms rn_sparm;
+
+ /* FC transport attributes */
+ struct fc_rport *rport; /* FC transport rport */
+ uint32_t supp_classes; /* Supported FC classes */
+ uint32_t maxframe_size; /* Max Frame size */
+ uint32_t scsi_id; /* Transport given SCSI id */
+
+ struct csio_rnode_stats stats; /* Common rnode stats */
+};
+
+#define csio_rn_flowid(rn) ((rn)->flowid)
+#define csio_rn_wwpn(rn) ((rn)->rn_sparm.wwpn)
+#define csio_rn_wwnn(rn) ((rn)->rn_sparm.wwnn)
+#define csio_rnode_to_lnode(rn) ((rn)->lnp)
+
+int csio_is_rnode_ready(struct csio_rnode *rn);
+void csio_rnode_state_to_str(struct csio_rnode *rn, int8_t *str);
+
+struct csio_rnode *csio_rnode_lookup_portid(struct csio_lnode *, uint32_t);
+struct csio_rnode *csio_confirm_rnode(struct csio_lnode *,
+ uint32_t, struct fcoe_rdev_entry *);
+
+void csio_rnode_fwevt_handler(struct csio_rnode *rn, uint8_t fwevt);
+
+void csio_put_rnode(struct csio_lnode *ln, struct csio_rnode *rn);
+
+void csio_reg_rnode(struct csio_rnode *);
+void csio_unreg_rnode(struct csio_rnode *);
+
+void csio_rnode_devloss_handler(struct csio_rnode *);
+
+#endif /* ifndef __CSIO_RNODE_H__ */
diff --git a/drivers/scsi/csiostor/csio_scsi.c b/drivers/scsi/csiostor/csio_scsi.c
new file mode 100644
index 000000000000..ddd38e5eb0e7
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.c
@@ -0,0 +1,2555 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/ctype.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/export.h>
+#include <linux/module.h>
+#include <asm/unaligned.h>
+#include <asm/page.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_transport_fc.h>
+
+#include "csio_hw.h"
+#include "csio_lnode.h"
+#include "csio_rnode.h"
+#include "csio_scsi.h"
+#include "csio_init.h"
+
+int csio_scsi_eqsize = 65536;
+int csio_scsi_iqlen = 128;
+int csio_scsi_ioreqs = 2048;
+uint32_t csio_max_scan_tmo;
+uint32_t csio_delta_scan_tmo = 5;
+int csio_lun_qdepth = 32;
+
+static int csio_ddp_descs = 128;
+
+static int csio_do_abrt_cls(struct csio_hw *,
+ struct csio_ioreq *, bool);
+
+static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev);
+static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev);
+
+/*
+ * csio_scsi_match_io - Match an ioreq with the given SCSI level data.
+ * @ioreq: The I/O request
+ * @sld: Level information
+ *
+ * Should be called with lock held.
+ *
+ */
+static bool
+csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
+{
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);
+
+ switch (sld->level) {
+ case CSIO_LEV_LUN:
+ if (scmnd == NULL)
+ return false;
+
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode) &&
+ ((uint64_t)scmnd->device->lun == sld->oslun));
+
+ case CSIO_LEV_RNODE:
+ return ((ioreq->lnode == sld->lnode) &&
+ (ioreq->rnode == sld->rnode));
+ case CSIO_LEV_LNODE:
+ return (ioreq->lnode == sld->lnode);
+ case CSIO_LEV_ALL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*
+ * csio_scsi_gather_active_ios - Gather active I/Os based on level
+ * @scm: SCSI module
+ * @sld: Level information
+ * @dest: The queue where these I/Os have to be gathered.
+ *
+ * Should be called with lock held.
+ */
+static void
+csio_scsi_gather_active_ios(struct csio_scsim *scm,
+ struct csio_scsi_level_data *sld,
+ struct list_head *dest)
+{
+ struct list_head *tmp, *next;
+
+ if (list_empty(&scm->active_q))
+ return;
+
+ /* Just splice the entire active_q into dest */
+ if (sld->level == CSIO_LEV_ALL) {
+ list_splice_tail_init(&scm->active_q, dest);
+ return;
+ }
+
+ list_for_each_safe(tmp, next, &scm->active_q) {
+ if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
+ list_del_init(tmp);
+ list_add_tail(tmp, dest);
+ }
+ }
+}
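+
+/*
+ * Usage sketch (illustrative): gather every active I/O belonging to
+ * one remote node. The SCSI module lock is assumed held, as the
+ * helpers above require.
+ */
+static void example_gather_rnode_ios(struct csio_scsim *scm,
+ struct csio_lnode *ln,
+ struct csio_rnode *rn,
+ struct list_head *dest)
+{
+ struct csio_scsi_level_data sld;
+
+ sld.level = CSIO_LEV_RNODE;
+ sld.lnode = ln;
+ sld.rnode = rn;
+
+ csio_scsi_gather_active_ios(scm, &sld, dest);
+}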
+
+static inline bool
+csio_scsi_itnexus_loss_error(uint16_t error)
+{
+ switch (error) {
+ case FW_ERR_LINK_DOWN:
+ case FW_RDEV_NOT_READY:
+ case FW_ERR_RDEV_LOST:
+ case FW_ERR_RDEV_LOGO:
+ case FW_ERR_RDEV_IMPL_LOGO:
+ return 1;
+ }
+ return 0;
+}
+
+static inline void
+csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
+ uint8_t oq, uint8_t sq)
+{
+ char stag[2];
+
+ if (scsi_populate_tag_msg(scmnd, stag)) {
+ switch (stag[0]) {
+ case HEAD_OF_QUEUE_TAG:
+ *tag = hq;
+ break;
+ case ORDERED_QUEUE_TAG:
+ *tag = oq;
+ break;
+ default:
+ *tag = sq;
+ break;
+ }
+ } else {
+ *tag = 0;
+ }
+}
+
+/*
+ * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ *
+ * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
+ */
+static inline void
+csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
+{
+ struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ /* Check for Task Management */
+ if (likely(scmnd->SCp.Message == 0)) {
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = 0;
+ fcp_cmnd->fc_cmdref = 0;
+ fcp_cmnd->fc_pri_ta = 0;
+
+ memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
+ csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
+ FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
+ fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE)
+ fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
+ else
+ fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
+ } else {
+ fcp_cmnd->fc_flags = 0;
+ }
+ } else {
+ memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
+ int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
+ fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
+ }
+}
+
+/*
+ * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry)
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
+ FW_SCSI_CMD_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ wr->r3 = 0;
+ memset(&wr->r5, 0, 8);
+
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r6 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r4_lo[0] = 0;
+ wr->u.fcoe.r4_lo[1] = 0;
+
+ /* Frame a FCP command */
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
+ sizeof(struct fw_scsi_cmd_wr)));
+}
+
+#define CSIO_SCSI_CMD_WR_SZ(_imm) \
+ (sizeof(struct fw_scsi_cmd_wr) + /* WR size */ \
+ ALIGN((_imm), 16)) /* Immed data */
+
+#define CSIO_SCSI_CMD_WR_SZ_16(_imm) \
+ (ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))
+
+/*
+ * csio_scsi_cmd - Create a SCSI CMD WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the ingress queue and initializes it with SCSI CMD WR.
+ *
+ */
+static inline void
+csio_scsi_cmd(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (unlikely(req->drv_status != 0))
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_cmd_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*
+ * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
+ * @hw: HW module
+ * @req: IO request
+ * @sgl: ULP TX SGL pointer.
+ *
+ */
+static inline void
+csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
+ struct ulptx_sgl *sgl)
+{
+ struct ulptx_sge_pair *sge_pair = NULL;
+ struct scatterlist *sgel;
+ uint32_t i = 0;
+ uint32_t xfer_len;
+ struct list_head *tmp;
+ struct csio_dma_buf *dma_buf;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
+ ULPTX_NSGE(req->nsge));
+ /* Now add the data SGLs */
+ if (likely(!req->dcopy)) {
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
+ sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ continue;
+ }
+ if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[1] = cpu_to_be32(
+ sg_dma_len(sgel));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(
+ sg_dma_address(sgel));
+ sge_pair->len[0] = cpu_to_be32(
+ sg_dma_len(sgel));
+ }
+ }
+ } else {
+ /* Program sg elements with driver's DDP buffer */
+ xfer_len = scsi_bufflen(scmnd);
+ list_for_each(tmp, &req->gen_list) {
+ dma_buf = (struct csio_dma_buf *)tmp;
+ if (i == 0) {
+ sgl->addr0 = cpu_to_be64(dma_buf->paddr);
+ sgl->len0 = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
+ } else if ((i - 1) & 0x1) {
+ sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[1] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ sge_pair++;
+ } else {
+ sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
+ sge_pair->len[0] = cpu_to_be32(
+ min(xfer_len, dma_buf->len));
+ }
+ xfer_len -= min(xfer_len, dma_buf->len);
+ i++;
+ }
+ }
+}
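+
+/*
+ * For reference, the DSGL layout the loop above produces for
+ * nsge = 5 (a sketch of the resulting structure):
+ *
+ *   struct ulptx_sgl      { cmd_nsge, len0, addr0 }    <- SG[0]
+ *   struct ulptx_sge_pair { len[0..1], addr[0..1] }    <- SG[1], SG[2]
+ *   struct ulptx_sge_pair { len[0..1], addr[0..1] }    <- SG[3], SG[4]
+ *
+ * Hence the (i - 1) & 0x1 test: entry i fills slot [0] of the current
+ * pair when i is odd, and slot [1] (advancing the pair pointer) when
+ * i is even.
+ */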
+
+/*
+ * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
+ *
+ * Wrapper for populating fw_scsi_read_wr.
+ */
+static inline void
+csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
+ FW_SCSI_READ_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/*
+ * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
+ * @req: IO req structure.
+ * @wrp: DMA location to place the payload.
+ * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL).
+ *
+ * Wrapper for populating fw_scsi_write_wr.
+ */
+static inline void
+csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
+ struct ulptx_sgl *sgl;
+ struct csio_dma_buf *dma_buf;
+ uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
+ FW_SCSI_WRITE_WR_IMMDLEN(imm));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
+ wr->cookie = (uintptr_t)req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t)(req->tmo);
+ wr->use_xfer_cnt = 1;
+ wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
+ /* Get RSP DMA buffer */
+ dma_buf = &req->dma_buf;
+
+ /* Prepare RSP SGL */
+ wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
+ wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);
+
+ wr->r4 = 0;
+
+ wr->u.fcoe.ctl_pri = 0;
+ wr->u.fcoe.cp_en_class = 0;
+ wr->u.fcoe.r3_lo[0] = 0;
+ wr->u.fcoe.r3_lo[1] = 0;
+ csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr)));
+
+ /* Move WR pointer past command and immediate data */
+ sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
+ sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));
+
+ /* Fill in the DSGL */
+ csio_scsi_init_ultptx_dsgl(hw, req, sgl);
+}
+
+/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
+#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
+do { \
+ (sz) = sizeof(struct fw_scsi_##oper##_wr) + /* WR size */ \
+ ALIGN((imm), 16) + /* Immed data */ \
+ sizeof(struct ulptx_sgl); /* ulptx_sgl */ \
+ \
+ if (unlikely((req)->nsge > 1)) \
+ (sz) += (sizeof(struct ulptx_sge_pair) * \
+ (ALIGN(((req)->nsge - 1), 2) / 2)); \
+ /* Data SGE */ \
+} while (0)
+
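For example, a 4-SGE READ with a 32-byte immediate FCP command sizes out as below; all structure sizes here (fw_scsi_read_wr 48 bytes, ulptx_sgl 16 bytes, ulptx_sge_pair 24 bytes) are assumptions for illustration only:

    #include <stdio.h>

    #define ALIGN(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* Assumed structure sizes, for illustration only */
        unsigned int wr_sz = 48, imm = 32, sgl_sz = 16, pair_sz = 24;
        unsigned int nsge = 4;
        unsigned int sz = wr_sz + ALIGN(imm, 16) + sgl_sz;

        if (nsge > 1)  /* 3 extra SGEs -> 2 sge pairs */
            sz += pair_sz * (ALIGN(nsge - 1, 2) / 2);

        printf("READ WR size = %u bytes\n", ALIGN(sz, 16));  /* 144 */
        return 0;
    }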
+/*
+ * csio_scsi_read - Create a SCSI READ WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with the
+ * SCSI READ WR.
+ */
+static inline void
+csio_scsi_read(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_read_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_read_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_scsi_write - Create a SCSI WRITE WR.
+ * @req: IO req structure.
+ *
+ * Gets a WR slot in the egress queue and initializes it with the
+ * SCSI WRITE WR.
+ */
+static inline void
+csio_scsi_write(struct csio_ioreq *req)
+{
+ struct csio_wr_pair wrp;
+ uint32_t size;
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
+ size = ALIGN(size, 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (likely(req->drv_status == 0)) {
+ if (likely(wrp.size1 >= size)) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_write_wr(req, wrp.addr1, size);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_write_wr(req, (void *)tmpwr, size);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+ }
+}
+
+/*
+ * csio_setup_ddp - Setup DDP buffers for Read request.
+ * @scsim: SCSI module.
+ * @req: IO req structure.
+ *
+ * Checks whether the SGLs/data buffers are virtually contiguous, as
+ * required for DDP. If they are, the driver posts the SGLs directly
+ * in the WR; otherwise it posts internal DDP buffers for the request.
+ */
+static inline void
+csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
+{
+#ifdef __CSIO_DEBUG__
+ struct csio_hw *hw = req->lnode->hwp;
+#endif
+ struct scatterlist *sgel = NULL;
+ struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
+ uint64_t sg_addr = 0;
+ uint32_t ddp_pagesz = 4096;
+ uint32_t buf_off;
+ struct csio_dma_buf *dma_buf = NULL;
+ uint32_t alloc_len = 0;
+ uint32_t xfer_len = 0;
+ uint32_t sg_len = 0;
+ uint32_t i;
+
+ scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
+ sg_addr = sg_dma_address(sgel);
+ sg_len = sg_dma_len(sgel);
+
+ buf_off = sg_addr & (ddp_pagesz - 1);
+
+ /* Except the 1st buffer, all buffer addresses must be page aligned */
+ if (i != 0 && buf_off) {
+ csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
+ sg_addr, sg_len);
+ goto unaligned;
+ }
+
+ /* Except the last buffer, all buffers must end on a page boundary */
+ if ((i != (req->nsge - 1)) &&
+ ((buf_off + sg_len) & (ddp_pagesz - 1))) {
+ csio_dbg(hw,
+ "SGL addr not ending on page boundary"
+ "(%llx:%d)\n", sg_addr, sg_len);
+ goto unaligned;
+ }
+ }
+
+ /* SGLs are virtually contiguous; HW will DDP into them directly */
+ req->dcopy = 0;
+ csio_scsi_read(req);
+
+ return;
+
+unaligned:
+ CSIO_INC_STATS(scsim, n_unaligned);
+ /*
+ * For unaligned SGLs, the driver allocates internal DDP buffers.
+ * Once the command completes, data is copied from the DDP buffers
+ * back into the SGLs.
+ */
+ req->dcopy = 1;
+
+ /* Use gen_list to store the DDP buffers */
+ INIT_LIST_HEAD(&req->gen_list);
+ xfer_len = scsi_bufflen(scmnd);
+
+ i = 0;
+ /* Allocate ddp buffers for this request */
+ while (alloc_len < xfer_len) {
+ dma_buf = csio_get_scsi_ddp(scsim);
+ if (dma_buf == NULL || i > scsim->max_sge) {
+ req->drv_status = -EBUSY;
+ break;
+ }
+ alloc_len += dma_buf->len;
+ /* Added to IO req */
+ list_add_tail(&dma_buf->list, &req->gen_list);
+ i++;
+ }
+
+ if (!req->drv_status) {
+ /* set number of ddp bufs used */
+ req->nsge = i;
+ csio_scsi_read(req);
+ return;
+ }
+
+ /* release dma descs */
+ if (i > 0)
+ csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
+}
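The eligibility test above reduces to two per-SGE checks. A standalone restatement, assuming the same 4 KB DDP page size as ddp_pagesz above:

    #include <stdbool.h>
    #include <stdint.h>

    /*
     * True if the buffer list can be DDPed directly: every buffer but
     * the first starts on a page boundary, and every buffer but the
     * last ends on one.
     */
    static bool ddp_aligned(const uint64_t *addr, const uint32_t *len, int n)
    {
        const uint32_t pagesz = 4096;
        int i;

        for (i = 0; i < n; i++) {
            uint32_t off = addr[i] & (pagesz - 1);

            if (i != 0 && off)
                return false;
            if (i != n - 1 && ((off + len[i]) & (pagesz - 1)))
                return false;
        }
        return true;
    }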
+
+/*
+ * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
+ * @req: IO req structure.
+ * @addr: DMA location to place the payload.
+ * @size: Size of WR
+ * @abort: abort OR close
+ *
+ * Wrapper for populating fw_scsi_cmd_wr.
+ */
+static inline void
+csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
+ bool abort)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_rnode *rn = req->rnode;
+ struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;
+
+ wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
+ wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
+ FW_WR_LEN16(
+ DIV_ROUND_UP(size, 16)));
+
+ wr->cookie = (uintptr_t) req;
+ wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
+ wr->tmo_val = (uint8_t) req->tmo;
+ /* 0 for CHK_ALL_IO tells FW to look up t_cookie */
+ wr->sub_opcode_to_chk_all_io =
+ (FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
+ FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
+ wr->r3[0] = 0;
+ wr->r3[1] = 0;
+ wr->r3[2] = 0;
+ wr->r3[3] = 0;
+ /* Since we re-use the same ioreq for abort as well */
+ wr->t_cookie = (uintptr_t) req;
+}
+
+static inline void
+csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
+{
+ struct csio_wr_pair wrp;
+ struct csio_hw *hw = req->lnode->hwp;
+ uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);
+
+ req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
+ if (req->drv_status != 0)
+ return;
+
+ if (wrp.size1 >= size) {
+ /* Initialize WR in one shot */
+ csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
+ } else {
+ uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
+ /*
+ * Make a temporary copy of the WR and write back
+ * the copy into the WR pair.
+ */
+ csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
+ memcpy(wrp.addr1, tmpwr, wrp.size1);
+ memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
+ }
+}
+
+/*****************************************************************************/
+/* START: SCSI SM */
+/*****************************************************************************/
+static void
+csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_START_IO:
+
+ if (req->nsge) {
+ if (req->datadir == DMA_TO_DEVICE) {
+ req->dcopy = 0;
+ csio_scsi_write(req);
+ } else {
+ csio_setup_ddp(scsim, req);
+ }
+ } else {
+ csio_scsi_cmd(req);
+ }
+
+ if (likely(req->drv_status == 0)) {
+ /* change state and enqueue on active_q */
+ csio_set_state(&req->sm, csio_scsis_io_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_active);
+
+ return;
+ }
+ break;
+
+ case CSIO_SCSIE_START_TM:
+ csio_scsi_cmd(req);
+ if (req->drv_status == 0) {
+ /*
+ * NOTE: We collect the affected I/Os prior to issuing
+ * LUN reset, and not after it. This is to prevent
+ * aborting I/Os that get issued after the LUN reset,
+ * but prior to LUN reset completion (in the event that
+ * the host stack has not blocked I/Os to a LUN that is
+ * being reset).
+ */
+ csio_set_state(&req->sm, csio_scsis_tm_active);
+ list_add_tail(&req->sm.sm_list, &scsim->active_q);
+ csio_wr_issue(hw, req->eq_idx, false);
+ CSIO_INC_STATS(scsim, n_tm_active);
+ }
+ return;
+
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * NOTE:
+ * We could get here due to:
+ * - a window in the cleanup path of the SCSI module
+ * (csio_scsi_abort_io()). Please see the NOTE in that function.
+ * - a window between the time we tried to issue an abort/close
+ * of a request to FW and the time the FW completed the request
+ * itself.
+ * Print a message for now, and return -EINVAL either way.
+ */
+ req->drv_status = -EINVAL;
+ csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn;
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ /*
+ * In MSIX mode, with multiple queues, the SCSI completions
+ * could reach us sooner than the FW events sent to indicate
+ * I-T nexus loss (link down, remote device logo etc.). We
+ * don't want to return such I/Os to the upper layer
+ * immediately, since we wouldn't have reported the I-T nexus
+ * loss itself yet. This forces us to serialize such completions
+ * with the reporting of the I-T nexus loss. Therefore, we
+ * internally queue up such completions in the rnode.
+ * The reporting of the I-T nexus loss to the upper layer is then
+ * followed by the returning of the I/Os in this internal queue.
+ * Having another state along with another queue helps us take
+ * action on events such as an ABORT received while we are
+ * on this rnode queue.
+ */
+ if (unlikely(req->wr_status != FW_SUCCESS)) {
+ rn = req->rnode;
+ /*
+ * FW says remote device is lost, but rnode
+ * doesn't reflect it.
+ */
+ if (csio_scsi_itnexus_loss_error(req->wr_status) &&
+ csio_is_rnode_ready(rn)) {
+ csio_set_state(&req->sm,
+ csio_scsis_shost_cmpl_await);
+ list_add_tail(&req->sm.sm_list,
+ &rn->host_cmpl_q);
+ }
+ }
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ CSIO_DEC_STATS(scm, n_tm_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ csio_scsi_abrt_cls(req, SCSI_ABORT);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_aborting);
+ }
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ csio_scsi_abrt_cls(req, SCSI_CLOSE);
+ if (req->drv_status == 0) {
+ csio_wr_issue(hw, req->eq_idx, false);
+ csio_set_state(&req->sm, csio_scsis_closing);
+ }
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_tm_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in aborting st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the ABORTED event that
+ * the original I/O was returned to driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the ABORT and completion of the I/O crossed each
+ * other), or any other return value. Once we are in aborting
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_ABORT:
+ CSIO_INC_STATS(scm, n_abrt_dups);
+ break;
+
+ case CSIO_SCSIE_ABORTED:
+ csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
+ req, req->wr_status, req->drv_status);
+ /*
+ * Check if original I/O WR completed before the Abort
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_warn(hw,
+ "Abort completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * There are the following possible scenarios:
+ * 1. The abort completed successfully, FW returned FW_SUCCESS.
+ * 2. The completion of an I/O and the receipt of
+ * abort for that I/O by the FW crossed each other.
+ * The FW returned FW_EINVAL. The original I/O would have
+ * returned with FW_SUCCESS or any other SCSI error.
+ * 3. The FW couldn't send the abort out on the wire, as there
+ *    was an I-T nexus loss (link down, remote device logged
+ *    out etc.). FW sent back an appropriate I-T nexus loss status
+ *    for the abort.
+ * 4. FW sent an abort, but the abort timed out (remote device
+ *    didn't respond). FW replied back with
+ *    FW_SCSI_ABORT_TIMEDOUT.
+ * 5. FW couldn't genuinely abort the request for some reason,
+ *    and sent us an error.
+ *
+ * The first 3 scenarios are treated as successful abort
+ * operations by the host, while the last 2 are failed attempts
+ * to abort. Manipulate the return value of the request
+ * appropriately, so that the host can convey these results
+ * back to the upper layer.
+ */
+ if ((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL) ||
+ csio_scsi_itnexus_loss_error(req->wr_status))
+ req->wr_status = FW_SCSI_ABORT_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * We can receive this event from the module
+ * cleanup paths, if the FW forgot to reply to the ABORT WR
+ * and left this ioreq in this state. For now, just ignore
+ * the event. The CLOSE event is sent to this state, as
+ * the LINK may have already gone down.
+ */
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ struct csio_hw *hw = req->lnode->hwp;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ switch (evt) {
+ case CSIO_SCSIE_COMPLETED:
+ csio_dbg(hw,
+ "ioreq %p recvd cmpltd (wr_status:%d) "
+ "in closing st\n", req, req->wr_status);
+ /*
+ * Use -ECANCELED to explicitly tell the CLOSED event that
+ * the original I/O was returned to driver by FW.
+ * We don't really care if the I/O was returned with success by
+ * FW (because the CLOSE and completion of the I/O crossed each
+ * other), or any other return value. Once we are in the closing
+ * state, the success or failure of the I/O is unimportant to
+ * us.
+ */
+ req->drv_status = -ECANCELED;
+ break;
+
+ case CSIO_SCSIE_CLOSED:
+ /*
+ * Check if original I/O WR completed before the Close
+ * completion.
+ */
+ if (req->drv_status != -ECANCELED) {
+ csio_fatal(hw,
+ "Close completed before original I/O,"
+ " req:%p\n", req);
+ CSIO_DB_ASSERT(0);
+ }
+
+ /*
+ * Either close succeeded, or we issued close to FW at the
+ * same time FW completed it to us. Either way, the I/O
+ * is closed.
+ */
+ CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
+ (req->wr_status == FW_EINVAL));
+ req->wr_status = FW_SCSI_CLOSE_REQUESTED;
+
+ CSIO_DEC_STATS(scm, n_active);
+ list_del_init(&req->sm.sm_list);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ case CSIO_SCSIE_CLOSE:
+ break;
+
+ case CSIO_SCSIE_DRVCLEANUP:
+ req->wr_status = FW_HOSTERROR;
+ CSIO_DEC_STATS(scm, n_active);
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+
+ default:
+ csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+static void
+csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
+{
+ switch (evt) {
+ case CSIO_SCSIE_ABORT:
+ case CSIO_SCSIE_CLOSE:
+ /*
+ * Just succeed the abort request, and hope that
+ * the remote device unregister path will clean up
+ * this I/O to the upper layer within a sane
+ * amount of time.
+ */
+ /*
+ * A close can come in during a LINK DOWN. The FW would have
+ * returned us the I/O back, but not the remote device lost
+ * FW event. In this interval, if the I/O times out at the upper
+ * layer, a close can come in. Take the same action as abort:
+ * return success, and hope that the remote device unregister
+ * path will clean up this I/O. If the FW still doesn't send
+ * the msg, the close times out, and the upper layer resorts
+ * to the next level of error recovery.
+ */
+ req->drv_status = 0;
+ break;
+ case CSIO_SCSIE_DRVCLEANUP:
+ csio_set_state(&req->sm, csio_scsis_uninit);
+ break;
+ default:
+ csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
+ evt, req);
+ CSIO_DB_ASSERT(0);
+ }
+}
+
+/*
+ * csio_scsi_cmpl_handler - WR completion handler for SCSI.
+ * @hw: HW module.
+ * @wr: The completed WR from the ingress queue.
+ * @len: Length of the WR.
+ * @flb: Freelist buffer array.
+ * @priv: Private object
+ * @scsiwr: Pointer to SCSI WR.
+ *
+ * This is the WR completion handler called per completion from the
+ * ISR. It is called with lock held. It walks past the RSS and CPL message
+ * header where the actual WR is present.
+ * It then gets the status, WR handle (ioreq pointer) and the len of
+ * the WR, based on WR opcode. Only on a non-good status is the entire
+ * WR copied into the WR cache (ioreq->fw_wr).
+ * The ioreq corresponding to the WR is returned to the caller.
+ * NOTE: The SCSI queue doesn't allocate a freelist today, hence
+ * no freelist buffer is expected.
+ */
+struct csio_ioreq *
+csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
+ struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
+{
+ struct csio_ioreq *ioreq = NULL;
+ struct cpl_fw6_msg *cpl;
+ uint8_t *tempwr;
+ uint8_t status;
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+
+ /* skip RSS header */
+ cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));
+
+ if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
+ csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
+ cpl->opcode);
+ CSIO_INC_STATS(scm, n_inval_cplop);
+ return NULL;
+ }
+
+ tempwr = (uint8_t *)(cpl->data);
+ status = csio_wr_status(tempwr);
+ *scsiwr = tempwr;
+
+ if (likely((*tempwr == FW_SCSI_READ_WR) ||
+ (*tempwr == FW_SCSI_WRITE_WR) ||
+ (*tempwr == FW_SCSI_CMD_WR))) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_read_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+
+ return ioreq;
+ }
+
+ if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
+ ioreq = (struct csio_ioreq *)((uintptr_t)
+ (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
+ CSIO_DB_ASSERT(virt_addr_valid(ioreq));
+
+ ioreq->wr_status = status;
+ return ioreq;
+ }
+
+ csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
+ CSIO_INC_STATS(scm, n_inval_scsiop);
+ return NULL;
+}
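Each SCSI WR type stashes the ioreq pointer in its cookie field, which is what lets the handler above recover the request from nothing but the opcode byte. A simplified standalone sketch; the mock layout below is hypothetical, not the firmware's actual structure:

    #include <stdint.h>

    /* Hypothetical stand-in for the fw_scsi_*_wr layouts */
    struct mock_fw_wr {
        uint8_t  opcode;   /* first byte identifies the WR type */
        uint8_t  pad[7];
        uint64_t cookie;   /* the driver stored the ioreq pointer here */
    };

    static void *wr_to_ioreq(const void *fw_wr)
    {
        const struct mock_fw_wr *wr = fw_wr;

        return (void *)(uintptr_t)wr->cookie;
    }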
+
+/*
+ * csio_scsi_cleanup_io_q - Cleanup the given queue.
+ * @scm: SCSI module.
+ * @q: Queue to be cleaned up.
+ *
+ * Called with lock held. Has to exit with lock held.
+ */
+void
+csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_ioreq *ioreq;
+ struct list_head *tmp, *next;
+ struct scsi_cmnd *scmnd;
+
+ /* Call back the completion routines of the active_q */
+ list_for_each_safe(tmp, next, q) {
+ ioreq = (struct csio_ioreq *)tmp;
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ scmnd = csio_scsi_cmnd(ioreq);
+ spin_unlock_irq(&hw->lock);
+
+ /*
+ * Upper layers may have cleared this command, hence this
+ * check to avoid accessing stale references.
+ */
+ if (scmnd != NULL)
+ ioreq->io_cbfn(hw, ioreq);
+
+ spin_lock_irq(&scm->freelist_lock);
+ csio_put_scsi_ioreq(scm, ioreq);
+ spin_unlock_irq(&scm->freelist_lock);
+
+ spin_lock_irq(&hw->lock);
+ }
+}
+
+#define CSIO_SCSI_ABORT_Q_POLL_MS 2000
+
+static void
+csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
+{
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_hw *hw = ln->hwp;
+ int ready = 0;
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int rv;
+
+ if (csio_scsi_cmnd(ioreq) != scmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ if (rv != 0) {
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+ }
+}
+
+/*
+ * csio_scsi_abort_io_q - Abort all I/Os on given queue
+ * @scm: SCSI module.
+ * @q: Queue to abort.
+ * @tmo: Timeout in ms
+ *
+ * Attempt to abort all I/Os on given queue, and wait for a max
+ * of tmo milliseconds for them to complete. Returns success
+ * if all I/Os are aborted. Else returns -ETIMEDOUT.
+ * Should be entered with lock held. Exits with lock held.
+ * NOTE:
+ * Lock has to be held across the loop that aborts I/Os, since dropping the lock
+ * in between can cause the list to be corrupted. As a result, the caller
+ * of this function has to ensure that the number of I/Os to be aborted
+ * is small enough not to cause lock-held-for-too-long issues.
+ */
+static int
+csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
+{
+ struct csio_hw *hw = scm->hw;
+ struct list_head *tmp, *next;
+ int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
+ struct scsi_cmnd *scmnd;
+
+ if (list_empty(q))
+ return 0;
+
+ csio_dbg(hw, "Aborting SCSI I/Os\n");
+
+ /* Now abort/close I/Os in the queue passed */
+ list_for_each_safe(tmp, next, q) {
+ scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
+ csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
+ }
+
+ /* Wait till all active I/Os are completed/aborted/closed */
+ while (!list_empty(q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all aborts completed */
+ if (list_empty(q))
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+/*
+ * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
+ * @scm: SCSI module.
+ * @abort: abort required.
+ * Called with lock held, should exit with lock held.
+ * Can sleep when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
+{
+ struct csio_hw *hw = scm->hw;
+ int rv = 0;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ /* No I/Os pending */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Wait until all active I/Os are completed */
+ while (!list_empty(&scm->active_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&scm->active_q))
+ return 0;
+
+ /* Else abort */
+ if (abort) {
+ rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
+ if (rv == 0)
+ return rv;
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ }
+
+ csio_scsi_cleanup_io_q(scm, &scm->active_q);
+
+ CSIO_DB_ASSERT(list_empty(&scm->active_q));
+
+ return rv;
+}
+
+/*
+ * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
+ * @scm: SCSI module.
+ * @lnode: lnode
+ *
+ * Called with lock held, should exit with lock held.
+ * Can sleep (with dropped lock) when waiting for I/Os to complete.
+ */
+int
+csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln)
+{
+ struct csio_hw *hw = scm->hw;
+ struct csio_scsi_level_data sld;
+ int rv;
+ int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);
+
+ csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln);
+
+ sld.level = CSIO_LEV_LNODE;
+ sld.lnode = ln;
+ INIT_LIST_HEAD(&ln->cmpl_q);
+ csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q);
+
+ /* No I/Os pending on this lnode */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ /* Wait until all active I/Os on this lnode are completed */
+ while (!list_empty(&ln->cmpl_q) && count--) {
+ spin_unlock_irq(&hw->lock);
+ msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
+ spin_lock_irq(&hw->lock);
+ }
+
+ /* all I/Os completed */
+ if (list_empty(&ln->cmpl_q))
+ return 0;
+
+ csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln);
+
+ /* I/Os are pending, abort them */
+ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000);
+ if (rv != 0) {
+ csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
+ csio_scsi_cleanup_io_q(scm, &ln->cmpl_q);
+ }
+
+ CSIO_DB_ASSERT(list_empty(&ln->cmpl_q));
+
+ return rv;
+}
+
+static ssize_t
+csio_show_hw_state(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (csio_is_hw_ready(hw))
+ return snprintf(buf, PAGE_SIZE, "ready\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "not ready\n");
+}
+
+/* Device reset */
+static ssize_t
+csio_device_reset(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+
+ if (*buf != '1')
+ return -EINVAL;
+
+ /* Delete NPIV lnodes */
+ csio_lnodes_exit(hw, 1);
+
+ /* Block upper IOs */
+ csio_lnodes_block_request(hw);
+
+ spin_lock_irq(&hw->lock);
+ csio_hw_reset(hw);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_request(hw);
+ return count;
+}
+
+/* disable port */
+static ssize_t
+csio_disable_port(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ bool disable;
+
+ if (*buf == '1' || *buf == '0')
+ disable = (*buf == '1');
+ else
+ return -EINVAL;
+
+ /* Block upper IOs */
+ csio_lnodes_block_by_port(hw, ln->portid);
+
+ spin_lock_irq(&hw->lock);
+ csio_disable_lnodes(hw, ln->portid, disable);
+ spin_unlock_irq(&hw->lock);
+
+ /* Unblock upper IOs */
+ csio_lnodes_unblock_by_port(hw, ln->portid);
+ return count;
+}
+
+/* Show debug level */
+static ssize_t
+csio_show_dbg_level(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level);
+}
+
+/* Store debug level */
+static ssize_t
+csio_store_dbg_level(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ uint32_t dbg_level = 0;
+
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+
+ if (sscanf(buf, "%i", &dbg_level) != 1)
+ return -EINVAL;
+
+ ln->params.log_level = dbg_level;
+ hw->params.log_level = dbg_level;
+
+ return count;
+}
+
+static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL);
+static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset);
+static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port);
+static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level,
+ csio_store_dbg_level);
+
+static struct device_attribute *csio_fcoe_lport_attrs[] = {
+ &dev_attr_hw_state,
+ &dev_attr_device_reset,
+ &dev_attr_disable_port,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static ssize_t
+csio_show_num_reg_rnodes(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct csio_lnode *ln = shost_priv(class_to_shost(dev));
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes);
+}
+
+static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL);
+
+static struct device_attribute *csio_fcoe_vport_attrs[] = {
+ &dev_attr_num_reg_rnodes,
+ &dev_attr_dbg_level,
+ NULL,
+};
+
+static inline uint32_t
+csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct scatterlist *sg;
+ uint32_t bytes_left;
+ uint32_t bytes_copy;
+ uint32_t buf_off = 0;
+ uint32_t start_off = 0;
+ uint32_t sg_off = 0;
+ void *sg_addr;
+ void *buf_addr;
+ struct csio_dma_buf *dma_buf;
+
+ bytes_left = scsi_bufflen(scmnd);
+ sg = scsi_sglist(scmnd);
+ dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list);
+
+ /* Copy data from driver buffer to SGs of SCSI CMD */
+ while (bytes_left > 0 && sg && dma_buf) {
+ if (buf_off >= dma_buf->len) {
+ buf_off = 0;
+ dma_buf = (struct csio_dma_buf *)
+ csio_list_next(dma_buf);
+ continue;
+ }
+
+ if (start_off >= sg->length) {
+ start_off -= sg->length;
+ sg = sg_next(sg);
+ continue;
+ }
+
+ buf_addr = dma_buf->vaddr + buf_off;
+ sg_off = sg->offset + start_off;
+ bytes_copy = min((dma_buf->len - buf_off),
+ sg->length - start_off);
+ bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)),
+ bytes_copy);
+
+ sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT));
+ if (!sg_addr) {
+ csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n",
+ sg, req);
+ break;
+ }
+
+ csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n",
+ sg_addr, sg_off, buf_addr, bytes_copy);
+ memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy);
+ kunmap_atomic(sg_addr);
+
+ start_off += bytes_copy;
+ buf_off += bytes_copy;
+ bytes_left -= bytes_copy;
+ }
+
+ if (bytes_left > 0)
+ return DID_ERROR;
+ else
+ return DID_OK;
+}
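The copy loop above never moves more than the smallest of three limits at a time: bytes left in the current DDP buffer, bytes left in the current SG entry, and bytes left on the kmapped page. A standalone restatement of that bound, assuming a 4 KB page:

    #include <stdint.h>

    /* Bytes safe to copy in one step, as in csio_scsi_copy_to_sgl() */
    static uint32_t copy_bound(uint32_t buf_left, uint32_t sg_left,
                               uint32_t sg_off)
    {
        const uint32_t pagesz = 4096;
        uint32_t n = buf_left < sg_left ? buf_left : sg_left;
        uint32_t page_left = pagesz - (sg_off & (pagesz - 1));

        return n < page_left ? n : page_left;
    }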
+
+/*
+ * csio_scsi_err_handler - SCSI error handler.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static inline void
+csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_scsim *scm = csio_hw_to_scsim(hw);
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags, scsi_status = 0;
+ uint32_t host_status = DID_OK;
+ uint32_t rsp_len = 0, sns_len = 0;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+ switch (req->wr_status) {
+ case FW_HOSTERROR:
+ if (unlikely(!csio_is_hw_ready(hw)))
+ return;
+
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_hosterror);
+
+ break;
+ case FW_SCSI_RSP_ERR:
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+ flags = fcp_resp->resp.fr_flags;
+ scsi_status = fcp_resp->resp.fr_status;
+
+ if (flags & FCP_RSP_LEN_VAL) {
+ rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len);
+ if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) ||
+ (rsp_info->rsp_code != FCP_TMF_CMPL)) {
+ host_status = DID_ERROR;
+ goto out;
+ }
+ }
+
+ if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) {
+ sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len);
+ if (sns_len > SCSI_SENSE_BUFFERSIZE)
+ sns_len = SCSI_SENSE_BUFFERSIZE;
+
+ memcpy(cmnd->sense_buffer,
+ &rsp_info->_fr_resvd[0] + rsp_len, sns_len);
+ CSIO_INC_STATS(scm, n_autosense);
+ }
+
+ scsi_set_resid(cmnd, 0);
+
+ /* Under run */
+ if (flags & FCP_RESID_UNDER) {
+ scsi_set_resid(cmnd,
+ be32_to_cpu(fcp_resp->ext.fr_resid));
+
+ if (!(flags & FCP_SNS_LEN_VAL) &&
+ (scsi_status == SAM_STAT_GOOD) &&
+ ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd))
+ < cmnd->underflow))
+ host_status = DID_ERROR;
+ } else if (flags & FCP_RESID_OVER)
+ host_status = DID_ERROR;
+
+ CSIO_INC_STATS(scm, n_rsperror);
+ break;
+
+ case FW_SCSI_OVER_FLOW_ERR:
+ csio_warn(hw,
+ "Over-flow error,cmnd:0x%x expected len:0x%x"
+ " resid:0x%x\n", cmnd->cmnd[0],
+ scsi_bufflen(cmnd), scsi_get_resid(cmnd));
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_ovflerror);
+ break;
+
+ case FW_SCSI_UNDER_FLOW_ERR:
+ csio_warn(hw,
+ "Under-flow error,cmnd:0x%x expected"
+ " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n",
+ cmnd->cmnd[0], scsi_bufflen(cmnd),
+ scsi_get_resid(cmnd), cmnd->device->lun,
+ rn->flowid);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_unflerror);
+ break;
+
+ case FW_SCSI_ABORT_REQUESTED:
+ case FW_SCSI_ABORTED:
+ case FW_SCSI_CLOSE_REQUESTED:
+ csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd,
+ cmnd->cmnd[0],
+ (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ?
+ "closed" : "aborted");
+ /*
+ * csio_eh_abort_handler checks this value to
+ * succeed or fail the abort request.
+ */
+ host_status = DID_REQUEUE;
+ if (req->wr_status == FW_SCSI_CLOSE_REQUESTED)
+ CSIO_INC_STATS(scm, n_closed);
+ else
+ CSIO_INC_STATS(scm, n_aborted);
+ break;
+
+ case FW_SCSI_ABORT_TIMEDOUT:
+ /* FW timed out the abort itself */
+ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n",
+ req, cmnd, req->wr_status);
+ host_status = DID_ERROR;
+ CSIO_INC_STATS(scm, n_abrt_timedout);
+ break;
+
+ case FW_RDEV_NOT_READY:
+ /*
+ * In firmware, an RDEV can get into this state
+ * temporarily, before moving into the disappeared/lost
+ * state. So, the driver should complete the request as if
+ * the device had disappeared.
+ */
+ CSIO_INC_STATS(scm, n_rdev_nr_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOST:
+ CSIO_INC_STATS(scm, n_rdev_lost_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_LOGO:
+ CSIO_INC_STATS(scm, n_rdev_logo_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_RDEV_IMPL_LOGO:
+ host_status = DID_ERROR;
+ break;
+
+ case FW_ERR_LINK_DOWN:
+ CSIO_INC_STATS(scm, n_link_down_error);
+ host_status = DID_ERROR;
+ break;
+
+ case FW_FCOE_NO_XCHG:
+ CSIO_INC_STATS(scm, n_no_xchg_error);
+ host_status = DID_ERROR;
+ break;
+
+ default:
+ csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n",
+ req->wr_status, req, cmnd);
+ CSIO_DB_ASSERT(0);
+
+ CSIO_INC_STATS(scm, n_unknown_error);
+ host_status = DID_ERROR;
+ break;
+ }
+
+out:
+ if (req->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+
+ /* Wake up waiting threads */
+ csio_scsi_cmnd(req) = NULL;
+ complete_all(&req->cmplobj);
+}
+
+/*
+ * csio_scsi_cbfn - SCSI callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ */
+static void
+csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ uint8_t scsi_status = SAM_STAT_GOOD;
+ uint32_t host_status = DID_OK;
+
+ if (likely(req->wr_status == FW_SUCCESS)) {
+ if (req->nsge > 0) {
+ scsi_dma_unmap(cmnd);
+ if (req->dcopy)
+ host_status = csio_scsi_copy_to_sgl(hw, req);
+ }
+
+ cmnd->result = (((host_status) << 16) | scsi_status);
+ cmnd->scsi_done(cmnd);
+ csio_scsi_cmnd(req) = NULL;
+ CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success);
+ } else {
+ /* Error handling */
+ csio_scsi_err_handler(hw, req);
+ }
+}
+
+/**
+ * csio_queuecommand - Entry point to kickstart an I/O request.
+ * @host: The scsi_host pointer.
+ * @cmnd: The I/O request from ML.
+ *
+ * This routine does the following:
+ * - Checks for HW and Rnode module readiness.
+ * - Gets a free ioreq structure (which is already initialized
+ * to uninit during its allocation).
+ * - Maps SG elements.
+ * - Initializes ioreq members.
+ * - Kicks off the SCSI state machine for this IO.
+ * - Returns busy status on error.
+ */
+static int
+csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ unsigned long flags;
+ int nsge = 0;
+ int rv = SCSI_MLQUEUE_HOST_BUSY, nr;
+ int retval;
+ int cpu;
+ struct csio_scsi_qset *sqset;
+ struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));
+
+ if (!blk_rq_cpu_valid(cmnd->request))
+ cpu = smp_processor_id();
+ else
+ cpu = cmnd->request->cpu;
+
+ sqset = &hw->sqset[ln->portid][cpu];
+
+ nr = fc_remote_port_chkready(rport);
+ if (nr) {
+ cmnd->result = nr;
+ CSIO_INC_STATS(scsim, n_rn_nr_error);
+ goto err_done;
+ }
+
+ if (unlikely(!csio_is_hw_ready(hw))) {
+ cmnd->result = (DID_REQUEUE << 16);
+ CSIO_INC_STATS(scsim, n_hw_nr_error);
+ goto err_done;
+ }
+
+ /* Get req->nsge, if there are SG elements to be mapped */
+ nsge = scsi_dma_map(cmnd);
+ if (unlikely(nsge < 0)) {
+ CSIO_INC_STATS(scsim, n_dmamap_error);
+ goto err;
+ }
+
+ /* Do we support so many mappings? */
+ if (unlikely(nsge > scsim->max_sge)) {
+ csio_warn(hw,
+ "More SGEs than can be supported."
+ " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge);
+ CSIO_INC_STATS(scsim, n_unsupp_sge_error);
+ goto err_dma_unmap;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+ if (!ioreq) {
+ csio_err(hw, "Out of I/O request elements. Active #:%d\n",
+ scsim->stats.n_active);
+ CSIO_INC_STATS(scsim, n_no_req_error);
+ goto err_dma_unmap;
+ }
+
+ ioreq->nsge = nsge;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+ ioreq->wr_status = 0;
+ ioreq->drv_status = 0;
+ csio_scsi_cmnd(ioreq) = (void *)cmnd;
+ ioreq->tmo = 0;
+ ioreq->datadir = cmnd->sc_data_direction;
+
+ if (cmnd->sc_data_direction == DMA_TO_DEVICE) {
+ CSIO_INC_STATS(ln, n_output_requests);
+ ln->stats.n_output_bytes += scsi_bufflen(cmnd);
+ } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) {
+ CSIO_INC_STATS(ln, n_input_requests);
+ ln->stats.n_input_bytes += scsi_bufflen(cmnd);
+ } else {
+ CSIO_INC_STATS(ln, n_control_requests);
+ }
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_scsi_cbfn;
+
+ /* Needed during abort */
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Message = 0;
+
+ /* Kick off SCSI IO SM on the ioreq */
+ spin_lock_irqsave(&hw->lock, flags);
+ retval = csio_scsi_start_io(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+ ioreq, retval);
+ CSIO_INC_STATS(scsim, n_busy_error);
+ goto err_put_req;
+ }
+
+ return 0;
+
+err_put_req:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+err_dma_unmap:
+ if (nsge > 0)
+ scsi_dma_unmap(cmnd);
+err:
+ return rv;
+
+err_done:
+ cmnd->scsi_done(cmnd);
+ return 0;
+}
+
+static int
+csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort)
+{
+ int rv;
+ int cpu = smp_processor_id();
+ struct csio_lnode *ln = ioreq->lnode;
+ struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu];
+
+ ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS;
+ /*
+ * Use current processor queue for posting the abort/close, but retain
+ * the ingress queue ID of the original I/O being aborted/closed - we
+ * need the abort/close completion to be received on the same queue
+ * as the original I/O.
+ */
+ ioreq->eq_idx = sqset->eq_idx;
+
+ if (abort == SCSI_ABORT)
+ rv = csio_scsi_abort(ioreq);
+ else
+ rv = csio_scsi_close(ioreq);
+
+ return rv;
+}
+
+static int
+csio_eh_abort_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ int ready = 0, ret;
+ unsigned long tmo = 0;
+ int rv;
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ ioreq = (struct csio_ioreq *)cmnd->host_scribble;
+ if (!ioreq)
+ return SUCCESS;
+
+ if (!rn)
+ return FAILED;
+
+ csio_dbg(hw,
+ "Request to abort ioreq:%p cmd:%p cdb:%08llx"
+ " ssni:0x%x lun:%d iq:0x%x\n",
+ ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid,
+ cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx));
+
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) {
+ CSIO_INC_STATS(scsim, n_abrt_race_comp);
+ return SUCCESS;
+ }
+
+ ready = csio_is_lnode_ready(ln);
+ tmo = CSIO_SCSI_ABRT_TMO_MS;
+
+ spin_lock_irq(&hw->lock);
+ rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
+ spin_unlock_irq(&hw->lock);
+
+ if (rv != 0) {
+ if (rv == -EINVAL) {
+ /* Return success if the abort/close request was issued on an
+ * already completed I/O
+ */
+ return SUCCESS;
+ }
+ if (ready)
+ CSIO_INC_STATS(scsim, n_abrt_busy_error);
+ else
+ CSIO_INC_STATS(scsim, n_cls_busy_error);
+
+ goto inval_scmnd;
+ }
+
+ /* Wait for completion */
+ init_completion(&ioreq->cmplobj);
+ wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo));
+
+ /* FW didn't respond to the abort within our timeout */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+
+ csio_err(hw, "Abort timed out -- req: %p\n", ioreq);
+ CSIO_INC_STATS(scsim, n_abrt_timedout);
+
+inval_scmnd:
+ if (ioreq->nsge > 0)
+ scsi_dma_unmap(cmnd);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_cmnd(ioreq) = NULL;
+ spin_unlock_irq(&hw->lock);
+
+ cmnd->result = (DID_ERROR << 16);
+ cmnd->scsi_done(cmnd);
+
+ return FAILED;
+ }
+
+ /* FW successfully aborted the request */
+ if (host_byte(cmnd->result) == DID_REQUEUE) {
+ csio_info(hw,
+ "Aborted SCSI command to (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return SUCCESS;
+ } else {
+ csio_info(hw,
+ "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n",
+ cmnd->device->id, cmnd->device->lun,
+ cmnd->serial_number);
+ return FAILED;
+ }
+}
+
+/*
+ * csio_tm_cbfn - TM callback function.
+ * @hw: HW module.
+ * @req: IO request.
+ *
+ * Cache the result in 'cmnd', since ioreq will be freed soon
+ * after we return from here, and the waiting thread shouldn't trust
+ * the ioreq contents.
+ */
+static void
+csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req)
+{
+ struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req);
+ struct csio_dma_buf *dma_buf;
+ uint8_t flags = 0;
+ struct fcp_resp_with_ext *fcp_resp;
+ struct fcp_resp_rsp_info *rsp_info;
+
+ csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n",
+ req, req->wr_status);
+
+ /* Cache FW return status */
+ cmnd->SCp.Status = req->wr_status;
+
+ /* Special handling based on FCP response */
+
+ /*
+ * FW returns us this error if the flags field was set. FCP-4 says
+ * FCP_RSP_LEN_VAL in flags shall be set for TM completions.
+ * So if a target were to set this bit, we expect that the
+ * rsp_code is set to FCP_TMF_CMPL for a successful TM
+ * completion. Any other rsp_code means TM operation failed.
+ * If a target were to just ignore setting flags, we treat
+ * the TM operation as success, and FW returns FW_SUCCESS.
+ */
+ if (req->wr_status == FW_SCSI_RSP_ERR) {
+ dma_buf = &req->dma_buf;
+ fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr;
+ rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1);
+
+ flags = fcp_resp->resp.fr_flags;
+
+ /* Modify return status if flags indicate success */
+ if (flags & FCP_RSP_LEN_VAL)
+ if (rsp_info->rsp_code == FCP_TMF_CMPL)
+ cmnd->SCp.Status = FW_SUCCESS;
+
+ csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code);
+ }
+
+ /* Wake up the TM handler thread */
+ csio_scsi_cmnd(req) = NULL;
+}
+
+static int
+csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd)
+{
+ struct csio_lnode *ln = shost_priv(cmnd->device->host);
+ struct csio_hw *hw = csio_lnode_to_hw(ln);
+ struct csio_scsim *scsim = csio_hw_to_scsim(hw);
+ struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata);
+ struct csio_ioreq *ioreq = NULL;
+ struct csio_scsi_qset *sqset;
+ unsigned long flags;
+ int retval;
+ int count, ret;
+ LIST_HEAD(local_q);
+ struct csio_scsi_level_data sld;
+
+ if (!rn)
+ goto fail;
+
+ csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n",
+ cmnd->device->lun, rn->flowid, rn->scsi_id);
+
+ if (!csio_is_lnode_ready(ln)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " local node vnpi:0x%x (LUN:%d)\n",
+ ln->vnp_flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Lnode is ready, now wait on rport node readiness */
+ ret = fc_block_scsi_eh(cmnd);
+ if (ret)
+ return ret;
+
+ /*
+ * If we have blocked in the previous call, at this point, either the
+ * remote node has come back online, or device loss timer has fired
+ * and the remote node is destroyed. Allow the LUN reset only for
+ * the former case, since LUN reset is a TMF I/O on the wire, and we
+ * need a valid session to issue it.
+ */
+ if (fc_remote_port_chkready(rn->rport)) {
+ csio_err(hw,
+ "LUN reset cannot be issued on non-ready"
+ " remote node ssni:0x%x (LUN:%d)\n",
+ rn->flowid, cmnd->device->lun);
+ goto fail;
+ }
+
+ /* Get a free ioreq structure - SM is already set to uninit */
+ ioreq = csio_get_scsi_ioreq_lock(hw, scsim);
+
+ if (!ioreq) {
+ csio_err(hw, "Out of IO request elements. Active # :%d\n",
+ scsim->stats.n_active);
+ goto fail;
+ }
+
+ sqset = &hw->sqset[ln->portid][smp_processor_id()];
+ ioreq->nsge = 0;
+ ioreq->lnode = ln;
+ ioreq->rnode = rn;
+ ioreq->iq_idx = sqset->iq_idx;
+ ioreq->eq_idx = sqset->eq_idx;
+
+ csio_scsi_cmnd(ioreq) = cmnd;
+ cmnd->host_scribble = (unsigned char *)ioreq;
+ cmnd->SCp.Status = 0;
+
+ cmnd->SCp.Message = FCP_TMF_LUN_RESET;
+ ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000;
+
+ /*
+ * FW times the LUN reset for ioreq->tmo, so we have to wait a little
+ * longer (10s for now) than that to allow FW to return the timed
+ * out command.
+ */
+ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS);
+
+ /* Set cbfn */
+ ioreq->io_cbfn = csio_tm_cbfn;
+
+ /* Save off the ioreq info for later use */
+ sld.level = CSIO_LEV_LUN;
+ sld.lnode = ioreq->lnode;
+ sld.rnode = ioreq->rnode;
+ sld.oslun = (uint64_t)cmnd->device->lun;
+
+ spin_lock_irqsave(&hw->lock, flags);
+ /* Kick off TM SM on the ioreq */
+ retval = csio_scsi_start_tm(ioreq);
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+ if (retval != 0) {
+ csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n",
+ ioreq, retval);
+ goto fail_ret_ioreq;
+ }
+
+ csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n",
+ count * (CSIO_SCSI_TM_POLL_MS / 1000));
+ /* Wait for completion */
+ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd)
+ && count--)
+ msleep(CSIO_SCSI_TM_POLL_MS);
+
+ /* LUN reset timed-out */
+ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) {
+ csio_err(hw, "LUN reset (%d:%d) timed out\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ spin_lock_irq(&hw->lock);
+ csio_scsi_drvcleanup(ioreq);
+ list_del_init(&ioreq->sm.sm_list);
+ spin_unlock_irq(&hw->lock);
+
+ goto fail_ret_ioreq;
+ }
+
+ /* LUN reset returned, check cached status */
+ if (cmnd->SCp.Status != FW_SUCCESS) {
+ csio_err(hw, "LUN reset failed (%d:%d), status: %d\n",
+ cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status);
+ goto fail;
+ }
+
+ /* LUN reset succeeded; start aborting affected I/Os */
+ /*
+ * Since the host guarantees during LUN reset that there
+ * will not be any more I/Os to that LUN until the LUN reset
+ * completes, we gather pending I/Os after the LUN reset.
+ */
+ spin_lock_irq(&hw->lock);
+ csio_scsi_gather_active_ios(scsim, &sld, &local_q);
+
+ retval = csio_scsi_abort_io_q(scsim, &local_q, 30000);
+ spin_unlock_irq(&hw->lock);
+
+ /* Aborts may have timed out */
+ if (retval != 0) {
+ csio_err(hw,
+ "Attempt to abort I/Os during LUN reset of %d"
+ " returned %d\n", cmnd->device->lun, retval);
+ /* Return I/Os back to active_q */
+ spin_lock_irq(&hw->lock);
+ list_splice_tail_init(&local_q, &scsim->active_q);
+ spin_unlock_irq(&hw->lock);
+ goto fail;
+ }
+
+ CSIO_INC_STATS(rn, n_lun_rst);
+
+ csio_info(hw, "LUN reset occurred (%d:%d)\n",
+ cmnd->device->id, cmnd->device->lun);
+
+ return SUCCESS;
+
+fail_ret_ioreq:
+ csio_put_scsi_ioreq_lock(hw, scsim, ioreq);
+fail:
+ CSIO_INC_STATS(rn, n_lun_rst_fail);
+ return FAILED;
+}
+
+static int
+csio_slave_alloc(struct scsi_device *sdev)
+{
+ struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+
+ if (!rport || fc_remote_port_chkready(rport))
+ return -ENXIO;
+
+ sdev->hostdata = *((struct csio_lnode **)(rport->dd_data));
+
+ return 0;
+}
+
+static int
+csio_slave_configure(struct scsi_device *sdev)
+{
+ if (sdev->tagged_supported)
+ scsi_activate_tcq(sdev, csio_lun_qdepth);
+ else
+ scsi_deactivate_tcq(sdev, csio_lun_qdepth);
+
+ return 0;
+}
+
+static void
+csio_slave_destroy(struct scsi_device *sdev)
+{
+ sdev->hostdata = NULL;
+}
+
+static int
+csio_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct csio_lnode *ln = shost_priv(shost);
+ int rv = 1;
+
+ spin_lock_irq(shost->host_lock);
+ if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list))
+ goto out;
+
+ rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ,
+ csio_delta_scan_tmo * HZ);
+out:
+ spin_unlock_irq(shost->host_lock);
+
+ return rv;
+}
+
+struct scsi_host_template csio_fcoe_shost_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_lport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+struct scsi_host_template csio_fcoe_shost_vport_template = {
+ .module = THIS_MODULE,
+ .name = CSIO_DRV_DESC,
+ .proc_name = KBUILD_MODNAME,
+ .queuecommand = csio_queuecommand,
+ .eh_abort_handler = csio_eh_abort_handler,
+ .eh_device_reset_handler = csio_eh_lun_reset_handler,
+ .slave_alloc = csio_slave_alloc,
+ .slave_configure = csio_slave_configure,
+ .slave_destroy = csio_slave_destroy,
+ .scan_finished = csio_scan_finished,
+ .this_id = -1,
+ .sg_tablesize = CSIO_SCSI_MAX_SGE,
+ .cmd_per_lun = CSIO_MAX_CMD_PER_LUN,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = csio_fcoe_vport_attrs,
+ .max_sectors = CSIO_MAX_SECTOR_SIZE,
+};
+
+/*
+ * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ * @buf_size: buffer size
+ * @num_buf : Number of buffers.
+ *
+ * This routine allocates DMA buffers required for SCSI Data xfer, if
+ * each SGL buffer for a SCSI Read request posted by SCSI midlayer are
+ * not virtually contiguous.
+ */
+static int
+csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw,
+ int buf_size, int num_buf)
+{
+ int n = 0;
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc = NULL;
+ uint32_t unit_size = 0;
+
+ if (!num_buf)
+ return 0;
+
+ if (!buf_size)
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&scm->ddp_freelist);
+
+ /* Align buf size to page size */
+ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK;
+ /* Initialize dma descriptors */
+ for (n = 0; n < num_buf; n++) {
+ /* Set unit size to request size */
+ unit_size = buf_size;
+ ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL);
+ if (!ddp_desc) {
+ csio_err(hw,
+ "Failed to allocate ddp descriptors,"
+ " Num allocated = %d.\n",
+ scm->stats.n_free_ddp);
+ goto no_mem;
+ }
+
+ /* Allocate Dma buffers for DDP */
+ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size,
+ &ddp_desc->paddr);
+ if (!ddp_desc->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer (ddp) allocation"
+ " failed!\n");
+ kfree(ddp_desc);
+ goto no_mem;
+ }
+
+ ddp_desc->len = unit_size;
+
+ /* Add it to the scsi ddp freelist */
+ list_add_tail(&ddp_desc->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+ }
+
+ return 0;
+no_mem:
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+
+ return -ENOMEM;
+}
+
+/*
+ * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs.
+ * @scm: SCSI Module
+ * @hw: HW device.
+ *
+ * This routine frees ddp buffers.
+ */
+static void
+csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ struct list_head *tmp;
+ struct csio_dma_buf *ddp_desc;
+
+ /* release dma descs back to freelist and free dma memory */
+ list_for_each(tmp, &scm->ddp_freelist) {
+ ddp_desc = (struct csio_dma_buf *) tmp;
+ tmp = csio_list_prev(tmp);
+ pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr,
+ ddp_desc->paddr);
+ list_del_init(&ddp_desc->list);
+ kfree(ddp_desc);
+ }
+ scm->stats.n_free_ddp = 0;
+}
+
+/**
+ * csio_scsim_init - Initialize SCSI Module
+ * @scm: SCSI Module
+ * @hw: HW module
+ *
+ */
+int
+csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw)
+{
+ int i;
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ INIT_LIST_HEAD(&scm->active_q);
+ scm->hw = hw;
+
+ scm->proto_cmd_len = sizeof(struct fcp_cmnd);
+ scm->proto_rsp_len = CSIO_SCSI_RSP_LEN;
+ scm->max_sge = CSIO_SCSI_MAX_SGE;
+
+ spin_lock_init(&scm->freelist_lock);
+
+ /* Pre-allocate ioreqs and initialize them */
+ INIT_LIST_HEAD(&scm->ioreq_freelist);
+ for (i = 0; i < csio_scsi_ioreqs; i++) {
+
+ ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL);
+ if (!ioreq) {
+ csio_err(hw,
+ "I/O request element allocation failed,"
+ " num allocated = %d.\n",
+ scm->stats.n_free_ioreq);
+
+ goto free_ioreq;
+ }
+
+ /* Allocate Dma buffers for Response Payload */
+ dma_buf = &ioreq->dma_buf;
+ dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL,
+ &dma_buf->paddr);
+ if (!dma_buf->vaddr) {
+ csio_err(hw,
+ "SCSI response DMA buffer allocation"
+ " failed!\n");
+ kfree(ioreq);
+ goto free_ioreq;
+ }
+
+ dma_buf->len = scm->proto_rsp_len;
+
+ /* Set state to uninit */
+ csio_init_state(&ioreq->sm, csio_scsis_uninit);
+ INIT_LIST_HEAD(&ioreq->gen_list);
+ init_completion(&ioreq->cmplobj);
+
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+ }
+
+ if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs))
+ goto free_ioreq;
+
+ return 0;
+
+free_ioreq:
+ /*
+ * Free up existing allocations, since an error
+ * from here means we are returning for good
+ */
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ return -ENOMEM;
+}
+
+/**
+ * csio_scsim_exit - Uninitialize SCSI Module
+ * @scm: SCSI Module
+ *
+ */
+void
+csio_scsim_exit(struct csio_scsim *scm)
+{
+ struct csio_ioreq *ioreq;
+ struct csio_dma_buf *dma_buf;
+
+ while (!list_empty(&scm->ioreq_freelist)) {
+ struct csio_sm *tmp;
+
+ tmp = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&tmp->sm_list);
+ ioreq = (struct csio_ioreq *)tmp;
+
+ dma_buf = &ioreq->dma_buf;
+ pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr,
+ dma_buf->paddr);
+
+ kfree(ioreq);
+ }
+
+ scm->stats.n_free_ioreq = 0;
+
+ csio_scsi_free_ddp_bufs(scm, scm->hw);
+}
diff --git a/drivers/scsi/csiostor/csio_scsi.h b/drivers/scsi/csiostor/csio_scsi.h
new file mode 100644
index 000000000000..2257c3dcf724
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_scsi.h
@@ -0,0 +1,342 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_SCSI_H__
+#define __CSIO_SCSI_H__
+
+#include <linux/spinlock_types.h>
+#include <linux/completion.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/fc/fc_fcp.h>
+
+#include "csio_defs.h"
+#include "csio_wr.h"
+
+extern struct scsi_host_template csio_fcoe_shost_template;
+extern struct scsi_host_template csio_fcoe_shost_vport_template;
+
+extern int csio_scsi_eqsize;
+extern int csio_scsi_iqlen;
+extern int csio_scsi_ioreqs;
+extern uint32_t csio_max_scan_tmo;
+extern uint32_t csio_delta_scan_tmo;
+extern int csio_lun_qdepth;
+
+/*
+ **************************** NOTE *******************************
+ * How do we calculate MAX FCoE SCSI SGEs? Here is the math:
+ * Max Egress WR size = 512 bytes
+ * One SCSI egress WR has the following fixed no of bytes:
+ * 48 (sizeof(struct fw_scsi_write[read]_wr)) - FW WR
+ * + 32 (sizeof(struct fc_fcp_cmnd)) - Immediate FCP_CMD
+ * ------
+ * 80
+ * ------
+ * That leaves us with 512 - 80 = 432 bytes for the data SGE. Using
+ * the struct ulptx_sgl header for the SGE consumes:
+ * - 4 bytes for cmnd_sge.
+ * - 12 bytes for the first SGL.
+ * That leaves us with 416 bytes for the remaining SGE pairs, which
+ * is 416 / 24 (sizeof(struct ulptx_sge_pair)) = 17 SGE pairs,
+ * or 34 SGEs. Adding the first SGE gives us 35 SGEs in total.
+ */
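+
+/*
+ * Condensed form of the NOTE above (a sanity check, not driver code):
+ * 1 + 2 * ((512 - 80 - 16) / 24) = 1 + 2 * 17 = 35.
+ */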
+#define CSIO_SCSI_MAX_SGE 35
+#define CSIO_SCSI_ABRT_TMO_MS 60000
+#define CSIO_SCSI_LUNRST_TMO_MS 60000
+#define CSIO_SCSI_TM_POLL_MS 2000 /* should be less than
+ * all TM timeouts.
+ */
+#define CSIO_SCSI_IQ_WRSZ 128
+#define CSIO_SCSI_IQSIZE (csio_scsi_iqlen * CSIO_SCSI_IQ_WRSZ)
+
+#define CSIO_MAX_SNS_LEN 128
+#define CSIO_SCSI_RSP_LEN (FCP_RESP_WITH_EXT + 4 + CSIO_MAX_SNS_LEN)
+
+/* Reference to scsi_cmnd */
+#define csio_scsi_cmnd(req) ((req)->scratch1)
+
+struct csio_scsi_stats {
+ uint64_t n_tot_success; /* Total number of good I/Os */
+ uint32_t n_rn_nr_error; /* No. of remote-node-not-
+ * ready errors
+ */
+ uint32_t n_hw_nr_error; /* No. of hw-module-not-
+ * ready errors
+ */
+ uint32_t n_dmamap_error; /* No. of DMA map errors */
+ uint32_t n_unsupp_sge_error; /* No. of too-many-SGes
+ * errors.
+ */
+ uint32_t n_no_req_error; /* No. of out-of-ioreqs errors */
+ uint32_t n_busy_error; /* No. of -EBUSY errors */
+ uint32_t n_hosterror; /* No. of FW_HOSTERROR I/O */
+ uint32_t n_rsperror; /* No. of response errors */
+ uint32_t n_autosense; /* No. of auto sense replies */
+ uint32_t n_ovflerror; /* No. of overflow errors */
+ uint32_t n_unflerror; /* No. of underflow errors */
+ uint32_t n_rdev_nr_error;/* No. of rdev not
+ * ready errors
+ */
+ uint32_t n_rdev_lost_error;/* No. of rdev lost errors */
+ uint32_t n_rdev_logo_error;/* No. of rdev logo errors */
+ uint32_t n_link_down_error;/* No. of link down errors */
+ uint32_t n_no_xchg_error; /* No. of no-exchange errors */
+ uint32_t n_unknown_error;/* No. of unhandled errors */
+ uint32_t n_aborted; /* No. of aborted I/Os */
+ uint32_t n_abrt_timedout; /* No. of aborts that timed out */
+ uint32_t n_abrt_fail; /* No. of abort failures */
+ uint32_t n_abrt_dups; /* No. of duplicate aborts */
+ uint32_t n_abrt_race_comp; /* No. of aborts that raced
+ * with completions.
+ */
+ uint32_t n_abrt_busy_error;/* No. of abort failures
+ * due to -EBUSY.
+ */
+ uint32_t n_closed; /* No. of closed I/Os */
+ uint32_t n_cls_busy_error; /* No. of close failures
+ * due to -EBUSY.
+ */
+ uint32_t n_active; /* No. of IOs in active_q */
+ uint32_t n_tm_active; /* No. of TMs in active_q */
+ uint32_t n_wcbfn; /* No. of I/Os in worker
+ * cbfn q
+ */
+ uint32_t n_free_ioreq; /* No. of freelist entries */
+ uint32_t n_free_ddp; /* No. of DDP freelist entries */
+ uint32_t n_unaligned; /* No. of unaligned SGLs */
+ uint32_t n_inval_cplop; /* No. of invalid CPL ops in IQ */
+ uint32_t n_inval_scsiop; /* No. of invalid SCSI ops in IQ*/
+};
+
+struct csio_scsim {
+ struct csio_hw *hw; /* Pointer to HW module */
+ uint8_t max_sge; /* Max SGE */
+ uint8_t proto_cmd_len; /* Proto specific SCSI
+ * cmd length
+ */
+ uint16_t proto_rsp_len; /* Proto specific SCSI
+ * response length
+ */
+ spinlock_t freelist_lock; /* Lock for ioreq freelist */
+ struct list_head active_q; /* Outstanding SCSI I/Os */
+ struct list_head ioreq_freelist; /* Free list of ioreq's */
+ struct list_head ddp_freelist; /* DDP descriptor freelist */
+ struct csio_scsi_stats stats; /* This module's statistics */
+};
+
+/* State machine defines */
+enum csio_scsi_ev {
+ CSIO_SCSIE_START_IO = 1, /* Start a regular SCSI IO */
+ CSIO_SCSIE_START_TM, /* Start a TM IO */
+ CSIO_SCSIE_COMPLETED, /* IO Completed */
+ CSIO_SCSIE_ABORT, /* Abort IO */
+ CSIO_SCSIE_ABORTED, /* IO Aborted */
+ CSIO_SCSIE_CLOSE, /* Close exchange */
+ CSIO_SCSIE_CLOSED, /* Exchange closed */
+ CSIO_SCSIE_DRVCLEANUP, /* Driver wants to manually
+ * cleanup this I/O.
+ */
+};
+
+enum csio_scsi_lev {
+ CSIO_LEV_ALL = 1,
+ CSIO_LEV_LNODE,
+ CSIO_LEV_RNODE,
+ CSIO_LEV_LUN,
+};
+
+struct csio_scsi_level_data {
+ enum csio_scsi_lev level;
+ struct csio_rnode *rnode;
+ struct csio_lnode *lnode;
+ uint64_t oslun;
+};
+
+static inline struct csio_ioreq *
+csio_get_scsi_ioreq(struct csio_scsim *scm)
+{
+ struct csio_sm *req;
+
+ if (likely(!list_empty(&scm->ioreq_freelist))) {
+ req = list_first_entry(&scm->ioreq_freelist,
+ struct csio_sm, sm_list);
+ list_del_init(&req->sm_list);
+ CSIO_DEC_STATS(scm, n_free_ioreq);
+ return (struct csio_ioreq *)req;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ioreq(struct csio_scsim *scm, struct csio_ioreq *ioreq)
+{
+ list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist);
+ CSIO_INC_STATS(scm, n_free_ioreq);
+}
+
+static inline void
+csio_put_scsi_ioreq_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_init(reqlist, &scm->ioreq_freelist);
+ scm->stats.n_free_ioreq += n;
+}
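+
+/*
+ * Usage sketch (a minimal illustration, not part of this patch): the
+ * ioreq accessors above and the DDP accessors below take no locks
+ * themselves, so callers are expected to hold freelist_lock, e.g.:
+ *
+ *	unsigned long flags;
+ *
+ *	spin_lock_irqsave(&scm->freelist_lock, flags);
+ *	ioreq = csio_get_scsi_ioreq(scm);
+ *	spin_unlock_irqrestore(&scm->freelist_lock, flags);
+ *	if (ioreq) {
+ *		... issue the I/O ...
+ *		spin_lock_irqsave(&scm->freelist_lock, flags);
+ *		csio_put_scsi_ioreq(scm, ioreq);
+ *		spin_unlock_irqrestore(&scm->freelist_lock, flags);
+ *	}
+ */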
+
+static inline struct csio_dma_buf *
+csio_get_scsi_ddp(struct csio_scsim *scm)
+{
+ struct csio_dma_buf *ddp;
+
+ if (likely(!list_empty(&scm->ddp_freelist))) {
+ ddp = list_first_entry(&scm->ddp_freelist,
+ struct csio_dma_buf, list);
+ list_del_init(&ddp->list);
+ CSIO_DEC_STATS(scm, n_free_ddp);
+ return ddp;
+ } else
+ return NULL;
+}
+
+static inline void
+csio_put_scsi_ddp(struct csio_scsim *scm, struct csio_dma_buf *ddp)
+{
+ list_add_tail(&ddp->list, &scm->ddp_freelist);
+ CSIO_INC_STATS(scm, n_free_ddp);
+}
+
+static inline void
+csio_put_scsi_ddp_list(struct csio_scsim *scm, struct list_head *reqlist,
+ int n)
+{
+ list_splice_tail_init(reqlist, &scm->ddp_freelist);
+ scm->stats.n_free_ddp += n;
+}
+
+static inline void
+csio_scsi_completed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_COMPLETED);
+ if (csio_list_deleted(&ioreq->sm.sm_list))
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_aborted(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORTED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_closed(struct csio_ioreq *ioreq, struct list_head *cbfn_q)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSED);
+ list_add_tail(&ioreq->sm.sm_list, cbfn_q);
+}
+
+static inline void
+csio_scsi_drvcleanup(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_DRVCLEANUP);
+}
+
+/*
+ * csio_scsi_start_io - Kick-starts the I/O SM.
+ * @req: io request SM.
+ *
+ * Needs to be called with the lock held.
+ */
+static inline int
+csio_scsi_start_io(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_IO);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_start_tm - Kicks off the Task management IO SM.
+ * @req: io request SM.
+ *
+ * Needs to be called with the lock held.
+ */
+static inline int
+csio_scsi_start_tm(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_START_TM);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_abort - Abort an IO request
+ * @req: io request SM.
+ *
+ * Needs to be called with the lock held.
+ */
+static inline int
+csio_scsi_abort(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_ABORT);
+ return ioreq->drv_status;
+}
+
+/*
+ * csio_scsi_close - Close an IO request
+ * @req: io request SM.
+ *
+ * Needs to be called with the lock held.
+ */
+static inline int
+csio_scsi_close(struct csio_ioreq *ioreq)
+{
+ csio_post_event(&ioreq->sm, CSIO_SCSIE_CLOSE);
+ return ioreq->drv_status;
+}
+
+void csio_scsi_cleanup_io_q(struct csio_scsim *, struct list_head *);
+int csio_scsim_cleanup_io(struct csio_scsim *, bool abort);
+int csio_scsim_cleanup_io_lnode(struct csio_scsim *,
+ struct csio_lnode *);
+struct csio_ioreq *csio_scsi_cmpl_handler(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *,
+ void *, uint8_t **);
+int csio_scsi_qconfig(struct csio_hw *);
+int csio_scsim_init(struct csio_scsim *, struct csio_hw *);
+void csio_scsim_exit(struct csio_scsim *);
+
+#endif /* __CSIO_SCSI_H__ */
diff --git a/drivers/scsi/csiostor/csio_wr.c b/drivers/scsi/csiostor/csio_wr.c
new file mode 100644
index 000000000000..c32df1bdaa97
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.c
@@ -0,0 +1,1632 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <asm/page.h>
+#include <linux/cache.h>
+
+#include "csio_hw.h"
+#include "csio_wr.h"
+#include "csio_mb.h"
+#include "csio_defs.h"
+
+int csio_intr_coalesce_cnt; /* value:SGE_INGRESS_RX_THRESHOLD[0] */
+static int csio_sge_thresh_reg; /* SGE_INGRESS_RX_THRESHOLD[0] */
+
+int csio_intr_coalesce_time = 10; /* value:SGE_TIMER_VALUE_1 */
+static int csio_sge_timer_reg = 1;
+
+#define CSIO_SET_FLBUF_SIZE(_hw, _reg, _val) \
+ csio_wr_reg32((_hw), (_val), SGE_FL_BUFFER_SIZE##_reg)
+
+static void
+csio_get_flbuf_size(struct csio_hw *hw, struct csio_sge *sge, uint32_t reg)
+{
+ sge->sge_fl_buf_size[reg] = csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE0 +
+ reg * sizeof(uint32_t));
+}
+
+/* Free list buffer size */
+static inline uint32_t
+csio_wr_fl_bufsz(struct csio_sge *sge, struct csio_dma_buf *buf)
+{
+ return sge->sge_fl_buf_size[buf->paddr & 0xF];
+}
+
+/* Size of the egress queue status page */
+static inline uint32_t
+csio_wr_qstat_pgsz(struct csio_hw *hw)
+{
+ return (hw->wrm.sge.sge_control & EGRSTATUSPAGESIZE(1)) ? 128 : 64;
+}
+
+/* Ring freelist doorbell */
+static inline void
+csio_wr_ring_fldb(struct csio_hw *hw, struct csio_q *flq)
+{
+ /*
+ * Ring the doorbell only when we have at least CSIO_QCREDIT_SZ
+ * number of bytes in the freelist queue. This translates to at least
+ * 8 freelist buffer pointers (since each pointer is 8 bytes).
+ */
+ if (flq->inc_idx >= 8) {
+ csio_wr_reg32(hw, DBPRIO(1) | QID(flq->un.fl.flid) |
+ PIDX(flq->inc_idx / 8),
+ MYPF_REG(SGE_PF_KDOORBELL));
+ flq->inc_idx &= 7;
+ }
+}
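+
+/*
+ * Worked example (illustrative numbers, not driver code): with
+ * inc_idx = 19, the doorbell is rung with PIDX(19 / 8) = 2 credits,
+ * and the remainder, 19 & 7 = 3 buffer pointers, is carried in
+ * inc_idx until the next ring.
+ */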
+
+/* Write a 0 cidx increment value to enable SGE interrupts for this queue */
+static void
+csio_wr_sge_intr_enable(struct csio_hw *hw, uint16_t iqid)
+{
+ csio_wr_reg32(hw, CIDXINC(0) |
+ INGRESSQID(iqid) |
+ TIMERREG(X_TIMERREG_RESTART_COUNTER),
+ MYPF_REG(SGE_PF_GTS));
+}
+
+/*
+ * csio_wr_fill_fl - Populate the FL buffers of a FL queue.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ *
+ * Fill up freelist buffer entries with buffers of size specified
+ * in the size register.
+ *
+ */
+static int
+csio_wr_fill_fl(struct csio_hw *hw, struct csio_q *flq)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ __be64 *d = (__be64 *)(flq->vstart);
+ struct csio_dma_buf *buf = &flq->un.fl.bufs[0];
+ uint64_t paddr;
+ int sreg = flq->un.fl.sreg;
+ int n = flq->credits;
+
+ while (n--) {
+ buf->len = sge->sge_fl_buf_size[sreg];
+ buf->vaddr = pci_alloc_consistent(hw->pdev, buf->len,
+ &buf->paddr);
+ if (!buf->vaddr) {
+ csio_err(hw, "Could only fill %d buffers!\n", n + 1);
+ return -ENOMEM;
+ }
+
+ paddr = buf->paddr | (sreg & 0xF);
+
+ *d++ = cpu_to_be64(paddr);
+ buf++;
+ }
+
+ return 0;
+}
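+
+/*
+ * Note on the address encoding above (an illustration, not driver
+ * code): the DMA buffers are at least 16-byte aligned, so the low
+ * nibble of each posted address is free to carry the size-register
+ * index. E.g. paddr 0x1000 with sreg 2 is posted as 0x1002, and
+ * csio_wr_fl_bufsz() recovers the size via 0x1002 & 0xF = 2.
+ */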
+
+/*
+ * csio_wr_update_fl - Update freelist queue indices.
+ * @hw: HW module.
+ * @flq: Freelist queue.
+ * @n: Number of freelist buffers made available.
+ *
+ * Advances the producer index and the pending doorbell increment
+ * count of the freelist by @n, wrapping pidx at the queue size.
+ */
+static inline void
+csio_wr_update_fl(struct csio_hw *hw, struct csio_q *flq, uint16_t n)
+{
+
+ flq->inc_idx += n;
+ flq->pidx += n;
+ if (unlikely(flq->pidx >= flq->credits))
+ flq->pidx -= (uint16_t)flq->credits;
+
+ CSIO_INC_STATS(flq, n_flq_refill);
+}
+
+/*
+ * csio_wr_alloc_q - Allocate a WR queue and initialize it.
+ * @hw: HW module
+ * @qsize: Size of the queue in bytes
+ * @wrsize: Size of WR in this queue, if fixed.
+ * @type: Type of queue (Ingress/Egress/Freelist)
+ * @owner: Module that owns this queue.
+ * @nflb: Number of freelist buffers for FL.
+ * @sreg: What is the FL buffer size register?
+ * @iq_int_handler: Ingress queue handler in INTx mode.
+ *
+ * This function allocates and sets up a queue for the caller
+ * of size qsize, aligned at the required boundary. This is subject to
+ * free entries being available in the queue array. If one is found,
+ * it is initialized with the allocated queue, marked as being used (owner),
+ * and a handle returned to the caller in form of the queue's index
+ * into the q_arr array.
+ * If user has indicated a freelist (by specifying nflb > 0), create
+ * another queue (with its own index into q_arr) for the freelist. Allocate
+ * memory for DMA buffer metadata (vaddr, len etc). Save off the freelist
+ * idx in the ingress queue's flq.idx. This is how a Freelist is associated
+ * with its owning ingress queue.
+ */
+int
+csio_wr_alloc_q(struct csio_hw *hw, uint32_t qsize, uint32_t wrsize,
+ uint16_t type, void *owner, uint32_t nflb, int sreg,
+ iq_handler_t iq_intx_handler)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q, *flq;
+ int free_idx = wrm->free_qidx;
+ int ret_idx = free_idx;
+ uint32_t qsz;
+ int flq_idx;
+
+ if (free_idx >= wrm->num_q) {
+ csio_err(hw, "No more free queues.\n");
+ return -1;
+ }
+
+ switch (type) {
+ case CSIO_EGRESS:
+ qsz = ALIGN(qsize, CSIO_QCREDIT_SZ) + csio_wr_qstat_pgsz(hw);
+ break;
+ case CSIO_INGRESS:
+ switch (wrsize) {
+ case 16:
+ case 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ csio_err(hw, "Invalid Ingress queue WR size:%d\n",
+ wrsize);
+ return -1;
+ }
+
+ /*
+ * Number of elements must be a multiple of 16,
+ * so this includes the status page size.
+ */
+ qsz = ALIGN(qsize/wrsize, 16) * wrsize;
+
+ break;
+ case CSIO_FREELIST:
+ qsz = ALIGN(qsize/wrsize, 8) * wrsize + csio_wr_qstat_pgsz(hw);
+ break;
+ default:
+ csio_err(hw, "Invalid queue type: 0x%x\n", type);
+ return -1;
+ }
+
+ q = wrm->q_arr[free_idx];
+
+ q->vstart = pci_alloc_consistent(hw->pdev, qsz, &q->pstart);
+ if (!q->vstart) {
+ csio_err(hw,
+ "Failed to allocate DMA memory for "
+ "queue at id: %d size: %d\n", free_idx, qsize);
+ return -1;
+ }
+
+ /*
+ * We need to zero out the contents, importantly for ingress,
+ * since we start with a generation bit of 1 for ingress.
+ */
+ memset(q->vstart, 0, qsz);
+
+ q->type = type;
+ q->owner = owner;
+ q->pidx = q->cidx = q->inc_idx = 0;
+ q->size = qsz;
+ q->wr_sz = wrsize; /* If using fixed size WRs */
+
+ wrm->free_qidx++;
+
+ if (type == CSIO_INGRESS) {
+ /* Since queue area is set to zero */
+ q->un.iq.genbit = 1;
+
+ /*
+ * Ingress queue status page size is always the size of
+ * the ingress queue entry.
+ */
+ q->credits = (qsz - q->wr_sz) / q->wr_sz;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - q->wr_sz);
+
+ /* Allocate memory for FL if requested */
+ if (nflb > 0) {
+ flq_idx = csio_wr_alloc_q(hw, nflb * sizeof(__be64),
+ sizeof(__be64), CSIO_FREELIST,
+ owner, 0, sreg, NULL);
+ if (flq_idx == -1) {
+ csio_err(hw,
+ "Failed to allocate FL queue"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ /* Associate the new FL with the Ingress queue */
+ q->un.iq.flq_idx = flq_idx;
+
+ flq = wrm->q_arr[q->un.iq.flq_idx];
+ flq->un.fl.bufs = kzalloc(flq->credits *
+ sizeof(struct csio_dma_buf),
+ GFP_KERNEL);
+ if (!flq->un.fl.bufs) {
+ csio_err(hw,
+ "Failed to allocate FL queue bufs"
+ " for IQ idx:%d\n", free_idx);
+ return -1;
+ }
+
+ flq->un.fl.packen = 0;
+ flq->un.fl.offset = 0;
+ flq->un.fl.sreg = sreg;
+
+ /* Fill up the free list buffers */
+ if (csio_wr_fill_fl(hw, flq))
+ return -1;
+
+ /*
+ * Make sure at least 1 credit (8 FL buffers) in a FLQ
+ * remains unpopulated, otherwise HW thinks the
+ * FLQ is empty.
+ */
+ flq->pidx = flq->inc_idx = flq->credits - 8;
+ } else {
+ q->un.iq.flq_idx = -1;
+ }
+
+ /* Associate the IQ INTx handler. */
+ q->un.iq.iq_intx_handler = iq_intx_handler;
+
+ csio_q_iqid(hw, ret_idx) = CSIO_MAX_QID;
+
+ } else if (type == CSIO_EGRESS) {
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / CSIO_QCREDIT_SZ;
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_eqid(hw, ret_idx) = CSIO_MAX_QID;
+ } else { /* Freelist */
+ q->credits = (qsz - csio_wr_qstat_pgsz(hw)) / sizeof(__be64);
+ q->vwrap = (void *)((uintptr_t)(q->vstart) + qsz
+ - csio_wr_qstat_pgsz(hw));
+ csio_q_flid(hw, ret_idx) = CSIO_MAX_QID;
+ }
+
+ return ret_idx;
+}
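+
+/*
+ * Usage sketch (hypothetical owner and handler, not from this patch):
+ * allocate a 128-byte-WR SCSI ingress queue backed by a 64-buffer
+ * freelist on size register 0:
+ *
+ *	iq_idx = csio_wr_alloc_q(hw, CSIO_SCSI_IQSIZE, CSIO_SCSI_IQ_WRSZ,
+ *				 CSIO_INGRESS, owner, 64, 0,
+ *				 my_intx_handler);
+ *	if (iq_idx == -1)
+ *		return -ENOMEM;
+ */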
+
+/*
+ * csio_wr_iq_create_rsp - Response handler for IQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that got created.
+ *
+ * Handle FW_IQ_CMD mailbox completion. Save off the assigned IQ/FL ids.
+ */
+static int
+csio_wr_iq_create_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ struct csio_iq_params iqp;
+ enum fw_retval retval;
+ uint32_t iq_id;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "IQ cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_iqid(hw, iq_idx) = iqp.iqid;
+ csio_q_physiqid(hw, iq_idx) = iqp.physiqid;
+ csio_q_pidx(hw, iq_idx) = csio_q_cidx(hw, iq_idx) = 0;
+ csio_q_inc_idx(hw, iq_idx) = 0;
+
+ /* Actual iq-id. */
+ iq_id = iqp.iqid - hw->wrm.fw_iq_start;
+
+ /* Set the iq-id to iq map table. */
+ if (iq_id >= CSIO_MAX_IQ) {
+ csio_err(hw,
+ "Exceeding MAX_IQ(%d) supported!"
+ " iqid:%d rel_iqid:%d FW iq_start:%d\n",
+ CSIO_MAX_IQ, iq_id, iqp.iqid, hw->wrm.fw_iq_start);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+ csio_q_set_intr_map(hw, iq_idx, iq_id);
+
+ /*
+ * During FW_IQ_CMD, FW sets interrupt_sent bit to 1 in the SGE
+ * ingress context of this queue. This will block interrupts to
+ * this queue until the next GTS write. Therefore, we do a
+ * 0-cidx increment GTS write for this queue just to clear the
+ * interrupt_sent bit. This will re-enable interrupts to this
+ * queue.
+ */
+ csio_wr_sge_intr_enable(hw, iqp.physiqid);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ csio_q_flid(hw, flq_idx) = iqp.fl0id;
+ csio_q_cidx(hw, flq_idx) = 0;
+ csio_q_pidx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+ csio_q_inc_idx(hw, flq_idx) = csio_q_credits(hw, flq_idx) - 8;
+
+ /* Now update SGE about the buffers allocated during init */
+ csio_wr_ring_fldb(hw, flq);
+ }
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_iq_create - Configure an Ingress queue with FW.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index in the WR module.
+ * @vec: MSIX vector.
+ * @portid: PCIE Channel to be associated with this queue.
+ * @async: Is this a FW asynchronous message handling queue?
+ * @cbfn: Completion callback.
+ *
+ * This API configures an ingress queue with FW by issuing a FW_IQ_CMD mailbox
+ * with alloc/write bits set.
+ */
+int
+csio_wr_iq_create(struct csio_hw *hw, void *priv, int iq_idx,
+ uint32_t vec, uint8_t portid, bool async,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+ csio_q_portid(hw, iq_idx) = portid;
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "IQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ switch (hw->intr_mode) {
+ case CSIO_IM_INTX:
+ case CSIO_IM_MSI:
+ /* For interrupt forwarding queue only */
+ if (hw->intr_iq_idx == iq_idx)
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ else
+ iqp.iqandst = X_INTERRUPTDESTINATION_IQ;
+ iqp.iqandstindex =
+ csio_q_physiqid(hw, hw->intr_iq_idx);
+ break;
+ case CSIO_IM_MSIX:
+ iqp.iqandst = X_INTERRUPTDESTINATION_PCIE;
+ iqp.iqandstindex = (uint16_t)vec;
+ break;
+ case CSIO_IM_NONE:
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ /* Pass in the ingress queue cmd parameters */
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iq_start = 1;
+ iqp.viid = 0;
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+ iqp.iqasynch = async;
+ if (csio_intr_coalesce_cnt)
+ iqp.iqanus = X_UPDATESCHEDULING_COUNTER_OPTTIMER;
+ else
+ iqp.iqanus = X_UPDATESCHEDULING_TIMER;
+ iqp.iqanud = X_UPDATEDELIVERY_INTERRUPT;
+ iqp.iqpciech = portid;
+ iqp.iqintcntthresh = (uint8_t)csio_sge_thresh_reg;
+
+ switch (csio_q_wr_sz(hw, iq_idx)) {
+ case 16:
+ iqp.iqesize = 0; break;
+ case 32:
+ iqp.iqesize = 1; break;
+ case 64:
+ iqp.iqesize = 2; break;
+ case 128:
+ iqp.iqesize = 3; break;
+ }
+
+ iqp.iqsize = csio_q_size(hw, iq_idx) /
+ csio_q_wr_sz(hw, iq_idx);
+ iqp.iqaddr = csio_q_pstart(hw, iq_idx);
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1) {
+ struct csio_q *flq = hw->wrm.q_arr[flq_idx];
+
+ iqp.fl0paden = 1;
+ iqp.fl0packen = flq->un.fl.packen ? 1 : 0;
+ iqp.fl0fbmin = X_FETCHBURSTMIN_64B;
+ iqp.fl0fbmax = X_FETCHBURSTMAX_512B;
+ iqp.fl0size = csio_q_size(hw, flq_idx) / CSIO_QCREDIT_SZ;
+ iqp.fl0addr = csio_q_pstart(hw, flq_idx);
+ }
+
+ csio_mb_iq_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of IQ cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_create_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_cfg_rsp - Response handler for EQ creation.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that got created.
+ *
+ * Handle FW_EQ_OFLD_CMD mailbox completion. Save off the assigned EQ ids.
+ */
+static int
+csio_wr_eq_cfg_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ struct csio_eq_params eqp;
+ enum fw_retval retval;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ csio_mb_eq_ofld_alloc_write_rsp(hw, mbp, &retval, &eqp);
+
+ if (retval != FW_SUCCESS) {
+ csio_err(hw, "EQ OFLD cmd returned 0x%x!\n", retval);
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ csio_q_eqid(hw, eq_idx) = (uint16_t)eqp.eqid;
+ csio_q_physeqid(hw, eq_idx) = (uint16_t)eqp.physeqid;
+ csio_q_pidx(hw, eq_idx) = csio_q_cidx(hw, eq_idx) = 0;
+ csio_q_inc_idx(hw, eq_idx) = 0;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return 0;
+}
+
+/*
+ * csio_wr_eq_create - Configure an Egress queue with FW.
+ * @hw: HW module.
+ * @priv: Private data.
+ * @eq_idx: Egress queue index in the WR module.
+ * @iq_idx: Associated ingress queue index.
+ * @cbfn: Completion callback.
+ *
+ * This API configures an offload egress queue with FW by issuing a
+ * FW_EQ_OFLD_CMD (with alloc + write) mailbox.
+ */
+int
+csio_wr_eq_create(struct csio_hw *hw, void *priv, int eq_idx,
+ int iq_idx, uint8_t portid,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp) {
+ csio_err(hw, "EQ command out of memory!\n");
+ return -ENOMEM;
+ }
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqstart = 1;
+ eqp.hostfcmode = X_HOSTFCMODE_STATUS_PAGE;
+ eqp.iqid = csio_q_iqid(hw, iq_idx);
+ eqp.fbmin = X_FETCHBURSTMIN_64B;
+ eqp.fbmax = X_FETCHBURSTMAX_512B;
+ eqp.cidxfthresh = 0;
+ eqp.pciechn = portid;
+ eqp.eqsize = csio_q_size(hw, eq_idx) / CSIO_QCREDIT_SZ;
+ eqp.eqaddr = csio_q_pstart(hw, eq_idx);
+
+ csio_mb_eq_ofld_alloc_write(hw, mbp, priv, CSIO_MB_DEFAULT_TMO,
+ &eqp, cbfn);
+
+ if (csio_mb_issue(hw, mbp)) {
+ csio_err(hw, "Issue of EQ OFLD cmd failed!\n");
+ mempool_free(mbp, hw->mb_mempool);
+ return -EINVAL;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_cfg_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_iq_destroy_rsp - Response handler for IQ removal.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @iq_idx: Ingress queue that was freed.
+ *
+ * Handle FW_IQ_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_iq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int iq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_iq_destroy - Free an ingress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @iq_idx: Ingress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an ingress queue by issuing the FW_IQ_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_iq_destroy(struct csio_hw *hw, void *priv, int iq_idx,
+ void (*cbfn)(struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_iq_params iqp;
+ int flq_idx;
+
+ memset(&iqp, 0, sizeof(struct csio_iq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ iqp.pfn = hw->pfn;
+ iqp.vfn = 0;
+ iqp.iqid = csio_q_iqid(hw, iq_idx);
+ iqp.type = FW_IQ_TYPE_FL_INT_CAP;
+
+ flq_idx = csio_q_iq_flq_idx(hw, iq_idx);
+ if (flq_idx != -1)
+ iqp.fl0id = csio_q_flid(hw, flq_idx);
+ else
+ iqp.fl0id = 0xFFFF;
+
+ iqp.fl1id = 0xFFFF;
+
+ csio_mb_iq_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &iqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_iq_destroy_rsp(hw, mbp, iq_idx);
+}
+
+/*
+ * csio_wr_eq_destroy_rsp - Response handler for OFLD EQ destruction.
+ * @hw: The HW module.
+ * @mbp: Mailbox.
+ * @eq_idx: Egress queue that was freed.
+ *
+ * Handle FW_EQ_OFLD_CMD (free) mailbox completion.
+ */
+static int
+csio_wr_eq_destroy_rsp(struct csio_hw *hw, struct csio_mb *mbp, int eq_idx)
+{
+ enum fw_retval retval = csio_mb_fw_retval(mbp);
+ int rv = 0;
+
+ if (retval != FW_SUCCESS)
+ rv = -EINVAL;
+
+ mempool_free(mbp, hw->mb_mempool);
+
+ return rv;
+}
+
+/*
+ * csio_wr_eq_destroy - Free an Egress queue.
+ * @hw: The HW module.
+ * @priv: Private data object.
+ * @eq_idx: Egress queue index to destroy
+ * @cbfn: Completion callback.
+ *
+ * This API frees an Egress queue by issuing the FW_EQ_OFLD_CMD
+ * with the free bit set.
+ */
+static int
+csio_wr_eq_destroy(struct csio_hw *hw, void *priv, int eq_idx,
+ void (*cbfn) (struct csio_hw *, struct csio_mb *))
+{
+ int rv = 0;
+ struct csio_mb *mbp;
+ struct csio_eq_params eqp;
+
+ memset(&eqp, 0, sizeof(struct csio_eq_params));
+
+ mbp = mempool_alloc(hw->mb_mempool, GFP_ATOMIC);
+ if (!mbp)
+ return -ENOMEM;
+
+ eqp.pfn = hw->pfn;
+ eqp.vfn = 0;
+ eqp.eqid = csio_q_eqid(hw, eq_idx);
+
+ csio_mb_eq_ofld_free(hw, mbp, priv, CSIO_MB_DEFAULT_TMO, &eqp, cbfn);
+
+ rv = csio_mb_issue(hw, mbp);
+ if (rv != 0) {
+ mempool_free(mbp, hw->mb_mempool);
+ return rv;
+ }
+
+ if (cbfn != NULL)
+ return 0;
+
+ return csio_wr_eq_destroy_rsp(hw, mbp, eq_idx);
+}
+
+/*
+ * csio_wr_cleanup_eq_stpg - Cleanup Egress queue status page
+ * @hw: HW module
+ * @qidx: Egress queue index
+ *
+ * Cleanup the Egress queue status page.
+ */
+static void
+csio_wr_cleanup_eq_stpg(struct csio_hw *hw, int qidx)
+{
+ struct csio_q *q = csio_hw_to_wrm(hw)->q_arr[qidx];
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+
+ memset(stp, 0, sizeof(*stp));
+}
+
+/*
+ * csio_wr_cleanup_iq_ftr - Cleanup Footer entries in IQ
+ * @hw: HW module
+ * @qidx: Ingress queue index
+ *
+ * Cleanup the footer entries in the given ingress queue,
+ * set to 1 the internal copy of genbit.
+ */
+static void
+csio_wr_cleanup_iq_ftr(struct csio_hw *hw, int qidx)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *wr;
+ struct csio_iqwr_footer *ftr;
+ uint32_t i = 0;
+
+ /* set to 1 since we are about to zero out the genbit */
+ q->un.iq.genbit = 1;
+
+ for (i = 0; i < q->credits; i++) {
+ /* Get the WR */
+ wr = (void *)((uintptr_t)q->vstart +
+ (i * q->wr_sz));
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ /* Zero out footer */
+ memset(ftr, 0, sizeof(*ftr));
+ }
+}
+
+int
+csio_wr_destroy_queues(struct csio_hw *hw, bool cmd)
+{
+ int i, flq_idx;
+ struct csio_q *q;
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ int rv;
+
+ for (i = 0; i < wrm->free_qidx; i++) {
+ q = wrm->q_arr[i];
+
+ switch (q->type) {
+ case CSIO_EGRESS:
+ if (csio_q_eqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_eq_stpg(hw, i);
+ if (!cmd) {
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_eq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_eqid(hw, i) = CSIO_MAX_QID;
+ }
+ break;
+ case CSIO_INGRESS:
+ if (csio_q_iqid(hw, i) != CSIO_MAX_QID) {
+ csio_wr_cleanup_iq_ftr(hw, i);
+ if (!cmd) {
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) =
+ CSIO_MAX_QID;
+ continue;
+ }
+
+ rv = csio_wr_iq_destroy(hw, NULL, i, NULL);
+ if ((rv == -EBUSY) || (rv == -ETIMEDOUT))
+ cmd = false;
+
+ csio_q_iqid(hw, i) = CSIO_MAX_QID;
+ flq_idx = csio_q_iq_flq_idx(hw, i);
+ if (flq_idx != -1)
+ csio_q_flid(hw, flq_idx) = CSIO_MAX_QID;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_FW_ALLOCED;
+
+ return 0;
+}
+
+/*
+ * csio_wr_get - Get requested size of WR entry/entries from queue.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @size: Cumulative size of Work request(s).
+ * @wrp: Work request pair.
+ *
+ * If requested credits are available, return the start address of the
+ * work request in the work request pair. Set pidx accordingly and
+ * return.
+ *
+ * NOTE about WR pair:
+ * ==================
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/size to be passed back to the caller -
+ * hence Work request pair format.
+ */
+int
+csio_wr_get(struct csio_hw *hw, int qidx, uint32_t size,
+ struct csio_wr_pair *wrp)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+ void *cwr = (void *)((uintptr_t)(q->vstart) +
+ (q->pidx * CSIO_QCREDIT_SZ));
+ struct csio_qstatus_page *stp = (struct csio_qstatus_page *)q->vwrap;
+ uint16_t cidx = q->cidx = ntohs(stp->cidx);
+ uint16_t pidx = q->pidx;
+ uint32_t req_sz = ALIGN(size, CSIO_QCREDIT_SZ);
+ int req_credits = req_sz / CSIO_QCREDIT_SZ;
+ int credits;
+
+ CSIO_DB_ASSERT(q->owner != NULL);
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+ CSIO_DB_ASSERT(cidx <= q->credits);
+
+ /* Calculate credits */
+ if (pidx > cidx) {
+ credits = q->credits - (pidx - cidx) - 1;
+ } else if (cidx > pidx) {
+ credits = cidx - pidx - 1;
+ } else {
+ /* cidx == pidx, empty queue */
+ credits = q->credits;
+ CSIO_INC_STATS(q, n_qempty);
+ }
+
+ /*
+ * Check if we have enough credits.
+ * credits = 1 implies queue is full.
+ */
+ if (!credits || (req_credits > credits)) {
+ CSIO_INC_STATS(q, n_qfull);
+ return -EBUSY;
+ }
+
+ /*
+ * If we are here, we have enough credits to satisfy the
+ * request. Check if we are near the end of q, and if WR spills over.
+ * If it does, use the first addr/size to cover the queue until
+ * the end. Fit the remainder portion of the request at the top
+ * of queue and return it in the second addr/len. Set pidx
+ * accordingly.
+ */
+ if (unlikely(((uintptr_t)cwr + req_sz) > (uintptr_t)(q->vwrap))) {
+ wrp->addr1 = cwr;
+ wrp->size1 = (uint32_t)((uintptr_t)q->vwrap - (uintptr_t)cwr);
+ wrp->addr2 = q->vstart;
+ wrp->size2 = req_sz - wrp->size1;
+ q->pidx = (uint16_t)(ALIGN(wrp->size2, CSIO_QCREDIT_SZ) /
+ CSIO_QCREDIT_SZ);
+ CSIO_INC_STATS(q, n_qwrap);
+ CSIO_INC_STATS(q, n_eq_wr_split);
+ } else {
+ wrp->addr1 = cwr;
+ wrp->size1 = req_sz;
+ wrp->addr2 = NULL;
+ wrp->size2 = 0;
+ q->pidx += (uint16_t)req_credits;
+
+ /* We are at the end of the queue, roll back pidx to top of queue */
+ if (unlikely(q->pidx == q->credits)) {
+ q->pidx = 0;
+ CSIO_INC_STATS(q, n_qwrap);
+ }
+ }
+
+ q->inc_idx = (uint16_t)req_credits;
+
+ CSIO_INC_STATS(q, n_tot_reqs);
+
+ return 0;
+}
+
+/*
+ * csio_wr_copy_to_wrp - Copies given data into WR.
+ * @data_buf - Data buffer
+ * @wrp - Work request pair.
+ * @wr_off - Work request offset.
+ * @data_len - Data length.
+ *
+ * Copies the given data into the work request. The work request pair
+ * (wrp) specifies the address information of the work request.
+ * Returns: none
+ */
+void
+csio_wr_copy_to_wrp(void *data_buf, struct csio_wr_pair *wrp,
+ uint32_t wr_off, uint32_t data_len)
+{
+ uint32_t nbytes;
+
+ /* Number of bytes that fit in buffer addr1 of the WRP */
+ nbytes = ((wrp->size1 - wr_off) >= data_len) ?
+ data_len : (wrp->size1 - wr_off);
+
+ memcpy((uint8_t *) wrp->addr1 + wr_off, data_buf, nbytes);
+ data_len -= nbytes;
+
+ /* Write the remaining data from the beginning of the circular buffer */
+ if (data_len) {
+ CSIO_DB_ASSERT(data_len <= wrp->size2);
+ CSIO_DB_ASSERT(wrp->addr2 != NULL);
+ memcpy(wrp->addr2, (uint8_t *) data_buf + nbytes, data_len);
+ }
+}
+
+/*
+ * csio_wr_issue - Notify chip of Work request.
+ * @hw: HW module.
+ * @qidx: Index of queue.
+ * @prio: 0: Low priority, 1: High priority
+ *
+ * Rings the SGE Doorbell by writing the current producer index of the passed
+ * in queue into the register.
+ *
+ */
+int
+csio_wr_issue(struct csio_hw *hw, int qidx, bool prio)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *q = wrm->q_arr[qidx];
+
+ CSIO_DB_ASSERT((qidx >= 0) && (qidx < wrm->free_qidx));
+
+ wmb();
+ /* Ring SGE Doorbell writing q->pidx into it */
+ csio_wr_reg32(hw, DBPRIO(prio) | QID(q->un.eq.physeqid) |
+ PIDX(q->inc_idx), MYPF_REG(SGE_PF_KDOORBELL));
+ q->inc_idx = 0;
+
+ return 0;
+}
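+
+/*
+ * Typical caller sequence (a minimal sketch; 'data' and 'len' are
+ * hypothetical): reserve queue credits, copy the WR body (which may
+ * be split across the queue wrap), then ring the doorbell:
+ *
+ *	struct csio_wr_pair wrp;
+ *
+ *	if (csio_wr_get(hw, qidx, len, &wrp) == 0) {
+ *		csio_wr_copy_to_wrp(data, &wrp, 0, len);
+ *		csio_wr_issue(hw, qidx, false);
+ *	}
+ */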
+
+static inline uint32_t
+csio_wr_avail_qcredits(struct csio_q *q)
+{
+ if (q->pidx > q->cidx)
+ return q->pidx - q->cidx;
+ else if (q->cidx > q->pidx)
+ return q->credits - (q->cidx - q->pidx);
+ else
+ return 0; /* cidx == pidx, empty queue */
+}
+
+/*
+ * csio_wr_inval_flq_buf - Invalidate a free list buffer entry.
+ * @hw: HW module.
+ * @flq: The freelist queue.
+ *
+ * Invalidate the driver's version of a freelist buffer entry,
+ * without freeing the associated the DMA memory. The entry
+ * to be invalidated is picked up from the current Free list
+ * queue cidx.
+ *
+ */
+static inline void
+csio_wr_inval_flq_buf(struct csio_hw *hw, struct csio_q *flq)
+{
+ flq->cidx++;
+ if (flq->cidx == flq->credits) {
+ flq->cidx = 0;
+ CSIO_INC_STATS(flq, n_qwrap);
+ }
+}
+
+/*
+ * csio_wr_process_fl - Process a freelist completion.
+ * @hw: HW module.
+ * @q: The ingress queue attached to the Freelist.
+ * @wr: The freelist completion WR in the ingress queue.
+ * @len_to_qid: The lower 32-bits of the first flit of the RSP footer
+ * @iq_handler: Caller's handler for this completion.
+ * @priv: Private pointer of caller
+ *
+ */
+static inline void
+csio_wr_process_fl(struct csio_hw *hw, struct csio_q *q,
+ void *wr, uint32_t len_to_qid,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ struct csio_fl_dma_buf flb;
+ struct csio_dma_buf *buf, *fbuf;
+ uint32_t bufsz, len, lastlen = 0;
+ int n;
+ struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx];
+
+ CSIO_DB_ASSERT(flq != NULL);
+
+ len = len_to_qid;
+
+ if (len & IQWRF_NEWBUF) {
+ if (flq->un.fl.offset > 0) {
+ csio_wr_inval_flq_buf(hw, flq);
+ flq->un.fl.offset = 0;
+ }
+ len = IQWRF_LEN_GET(len);
+ }
+
+ CSIO_DB_ASSERT(len != 0);
+
+ flb.totlen = len;
+
+ /* Consume all freelist buffers used for len bytes */
+ for (n = 0, fbuf = flb.flbufs; ; n++, fbuf++) {
+ buf = &flq->un.fl.bufs[flq->cidx];
+ bufsz = csio_wr_fl_bufsz(sge, buf);
+
+ fbuf->paddr = buf->paddr;
+ fbuf->vaddr = buf->vaddr;
+
+ flb.offset = flq->un.fl.offset;
+ lastlen = min(bufsz, len);
+ fbuf->len = lastlen;
+
+ len -= lastlen;
+ if (!len)
+ break;
+ csio_wr_inval_flq_buf(hw, flq);
+ }
+
+ flb.defer_free = flq->un.fl.packen ? 0 : 1;
+
+ iq_handler(hw, wr, q->wr_sz - sizeof(struct csio_iqwr_footer),
+ &flb, priv);
+
+ if (flq->un.fl.packen)
+ flq->un.fl.offset += ALIGN(lastlen, sge->csio_fl_align);
+ else
+ csio_wr_inval_flq_buf(hw, flq);
+
+}
+
+/*
+ * csio_is_new_iqwr - Is this a new Ingress queue entry ?
+ * @q: Ingress queue.
+ * @ftr: Ingress queue WR SGE footer.
+ *
+ * The entry is new if our generation bit matches the corresponding
+ * bit in the footer of the current WR.
+ */
+static inline bool
+csio_is_new_iqwr(struct csio_q *q, struct csio_iqwr_footer *ftr)
+{
+ return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT));
+}
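+
+/*
+ * Illustration of the generation-bit scheme (not driver code): queue
+ * memory is zeroed at allocation, so unwritten footers carry gen 0
+ * and never match the driver's initial genbit of 1. The first pass
+ * of entries arrives with gen 1 and matches; once the driver wraps
+ * past vwrap it toggles its genbit to 0, so stale gen-1 entries from
+ * the previous pass no longer qualify as new.
+ */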
+
+/*
+ * csio_wr_process_iq - Process elements in Ingress queue.
+ * @hw: HW pointer
+ * @qidx: Index of queue
+ * @iq_handler: Handler for this queue
+ * @priv: Caller's private pointer
+ *
+ * This routine walks through every entry of the ingress queue, calling
+ * the provided iq_handler with the entry, until the generation bit
+ * flips.
+ */
+int
+csio_wr_process_iq(struct csio_hw *hw, struct csio_q *q,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ void *wr = (void *)((uintptr_t)q->vstart + (q->cidx * q->wr_sz));
+ struct csio_iqwr_footer *ftr;
+ uint32_t wr_type, fw_qid, qid;
+ struct csio_q *q_completed;
+ struct csio_q *flq = csio_iq_has_fl(q) ?
+ wrm->q_arr[q->un.iq.flq_idx] : NULL;
+ int rv = 0;
+
+ /* Get the footer */
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+
+ /*
+ * When q wrapped around last time, the driver should have inverted
+ * q->un.iq.genbit as well.
+ */
+ while (csio_is_new_iqwr(q, ftr)) {
+
+ CSIO_DB_ASSERT(((uintptr_t)wr + q->wr_sz) <=
+ (uintptr_t)q->vwrap);
+ rmb();
+ wr_type = IQWRF_TYPE_GET(ftr->u.type_gen);
+
+ switch (wr_type) {
+ case X_RSPD_TYPE_CPL:
+ /* Subtract footer from WR len */
+ iq_handler(hw, wr, q->wr_sz - sizeof(*ftr), NULL, priv);
+ break;
+ case X_RSPD_TYPE_FLBUF:
+ csio_wr_process_fl(hw, q, wr,
+ ntohl(ftr->pldbuflen_qid),
+ iq_handler, priv);
+ break;
+ case X_RSPD_TYPE_INTR:
+ fw_qid = ntohl(ftr->pldbuflen_qid);
+ qid = fw_qid - wrm->fw_iq_start;
+ q_completed = hw->wrm.intr_map[qid];
+
+ if (unlikely(qid ==
+ csio_q_physiqid(hw, hw->intr_iq_idx))) {
+ /*
+ * We are already in the Forward
+ * Interrupt Queue service; do not
+ * service it again!
+ *
+ */
+ } else {
+ CSIO_DB_ASSERT(q_completed);
+ CSIO_DB_ASSERT(
+ q_completed->un.iq.iq_intx_handler);
+
+ /* Call the queue handler. */
+ q_completed->un.iq.iq_intx_handler(hw, NULL,
+ 0, NULL, (void *)q_completed);
+ }
+ break;
+ default:
+ csio_warn(hw, "Unknown resp type 0x%x received\n",
+ wr_type);
+ CSIO_INC_STATS(q, n_rsp_unknown);
+ break;
+ }
+
+ /*
+ * Ingress *always* has fixed size WR entries. Therefore,
+ * there should always be complete WRs towards the end of
+ * queue.
+ */
+ if (((uintptr_t)wr + q->wr_sz) == (uintptr_t)q->vwrap) {
+
+ /* Roll over to start of queue */
+ q->cidx = 0;
+ wr = q->vstart;
+
+ /* Toggle genbit */
+ q->un.iq.genbit ^= 0x1;
+
+ CSIO_INC_STATS(q, n_qwrap);
+ } else {
+ q->cidx++;
+ wr = (void *)((uintptr_t)(q->vstart) +
+ (q->cidx * q->wr_sz));
+ }
+
+ ftr = (struct csio_iqwr_footer *)((uintptr_t)wr +
+ (q->wr_sz - sizeof(*ftr)));
+ q->inc_idx++;
+
+ } /* while (csio_is_new_iqwr(q, ftr)) */
+
+ /*
+ * We need to re-arm SGE interrupts in case we got a stray interrupt,
+ * especially in MSI-X mode. With INTx, this may be a common occurrence.
+ */
+ if (unlikely(!q->inc_idx)) {
+ CSIO_INC_STATS(q, n_stray_comp);
+ rv = -EINVAL;
+ goto restart;
+ }
+
+ /* Replenish free list buffers if pending falls below low water mark */
+ if (flq) {
+ uint32_t avail = csio_wr_avail_qcredits(flq);
+ if (avail <= 16) {
+ /* Make sure at least 1 credit (8 FL buffers) in the FLQ
+ * remains unpopulated, otherwise HW thinks the
+ * FLQ is empty.
+ */
+ csio_wr_update_fl(hw, flq, (flq->credits - 8) - avail);
+ csio_wr_ring_fldb(hw, flq);
+ }
+ }
+
+restart:
+ /* Now inform SGE about our incremental index value */
+ csio_wr_reg32(hw, CIDXINC(q->inc_idx) |
+ INGRESSQID(q->un.iq.physiqid) |
+ TIMERREG(csio_sge_timer_reg),
+ MYPF_REG(SGE_PF_GTS));
+ q->stats.n_tot_rsps += q->inc_idx;
+
+ q->inc_idx = 0;
+
+ return rv;
+}
+
+int
+csio_wr_process_iq_idx(struct csio_hw *hw, int qidx,
+ void (*iq_handler)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *priv)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_q *iq = wrm->q_arr[qidx];
+
+ return csio_wr_process_iq(hw, iq, iq_handler, priv);
+}
+
+static int
+csio_closest_timer(struct csio_sge *s, int time)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+ delta = time - s->timer_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
+
+static int
+csio_closest_thresh(struct csio_sge *s, int cnt)
+{
+ int i, delta, match = 0, min_delta = INT_MAX;
+
+ for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+ delta = cnt - s->counter_val[i];
+ if (delta < 0)
+ delta = -delta;
+ if (delta < min_delta) {
+ min_delta = delta;
+ match = i;
+ }
+ }
+ return match;
+}
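+
+/*
+ * Worked example (illustrative values, not driver data): with
+ * timer_val = {5, 10, 20, 50, 100, 200} us, csio_closest_timer()
+ * maps a requested coalesce time of 12 us to index 1, since
+ * |12 - 10| = 2 is the smallest delta.
+ */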
+
+static void
+csio_wr_fixup_host_params(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t clsz = L1_CACHE_BYTES;
+ uint32_t s_hps = PAGE_SHIFT - 10;
+ uint32_t ingpad = 0;
+ uint32_t stat_len = clsz > 64 ? 128 : 64;
+
+ csio_wr_reg32(hw, HOSTPAGESIZEPF0(s_hps) | HOSTPAGESIZEPF1(s_hps) |
+ HOSTPAGESIZEPF2(s_hps) | HOSTPAGESIZEPF3(s_hps) |
+ HOSTPAGESIZEPF4(s_hps) | HOSTPAGESIZEPF5(s_hps) |
+ HOSTPAGESIZEPF6(s_hps) | HOSTPAGESIZEPF7(s_hps),
+ SGE_HOST_PAGE_SIZE);
+
+ sge->csio_fl_align = clsz < 32 ? 32 : clsz;
+ ingpad = ilog2(sge->csio_fl_align) - 5;
+
+ csio_set_reg_field(hw, SGE_CONTROL, INGPADBOUNDARY_MASK |
+ EGRSTATUSPAGESIZE(1),
+ INGPADBOUNDARY(ingpad) |
+ EGRSTATUSPAGESIZE(stat_len != 64));
+
+ /* FL BUFFER SIZE#0 is page size, i.e. already aligned to the cache line */
+ csio_wr_reg32(hw, PAGE_SIZE, SGE_FL_BUFFER_SIZE0);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE2) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE2);
+ csio_wr_reg32(hw,
+ (csio_rd_reg32(hw, SGE_FL_BUFFER_SIZE3) +
+ sge->csio_fl_align - 1) & ~(sge->csio_fl_align - 1),
+ SGE_FL_BUFFER_SIZE3);
+
+ csio_wr_reg32(hw, HPZ0(PAGE_SHIFT - 12), ULP_RX_TDDP_PSZ);
+
+ /* default value of rx_dma_offset of the NIC driver */
+ csio_set_reg_field(hw, SGE_CONTROL, PKTSHIFT_MASK,
+ PKTSHIFT(CSIO_SGE_RX_DMA_OFFSET));
+}
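+
+/*
+ * Worked example of the padding math above (assuming a 64-byte L1
+ * cache line): csio_fl_align = 64, so ingpad = ilog2(64) - 5 = 1,
+ * i.e. X_INGPCIEBOUNDARY_64B.
+ */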
+
+static void
+csio_init_intr_coalesce_parms(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+
+ csio_sge_thresh_reg = csio_closest_thresh(sge, csio_intr_coalesce_cnt);
+ if (csio_intr_coalesce_cnt) {
+ csio_sge_thresh_reg = 0;
+ csio_sge_timer_reg = X_TIMERREG_RESTART_COUNTER;
+ return;
+ }
+
+ csio_sge_timer_reg = csio_closest_timer(sge, csio_intr_coalesce_time);
+}
+
+/*
+ * csio_wr_get_sge - Get SGE register values.
+ * @hw: HW module.
+ *
+ * Used by non-master functions and by master-functions relying on config file.
+ */
+static void
+csio_wr_get_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ uint32_t ingpad;
+ int i;
+ u32 timer_value_0_and_1, timer_value_2_and_3, timer_value_4_and_5;
+ u32 ingress_rx_threshold;
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ ingpad = INGPADBOUNDARY_GET(sge->sge_control);
+
+ switch (ingpad) {
+ case X_INGPCIEBOUNDARY_32B:
+ sge->csio_fl_align = 32; break;
+ case X_INGPCIEBOUNDARY_64B:
+ sge->csio_fl_align = 64; break;
+ case X_INGPCIEBOUNDARY_128B:
+ sge->csio_fl_align = 128; break;
+ case X_INGPCIEBOUNDARY_256B:
+ sge->csio_fl_align = 256; break;
+ case X_INGPCIEBOUNDARY_512B:
+ sge->csio_fl_align = 512; break;
+ case X_INGPCIEBOUNDARY_1024B:
+ sge->csio_fl_align = 1024; break;
+ case X_INGPCIEBOUNDARY_2048B:
+ sge->csio_fl_align = 2048; break;
+ case X_INGPCIEBOUNDARY_4096B:
+ sge->csio_fl_align = 4096; break;
+ }
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ timer_value_0_and_1 = csio_rd_reg32(hw, SGE_TIMER_VALUE_0_AND_1);
+ timer_value_2_and_3 = csio_rd_reg32(hw, SGE_TIMER_VALUE_2_AND_3);
+ timer_value_4_and_5 = csio_rd_reg32(hw, SGE_TIMER_VALUE_4_AND_5);
+
+ sge->timer_val[0] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE0_GET(timer_value_0_and_1));
+ sge->timer_val[1] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE1_GET(timer_value_0_and_1));
+ sge->timer_val[2] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE2_GET(timer_value_2_and_3));
+ sge->timer_val[3] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE3_GET(timer_value_2_and_3));
+ sge->timer_val[4] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE4_GET(timer_value_4_and_5));
+ sge->timer_val[5] = (uint16_t)csio_core_ticks_to_us(hw,
+ TIMERVALUE5_GET(timer_value_4_and_5));
+
+ ingress_rx_threshold = csio_rd_reg32(hw, SGE_INGRESS_RX_THRESHOLD);
+ sge->counter_val[0] = THRESHOLD_0_GET(ingress_rx_threshold);
+ sge->counter_val[1] = THRESHOLD_1_GET(ingress_rx_threshold);
+ sge->counter_val[2] = THRESHOLD_2_GET(ingress_rx_threshold);
+ sge->counter_val[3] = THRESHOLD_3_GET(ingress_rx_threshold);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+/*
+ * csio_wr_set_sge - Initialize SGE registers
+ * @hw: HW module.
+ *
+ * Used by Master function to initialize SGE registers in the absence
+ * of a config file.
+ */
+static void
+csio_wr_set_sge(struct csio_hw *hw)
+{
+ struct csio_wrm *wrm = csio_hw_to_wrm(hw);
+ struct csio_sge *sge = &wrm->sge;
+ int i;
+
+ /*
+ * Set up our basic SGE mode to deliver CPL messages to our Ingress
+ * Queue and Packet Data to the Free List.
+ */
+ csio_set_reg_field(hw, SGE_CONTROL, RXPKTCPLMODE(1), RXPKTCPLMODE(1));
+
+ sge->sge_control = csio_rd_reg32(hw, SGE_CONTROL);
+
+ /* sge->csio_fl_align is set up by csio_wr_fixup_host_params(). */
+
+ /*
+ * Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
+ * and generate an interrupt when this occurs so we can recover.
+ */
+ csio_set_reg_field(hw, SGE_DBFIFO_STATUS,
+ HP_INT_THRESH(HP_INT_THRESH_MASK) |
+ LP_INT_THRESH(LP_INT_THRESH_MASK),
+ HP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH) |
+ LP_INT_THRESH(CSIO_SGE_DBFIFO_INT_THRESH));
+ csio_set_reg_field(hw, SGE_DOORBELL_CONTROL, ENABLE_DROP,
+ ENABLE_DROP);
+
+ /* SGE_FL_BUFFER_SIZE0 is set up by csio_wr_fixup_host_params(). */
+
+ CSIO_SET_FLBUF_SIZE(hw, 1, CSIO_SGE_FLBUF_SIZE1);
+ CSIO_SET_FLBUF_SIZE(hw, 2, CSIO_SGE_FLBUF_SIZE2);
+ CSIO_SET_FLBUF_SIZE(hw, 3, CSIO_SGE_FLBUF_SIZE3);
+ CSIO_SET_FLBUF_SIZE(hw, 4, CSIO_SGE_FLBUF_SIZE4);
+ CSIO_SET_FLBUF_SIZE(hw, 5, CSIO_SGE_FLBUF_SIZE5);
+ CSIO_SET_FLBUF_SIZE(hw, 6, CSIO_SGE_FLBUF_SIZE6);
+ CSIO_SET_FLBUF_SIZE(hw, 7, CSIO_SGE_FLBUF_SIZE7);
+ CSIO_SET_FLBUF_SIZE(hw, 8, CSIO_SGE_FLBUF_SIZE8);
+
+ for (i = 0; i < CSIO_SGE_FL_SIZE_REGS; i++)
+ csio_get_flbuf_size(hw, sge, i);
+
+ /* Initialize interrupt coalescing attributes */
+ sge->timer_val[0] = CSIO_SGE_TIMER_VAL_0;
+ sge->timer_val[1] = CSIO_SGE_TIMER_VAL_1;
+ sge->timer_val[2] = CSIO_SGE_TIMER_VAL_2;
+ sge->timer_val[3] = CSIO_SGE_TIMER_VAL_3;
+ sge->timer_val[4] = CSIO_SGE_TIMER_VAL_4;
+ sge->timer_val[5] = CSIO_SGE_TIMER_VAL_5;
+
+ sge->counter_val[0] = CSIO_SGE_INT_CNT_VAL_0;
+ sge->counter_val[1] = CSIO_SGE_INT_CNT_VAL_1;
+ sge->counter_val[2] = CSIO_SGE_INT_CNT_VAL_2;
+ sge->counter_val[3] = CSIO_SGE_INT_CNT_VAL_3;
+
+ csio_wr_reg32(hw, THRESHOLD_0(sge->counter_val[0]) |
+ THRESHOLD_1(sge->counter_val[1]) |
+ THRESHOLD_2(sge->counter_val[2]) |
+ THRESHOLD_3(sge->counter_val[3]),
+ SGE_INGRESS_RX_THRESHOLD);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE0(csio_us_to_core_ticks(hw, sge->timer_val[0])) |
+ TIMERVALUE1(csio_us_to_core_ticks(hw, sge->timer_val[1])),
+ SGE_TIMER_VALUE_0_AND_1);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE2(csio_us_to_core_ticks(hw, sge->timer_val[2])) |
+ TIMERVALUE3(csio_us_to_core_ticks(hw, sge->timer_val[3])),
+ SGE_TIMER_VALUE_2_AND_3);
+
+ csio_wr_reg32(hw,
+ TIMERVALUE4(csio_us_to_core_ticks(hw, sge->timer_val[4])) |
+ TIMERVALUE5(csio_us_to_core_ticks(hw, sge->timer_val[5])),
+ SGE_TIMER_VALUE_4_AND_5);
+
+ csio_init_intr_coalesce_parms(hw);
+}
+
+void
+csio_wr_sge_init(struct csio_hw *hw)
+{
+ /*
+ * If we are master:
+ * - If we plan to use the config file, we need to fixup some
+ * host specific registers, and read the rest of the SGE
+ * configuration.
+ * - If we don't plan to use the config file, we need to initialize
+ * SGE entirely, including fixing the host specific registers.
+ * If we aren't the master, we are only allowed to read and work off of
+ * the already initialized SGE values.
+ *
+ * Therefore, before calling this function, we assume that mastership
+ * of the card, and whether to use the config file or not, have
+ * already been decided. In other words, CSIO_HWF_USING_SOFT_PARAMS and
+ * CSIO_HWF_MASTER should be set/unset.
+ */
+ if (csio_is_hw_master(hw)) {
+ csio_wr_fixup_host_params(hw);
+
+ if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS)
+ csio_wr_get_sge(hw);
+ else
+ csio_wr_set_sge(hw);
+ } else
+ csio_wr_get_sge(hw);
+}
+
+/*
+ * csio_wrm_init - Initialize Work request module.
+ * @wrm: WR module
+ * @hw: HW pointer
+ *
+ * Allocates memory for an array of queue pointers starting at q_arr.
+ */
+int
+csio_wrm_init(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+
+ if (!wrm->num_q) {
+ csio_err(hw, "Num queues is not set\n");
+ return -EINVAL;
+ }
+
+ wrm->q_arr = kzalloc(sizeof(struct csio_q *) * wrm->num_q, GFP_KERNEL);
+ if (!wrm->q_arr)
+ goto err;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ wrm->q_arr[i] = kzalloc(sizeof(struct csio_q), GFP_KERNEL);
+ if (!wrm->q_arr[i]) {
+ while (--i >= 0)
+ kfree(wrm->q_arr[i]);
+ goto err_free_arr;
+ }
+ }
+ wrm->free_qidx = 0;
+
+ return 0;
+
+err_free_arr:
+ kfree(wrm->q_arr);
+err:
+ return -ENOMEM;
+}
+
+/*
+ * csio_wrm_exit - Uninitialize Work request module.
+ * @wrm: WR module
+ * @hw: HW module
+ *
+ * Uninitialize WR module. Free q_arr and pointers in it.
+ * We have the additional job of freeing the DMA memory associated
+ * with the queues.
+ */
+void
+csio_wrm_exit(struct csio_wrm *wrm, struct csio_hw *hw)
+{
+ int i;
+ uint32_t j;
+ struct csio_q *q;
+ struct csio_dma_buf *buf;
+
+ for (i = 0; i < wrm->num_q; i++) {
+ q = wrm->q_arr[i];
+
+ if (wrm->free_qidx && (i < wrm->free_qidx)) {
+ if (q->type == CSIO_FREELIST) {
+ if (!q->un.fl.bufs)
+ continue;
+ for (j = 0; j < q->credits; j++) {
+ buf = &q->un.fl.bufs[j];
+ if (!buf->vaddr)
+ continue;
+ pci_free_consistent(hw->pdev, buf->len,
+ buf->vaddr,
+ buf->paddr);
+ }
+ kfree(q->un.fl.bufs);
+ }
+ pci_free_consistent(hw->pdev, q->size,
+ q->vstart, q->pstart);
+ }
+ kfree(q);
+ }
+
+ hw->flags &= ~CSIO_HWF_Q_MEM_ALLOCED;
+
+ kfree(wrm->q_arr);
+}
diff --git a/drivers/scsi/csiostor/csio_wr.h b/drivers/scsi/csiostor/csio_wr.h
new file mode 100644
index 000000000000..8d30e7ac1f5e
--- /dev/null
+++ b/drivers/scsi/csiostor/csio_wr.h
@@ -0,0 +1,512 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CSIO_WR_H__
+#define __CSIO_WR_H__
+
+#include <linux/cache.h>
+
+#include "csio_defs.h"
+#include "t4fw_api.h"
+#include "t4fw_api_stor.h"
+
+/*
+ * SGE register field values.
+ */
+#define X_INGPCIEBOUNDARY_32B 0
+#define X_INGPCIEBOUNDARY_64B 1
+#define X_INGPCIEBOUNDARY_128B 2
+#define X_INGPCIEBOUNDARY_256B 3
+#define X_INGPCIEBOUNDARY_512B 4
+#define X_INGPCIEBOUNDARY_1024B 5
+#define X_INGPCIEBOUNDARY_2048B 6
+#define X_INGPCIEBOUNDARY_4096B 7
+
+/* GTS register */
+#define X_TIMERREG_COUNTER0 0
+#define X_TIMERREG_COUNTER1 1
+#define X_TIMERREG_COUNTER2 2
+#define X_TIMERREG_COUNTER3 3
+#define X_TIMERREG_COUNTER4 4
+#define X_TIMERREG_COUNTER5 5
+#define X_TIMERREG_RESTART_COUNTER 6
+#define X_TIMERREG_UPDATE_CIDX 7
+
+/*
+ * Egress Context field values
+ */
+#define X_FETCHBURSTMIN_16B 0
+#define X_FETCHBURSTMIN_32B 1
+#define X_FETCHBURSTMIN_64B 2
+#define X_FETCHBURSTMIN_128B 3
+
+#define X_FETCHBURSTMAX_64B 0
+#define X_FETCHBURSTMAX_128B 1
+#define X_FETCHBURSTMAX_256B 2
+#define X_FETCHBURSTMAX_512B 3
+
+#define X_HOSTFCMODE_NONE 0
+#define X_HOSTFCMODE_INGRESS_QUEUE 1
+#define X_HOSTFCMODE_STATUS_PAGE 2
+#define X_HOSTFCMODE_BOTH 3
+
+/*
+ * Ingress Context field values
+ */
+#define X_UPDATESCHEDULING_TIMER 0
+#define X_UPDATESCHEDULING_COUNTER_OPTTIMER 1
+
+#define X_UPDATEDELIVERY_NONE 0
+#define X_UPDATEDELIVERY_INTERRUPT 1
+#define X_UPDATEDELIVERY_STATUS_PAGE 2
+#define X_UPDATEDELIVERY_BOTH 3
+
+#define X_INTERRUPTDESTINATION_PCIE 0
+#define X_INTERRUPTDESTINATION_IQ 1
+
+#define X_RSPD_TYPE_FLBUF 0
+#define X_RSPD_TYPE_CPL 1
+#define X_RSPD_TYPE_INTR 2
+
+/* WR status is at the same position as retval in a CMD header */
+#define csio_wr_status(_wr) \
+ (FW_CMD_RETVAL_GET(ntohl(((struct fw_cmd_hdr *)(_wr))->lo)))
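+
+/*
+ * Hedged usage sketch; FW_SUCCESS comes from t4fw_api.h and the
+ * surrounding handler is illustrative:
+ *
+ *	if (csio_wr_status(wr) != FW_SUCCESS)
+ *		csio_err(hw, "WR failed, status %d\n", csio_wr_status(wr));
+ */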
+
+struct csio_hw;
+
+extern int csio_intr_coalesce_cnt;
+extern int csio_intr_coalesce_time;
+
+/* Ingress queue params */
+struct csio_iq_params {
+
+ uint8_t iq_start:1;
+ uint8_t iq_stop:1;
+ uint8_t pfn:3;
+
+ uint8_t vfn;
+
+ uint16_t physiqid;
+ uint16_t iqid;
+
+ uint16_t fl0id;
+ uint16_t fl1id;
+
+ uint8_t viid;
+
+ uint8_t type;
+ uint8_t iqasynch;
+ uint8_t reserved4;
+
+ uint8_t iqandst;
+ uint8_t iqanus;
+ uint8_t iqanud;
+
+ uint16_t iqandstindex;
+
+ uint8_t iqdroprss;
+ uint8_t iqpciech;
+ uint8_t iqdcaen;
+
+ uint8_t iqdcacpu;
+ uint8_t iqintcntthresh;
+ uint8_t iqo;
+
+ uint8_t iqcprio;
+ uint8_t iqesize;
+
+ uint16_t iqsize;
+
+ uint64_t iqaddr;
+
+ uint8_t iqflintiqhsen;
+ uint8_t reserved5;
+ uint8_t iqflintcongen;
+ uint8_t iqflintcngchmap;
+
+ uint32_t reserved6;
+
+ uint8_t fl0hostfcmode;
+ uint8_t fl0cprio;
+ uint8_t fl0paden;
+ uint8_t fl0packen;
+ uint8_t fl0congen;
+ uint8_t fl0dcaen;
+
+ uint8_t fl0dcacpu;
+ uint8_t fl0fbmin;
+
+ uint8_t fl0fbmax;
+ uint8_t fl0cidxfthresho;
+ uint8_t fl0cidxfthresh;
+
+ uint16_t fl0size;
+
+ uint64_t fl0addr;
+
+ uint64_t reserved7;
+
+ uint8_t fl1hostfcmode;
+ uint8_t fl1cprio;
+ uint8_t fl1paden;
+ uint8_t fl1packen;
+ uint8_t fl1congen;
+ uint8_t fl1dcaen;
+
+ uint8_t fl1dcacpu;
+ uint8_t fl1fbmin;
+
+ uint8_t fl1fbmax;
+ uint8_t fl1cidxfthresho;
+ uint8_t fl1cidxfthresh;
+
+ uint16_t fl1size;
+
+ uint64_t fl1addr;
+};
+
+/* Egress queue params */
+struct csio_eq_params {
+
+ uint8_t pfn;
+ uint8_t vfn;
+
+ uint8_t eqstart:1;
+ uint8_t eqstop:1;
+
+ uint16_t physeqid;
+ uint32_t eqid;
+
+ uint8_t hostfcmode:2;
+ uint8_t cprio:1;
+ uint8_t pciechn:3;
+
+ uint16_t iqid;
+
+ uint8_t dcaen:1;
+ uint8_t dcacpu:5;
+
+ uint8_t fbmin:3;
+ uint8_t fbmax:3;
+
+ uint8_t cidxfthresho:1;
+ uint8_t cidxfthresh:3;
+
+ uint16_t eqsize;
+
+ uint64_t eqaddr;
+};
+
+struct csio_dma_buf {
+ struct list_head list;
+ void *vaddr; /* Virtual address */
+ dma_addr_t paddr; /* Physical address */
+ uint32_t len; /* Buffer size */
+};
+
+/* Generic I/O request structure */
+struct csio_ioreq {
+ struct csio_sm sm; /* SM, List
+ * should be the first member
+ */
+ int iq_idx; /* Ingress queue index */
+ int eq_idx; /* Egress queue index */
+ uint32_t nsge; /* Number of SG elements */
+ uint32_t tmo; /* Driver timeout */
+ uint32_t datadir; /* Data direction */
+ struct csio_dma_buf dma_buf; /* Req/resp DMA buffers */
+ uint16_t wr_status; /* WR completion status */
+ int16_t drv_status; /* Driver internal status */
+ struct csio_lnode *lnode; /* Owner lnode */
+ struct csio_rnode *rnode; /* Src/destination rnode */
+ void (*io_cbfn) (struct csio_hw *, struct csio_ioreq *);
+ /* completion callback */
+	void			*scratch1;	/* Scratch area 1. */
+ void *scratch2; /* Scratch area 2. */
+ struct list_head gen_list; /* Any list associated with
+ * this ioreq.
+ */
+ uint64_t fw_handle; /* Unique handle passed
+ * to FW
+ */
+ uint8_t dcopy; /* Data copy required */
+ uint8_t reserved1;
+ uint16_t reserved2;
+ struct completion cmplobj; /* ioreq completion object */
+} ____cacheline_aligned_in_smp;
+
+/*
+ * Egress status page for egress cidx updates
+ */
+struct csio_qstatus_page {
+ __be32 qid;
+ __be16 cidx;
+ __be16 pidx;
+};
+
+
+enum {
+ CSIO_MAX_FLBUF_PER_IQWR = 4,
+ CSIO_QCREDIT_SZ = 64, /* pidx/cidx increments
+ * in bytes
+ */
+ CSIO_MAX_QID = 0xFFFF,
+ CSIO_MAX_IQ = 128,
+
+ CSIO_SGE_NTIMERS = 6,
+ CSIO_SGE_NCOUNTERS = 4,
+ CSIO_SGE_FL_SIZE_REGS = 16,
+};
+
+/* Defines for type */
+enum {
+ CSIO_EGRESS = 1,
+ CSIO_INGRESS = 2,
+ CSIO_FREELIST = 3,
+};
+
+/*
+ * Structure for footer (last 2 flits) of Ingress Queue Entry.
+ */
+struct csio_iqwr_footer {
+ __be32 hdrbuflen_pidx;
+ __be32 pldbuflen_qid;
+ union {
+ u8 type_gen;
+ __be64 last_flit;
+ } u;
+};
+
+#define IQWRF_NEWBUF (1 << 31)
+#define IQWRF_LEN_GET(x) (((x) >> 0) & 0x7fffffffU)
+#define IQWRF_GEN_SHIFT 7
+#define IQWRF_TYPE_GET(x) (((x) >> 4) & 0x3U)
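+
+/*
+ * Hedged decoding sketch (loop body elided, local names illustrative):
+ * an IQ consumer keeps going while the footer's generation bit matches
+ * the queue's genbit, then dispatches on the response type:
+ *
+ *	while (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)) {
+ *		switch (IQWRF_TYPE_GET(ftr->u.type_gen)) {
+ *		case X_RSPD_TYPE_CPL:
+ *			...
+ *		}
+ *	}
+ */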
+
+
+/*
+ * WR pair:
+ * ========
+ * A WR can start towards the end of a queue, and then continue at the
+ * beginning, since the queue is considered to be circular. This will
+ * require a pair of address/len to be passed back to the caller -
+ * hence the Work request pair structure.
+ */
+struct csio_wr_pair {
+ void *addr1;
+ uint32_t size1;
+ void *addr2;
+ uint32_t size2;
+};
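+
+/*
+ * Hedged sketch of consuming a WR pair: copy size1 bytes to addr1 and,
+ * if the WR wrapped, the remaining size2 bytes to addr2. This mirrors
+ * what csio_wr_copy_to_wrp(), declared at the bottom of this header,
+ * exists to do; the plain memcpy form is illustrative:
+ *
+ *	memcpy(wrp->addr1, src, wrp->size1);
+ *	if (wrp->size2)
+ *		memcpy(wrp->addr2, (uint8_t *)src + wrp->size1, wrp->size2);
+ */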
+
+/*
+ * The following structure is used by ingress processing to return the
+ * free list buffers to consumers.
+ */
+struct csio_fl_dma_buf {
+ struct csio_dma_buf flbufs[CSIO_MAX_FLBUF_PER_IQWR];
+ /* Freelist DMA buffers */
+ int offset; /* Offset within the
+ * first FL buf.
+ */
+ uint32_t totlen; /* Total length */
+	uint8_t			defer_free;	/* Freeing of the buffer
+						 * can be deferred
+						 */
+};
+
+/* Data-types */
+typedef void (*iq_handler_t)(struct csio_hw *, void *, uint32_t,
+ struct csio_fl_dma_buf *, void *);
+
+struct csio_iq {
+ uint16_t iqid; /* Queue ID */
+ uint16_t physiqid; /* Physical Queue ID */
+ uint16_t genbit; /* Generation bit,
+ * initially set to 1
+ */
+ int flq_idx; /* Freelist queue index */
+ iq_handler_t iq_intx_handler; /* IQ INTx handler routine */
+};
+
+struct csio_eq {
+ uint16_t eqid; /* Qid */
+ uint16_t physeqid; /* Physical Queue ID */
+ uint8_t wrap[512]; /* Temp area for q-wrap around*/
+};
+
+struct csio_fl {
+ uint16_t flid; /* Qid */
+ uint16_t packen; /* Packing enabled? */
+ int offset; /* Offset within FL buf */
+ int sreg; /* Size register */
+ struct csio_dma_buf *bufs; /* Free list buffer ptr array
+ * indexed using flq->cidx/pidx
+ */
+};
+
+struct csio_qstats {
+ uint32_t n_tot_reqs; /* Total no. of Requests */
+ uint32_t n_tot_rsps; /* Total no. of responses */
+ uint32_t n_qwrap; /* Queue wraps */
+ uint32_t n_eq_wr_split; /* Number of split EQ WRs */
+ uint32_t n_qentry; /* Queue entry */
+ uint32_t n_qempty; /* Queue empty */
+ uint32_t n_qfull; /* Queue fulls */
+ uint32_t n_rsp_unknown; /* Unknown response type */
+ uint32_t n_stray_comp; /* Stray completion intr */
+ uint32_t n_flq_refill; /* Number of FL refills */
+};
+
+/* Queue metadata */
+struct csio_q {
+ uint16_t type; /* Type: Ingress/Egress/FL */
+ uint16_t pidx; /* producer index */
+ uint16_t cidx; /* consumer index */
+ uint16_t inc_idx; /* Incremental index */
+ uint32_t wr_sz; /* Size of all WRs in this q
+ * if fixed
+ */
+ void *vstart; /* Base virtual address
+ * of queue
+ */
+ void *vwrap; /* Virtual end address to
+ * wrap around at
+ */
+ uint32_t credits; /* Size of queue in credits */
+ void *owner; /* Owner */
+ union { /* Queue contexts */
+ struct csio_iq iq;
+ struct csio_eq eq;
+ struct csio_fl fl;
+ } un;
+
+ dma_addr_t pstart; /* Base physical address of
+ * queue
+ */
+ uint32_t portid; /* PCIE Channel */
+ uint32_t size; /* Size of queue in bytes */
+ struct csio_qstats stats; /* Statistics */
+} ____cacheline_aligned_in_smp;
+
+struct csio_sge {
+ uint32_t csio_fl_align; /* Calculated and cached
+ * for fast path
+ */
+ uint32_t sge_control; /* padding, boundaries,
+ * lengths, etc.
+ */
+ uint32_t sge_host_page_size; /* Host page size */
+ uint32_t sge_fl_buf_size[CSIO_SGE_FL_SIZE_REGS];
+ /* free list buffer sizes */
+ uint16_t timer_val[CSIO_SGE_NTIMERS];
+ uint8_t counter_val[CSIO_SGE_NCOUNTERS];
+};
+
+/* Work request module */
+struct csio_wrm {
+ int num_q; /* Number of queues */
+ struct csio_q **q_arr; /* Array of queue pointers
+ * allocated dynamically
+ * based on configured values
+ */
+ uint32_t fw_iq_start; /* Start ID of IQ for this fn*/
+ uint32_t fw_eq_start; /* Start ID of EQ for this fn*/
+ struct csio_q *intr_map[CSIO_MAX_IQ];
+ /* IQ-id to IQ map table. */
+ int free_qidx; /* queue idx of free queue */
+ struct csio_sge sge; /* SGE params */
+};
+
+#define csio_get_q(__hw, __idx) ((__hw)->wrm.q_arr[__idx])
+#define csio_q_type(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->type)
+#define csio_q_pidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pidx)
+#define csio_q_cidx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->cidx)
+#define csio_q_inc_idx(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->inc_idx)
+#define csio_q_vstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->vstart)
+#define csio_q_pstart(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->pstart)
+#define csio_q_size(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->size)
+#define csio_q_credits(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->credits)
+#define csio_q_portid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->portid)
+#define csio_q_wr_sz(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->wr_sz)
+#define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
+#define csio_q_physiqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
+#define csio_q_iq_flq_idx(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
+#define csio_q_eqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.eqid)
+#define csio_q_flid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.fl.flid)
+
+#define csio_q_physeqid(__hw, __idx) \
+ ((__hw)->wrm.q_arr[(__idx)]->un.eq.physeqid)
+#define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
+
+#define csio_q_iq_to_flid(__hw, __iq_idx) \
+	csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_idx)]->un.iq.flq_idx)
+#define csio_q_set_intr_map(__hw, __iq_idx, __rel_iq_id) \
+ (__hw)->wrm.intr_map[__rel_iq_id] = csio_get_q(__hw, __iq_idx)
+#define csio_q_eq_wrap(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.eq.wrap)
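+
+/*
+ * Hedged sketch: fast-path code indexes queues through the accessors
+ * above rather than dereferencing q_arr directly, e.g. comparing
+ * producer and consumer indices (idx is illustrative):
+ *
+ *	if (csio_q_pidx(hw, idx) == csio_q_cidx(hw, idx))
+ *		...
+ */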
+
+struct csio_mb;
+
+int csio_wr_alloc_q(struct csio_hw *, uint32_t, uint32_t,
+ uint16_t, void *, uint32_t, int, iq_handler_t);
+int csio_wr_iq_create(struct csio_hw *, void *, int,
+ uint32_t, uint8_t, bool,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_eq_create(struct csio_hw *, void *, int, int, uint8_t,
+ void (*)(struct csio_hw *, struct csio_mb *));
+int csio_wr_destroy_queues(struct csio_hw *, bool cmd);
+
+
+int csio_wr_get(struct csio_hw *, int, uint32_t,
+ struct csio_wr_pair *);
+void csio_wr_copy_to_wrp(void *, struct csio_wr_pair *, uint32_t, uint32_t);
+int csio_wr_issue(struct csio_hw *, int, bool);
+int csio_wr_process_iq(struct csio_hw *, struct csio_q *,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+int csio_wr_process_iq_idx(struct csio_hw *, int,
+ void (*)(struct csio_hw *, void *,
+ uint32_t, struct csio_fl_dma_buf *,
+ void *),
+ void *);
+
+void csio_wr_sge_init(struct csio_hw *);
+int csio_wrm_init(struct csio_wrm *, struct csio_hw *);
+void csio_wrm_exit(struct csio_wrm *, struct csio_hw *);
+
+#endif /* ifndef __CSIO_WR_H__ */
diff --git a/drivers/scsi/csiostor/t4fw_api_stor.h b/drivers/scsi/csiostor/t4fw_api_stor.h
new file mode 100644
index 000000000000..097e52c0f8e1
--- /dev/null
+++ b/drivers/scsi/csiostor/t4fw_api_stor.h
@@ -0,0 +1,539 @@
+/*
+ * This file is part of the Chelsio FCoE driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_API_STOR_H_
+#define _T4FW_API_STOR_H_
+
+
+/******************************************************************************
+ * R E T U R N V A L U E S
+ ********************************/
+
+enum fw_fcoe_link_sub_op {
+ FCOE_LINK_DOWN = 0x0,
+ FCOE_LINK_UP = 0x1,
+ FCOE_LINK_COND = 0x2,
+};
+
+enum fw_fcoe_link_status {
+ FCOE_LINKDOWN = 0x0,
+ FCOE_LINKUP = 0x1,
+};
+
+enum fw_ofld_prot {
+ PROT_FCOE = 0x1,
+ PROT_ISCSI = 0x2,
+};
+
+enum rport_type_fcoe {
+ FLOGI_VFPORT = 0x1, /* 0xfffffe */
+ FDISC_VFPORT = 0x2, /* 0xfffffe */
+ NS_VNPORT = 0x3, /* 0xfffffc */
+ REG_FC4_VNPORT = 0x4, /* any FC4 type VN_PORT */
+ REG_VNPORT = 0x5, /* 0xfffxxx - non FC4 port in switch */
+ FDMI_VNPORT = 0x6, /* 0xfffffa */
+ FAB_CTLR_VNPORT = 0x7, /* 0xfffffd */
+};
+
+enum event_cause_fcoe {
+ PLOGI_ACC_RCVD = 0x01,
+ PLOGI_RJT_RCVD = 0x02,
+ PLOGI_RCVD = 0x03,
+ PLOGO_RCVD = 0x04,
+ PRLI_ACC_RCVD = 0x05,
+ PRLI_RJT_RCVD = 0x06,
+ PRLI_RCVD = 0x07,
+ PRLO_RCVD = 0x08,
+ NPORT_ID_CHGD = 0x09,
+ FLOGO_RCVD = 0x0a,
+ CLR_VIRT_LNK_RCVD = 0x0b,
+ FLOGI_ACC_RCVD = 0x0c,
+ FLOGI_RJT_RCVD = 0x0d,
+ FDISC_ACC_RCVD = 0x0e,
+ FDISC_RJT_RCVD = 0x0f,
+ FLOGI_TMO_MAX_RETRY = 0x10,
+ IMPL_LOGO_ADISC_ACC = 0x11,
+ IMPL_LOGO_ADISC_RJT = 0x12,
+ IMPL_LOGO_ADISC_CNFLT = 0x13,
+ PRLI_TMO = 0x14,
+ ADISC_TMO = 0x15,
+ RSCN_DEV_LOST = 0x16,
+ SCR_ACC_RCVD = 0x17,
+ ADISC_RJT_RCVD = 0x18,
+ LOGO_SNT = 0x19,
+ PROTO_ERR_IMPL_LOGO = 0x1a,
+};
+
+enum fcoe_cmn_type {
+ FCOE_ELS,
+ FCOE_CT,
+ FCOE_SCSI_CMD,
+ FCOE_UNSOL_ELS,
+};
+
+enum fw_wr_stor_opcodes {
+ FW_RDEV_WR = 0x38,
+ FW_FCOE_ELS_CT_WR = 0x30,
+ FW_SCSI_WRITE_WR = 0x31,
+ FW_SCSI_READ_WR = 0x32,
+ FW_SCSI_CMD_WR = 0x33,
+ FW_SCSI_ABRT_CLS_WR = 0x34,
+};
+
+struct fw_rdev_wr {
+ __be32 op_to_immdlen;
+ __be32 alloc_to_len16;
+ __be64 cookie;
+ u8 protocol;
+ u8 event_cause;
+ u8 cur_state;
+ u8 prev_state;
+ __be32 flags_to_assoc_flowid;
+ union rdev_entry {
+ struct fcoe_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 rjt_reason;
+ u8 cur_login_st;
+ u8 prev_login_st;
+ __be16 rcv_fr_sz;
+ u8 rd_xfer_rdy_to_rport_type;
+ u8 vft_to_qos;
+ u8 org_proc_assoc_to_acc_rsp_code;
+ u8 enh_disc_to_tgt;
+ u8 wwnn[8];
+ u8 wwpn[8];
+ __be16 iqid;
+ u8 fc_oui[3];
+ u8 r_id[3];
+ } fcoe_rdev;
+ struct iscsi_rdev_entry {
+ __be32 flowid;
+ u8 protocol;
+ u8 event_cause;
+ u8 flags;
+ u8 r3;
+ __be16 iscsi_opts;
+ __be16 tcp_opts;
+ __be16 ip_opts;
+ __be16 max_rcv_len;
+ __be16 max_snd_len;
+ __be16 first_brst_len;
+ __be16 max_brst_len;
+ __be16 r4;
+ __be16 def_time2wait;
+ __be16 def_time2ret;
+ __be16 nop_out_intrvl;
+ __be16 non_scsi_to;
+ __be16 isid;
+ __be16 tsid;
+ __be16 port;
+ __be16 tpgt;
+ u8 r5[6];
+ __be16 iqid;
+ } iscsi_rdev;
+ } u;
+};
+
+#define FW_RDEV_WR_FLOWID_GET(x) (((x) >> 8) & 0xfffff)
+#define FW_RDEV_WR_ASSOC_FLOWID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_RDEV_WR_RPORT_TYPE_GET(x) (((x) >> 0) & 0x1f)
+#define FW_RDEV_WR_NPIV_GET(x) (((x) >> 6) & 0x1)
+#define FW_RDEV_WR_CLASS_GET(x) (((x) >> 4) & 0x3)
+#define FW_RDEV_WR_TASK_RETRY_ID_GET(x) (((x) >> 5) & 0x1)
+#define FW_RDEV_WR_RETRY_GET(x) (((x) >> 4) & 0x1)
+#define FW_RDEV_WR_CONF_CMPL_GET(x) (((x) >> 3) & 0x1)
+#define FW_RDEV_WR_INI_GET(x) (((x) >> 1) & 0x1)
+#define FW_RDEV_WR_TGT_GET(x) (((x) >> 0) & 0x1)
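+
+/*
+ * Hedged decoding sketch; which byte carries each field follows the
+ * struct layout above, and the handler shown is illustrative:
+ *
+ *	struct fw_rdev_wr *rdev_wr = (struct fw_rdev_wr *)wr;
+ *	u8 rp_type = FW_RDEV_WR_RPORT_TYPE_GET(
+ *			rdev_wr->u.fcoe_rdev.rd_xfer_rdy_to_rport_type);
+ */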
+
+struct fw_fcoe_els_ct_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 els_ct_type;
+ u8 ctl_pri;
+ u8 cp_en_class;
+ __be16 xfer_cnt;
+ u8 fl_to_sp;
+ u8 l_id[3];
+ u8 r5;
+ u8 r_id[3];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_FCOE_ELS_CT_WR_OPCODE(x) ((x) << 24)
+#define FW_FCOE_ELS_CT_WR_OPCODE_GET(x) (((x) >> 24) & 0xff)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN(x) ((x) << 0)
+#define FW_FCOE_ELS_CT_WR_IMMDLEN_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_ELS_CT_WR_SP(x) ((x) << 0)
+
+struct fw_scsi_write_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_write_priv {
+ struct fcoe_write_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_write_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_WRITE_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_read_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 use_xfer_cnt;
+ union fw_scsi_read_priv {
+ struct fcoe_read_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r3_lo[2];
+ } fcoe;
+ struct iscsi_read_priv {
+ u8 r3[4];
+ } iscsi;
+ } u;
+ __be32 xfer_cnt;
+ __be32 ini_xfer_cnt;
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r4;
+};
+
+#define FW_SCSI_READ_WR_IMMDLEN(x) ((x) << 0)
+
+struct fw_scsi_cmd_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 r3;
+ union fw_scsi_cmd_priv {
+ struct fcoe_cmd_priv {
+ u8 ctl_pri;
+ u8 cp_en_class;
+ u8 r4_lo[2];
+ } fcoe;
+ struct iscsi_cmd_priv {
+ u8 r4[4];
+ } iscsi;
+ } u;
+ u8 r5[8];
+ __be64 rsp_dmaaddr;
+ __be32 rsp_dmalen;
+ __be32 r6;
+};
+
+#define FW_SCSI_CMD_WR_IMMDLEN(x) ((x) << 0)
+
+#define SCSI_ABORT 0
+#define SCSI_CLOSE 1
+
+struct fw_scsi_abrt_cls_wr {
+ __be32 op_immdlen;
+ __be32 flowid_len16;
+ u64 cookie;
+ __be16 iqid;
+ u8 tmo_val;
+ u8 sub_opcode_to_chk_all_io;
+ u8 r3[4];
+ u64 t_cookie;
+};
+
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(x) ((x) << 2)
+#define FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(x) (((x) >> 2) & 0x3f)
+#define FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(x) ((x) << 0)
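+
+/*
+ * Hedged sketch of packing the sub-opcode byte (wr is illustrative):
+ *
+ *	wr->sub_opcode_to_chk_all_io =
+ *		FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(SCSI_ABORT) |
+ *		FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0);
+ */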
+
+enum fw_cmd_stor_opcodes {
+ FW_FCOE_RES_INFO_CMD = 0x31,
+ FW_FCOE_LINK_CMD = 0x32,
+ FW_FCOE_VNP_CMD = 0x33,
+ FW_FCOE_SPARAMS_CMD = 0x35,
+ FW_FCOE_STATS_CMD = 0x37,
+ FW_FCOE_FCF_CMD = 0x38,
+};
+
+struct fw_fcoe_res_info_cmd {
+ __be32 op_to_read;
+ __be32 retval_len16;
+ __be16 e_d_tov;
+ __be16 r_a_tov_seq;
+ __be16 r_a_tov_els;
+ __be16 r_r_tov;
+ __be32 max_xchgs;
+ __be32 max_ssns;
+ __be32 used_xchgs;
+ __be32 used_ssns;
+ __be32 max_fcfs;
+ __be32 max_vnps;
+ __be32 used_fcfs;
+ __be32 used_vnps;
+};
+
+struct fw_fcoe_link_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ __be32 sub_opcode_fcfi;
+ u8 r3;
+ u8 lstatus;
+ __be16 flags;
+ u8 r4;
+ u8 set_vlan;
+ __be16 vlan_id;
+ __be32 vnpi_pkd;
+ __be16 r6;
+ u8 phy_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+};
+
+#define FW_FCOE_LINK_CMD_PORTID(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+#define FW_FCOE_LINK_CMD_SUB_OPCODE(x) ((x) << 24U)
+#define FW_FCOE_LINK_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_LINK_CMD_FCFI_GET(x) (((x) >> 0) & 0xffffff)
+#define FW_FCOE_LINK_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
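+
+/*
+ * Hedged sketch of building the sub_opcode/fcfi word (lcmd and fcfi
+ * are illustrative):
+ *
+ *	lcmd->sub_opcode_fcfi = cpu_to_be32(
+ *			FW_FCOE_LINK_CMD_SUB_OPCODE(FCOE_LINK_UP) |
+ *			FW_FCOE_LINK_CMD_FCFI(fcfi));
+ */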
+
+struct fw_fcoe_vnp_cmd {
+ __be32 op_to_fcfi;
+ __be32 alloc_to_len16;
+ __be32 gen_wwn_to_vnpi;
+ __be32 vf_id;
+ __be16 iqid;
+ u8 vnport_mac[6];
+ u8 vnport_wwnn[8];
+ u8 vnport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 clsp_word_0_1[8];
+};
+
+#define FW_FCOE_VNP_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_ALLOC (1U << 31)
+#define FW_FCOE_VNP_CMD_FREE (1U << 30)
+#define FW_FCOE_VNP_CMD_MODIFY (1U << 29)
+#define FW_FCOE_VNP_CMD_GEN_WWN (1U << 22)
+#define FW_FCOE_VNP_CMD_VFID_EN (1U << 20)
+#define FW_FCOE_VNP_CMD_VNPI(x) ((x) << 0)
+#define FW_FCOE_VNP_CMD_VNPI_GET(x) (((x) >> 0) & 0xfffff)
+
+struct fw_fcoe_sparams_cmd {
+ __be32 op_to_portid;
+ __be32 retval_len16;
+ u8 r3[7];
+ u8 cos;
+ u8 lport_wwnn[8];
+ u8 lport_wwpn[8];
+ u8 cmn_srv_parms[16];
+ u8 cls_srv_parms[16];
+};
+
+#define FW_FCOE_SPARAMS_CMD_PORTID(x) ((x) << 0)
+
+struct fw_fcoe_stats_cmd {
+ __be32 op_to_flowid;
+ __be32 free_to_len16;
+ union fw_fcoe_stats {
+ struct fw_fcoe_stats_ctl {
+ u8 nstats_port;
+ u8 port_valid_ix;
+ __be16 r6;
+ __be32 r7;
+ __be64 stat0;
+ __be64 stat1;
+ __be64 stat2;
+ __be64 stat3;
+ __be64 stat4;
+ __be64 stat5;
+ } ctl;
+ struct fw_fcoe_port_stats {
+ __be64 tx_bcast_bytes;
+ __be64 tx_bcast_frames;
+ __be64 tx_mcast_bytes;
+ __be64 tx_mcast_frames;
+ __be64 tx_ucast_bytes;
+ __be64 tx_ucast_frames;
+ __be64 tx_drop_frames;
+ __be64 tx_offload_bytes;
+ __be64 tx_offload_frames;
+ __be64 rx_bcast_bytes;
+ __be64 rx_bcast_frames;
+ __be64 rx_mcast_bytes;
+ __be64 rx_mcast_frames;
+ __be64 rx_ucast_bytes;
+ __be64 rx_ucast_frames;
+ __be64 rx_err_frames;
+ } port_stats;
+ struct fw_fcoe_fcf_stats {
+ __be32 fip_tx_bytes;
+ __be32 fip_tx_fr;
+ __be64 fcf_ka;
+ __be64 mcast_adv_rcvd;
+ __be16 ucast_adv_rcvd;
+ __be16 sol_sent;
+ __be16 vlan_req;
+ __be16 vlan_rpl;
+ __be16 clr_vlink;
+ __be16 link_down;
+ __be16 link_up;
+ __be16 logo;
+ __be16 flogi_req;
+ __be16 flogi_rpl;
+ __be16 fdisc_req;
+ __be16 fdisc_rpl;
+ __be16 fka_prd_chg;
+ __be16 fc_map_chg;
+ __be16 vfid_chg;
+ u8 no_fka_req;
+ u8 no_vnp;
+ } fcf_stats;
+ struct fw_fcoe_pcb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 vnp_ka;
+ __be32 unsol_els_rcvd;
+ __be64 unsol_cmd_rcvd;
+ __be16 implicit_logo;
+ __be16 flogi_inv_sparm;
+ __be16 fdisc_inv_sparm;
+ __be16 flogi_rjt;
+ __be16 fdisc_rjt;
+ __be16 no_ssn;
+ __be16 mac_flt_fail;
+ __be16 inv_fr_rcvd;
+ } pcb_stats;
+ struct fw_fcoe_scb_stats {
+ __be64 tx_bytes;
+ __be64 tx_frames;
+ __be64 rx_bytes;
+ __be64 rx_frames;
+ __be32 host_abrt_req;
+ __be32 adap_auto_abrt;
+ __be32 adap_abrt_rsp;
+ __be32 host_ios_req;
+ __be16 ssn_offl_ios;
+ __be16 ssn_not_rdy_ios;
+ u8 rx_data_ddp_err;
+ u8 ddp_flt_set_err;
+ __be16 rx_data_fr_err;
+ u8 bad_st_abrt_req;
+ u8 no_io_abrt_req;
+ u8 abort_tmo;
+ u8 abort_tmo_2;
+ __be32 abort_req;
+ u8 no_ppod_res_tmo;
+ u8 bp_tmo;
+ u8 adap_auto_cls;
+ u8 no_io_cls_req;
+ __be32 host_cls_req;
+ __be64 unsol_cmd_rcvd;
+ __be32 plogi_req_rcvd;
+ __be32 prli_req_rcvd;
+ __be16 logo_req_rcvd;
+ __be16 prlo_req_rcvd;
+ __be16 plogi_rjt_rcvd;
+ __be16 prli_rjt_rcvd;
+ __be32 adisc_req_rcvd;
+ __be32 rscn_rcvd;
+ __be32 rrq_req_rcvd;
+ __be32 unsol_els_rcvd;
+ u8 adisc_rjt_rcvd;
+ u8 scr_rjt;
+ u8 ct_rjt;
+ u8 inval_bls_rcvd;
+ __be32 ba_rjt_rcvd;
+ } scb_stats;
+ } u;
+};
+
+#define FW_FCOE_STATS_CMD_FLOWID(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_FREE (1U << 30)
+#define FW_FCOE_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_FCOE_STATS_CMD_PORT(x) ((x) << 0)
+#define FW_FCOE_STATS_CMD_PORT_VALID (1U << 7)
+#define FW_FCOE_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_fcoe_fcf_cmd {
+ __be32 op_to_fcfi;
+ __be32 retval_len16;
+ __be16 priority_pkd;
+ u8 mac[6];
+ u8 name_id[8];
+ u8 fabric[8];
+ __be16 vf_id;
+ __be16 max_fcoe_size;
+ u8 vlan_id;
+ u8 fc_map[3];
+ __be32 fka_adv;
+ __be32 r6;
+ u8 r7_hi;
+ u8 fpma_to_portid;
+ u8 spma_mac[6];
+ __be64 r8;
+};
+
+#define FW_FCOE_FCF_CMD_FCFI(x) ((x) << 0)
+#define FW_FCOE_FCF_CMD_FCFI_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_FCOE_FCF_CMD_PRIORITY_GET(x) (((x) >> 0) & 0xff)
+#define FW_FCOE_FCF_CMD_FPMA_GET(x) (((x) >> 6) & 0x1)
+#define FW_FCOE_FCF_CMD_SPMA_GET(x) (((x) >> 5) & 0x1)
+#define FW_FCOE_FCF_CMD_LOGIN_GET(x) (((x) >> 4) & 0x1)
+#define FW_FCOE_FCF_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#endif /* _T4FW_API_STOR_H_ */
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index f924b3c3720e..3fecf35ba292 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
break;
case CXGB4_STATE_DETACH:
pr_info("cdev 0x%p, DETACH.\n", cdev);
+ cxgbi_device_unregister(cdev);
break;
default:
pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c
index 13aeca3d51f2..fed486bfd3f4 100644
--- a/drivers/scsi/dc395x.c
+++ b/drivers/scsi/dc395x.c
@@ -489,7 +489,7 @@ struct ParameterData {
int def; /* default value */
int safe; /* safe value */
};
-static struct ParameterData __devinitdata cfg_data[] = {
+static struct ParameterData cfg_data[] = {
{ /* adapter id */
CFG_PARAM_UNSET,
0,
@@ -574,7 +574,7 @@ MODULE_PARM_DESC(reset_delay, "Reset delay in seconds. Default 1 (0-180)");
* set_safe_settings - if the use_safe_settings option is set then
* set all values to the safe and slow values.
**/
-static void __devinit set_safe_settings(void)
+static void set_safe_settings(void)
{
if (use_safe_settings)
{
@@ -593,7 +593,7 @@ static void __devinit set_safe_settings(void)
* fix_settings - reset any boot parameters which are out of range
* back to the default values.
**/
-static void __devinit fix_settings(void)
+static void fix_settings(void)
{
int i;
@@ -620,7 +620,7 @@ static void __devinit fix_settings(void)
* Mapping from the eeprom delay index value (index into this array)
* to the number of actual seconds that the delay should be for.
*/
-static char __devinitdata eeprom_index_to_delay_map[] =
+static char eeprom_index_to_delay_map[] =
{ 1, 3, 5, 10, 16, 30, 60, 120 };
@@ -630,7 +630,7 @@ static char __devinitdata eeprom_index_to_delay_map[] =
*
* @eeprom: The eeprom structure in which we find the delay index to map.
**/
-static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
+static void eeprom_index_to_delay(struct NvRamType *eeprom)
{
eeprom->delay_time = eeprom_index_to_delay_map[eeprom->delay_time];
}
@@ -643,7 +643,7 @@ static void __devinit eeprom_index_to_delay(struct NvRamType *eeprom)
*
* @delay: The delay, in seconds, to find the eeprom index for.
**/
-static int __devinit delay_to_eeprom_index(int delay)
+static int delay_to_eeprom_index(int delay)
{
u8 idx = 0;
while (idx < 7 && eeprom_index_to_delay_map[idx] < delay)
@@ -659,7 +659,7 @@ static int __devinit delay_to_eeprom_index(int delay)
*
* @eeprom: The eeprom data to override with command line options.
**/
-static void __devinit eeprom_override(struct NvRamType *eeprom)
+static void eeprom_override(struct NvRamType *eeprom)
{
u8 id;
@@ -3747,13 +3747,13 @@ static struct DeviceCtlBlk *device_alloc(struct AdapterCtlBlk *acb,
dcb->max_command = 1;
dcb->target_id = target;
dcb->target_lun = lun;
+ dcb->dev_mode = eeprom->target[target].cfg0;
#ifndef DC395x_NO_DISCONNECT
dcb->identify_msg =
IDENTIFY(dcb->dev_mode & NTC_DO_DISCONNECT, lun);
#else
dcb->identify_msg = IDENTIFY(0, lun);
#endif
- dcb->dev_mode = eeprom->target[target].cfg0;
dcb->inquiry7 = 0;
dcb->sync_mode = 0;
dcb->min_nego_period = clock_period[period_index];
@@ -3938,7 +3938,7 @@ static void dc395x_slave_destroy(struct scsi_device *scsi_device)
*
* @io_port: base I/O address
**/
-static void __devinit trms1040_wait_30us(unsigned long io_port)
+static void trms1040_wait_30us(unsigned long io_port)
{
/* ScsiPortStallExecution(30); wait 30 us */
outb(5, io_port + TRM_S1040_GEN_TIMER);
@@ -3955,7 +3955,7 @@ static void __devinit trms1040_wait_30us(unsigned long io_port)
* @cmd: SB + op code (command) to send
* @addr: address to send
**/
-static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
+static void trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
{
int i;
u8 send_data;
@@ -4000,7 +4000,7 @@ static void __devinit trms1040_write_cmd(unsigned long io_port, u8 cmd, u8 addr)
* @addr: offset into EEPROM
* @byte: bytes to write
**/
-static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
+static void trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
{
int i;
u8 send_data;
@@ -4054,7 +4054,7 @@ static void __devinit trms1040_set_data(unsigned long io_port, u8 addr, u8 byte)
* @eeprom: the data to write
* @io_port: the base io port
**/
-static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
+static void trms1040_write_all(struct NvRamType *eeprom, unsigned long io_port)
{
u8 *b_eeprom = (u8 *)eeprom;
u8 addr;
@@ -4094,7 +4094,7 @@ static void __devinit trms1040_write_all(struct NvRamType *eeprom, unsigned long
*
* Returns the byte read.
**/
-static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
+static u8 trms1040_get_data(unsigned long io_port, u8 addr)
{
int i;
u8 read_byte;
@@ -4132,7 +4132,7 @@ static u8 __devinit trms1040_get_data(unsigned long io_port, u8 addr)
* @eeprom: where to store the data
* @io_port: the base io port
**/
-static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
+static void trms1040_read_all(struct NvRamType *eeprom, unsigned long io_port)
{
u8 *b_eeprom = (u8 *)eeprom;
u8 addr;
@@ -4162,7 +4162,7 @@ static void __devinit trms1040_read_all(struct NvRamType *eeprom, unsigned long
 * @eeprom: caller-allocated structure to read the eeprom data into
* @io_port: io port to read from
**/
-static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
+static void check_eeprom(struct NvRamType *eeprom, unsigned long io_port)
{
u16 *w_eeprom = (u16 *)eeprom;
u16 w_addr;
@@ -4232,7 +4232,7 @@ static void __devinit check_eeprom(struct NvRamType *eeprom, unsigned long io_po
*
 * @eeprom: The eeprom data structure to show details for.
**/
-static void __devinit print_eeprom_settings(struct NvRamType *eeprom)
+static void print_eeprom_settings(struct NvRamType *eeprom)
{
dprintkl(KERN_INFO, "Used settings: AdapterID=%02i, Speed=%i(%02i.%01iMHz), dev_mode=0x%02x\n",
eeprom->scsi_id,
@@ -4260,7 +4260,7 @@ static void adapter_sg_tables_free(struct AdapterCtlBlk *acb)
/*
* Allocate SG tables; as we have to pci_map them, an SG list (struct SGentry*)
* should never cross a page boundary */
-static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
+static int adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
{
const unsigned mem_needed = (DC395x_MAX_SRB_CNT+1)
*SEGMENTX_LEN;
@@ -4306,7 +4306,7 @@ static int __devinit adapter_sg_tables_alloc(struct AdapterCtlBlk *acb)
*
* @acb: The adapter to print the information for.
**/
-static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
+static void adapter_print_config(struct AdapterCtlBlk *acb)
{
u8 bval;
@@ -4350,7 +4350,7 @@ static void __devinit adapter_print_config(struct AdapterCtlBlk *acb)
*
* @acb: The adapter to initialize.
**/
-static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
+static void adapter_init_params(struct AdapterCtlBlk *acb)
{
struct NvRamType *eeprom = &acb->eeprom;
int i;
@@ -4412,7 +4412,7 @@ static void __devinit adapter_init_params(struct AdapterCtlBlk *acb)
*
* @host: The scsi host instance to fill in the values for.
**/
-static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
+static void adapter_init_scsi_host(struct Scsi_Host *host)
{
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)host->hostdata;
struct NvRamType *eeprom = &acb->eeprom;
@@ -4453,7 +4453,7 @@ static void __devinit adapter_init_scsi_host(struct Scsi_Host *host)
*
* @acb: The adapter which we are to init.
**/
-static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
+static void adapter_init_chip(struct AdapterCtlBlk *acb)
{
struct NvRamType *eeprom = &acb->eeprom;
@@ -4506,8 +4506,8 @@ static void __devinit adapter_init_chip(struct AdapterCtlBlk *acb)
* Returns 0 if the initialization succeeds, any other value on
* failure.
**/
-static int __devinit adapter_init(struct AdapterCtlBlk *acb,
- unsigned long io_port, u32 io_port_len, unsigned int irq)
+static int adapter_init(struct AdapterCtlBlk *acb, unsigned long io_port,
+ u32 io_port_len, unsigned int irq)
{
if (!request_region(io_port, io_port_len, DC395X_NAME)) {
dprintkl(KERN_ERR, "Failed to reserve IO region 0x%lx\n", io_port);
@@ -4794,8 +4794,7 @@ static void banner_display(void)
*
* Returns 0 on success, or an error code (-ve) on failure.
**/
-static int __devinit dc395x_init_one(struct pci_dev *dev,
- const struct pci_device_id *id)
+static int dc395x_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
struct Scsi_Host *scsi_host = NULL;
struct AdapterCtlBlk *acb = NULL;
@@ -4861,7 +4860,7 @@ fail:
*
* @dev: The PCI device to initialize.
**/
-static void __devexit dc395x_remove_one(struct pci_dev *dev)
+static void dc395x_remove_one(struct pci_dev *dev)
{
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)(scsi_host->hostdata);
@@ -4892,7 +4891,7 @@ static struct pci_driver dc395x_driver = {
.name = DC395X_NAME,
.id_table = dc395x_pci_table,
.probe = dc395x_init_one,
- .remove = __devexit_p(dc395x_remove_one),
+ .remove = dc395x_remove_one,
};
diff --git a/drivers/scsi/dmx3191d.c b/drivers/scsi/dmx3191d.c
index 207352cc70cc..4b0dd8c56707 100644
--- a/drivers/scsi/dmx3191d.c
+++ b/drivers/scsi/dmx3191d.c
@@ -68,8 +68,8 @@ static struct scsi_host_template dmx3191d_driver_template = {
.use_clustering = DISABLE_CLUSTERING,
};
-static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int dmx3191d_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
struct Scsi_Host *shost;
unsigned long io;
@@ -123,7 +123,7 @@ static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
return error;
}
-static void __devexit dmx3191d_remove_one(struct pci_dev *pdev)
+static void dmx3191d_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -150,7 +150,7 @@ static struct pci_driver dmx3191d_pci_driver = {
.name = DMX3191D_DRIVER_NAME,
.id_table = dmx3191d_pci_tbl,
.probe = dmx3191d_probe_one,
- .remove = __devexit_p(dmx3191d_remove_one),
+ .remove = dmx3191d_remove_one,
};
static int __init dmx3191d_init(void)
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 7aca9fd8a11c..08c3bc398da2 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2152,7 +2152,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
*/
port_id = fip->port_id;
if (fip->probe_tries)
- port_id = prandom32(&fip->rnd_state) & 0xffff;
+ port_id = prandom_u32_state(&fip->rnd_state) & 0xffff;
else if (!port_id)
port_id = fip->lp->wwpn & 0xffff;
if (!port_id || port_id == 0xffff)
@@ -2177,7 +2177,7 @@ static void fcoe_ctlr_vn_restart(struct fcoe_ctlr *fip)
static void fcoe_ctlr_vn_start(struct fcoe_ctlr *fip)
{
fip->probe_tries = 0;
- prandom32_seed(&fip->rnd_state, fip->lp->wwpn);
+ prandom_seed_state(&fip->rnd_state, fip->lp->wwpn);
fcoe_ctlr_vn_restart(fip);
}
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 1a2a1e5824e3..fff682976c56 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -1771,7 +1771,7 @@ struct scsi_host_template fdomain_driver_template = {
#ifndef PCMCIA
#ifdef CONFIG_PCI
-static struct pci_device_id fdomain_pci_tbl[] __devinitdata = {
+static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ }
diff --git a/drivers/scsi/fnic/Makefile b/drivers/scsi/fnic/Makefile
index 37c3440bc17c..383598fadf04 100644
--- a/drivers/scsi/fnic/Makefile
+++ b/drivers/scsi/fnic/Makefile
@@ -7,6 +7,8 @@ fnic-y := \
fnic_res.o \
fnic_fcs.o \
fnic_scsi.o \
+ fnic_trace.o \
+ fnic_debugfs.o \
vnic_cq.o \
vnic_dev.o \
vnic_intr.o \
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 95a5ba29320d..98436c363035 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -26,6 +26,7 @@
#include <scsi/libfcoe.h>
#include "fnic_io.h"
#include "fnic_res.h"
+#include "fnic_trace.h"
#include "vnic_dev.h"
#include "vnic_wq.h"
#include "vnic_rq.h"
@@ -56,6 +57,34 @@
#define FNIC_NO_TAG -1
/*
+ * Command flags to identify the type of command, and for other
+ * future use.
+ */
+#define FNIC_NO_FLAGS 0
+#define FNIC_IO_INITIALIZED BIT(0)
+#define FNIC_IO_ISSUED BIT(1)
+#define FNIC_IO_DONE BIT(2)
+#define FNIC_IO_REQ_NULL BIT(3)
+#define FNIC_IO_ABTS_PENDING BIT(4)
+#define FNIC_IO_ABORTED BIT(5)
+#define FNIC_IO_ABTS_ISSUED BIT(6)
+#define FNIC_IO_TERM_ISSUED BIT(7)
+#define FNIC_IO_INTERNAL_TERM_ISSUED BIT(8)
+#define FNIC_IO_ABT_TERM_DONE BIT(9)
+#define FNIC_IO_ABT_TERM_REQ_NULL BIT(10)
+#define FNIC_IO_ABT_TERM_TIMED_OUT BIT(11)
+#define FNIC_DEVICE_RESET BIT(12) /* Device reset request */
+#define FNIC_DEV_RST_ISSUED BIT(13)
+#define FNIC_DEV_RST_TIMED_OUT BIT(14)
+#define FNIC_DEV_RST_ABTS_ISSUED BIT(15)
+#define FNIC_DEV_RST_TERM_ISSUED BIT(16)
+#define FNIC_DEV_RST_DONE BIT(17)
+#define FNIC_DEV_RST_REQ_NULL BIT(18)
+#define FNIC_DEV_RST_ABTS_DONE BIT(19)
+#define FNIC_DEV_RST_TERM_DONE BIT(20)
+#define FNIC_DEV_RST_ABTS_PENDING BIT(21)
+
+/*
* Usage of the scsi_cmnd scratchpad.
* These fields are locked by the hashed io_req_lock.
*/
@@ -64,6 +93,7 @@
#define CMD_ABTS_STATUS(Cmnd) ((Cmnd)->SCp.Message)
#define CMD_LR_STATUS(Cmnd) ((Cmnd)->SCp.have_data_in)
#define CMD_TAG(Cmnd) ((Cmnd)->SCp.sent_command)
+#define CMD_FLAGS(Cmnd) ((Cmnd)->SCp.Status)
#define FCPIO_INVALID_CODE 0x100 /* hdr_status value unused by firmware */
@@ -71,9 +101,28 @@
#define FNIC_HOST_RESET_TIMEOUT 10000 /* mSec */
#define FNIC_RMDEVICE_TIMEOUT 1000 /* mSec */
#define FNIC_HOST_RESET_SETTLE_TIME 30 /* Sec */
+#define FNIC_ABT_TERM_DELAY_TIMEOUT 500 /* mSec */
#define FNIC_MAX_FCP_TARGET 256
+/**
+ * state_flags to identify host state along with fnic's state
+ **/
+#define __FNIC_FLAGS_FWRESET BIT(0) /* fwreset in progress */
+#define __FNIC_FLAGS_BLOCK_IO BIT(1) /* IOs are blocked */
+
+#define FNIC_FLAGS_NONE (0)
+#define FNIC_FLAGS_FWRESET (__FNIC_FLAGS_FWRESET | \
+ __FNIC_FLAGS_BLOCK_IO)
+
+#define FNIC_FLAGS_IO_BLOCKED (__FNIC_FLAGS_BLOCK_IO)
+
+#define fnic_set_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 0)
+
+#define fnic_clear_state_flags(fnicp, st_flags) \
+ __fnic_set_state_flags(fnicp, st_flags, 1)
+
extern unsigned int fnic_log_level;
#define FNIC_MAIN_LOGGING 0x01
@@ -170,6 +219,9 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
+ atomic_t in_flight; /* io counter */
+ u32 _reserved; /* fill hole */
+ unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
spinlock_t fnic_lock;
@@ -267,4 +319,12 @@ const char *fnic_state_to_str(unsigned int state);
void fnic_log_q_error(struct fnic *fnic);
void fnic_handle_link_event(struct fnic *fnic);
+int fnic_is_abts_pending(struct fnic *, struct scsi_cmnd *);
+
+static inline int
+fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags)
+{
+ return ((fnic->state_flags & st_flags) == st_flags);
+}
+void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long);
#endif /* _FNIC_H_ */
diff --git a/drivers/scsi/fnic/fnic_debugfs.c b/drivers/scsi/fnic/fnic_debugfs.c
new file mode 100644
index 000000000000..adc1f7f471f5
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_debugfs.c
@@ -0,0 +1,314 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/debugfs.h>
+#include "fnic.h"
+
+static struct dentry *fnic_trace_debugfs_root;
+static struct dentry *fnic_trace_debugfs_file;
+static struct dentry *fnic_trace_enable;
+
+/*
+ * fnic_trace_ctrl_open - Open the tracing_enable file
+ * @inode: The inode pointer.
+ * @file: The file pointer to attach the trace enable/disable flag.
+ *
+ * Description:
+ * This routine opens the debugfs file tracing_enable.
+ *
+ * Returns:
+ * This function returns zero if successful.
+ */
+static int fnic_trace_ctrl_open(struct inode *inode, struct file *filp)
+{
+ filp->private_data = inode->i_private;
+ return 0;
+}
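+
+/*
+ * The body above is effectively the generic simple_open() helper from
+ * libfs: it just hands inode->i_private to the file's private_data.
+ */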
+
+/*
+ * fnic_trace_ctrl_read - Read the tracing_enable debugfs file
+ * @filp: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @cnt: The number of bytes to read.
+ * @ppos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads the value of the fnic_tracing_enabled variable
+ * into a local buffer. It starts reading the file at @ppos and copies
+ * up to @cnt bytes of data from that buffer to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read.
+ */
+static ssize_t fnic_trace_ctrl_read(struct file *filp,
+ char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ int len;
+ len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * fnic_trace_ctrl_write - Write to the tracing_enable debugfs file
+ * @filp: The file pointer to write from.
+ * @ubuf: The buffer to copy the data from.
+ * @cnt: The number of bytes to write.
+ * @ppos: The position in the file to start writing to.
+ *
+ * Description:
+ * This routine copies data from the user buffer @ubuf into a local
+ * buffer and sets the fnic_tracing_enabled value per the user input.
+ *
+ * Returns:
+ * This function returns the amount of data that was written.
+ */
+static ssize_t fnic_trace_ctrl_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ char buf[64];
+ unsigned long val;
+ int ret;
+
+ if (cnt >= sizeof(buf))
+ return -EINVAL;
+
+ if (copy_from_user(&buf, ubuf, cnt))
+ return -EFAULT;
+
+ buf[cnt] = 0;
+
+ ret = kstrtoul(buf, 10, &val);
+ if (ret < 0)
+ return ret;
+
+ fnic_tracing_enabled = val;
+ (*ppos)++;
+
+ return cnt;
+}
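+
+/*
+ * Hedged usage sketch from userspace, assuming debugfs is mounted at
+ * the conventional /sys/kernel/debug:
+ *
+ *	echo 1 > /sys/kernel/debug/fnic/tracing_enable
+ *	cat /sys/kernel/debug/fnic/trace
+ */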
+
+/*
+ * fnic_trace_debugfs_open - Open the fnic trace log
+ * @inode: The inode pointer
+ * @file: The file pointer to attach the log output
+ *
+ * Description:
+ * This routine is the entry point for the debugfs open file operation.
+ * It allocates the necessary buffer for the log, fills the buffer from
+ * the in-memory log and then returns a pointer to that log in
+ * the private_data field in @file.
+ *
+ * Returns:
+ * This function returns zero if successful. On error it will return
+ * a negative error value.
+ */
+static int fnic_trace_debugfs_open(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt;
+ fnic_dbg_prt = kzalloc(sizeof(fnic_dbgfs_t), GFP_KERNEL);
+ if (!fnic_dbg_prt)
+ return -ENOMEM;
+
+ fnic_dbg_prt->buffer = vmalloc((3*(trace_max_pages * PAGE_SIZE)));
+ if (!fnic_dbg_prt->buffer) {
+ kfree(fnic_dbg_prt);
+ return -ENOMEM;
+ }
+ memset((void *)fnic_dbg_prt->buffer, 0,
+ (3*(trace_max_pages * PAGE_SIZE)));
+ fnic_dbg_prt->buffer_len = fnic_get_trace_data(fnic_dbg_prt);
+ file->private_data = fnic_dbg_prt;
+ return 0;
+}
+
+/*
+ * fnic_trace_debugfs_lseek - Seek through a debugfs file
+ * @file: The file pointer to seek through.
+ * @offset: The offset to seek to or the amount to seek by.
+ * @howto: Indicates how to seek.
+ *
+ * Description:
+ * This routine is the entry point for the debugfs lseek file operation.
+ * The @howto parameter indicates whether @offset is the offset to directly
+ * seek to, or if it is a value to seek forward or reverse by. This function
+ * figures out what the new offset of the debugfs file will be and assigns
+ * that value to the f_pos field of @file.
+ *
+ * Returns:
+ * This function returns the new offset if successful and returns a negative
+ * error if unable to process the seek.
+ */
+static loff_t fnic_trace_debugfs_lseek(struct file *file,
+ loff_t offset,
+ int howto)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ loff_t pos = -1;
+
+ switch (howto) {
+ case 0:
+ pos = offset;
+ break;
+ case 1:
+ pos = file->f_pos + offset;
+ break;
+ case 2:
+ pos = fnic_dbg_prt->buffer_len - offset;
+ }
+ return (pos < 0 || pos > fnic_dbg_prt->buffer_len) ?
+ -EINVAL : (file->f_pos = pos);
+}
+
+/*
+ * fnic_trace_debugfs_read - Read a debugfs file
+ * @file: The file pointer to read from.
+ * @ubuf: The buffer to copy the data to.
+ * @nbytes: The number of bytes to read.
+ * @pos: The position in the file to start reading from.
+ *
+ * Description:
+ * This routine reads data from the buffer indicated in the private_data
+ * field of @file. It will start reading at @pos and copy up to @nbytes of
+ * data to @ubuf.
+ *
+ * Returns:
+ * This function returns the amount of data that was read (this could be
+ * less than @nbytes if the end of the file was reached).
+ */
+static ssize_t fnic_trace_debugfs_read(struct file *file,
+ char __user *ubuf,
+ size_t nbytes,
+ loff_t *pos)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+ int rc = 0;
+ rc = simple_read_from_buffer(ubuf, nbytes, pos,
+ fnic_dbg_prt->buffer,
+ fnic_dbg_prt->buffer_len);
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_release - Release the buffer used to store
+ * debugfs file data
+ * @inode: The inode pointer
+ * @file: The file pointer that contains the buffer to release
+ *
+ * Description:
+ * This routine frees the buffer that was allocated when the debugfs
+ * file was opened.
+ *
+ * Returns:
+ * This function returns zero.
+ */
+static int fnic_trace_debugfs_release(struct inode *inode,
+ struct file *file)
+{
+ fnic_dbgfs_t *fnic_dbg_prt = file->private_data;
+
+ vfree(fnic_dbg_prt->buffer);
+ kfree(fnic_dbg_prt);
+ return 0;
+}
+
+static const struct file_operations fnic_trace_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_ctrl_open,
+ .read = fnic_trace_ctrl_read,
+ .write = fnic_trace_ctrl_write,
+};
+
+static const struct file_operations fnic_trace_debugfs_fops = {
+ .owner = THIS_MODULE,
+ .open = fnic_trace_debugfs_open,
+ .llseek = fnic_trace_debugfs_lseek,
+ .read = fnic_trace_debugfs_read,
+ .release = fnic_trace_debugfs_release,
+};
+
+/*
+ * fnic_trace_debugfs_init - Initialize debugfs for fnic trace logging
+ *
+ * Description:
+ * When debugfs is configured, this routine sets up the fnic debugfs
+ * hierarchy. If not already present, it creates the fnic directory,
+ * the file "trace" (which exposes the fnic trace buffer output), and
+ * the file "tracing_enable" (which enables/disables trace logging
+ * into the trace buffer).
+ */
+int fnic_trace_debugfs_init(void)
+{
+ int rc = -1;
+ fnic_trace_debugfs_root = debugfs_create_dir("fnic", NULL);
+ if (!fnic_trace_debugfs_root) {
+ printk(KERN_DEBUG "Cannot create debugfs root\n");
+ return rc;
+ }
+ fnic_trace_enable = debugfs_create_file("tracing_enable",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL, &fnic_trace_ctrl_fops);
+
+ if (!fnic_trace_enable) {
+		printk(KERN_DEBUG "Cannot create tracing_enable file"
+			" under debugfs\n");
+ return rc;
+ }
+
+ fnic_trace_debugfs_file = debugfs_create_file("trace",
+ S_IFREG|S_IRUGO|S_IWUSR,
+ fnic_trace_debugfs_root,
+ NULL,
+ &fnic_trace_debugfs_fops);
+
+ if (!fnic_trace_debugfs_file) {
+		printk(KERN_DEBUG "Cannot create trace file under debugfs\n");
+ return rc;
+ }
+ rc = 0;
+ return rc;
+}
+
+/*
+ * fnic_trace_debugfs_terminate - Tear down debugfs infrastructure
+ *
+ * Description:
+ * When debugfs is configured, this routine removes the debugfs
+ * entries that are specific to fnic trace logging.
+ */
+void fnic_trace_debugfs_terminate(void)
+{
+ if (fnic_trace_debugfs_file) {
+ debugfs_remove(fnic_trace_debugfs_file);
+ fnic_trace_debugfs_file = NULL;
+ }
+ if (fnic_trace_enable) {
+ debugfs_remove(fnic_trace_enable);
+ fnic_trace_enable = NULL;
+ }
+ if (fnic_trace_debugfs_root) {
+ debugfs_remove(fnic_trace_debugfs_root);
+ fnic_trace_debugfs_root = NULL;
+ }
+}
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 3c53c3478ee7..483eb9dbe663 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
}
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
- fnic->vlan_hw_insert, fnic->vlan_id, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1);
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
}
@@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
}
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
- fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
+ 0 /* hw inserts cos value */,
+ fnic->vlan_id, 1, 1, 1);
fnic_send_frame_end:
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
diff --git a/drivers/scsi/fnic/fnic_io.h b/drivers/scsi/fnic/fnic_io.h
index f0b896988cd5..c35b8f1889ea 100644
--- a/drivers/scsi/fnic/fnic_io.h
+++ b/drivers/scsi/fnic/fnic_io.h
@@ -21,7 +21,7 @@
#include <scsi/fc/fc_fcp.h>
#define FNIC_DFLT_SG_DESC_CNT 32
-#define FNIC_MAX_SG_DESC_CNT 1024 /* Maximum descriptors per sgl */
+#define FNIC_MAX_SG_DESC_CNT 256 /* Maximum descriptors per sgl */
#define FNIC_SG_DESC_ALIGN 16 /* Descriptor address alignment */
struct host_sg_desc {
@@ -45,7 +45,8 @@ enum fnic_sgl_list_type {
};
enum fnic_ioreq_state {
- FNIC_IOREQ_CMD_PENDING = 0,
+ FNIC_IOREQ_NOT_INITED = 0,
+ FNIC_IOREQ_CMD_PENDING,
FNIC_IOREQ_ABTS_PENDING,
FNIC_IOREQ_ABTS_COMPLETE,
FNIC_IOREQ_CMD_COMPLETE,
@@ -60,6 +61,7 @@ struct fnic_io_req {
u8 sgl_type; /* device DMA descriptor list type */
u8 io_completed:1; /* set to 1 when fw completes IO */
u32 port_id; /* remote port DID */
+ unsigned long start_time; /* in jiffies */
struct completion *abts_done; /* completion for abts */
struct completion *dr_done; /* completion for device reset */
};
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index fc98eb61e760..d601ac543c52 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -68,6 +68,10 @@ unsigned int fnic_log_level;
module_param(fnic_log_level, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(fnic_log_level, "bit mask of fnic logging levels");
+unsigned int fnic_trace_max_pages = 16;
+module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages "
+ "for fnic trace buffer");
static struct libfc_function_template fnic_transport_template = {
.frame_send = fnic_send,
@@ -399,8 +403,7 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
return fnic->data_src_addr;
}
-static int __devinit fnic_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct Scsi_Host *host;
struct fc_lport *lp;
@@ -625,6 +628,9 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
}
fnic->state = FNIC_IN_FC_MODE;
+ atomic_set(&fnic->in_flight, 0);
+ fnic->state_flags = FNIC_FLAGS_NONE;
+
/* Enable hardware stripping of vlan header on ingress */
fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);
@@ -774,7 +780,7 @@ err_out:
return err;
}
-static void __devexit fnic_remove(struct pci_dev *pdev)
+static void fnic_remove(struct pci_dev *pdev)
{
struct fnic *fnic = pci_get_drvdata(pdev);
struct fc_lport *lp = fnic->lport;
@@ -849,7 +855,7 @@ static struct pci_driver fnic_driver = {
.name = DRV_NAME,
.id_table = fnic_id_table,
.probe = fnic_probe,
- .remove = __devexit_p(fnic_remove),
+ .remove = fnic_remove,
};
static int __init fnic_init_module(void)
@@ -859,6 +865,14 @@ static int __init fnic_init_module(void)
printk(KERN_INFO PFX "%s, ver %s\n", DRV_DESCRIPTION, DRV_VERSION);
+ /* Allocate memory for trace buffer */
+ err = fnic_trace_buf_init();
+ if (err < 0) {
+		printk(KERN_ERR PFX "Trace buffer initialization failed; "
+			"fnic tracing utility is disabled\n");
+ fnic_trace_free();
+ }
+
/* Create a cache for allocation of default size sgls */
len = sizeof(struct fnic_dflt_sgl_list);
fnic_sgl_cache[FNIC_SGL_CACHE_DFLT] = kmem_cache_create
@@ -929,6 +943,7 @@ err_create_fnic_ioreq_slab:
err_create_fnic_sgl_slab_max:
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
err_create_fnic_sgl_slab_dflt:
+ fnic_trace_free();
return err;
}
@@ -940,6 +955,7 @@ static void __exit fnic_cleanup_module(void)
kmem_cache_destroy(fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
kmem_cache_destroy(fnic_io_req_cache);
fc_release_transport(fnic_fc_transport);
+ fnic_trace_free();
}
module_init(fnic_init_module);
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index c40ce52ed7c6..be99e7549d89 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -47,6 +47,7 @@ const char *fnic_state_str[] = {
};
static const char *fnic_ioreq_state_str[] = {
+ [FNIC_IOREQ_NOT_INITED] = "FNIC_IOREQ_NOT_INITED",
[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
@@ -165,6 +166,33 @@ static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
}
+/**
+ * __fnic_set_state_flags
+ * Sets/Clears bits in fnic's state_flags
+ **/
+void
+__fnic_set_state_flags(struct fnic *fnic, unsigned long st_flags,
+ unsigned long clearbits)
+{
+ struct Scsi_Host *host = fnic->lport->host;
+ int sh_locked = spin_is_locked(host->host_lock);
+ unsigned long flags = 0;
+
+ if (!sh_locked)
+ spin_lock_irqsave(host->host_lock, flags);
+
+ if (clearbits)
+ fnic->state_flags &= ~st_flags;
+ else
+ fnic->state_flags |= st_flags;
+
+ if (!sh_locked)
+ spin_unlock_irqrestore(host->host_lock, flags);
+
+ return;
+}
+
+
/*
* fnic_fw_reset_handler
* Routine to send reset msg to fw
@@ -175,9 +203,16 @@ int fnic_fw_reset_handler(struct fnic *fnic)
int ret = 0;
unsigned long flags;
+ /* indicate fwreset to io path */
+ fnic_set_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
skb_queue_purge(&fnic->frame_queue);
skb_queue_purge(&fnic->tx_queue);
+ /* wait for io cmpl */
+ while (atomic_read(&fnic->in_flight))
+ schedule_timeout(msecs_to_jiffies(1));
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -193,9 +228,12 @@ int fnic_fw_reset_handler(struct fnic *fnic)
if (!ret)
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Issued fw reset\n");
- else
+ else {
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Failed to issue fw reset\n");
+ }
+
return ret;
}
@@ -312,6 +350,8 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_queue_wq_copy_desc failure - no descriptors\n");
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -351,16 +391,20 @@ static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
*/
static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
- struct fc_lport *lp;
+ struct fc_lport *lp = shost_priv(sc->device->host);
struct fc_rport *rport;
- struct fnic_io_req *io_req;
- struct fnic *fnic;
+ struct fnic_io_req *io_req = NULL;
+ struct fnic *fnic = lport_priv(lp);
struct vnic_wq_copy *wq;
int ret;
- int sg_count;
+ u64 cmd_trace;
+ int sg_count = 0;
unsigned long flags;
unsigned long ptr;
+ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED)))
+ return SCSI_MLQUEUE_HOST_BUSY;
+
rport = starget_to_rport(scsi_target(sc->device));
ret = fc_remote_port_chkready(rport);
if (ret) {
@@ -369,20 +413,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
return 0;
}
- lp = shost_priv(sc->device->host);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
return SCSI_MLQUEUE_HOST_BUSY;
+ atomic_inc(&fnic->in_flight);
+
/*
* Release host lock, use driver resource specific locks from here.
* Don't re-enable interrupts in case they were disabled prior to the
* caller disabling them.
*/
spin_unlock(lp->host->host_lock);
+ CMD_STATE(sc) = FNIC_IOREQ_NOT_INITED;
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
/* Get a new io_req for this SCSI IO */
- fnic = lport_priv(lp);
-
io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
if (!io_req) {
ret = SCSI_MLQUEUE_HOST_BUSY;
@@ -393,6 +438,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* Map the data buffer */
sg_count = scsi_dma_map(sc);
if (sg_count < 0) {
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, sc->cmnd[0],
+ sg_count, CMD_STATE(sc));
mempool_free(io_req, fnic->io_req_pool);
goto out;
}
@@ -427,8 +475,10 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
/* initialize rest of io_req */
io_req->port_id = rport->port_id;
+ io_req->start_time = jiffies;
CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
CMD_SP(sc) = (char *)io_req;
+ CMD_FLAGS(sc) |= FNIC_IO_INITIALIZED;
sc->scsi_done = done;
/* create copy wq desc and enqueue it */
@@ -440,7 +490,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
* refetch the pointer under the lock.
*/
spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);
-
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, 0, 0, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
CMD_SP(sc) = NULL;
@@ -450,8 +502,21 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
+ } else {
+ /* REVISIT: Use per IO lock in the final code */
+ CMD_FLAGS(sc) |= FNIC_IO_ISSUED;
}
out:
+ cmd_trace = ((u64)sc->cmnd[0] << 56 | (u64)sc->cmnd[7] << 40 |
+ (u64)sc->cmnd[8] << 32 | (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 | (u64)sc->cmnd[4] << 8 |
+ sc->cmnd[5]);
+
+ FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
+ sc->request->tag, sc, io_req,
+ sg_count, cmd_trace,
+ (((u64)CMD_FLAGS(sc) >> 32) | CMD_STATE(sc)));
+ atomic_dec(&fnic->in_flight);
/* acquire host lock before returning to SCSI */
spin_lock(lp->host->host_lock);
return ret;
@@ -529,6 +594,8 @@ static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
fnic_flush_tx(fnic);
reset_cmpl_handler_end:
+ fnic_clear_state_flags(fnic, FNIC_FLAGS_FWRESET);
+
return ret;
}
@@ -622,6 +689,7 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
struct vnic_wq_copy *wq;
u16 request_out = desc->u.ack.request_out;
unsigned long flags;
+ u64 *ox_id_tag = (u64 *)(void *)desc;
/* mark the ack state */
wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
@@ -632,6 +700,9 @@ static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
fnic->fw_ack_recd[0] = 1;
}
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ FNIC_TRACE(fnic_fcpio_ack_handler,
+ fnic->lport->host->host_no, 0, 0, ox_id_tag[2], ox_id_tag[3],
+ ox_id_tag[4], ox_id_tag[5]);
}
/*
@@ -651,27 +722,53 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ u64 cmd_trace;
+ unsigned long start_time;
/* Decode the cmpl description to get the io_req id */
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
+ icmnd_cmpl = &desc->u.icmnd_cmpl;
- if (id >= FNIC_MAX_IO_REQ)
+ if (id >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl sc is null - "
+ "hdr status = %s tag = 0x%x desc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, desc);
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ fnic->lport->host->host_no, id,
+ ((u64)icmnd_cmpl->_resvd0[1] << 16 |
+ (u64)icmnd_cmpl->_resvd0[0]),
+ ((u64)hdr_status << 16 |
+ (u64)icmnd_cmpl->scsi_status << 8 |
+ (u64)icmnd_cmpl->flags), desc,
+ (u64)icmnd_cmpl->residual, 0);
return;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
+ CMD_FLAGS(sc) |= FNIC_IO_REQ_NULL;
spin_unlock_irqrestore(io_lock, flags);
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "icmnd_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
/* firmware completed the io */
io_req->io_completed = 1;
@@ -682,6 +779,28 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
*/
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_PENDING;
+ switch (hdr_status) {
+ case FCPIO_SUCCESS:
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl ABTS pending hdr status = %s "
+ "sc 0x%p scsi_status %x residual %d\n",
+ fnic_fcpio_status_to_str(hdr_status), sc,
+ icmnd_cmpl->scsi_status,
+ icmnd_cmpl->residual);
+ break;
+ case FCPIO_ABORTED:
+ CMD_FLAGS(sc) |= FNIC_IO_ABORTED;
+ break;
+ default:
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "icmnd_cmpl abts pending "
+ "hdr status = %s tag = 0x%x sc = 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status),
+ id, sc);
+ break;
+ }
return;
}
@@ -765,6 +884,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Break link with the SCSI command */
CMD_SP(sc) = NULL;
+ CMD_FLAGS(sc) |= FNIC_IO_DONE;
spin_unlock_irqrestore(io_lock, flags);
@@ -772,6 +892,20 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
+ cmd_trace = ((u64)hdr_status << 56) |
+ (u64)icmnd_cmpl->scsi_status << 48 |
+ (u64)icmnd_cmpl->flags << 40 | (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5];
+
+ FNIC_TRACE(fnic_fcpio_icmnd_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ ((u64)icmnd_cmpl->_resvd0[1] << 56 |
+ (u64)icmnd_cmpl->_resvd0[0] << 48 |
+ jiffies_to_msecs(jiffies - start_time)),
+ desc, cmd_trace,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
if (sc->sc_data_direction == DMA_FROM_DEVICE) {
fnic->lport->host_stats.fcp_input_requests++;
fnic->fcp_input_bytes += xfer_len;
@@ -784,7 +918,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
/* Call SCSI completion function to complete the IO */
if (sc->scsi_done)
sc->scsi_done(sc);
-
}
/* fnic_fcpio_itmf_cmpl_handler
@@ -801,28 +934,54 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
struct fnic_io_req *io_req;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time;
fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
fcpio_tag_id_dec(&tag, &id);
- if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
+ if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "Tag out of range tag %x hdr status = %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
return;
+ }
sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
WARN_ON_ONCE(!sc);
- if (!sc)
+ if (!sc) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl sc is null - hdr status = %s tag = 0x%x\n",
+ fnic_fcpio_status_to_str(hdr_status), id);
return;
-
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
WARN_ON_ONCE(!io_req);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "itmf_cmpl io_req is null - "
+ "hdr status = %s tag = 0x%x sc 0x%p\n",
+ fnic_fcpio_status_to_str(hdr_status), id, sc);
return;
}
+ start_time = io_req->start_time;
- if (id & FNIC_TAG_ABORT) {
+ if ((id & FNIC_TAG_ABORT) && (id & FNIC_TAG_DEV_RST)) {
+ /* Abort and terminate completion of device reset req */
+ /* REVISIT : Add asserts about various flags */
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset abts cmpl recd. id %x status %s\n",
+ id, fnic_fcpio_status_to_str(hdr_status));
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
+ CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ } else if (id & FNIC_TAG_ABORT) {
/* Completion of abort cmd */
if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
/* This is a late completion. Ignore it */
@@ -832,6 +991,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
CMD_ABTS_STATUS(sc) = hdr_status;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"abts cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -855,14 +1015,58 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id,
+ sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc,
+ (((u64)hdr_status << 40) |
+ (u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) |
+ CMD_STATE(sc)));
sc->scsi_done(sc);
+ }
}
} else if (id & FNIC_TAG_DEV_RST) {
/* Completion of device reset */
CMD_LR_STATUS(sc) = hdr_status;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ABTS_PENDING;
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Terminate pending "
+ "dev reset cmpl recd. id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TIMED_OUT) {
+ /* Need to wait for terminate completion */
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_TRACE(fnic_fcpio_itmf_cmpl_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ desc, 0,
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "dev reset cmpl recd after time out. "
+ "id %d status %s\n",
+ (int)(id & FNIC_TAG_MASK),
+ fnic_fcpio_status_to_str(hdr_status));
+ return;
+ }
CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"dev reset cmpl recd. id %d status %s\n",
(int)(id & FNIC_TAG_MASK),
@@ -889,7 +1093,6 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
struct fcpio_fw_req *desc)
{
struct fnic *fnic = vnic_dev_priv(vdev);
- int ret = 0;
switch (desc->hdr.type) {
case FCPIO_ACK: /* fw copied copy wq desc to its queue */
@@ -906,11 +1109,11 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
- ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
+ fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
break;
case FCPIO_RESET_CMPL: /* fw completed reset */
- ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
+ fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
break;
default:
@@ -920,7 +1123,7 @@ static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
break;
}
- return ret;
+ return 0;
}
/*
@@ -950,6 +1153,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
unsigned long flags = 0;
struct scsi_cmnd *sc;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
if (i == exclude_id)
@@ -962,6 +1166,23 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ /*
+ * We get here only when the FW completes the reset
+ * without sending completions for outstanding IOs.
+ */
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_DONE;
+ if (io_req && io_req->dr_done)
+ complete(io_req->dr_done);
+ else if (io_req && io_req->abts_done)
+ complete(io_req->abts_done);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ } else if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
goto cleanup_scsi_cmd;
@@ -975,6 +1196,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
* If there is a scsi_cmnd associated with this io_req, then
* free the corresponding state
*/
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -984,8 +1206,18 @@ cleanup_scsi_cmd:
" DID_TRANSPORT_DISRUPTED\n");
/* Complete the command to SCSI */
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_cleanup_io,
+ sc->device->host->host_no, i, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 |
+ (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
}
@@ -998,6 +1230,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
struct scsi_cmnd *sc;
unsigned long flags;
spinlock_t *io_lock;
+ unsigned long start_time = 0;
/* get the tag reference */
fcpio_tag_id_dec(&desc->hdr.tag, &id);
@@ -1027,6 +1260,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
@@ -1035,8 +1269,17 @@ wq_copy_cleanup_scsi_cmd:
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
" DID_NO_CONNECT\n");
- if (sc->scsi_done)
+ if (sc->scsi_done) {
+ FNIC_TRACE(fnic_wq_copy_cleanup_handler,
+ sc->device->host->host_no, id, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
sc->scsi_done(sc);
+ }
}
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
@@ -1044,8 +1287,18 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
unsigned long flags;
+ spin_lock_irqsave(host->host_lock, flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, flags);
+ return 1;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
@@ -1053,6 +1306,9 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
if (!vnic_wq_copy_desc_avail(wq)) {
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_queue_abort_io_req: failure: no descriptors\n");
return 1;
}
fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
@@ -1060,12 +1316,15 @@ static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
fnic->config.ra_tov, fnic->config.ed_tov);
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
+ atomic_dec(&fnic->in_flight);
+
return 0;
}
-void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
+static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1075,13 +1334,14 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host,
- "fnic_rport_reset_exch called portid 0x%06x\n",
+ "fnic_rport_exch_reset called portid 0x%06x\n",
port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1096,6 +1356,15 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1104,9 +1373,29 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_rport_exch_reset: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "rport_exch_reset "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_rport_exch_reset dev rst sc 0x%p\n",
+ sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1118,7 +1407,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1127,12 +1416,17 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1141,6 +1435,7 @@ void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
void fnic_terminate_rport_io(struct fc_rport *rport)
{
int tag;
+ int abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1154,14 +1449,15 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
FNIC_SCSI_DBG(KERN_DEBUG,
fnic->lport->host, "fnic_terminate_rport_io called"
- " wwpn 0x%llx, wwnn0x%llx, portid 0x%06x\n",
- rport->port_name, rport->node_name,
+ " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n",
+ rport->port_name, rport->node_name, rport,
rport->port_id);
if (fnic->in_remove)
return;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ abt_tag = tag;
sc = scsi_host_find_tag(fnic->lport->host, tag);
if (!sc)
continue;
@@ -1180,6 +1476,14 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
continue;
}
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst not pending sc 0x%p\n",
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
/*
* Found IO that is still pending with firmware and
* belongs to rport that went away
@@ -1188,9 +1492,27 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
spin_unlock_irqrestore(io_lock, flags);
continue;
}
+ if (io_req->abts_done) {
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "fnic_terminate_rport_io: io_req->abts_done is set "
+ "state is %s\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+ }
+ if (!(CMD_FLAGS(sc) & FNIC_IO_ISSUED)) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "fnic_terminate_rport_io "
+ "IO not yet issued %p tag 0x%x flags "
+ "%x state %d\n",
+ sc, tag, CMD_FLAGS(sc), CMD_STATE(sc));
+ }
old_ioreq_state = CMD_STATE(sc);
CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag = (tag | FNIC_TAG_DEV_RST);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_terminate_rport_io dev rst sc 0x%p\n", sc);
+ }
BUG_ON(io_req->abts_done);
@@ -1203,7 +1525,7 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
/*
@@ -1212,12 +1534,17 @@ void fnic_terminate_rport_io(struct fc_rport *rport)
* aborted later by scsi_eh, or cleaned up during
* lun reset
*/
- io_lock = fnic_io_lock_hash(fnic, sc);
-
spin_lock_irqsave(io_lock, flags);
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
}
@@ -1232,13 +1559,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
int ret = SUCCESS;
- u32 task_req;
+ u32 task_req = 0;
struct scsi_lun fc_lun;
+ int tag;
DECLARE_COMPLETION_ONSTACK(tm_done);
/* Wait for rport to unblock */
@@ -1249,9 +1578,13 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
fnic = lport_priv(lp);
rport = starget_to_rport(scsi_target(sc->device));
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
- rport->port_id, sc->device->lun, sc->request->tag);
+ tag = sc->request->tag;
+ FNIC_SCSI_DBG(KERN_DEBUG,
+ fnic->lport->host,
+ "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %x flags %x\n",
+ rport->port_id, sc->device->lun, tag, CMD_FLAGS(sc));
+
+ CMD_FLAGS(sc) = FNIC_NO_FLAGS;
if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
ret = FAILED;
@@ -1318,6 +1651,10 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
ret = FAILED;
goto fnic_abort_cmd_end;
}
+ if (task_req == FCPIO_ITMF_ABT_TASK)
+ CMD_FLAGS(sc) |= FNIC_IO_ABTS_ISSUED;
+ else
+ CMD_FLAGS(sc) |= FNIC_IO_TERM_ISSUED;
/*
* We queued an abort IO, wait for its completion.
@@ -1336,6 +1673,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1344,6 +1682,7 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
/* fw did not complete abort, timed out */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT;
ret = FAILED;
goto fnic_abort_cmd_end;
}
@@ -1359,12 +1698,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
spin_unlock_irqrestore(io_lock, flags);
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
fnic_abort_cmd_end:
+ FNIC_TRACE(fnic_abort_cmd, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Returning from abort cmd %s\n",
+ "Returning from abort cmd type %x %s\n", task_req,
(ret == SUCCESS) ?
"SUCCESS" : "FAILED");
return ret;
@@ -1375,16 +1723,28 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
struct fnic_io_req *io_req)
{
struct vnic_wq_copy *wq = &fnic->wq_copy[0];
+ struct Scsi_Host *host = fnic->lport->host;
struct scsi_lun fc_lun;
int ret = 0;
unsigned long intr_flags;
+ spin_lock_irqsave(host->host_lock, intr_flags);
+ if (unlikely(fnic_chk_state_flags_locked(fnic,
+ FNIC_FLAGS_IO_BLOCKED))) {
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+ return FAILED;
+ } else
+ atomic_inc(&fnic->in_flight);
+ spin_unlock_irqrestore(host->host_lock, intr_flags);
+
spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);
if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
free_wq_copy_descs(fnic, wq);
if (!vnic_wq_copy_desc_avail(wq)) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "queue_dr_io_req failure - no descriptors\n");
ret = -EAGAIN;
goto lr_io_req_end;
}
@@ -1399,6 +1759,7 @@ static inline int fnic_queue_dr_io_req(struct fnic *fnic,
lr_io_req_end:
spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
+ atomic_dec(&fnic->in_flight);
return ret;
}
@@ -1412,7 +1773,7 @@ lr_io_req_end:
static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_cmnd *lr_sc)
{
- int tag;
+ int tag, abt_tag;
struct fnic_io_req *io_req;
spinlock_t *io_lock;
unsigned long flags;
@@ -1421,6 +1782,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
struct scsi_lun fc_lun;
struct scsi_device *lun_dev = lr_sc->device;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ enum fnic_ioreq_state old_ioreq_state;
for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
sc = scsi_host_find_tag(fnic->lport->host, tag);
@@ -1449,7 +1811,41 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
"Found IO in %s on lun\n",
fnic_ioreq_state_to_str(CMD_STATE(sc)));
- BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) &&
+ (!(CMD_FLAGS(sc) & FNIC_DEV_RST_ISSUED))) {
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s dev rst not pending sc 0x%p\n", __func__,
+ sc);
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+ old_ioreq_state = CMD_STATE(sc);
+ /*
+ * Any pending IO issued prior to the reset is expected to be
+ * in the abts pending state; if not, we set
+ * FNIC_IOREQ_ABTS_PENDING to indicate the IO is abort pending.
+ * When the IO completes, it will be handed over and
+ * handled in this function.
+ */
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+
+ if (io_req->abts_done)
+ shost_printk(KERN_ERR, fnic->lport->host,
+ "%s: io_req->abts_done is set state is %s\n",
+ __func__, fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ BUG_ON(io_req->abts_done);
+
+ abt_tag = tag;
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET) {
+ abt_tag |= FNIC_TAG_DEV_RST;
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "%s: dev rst sc 0x%p\n", __func__, sc);
+ }
CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
io_req->abts_done = &tm_done;
@@ -1458,17 +1854,25 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* Now queue the abort command to firmware */
int_to_scsilun(sc->device->lun, &fc_lun);
- if (fnic_queue_abort_io_req(fnic, tag,
+ if (fnic_queue_abort_io_req(fnic, abt_tag,
FCPIO_ITMF_ABT_TASK_TERM,
fc_lun.scsi_lun, io_req)) {
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (io_req)
io_req->abts_done = NULL;
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ CMD_STATE(sc) = old_ioreq_state;
spin_unlock_irqrestore(io_lock, flags);
ret = 1;
goto clean_pending_aborts_end;
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEVICE_RESET)
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
}
+ CMD_FLAGS(sc) |= FNIC_IO_INTERNAL_TERM_ISSUED;
wait_for_completion_timeout(&tm_done,
msecs_to_jiffies
@@ -1479,8 +1883,8 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
- ret = 1;
- goto clean_pending_aborts_end;
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_REQ_NULL;
+ continue;
}
io_req->abts_done = NULL;
@@ -1488,6 +1892,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
/* if abort is still pending with fw, fail */
if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
spin_unlock_irqrestore(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE;
ret = 1;
goto clean_pending_aborts_end;
}
@@ -1498,10 +1903,75 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
mempool_free(io_req, fnic->io_req_pool);
}
+ schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+
+ /* walk again to check if IOs are still pending in fw */
+ if (fnic_is_abts_pending(fnic, lr_sc))
+ ret = FAILED;
+
clean_pending_aborts_end:
return ret;
}
+/**
+ * fnic_scsi_host_start_tag
+ * Allocates a tag id from the host's tag list
+ **/
+static inline int
+fnic_scsi_host_start_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag, ret = SCSI_NO_TAG;
+
+ BUG_ON(!bqt);
+ if (!bqt) {
+ pr_err("Tags are not supported\n");
+ goto end;
+ }
+
+ do {
+ tag = find_next_zero_bit(bqt->tag_map, bqt->max_depth, 1);
+ if (tag >= bqt->max_depth) {
+ pr_err("Tag allocation failure\n");
+ goto end;
+ }
+ } while (test_and_set_bit(tag, bqt->tag_map));
+
+ bqt->tag_index[tag] = sc->request;
+ sc->request->tag = tag;
+ sc->tag = tag;
+ if (!sc->request->special)
+ sc->request->special = sc;
+
+ ret = tag;
+
+end:
+ return ret;
+}
+
+/**
+ * fnic_scsi_host_end_tag
+ * Frees the tag allocated by fnic_scsi_host_start_tag().
+ **/
+static inline void
+fnic_scsi_host_end_tag(struct fnic *fnic, struct scsi_cmnd *sc)
+{
+ struct blk_queue_tag *bqt = fnic->lport->host->bqt;
+ int tag = sc->request->tag;
+
+ if (tag == SCSI_NO_TAG)
+ return;
+
+ BUG_ON(!bqt || !bqt->tag_index[tag]);
+ if (!bqt)
+ return;
+
+ bqt->tag_index[tag] = NULL;
+ clear_bit(tag, bqt->tag_map);
+
+ return;
+}
+
/*
* SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
* fail to get aborted. It calls driver's eh_device_reset with a SCSI command
@@ -1511,13 +1981,17 @@ int fnic_device_reset(struct scsi_cmnd *sc)
{
struct fc_lport *lp;
struct fnic *fnic;
- struct fnic_io_req *io_req;
+ struct fnic_io_req *io_req = NULL;
struct fc_rport *rport;
int status;
int ret = FAILED;
spinlock_t *io_lock;
unsigned long flags;
+ unsigned long start_time = 0;
+ struct scsi_lun fc_lun;
+ int tag = 0;
DECLARE_COMPLETION_ONSTACK(tm_done);
+ int tag_gen_flag = 0; /*to track tags allocated by fnic driver*/
/* Wait for rport to unblock */
fc_block_scsi_eh(sc);
@@ -1529,8 +2003,8 @@ int fnic_device_reset(struct scsi_cmnd *sc)
rport = starget_to_rport(scsi_target(sc->device));
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
- "Device reset called FCID 0x%x, LUN 0x%x\n",
- rport->port_id, sc->device->lun);
+ "Device reset called FCID 0x%x, LUN 0x%x sc 0x%p\n",
+ rport->port_id, sc->device->lun, sc);
if (lp->state != LPORT_ST_READY || !(lp->link_up))
goto fnic_device_reset_end;
@@ -1539,6 +2013,16 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (fc_remote_port_chkready(rport))
goto fnic_device_reset_end;
+ CMD_FLAGS(sc) = FNIC_DEVICE_RESET;
+ /* Allocate tag if not present */
+
+ tag = sc->request->tag;
+ if (unlikely(tag < 0)) {
+ tag = fnic_scsi_host_start_tag(fnic, sc);
+ if (unlikely(tag == SCSI_NO_TAG))
+ goto fnic_device_reset_end;
+ tag_gen_flag = 1;
+ }
io_lock = fnic_io_lock_hash(fnic, sc);
spin_lock_irqsave(io_lock, flags);
io_req = (struct fnic_io_req *)CMD_SP(sc);
@@ -1562,8 +2046,7 @@ int fnic_device_reset(struct scsi_cmnd *sc)
CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
spin_unlock_irqrestore(io_lock, flags);
- FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
- sc->request->tag);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %x\n", tag);
/*
* issue the device reset, if enqueue failed, clean up the ioreq
@@ -1576,6 +2059,9 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req->dr_done = NULL;
goto fnic_device_reset_clean;
}
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_ISSUED;
+ spin_unlock_irqrestore(io_lock, flags);
/*
* Wait on the local completion for LUN reset. The io_req may be
@@ -1588,12 +2074,13 @@ int fnic_device_reset(struct scsi_cmnd *sc)
io_req = (struct fnic_io_req *)CMD_SP(sc);
if (!io_req) {
spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "io_req is null tag 0x%x sc 0x%p\n", tag, sc);
goto fnic_device_reset_end;
}
io_req->dr_done = NULL;
status = CMD_LR_STATUS(sc);
- spin_unlock_irqrestore(io_lock, flags);
/*
* If lun reset not completed, bail out with failed. io_req
@@ -1602,7 +2089,53 @@ int fnic_device_reset(struct scsi_cmnd *sc)
if (status == FCPIO_INVALID_CODE) {
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Device reset timed out\n");
- goto fnic_device_reset_end;
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TIMED_OUT;
+ spin_unlock_irqrestore(io_lock, flags);
+ int_to_scsilun(sc->device->lun, &fc_lun);
+ /*
+ * Issue abort and terminate on the device reset request.
+ * If queuing the abort fails, retry it after a delay.
+ */
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (CMD_FLAGS(sc) & FNIC_DEV_RST_TERM_ISSUED) {
+ spin_unlock_irqrestore(io_lock, flags);
+ break;
+ }
+ spin_unlock_irqrestore(io_lock, flags);
+ if (fnic_queue_abort_io_req(fnic,
+ tag | FNIC_TAG_DEV_RST,
+ FCPIO_ITMF_ABT_TASK_TERM,
+ fc_lun.scsi_lun, io_req)) {
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_ABT_TERM_DELAY_TIMEOUT));
+ } else {
+ spin_lock_irqsave(io_lock, flags);
+ CMD_FLAGS(sc) |= FNIC_DEV_RST_TERM_ISSUED;
+ CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
+ io_req->abts_done = &tm_done;
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Abort and terminate issued on Device reset "
+ "tag 0x%x sc 0x%p\n", tag, sc);
+ break;
+ }
+ }
+ while (1) {
+ spin_lock_irqsave(io_lock, flags);
+ if (!(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) {
+ spin_unlock_irqrestore(io_lock, flags);
+ wait_for_completion_timeout(&tm_done,
+ msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));
+ break;
+ } else {
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+ io_req->abts_done = NULL;
+ goto fnic_device_reset_clean;
+ }
+ }
+ } else {
+ spin_unlock_irqrestore(io_lock, flags);
}
/* Completed, but not successful, clean up the io_req, return fail */
@@ -1645,11 +2178,24 @@ fnic_device_reset_clean:
spin_unlock_irqrestore(io_lock, flags);
if (io_req) {
+ start_time = io_req->start_time;
fnic_release_ioreq_buf(fnic, io_req, sc);
mempool_free(io_req, fnic->io_req_pool);
}
fnic_device_reset_end:
+ FNIC_TRACE(fnic_device_reset, sc->device->host->host_no,
+ sc->request->tag, sc,
+ jiffies_to_msecs(jiffies - start_time),
+ 0, ((u64)sc->cmnd[0] << 32 |
+ (u64)sc->cmnd[2] << 24 | (u64)sc->cmnd[3] << 16 |
+ (u64)sc->cmnd[4] << 8 | sc->cmnd[5]),
+ (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));
+
+ /* free tag if it is allocated */
+ if (unlikely(tag_gen_flag))
+ fnic_scsi_host_end_tag(fnic, sc);
+
FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
"Returning from device reset %s\n",
(ret == SUCCESS) ?
@@ -1735,7 +2281,15 @@ void fnic_scsi_abort_io(struct fc_lport *lp)
DECLARE_COMPLETION_ONSTACK(remove_wait);
/* Issue firmware reset for fnic, wait for reset to complete */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
+
fnic->remove_wait = &remove_wait;
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
@@ -1776,7 +2330,14 @@ void fnic_scsi_cleanup(struct fc_lport *lp)
struct fnic *fnic = lport_priv(lp);
/* issue fw reset */
+retry_fw_reset:
spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (unlikely(fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)) {
+ /* fw reset is in progress, poll for its completion */
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ schedule_timeout(msecs_to_jiffies(100));
+ goto retry_fw_reset;
+ }
old_state = fnic->state;
fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
@@ -1822,3 +2383,61 @@ call_fc_exch_mgr_reset:
fc_exch_mgr_reset(lp, sid, did);
}
+
+/*
+ * fnic_is_abts_pending() is a helper function that
+ * walks through tag map to check if there is any IOs pending,if there is one,
+ * then it returns 1 (true), otherwise 0 (false)
+ * if @lr_sc is non NULL, then it checks IOs specific to particular LUN,
+ * otherwise, it checks for all IOs.
+ */
+int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc)
+{
+ int tag;
+ struct fnic_io_req *io_req;
+ spinlock_t *io_lock;
+ unsigned long flags;
+ int ret = 0;
+ struct scsi_cmnd *sc;
+ struct scsi_device *lun_dev = NULL;
+
+ if (lr_sc)
+ lun_dev = lr_sc->device;
+
+ /* walk the tag map to check if IOs are still pending in fw */
+ for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
+ sc = scsi_host_find_tag(fnic->lport->host, tag);
+ /*
+ * ignore this lun reset cmd or cmds that do not belong to
+ * this lun
+ */
+ if (!sc || (lr_sc && (sc->device != lun_dev || sc == lr_sc)))
+ continue;
+
+ io_lock = fnic_io_lock_hash(fnic, sc);
+ spin_lock_irqsave(io_lock, flags);
+
+ io_req = (struct fnic_io_req *)CMD_SP(sc);
+
+ if (!io_req || sc->device != lun_dev) {
+ spin_unlock_irqrestore(io_lock, flags);
+ continue;
+ }
+
+ /*
+ * Found IO that is still pending with firmware and
+ * belongs to the LUN that we are resetting
+ */
+ FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host,
+ "Found IO in %s on lun\n",
+ fnic_ioreq_state_to_str(CMD_STATE(sc)));
+
+ if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
+ ret = 1;
+ spin_unlock_irqrestore(io_lock, flags);
+ }
+
+ return ret;
+}
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
new file mode 100644
index 000000000000..23a60e3d8527
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -0,0 +1,273 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/module.h>
+#include <linux/mempool.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include "fnic_io.h"
+#include "fnic.h"
+
+unsigned int trace_max_pages;
+static int fnic_max_trace_entries;
+
+static unsigned long fnic_trace_buf_p;
+static DEFINE_SPINLOCK(fnic_trace_lock);
+
+static fnic_trace_dbg_t fnic_trace_entries;
+int fnic_tracing_enabled = 1;
+
+/*
+ * fnic_trace_get_buf - Get the next trace buffer entry for the caller to fill
+ *
+ * Description:
+ * This routine gets the next available trace buffer entry location @wr_idx
+ * from the allocated trace buffer pages and gives that memory location
+ * to the caller to store the trace information.
+ *
+ * Return Value:
+ * This routine returns a pointer to the next available trace entry
+ * @fnic_buf_head for the caller to fill with trace information.
+ */
+fnic_trace_data_t *fnic_trace_get_buf(void)
+{
+ unsigned long fnic_buf_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+
+ /*
+ * Get the next available memory location for writing trace
+ * information at @wr_idx, then increment @wr_idx
+ */
+ fnic_buf_head =
+ fnic_trace_entries.page_offset[fnic_trace_entries.wr_idx];
+ fnic_trace_entries.wr_idx++;
+
+ /*
+ * If the trace buffer is full, wrap @wr_idx back
+ * to zero
+ */
+ if (fnic_trace_entries.wr_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.wr_idx = 0;
+
+ /*
+ * If the write index @wr_idx has caught up with the read index
+ * @rd_idx, increment @rd_idx to move to the next entry in the trace buffer
+ */
+ if (fnic_trace_entries.wr_idx == fnic_trace_entries.rd_idx) {
+ fnic_trace_entries.rd_idx++;
+ if (fnic_trace_entries.rd_idx >= fnic_max_trace_entries)
+ fnic_trace_entries.rd_idx = 0;
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return (fnic_trace_data_t *)fnic_buf_head;
+}
+
+/*
+ * fnic_get_trace_data - Copy trace buffer to a memory file
+ * @fnic_dbgfs_t: pointer to debugfs trace buffer
+ *
+ * Description:
+ * This routine gathers the fnic trace debugfs data from the fnic_trace_data_t
+ * buffer and dumps it to fnic_dbgfs_t. It will start at the rd_idx entry in
+ * the log and process the log until the end of the buffer. Then it will gather
+ * from the beginning of the log and process until the current entry @wr_idx.
+ *
+ * Return Value:
+ * This routine returns the number of bytes that were dumped into fnic_dbgfs_t
+ */
+int fnic_get_trace_data(fnic_dbgfs_t *fnic_dbgfs_prt)
+{
+ int rd_idx;
+ int wr_idx;
+ int len = 0;
+ unsigned long flags;
+ char str[KSYM_SYMBOL_LEN];
+ struct timespec val;
+ fnic_trace_data_t *tbp;
+
+ spin_lock_irqsave(&fnic_trace_lock, flags);
+ rd_idx = fnic_trace_entries.rd_idx;
+ wr_idx = fnic_trace_entries.wr_idx;
+ if (wr_idx < rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * If rd_idx has reached the maximum number of trace
+ * entries, wrap it back to zero
+ */
+ if (rd_idx > (fnic_max_trace_entries-1))
+ rd_idx = 0;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file until rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ } else if (wr_idx > rd_idx) {
+ while (1) {
+ /* Start from read index @rd_idx */
+ tbp = (fnic_trace_data_t *)
+ fnic_trace_entries.page_offset[rd_idx];
+ if (!tbp) {
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return 0;
+ }
+ /* Convert function pointer to function name */
+ if (sizeof(unsigned long) < 8) {
+ sprint_symbol(str, tbp->fnaddr.low);
+ jiffies_to_timespec(tbp->timestamp.low, &val);
+ } else {
+ sprint_symbol(str, tbp->fnaddr.val);
+ jiffies_to_timespec(tbp->timestamp.val, &val);
+ }
+ /*
+ * Dump trace buffer entry to memory file
+ * and increment read index @rd_idx
+ */
+ len += snprintf(fnic_dbgfs_prt->buffer + len,
+ (trace_max_pages * PAGE_SIZE * 3) - len,
+ "%16lu.%16lu %-50s %8x %8x %16llx %16llx "
+ "%16llx %16llx %16llx\n", val.tv_sec,
+ val.tv_nsec, str, tbp->host_no, tbp->tag,
+ tbp->data[0], tbp->data[1], tbp->data[2],
+ tbp->data[3], tbp->data[4]);
+ rd_idx++;
+ /*
+ * Continue dumping trace buffer entries into the
+ * memory file until rd_idx reaches the write index
+ */
+ if (rd_idx == wr_idx)
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&fnic_trace_lock, flags);
+ return len;
+}
+
+/*
+ * fnic_trace_buf_init - Initialize fnic trace buffer logging facility
+ *
+ * Description:
+ * Initialize the trace buffer data structure by allocating the required
+ * memory and setting the page_offset of every trace entry: each entry's
+ * offset is the previous offset plus the trace entry length.
+ */
+int fnic_trace_buf_init(void)
+{
+ unsigned long fnic_buf_head;
+ int i;
+ int err = 0;
+
+ trace_max_pages = fnic_trace_max_pages;
+ fnic_max_trace_entries = (trace_max_pages * PAGE_SIZE)/
+ FNIC_ENTRY_SIZE_BYTES;
+
+ fnic_trace_buf_p = (unsigned long)vmalloc((trace_max_pages * PAGE_SIZE));
+ if (!fnic_trace_buf_p) {
+ printk(KERN_ERR PFX "Failed to allocate memory "
+ "for fnic_trace_buf_p\n");
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_buf_p, 0, (trace_max_pages * PAGE_SIZE));
+
+ fnic_trace_entries.page_offset = vmalloc(fnic_max_trace_entries *
+ sizeof(unsigned long));
+ if (!fnic_trace_entries.page_offset) {
+ printk(KERN_ERR PFX "Failed to allocate memory for"
+ " page_offset\n");
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ err = -ENOMEM;
+ goto err_fnic_trace_buf_init;
+ }
+ memset((void *)fnic_trace_entries.page_offset, 0,
+ (fnic_max_trace_entries * sizeof(unsigned long)));
+ fnic_trace_entries.wr_idx = fnic_trace_entries.rd_idx = 0;
+ fnic_buf_head = fnic_trace_buf_p;
+
+ /*
+ * Set page_offset field of fnic_trace_entries struct by
+ * calculating memory location for every trace entry using
+ * length of each trace entry
+ */
+ for (i = 0; i < fnic_max_trace_entries; i++) {
+ fnic_trace_entries.page_offset[i] = fnic_buf_head;
+ fnic_buf_head += FNIC_ENTRY_SIZE_BYTES;
+ }
+ err = fnic_trace_debugfs_init();
+ if (err < 0) {
+ printk(KERN_ERR PFX "Failed to initialize debugfs for tracing\n");
+ goto err_fnic_trace_debugfs_init;
+ }
+ printk(KERN_INFO PFX "Successfully Initialized Trace Buffer\n");
+ return err;
+err_fnic_trace_debugfs_init:
+ fnic_trace_free();
+err_fnic_trace_buf_init:
+ return err;
+}
+
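To make the sizing above concrete: fnic_max_trace_entries is the page budget divided by the per-entry size (FNIC_ENTRY_SIZE_BYTES, 64 bytes per fnic_trace.h below). A small illustrative calculation, assuming 4 KiB pages and a 16-page budget (the module parameter's actual default is set elsewhere in the driver and is not shown in this excerpt):

	/* Illustrative sizing only; both inputs are assumptions, not driver defaults. */
	#define EXAMPLE_TRACE_PAGES	16UL	/* assumed fnic_trace_max_pages */
	#define EXAMPLE_PAGE_SIZE	4096UL	/* assumed PAGE_SIZE */
	#define EXAMPLE_ENTRY_BYTES	64UL	/* FNIC_ENTRY_SIZE_BYTES */

	/* (16 * 4096) / 64 == 1024 entries before wr_idx wraps to zero */
	static const unsigned long example_max_entries =
		(EXAMPLE_TRACE_PAGES * EXAMPLE_PAGE_SIZE) / EXAMPLE_ENTRY_BYTES;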
+/*
+ * fnic_trace_free - Free memory of fnic trace data structures.
+ */
+void fnic_trace_free(void)
+{
+ fnic_tracing_enabled = 0;
+ fnic_trace_debugfs_terminate();
+ if (fnic_trace_entries.page_offset) {
+ vfree((void *)fnic_trace_entries.page_offset);
+ fnic_trace_entries.page_offset = NULL;
+ }
+ if (fnic_trace_buf_p) {
+ vfree((void *)fnic_trace_buf_p);
+ fnic_trace_buf_p = 0;
+ }
+ printk(KERN_INFO PFX "Successfully Freed Trace Buffer\n");
+}
diff --git a/drivers/scsi/fnic/fnic_trace.h b/drivers/scsi/fnic/fnic_trace.h
new file mode 100644
index 000000000000..cef42b4c4d6c
--- /dev/null
+++ b/drivers/scsi/fnic/fnic_trace.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright 2012 Cisco Systems, Inc. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __FNIC_TRACE_H__
+#define __FNIC_TRACE_H__
+
+#define FNIC_ENTRY_SIZE_BYTES 64
+
+extern ssize_t simple_read_from_buffer(void __user *to,
+ size_t count,
+ loff_t *ppos,
+ const void *from,
+ size_t available);
+
+extern unsigned int fnic_trace_max_pages;
+extern int fnic_tracing_enabled;
+extern unsigned int trace_max_pages;
+
+typedef struct fnic_trace_dbg {
+ int wr_idx;
+ int rd_idx;
+ unsigned long *page_offset;
+} fnic_trace_dbg_t;
+
+typedef struct fnic_dbgfs {
+ int buffer_len;
+ char *buffer;
+} fnic_dbgfs_t;
+
+struct fnic_trace_data {
+ union {
+ struct {
+ u32 low;
+ u32 high;
+ };
+ u64 val;
+ } timestamp, fnaddr;
+ u32 host_no;
+ u32 tag;
+ u64 data[5];
+} __attribute__((__packed__));
+
+typedef struct fnic_trace_data fnic_trace_data_t;
+
+#define FNIC_TRACE_ENTRY_SIZE \
+ (FNIC_ENTRY_SIZE_BYTES - sizeof(fnic_trace_data_t))
+
+#define FNIC_TRACE(_fn, _hn, _t, _a, _b, _c, _d, _e) \
+ if (unlikely(fnic_tracing_enabled)) { \
+ fnic_trace_data_t *trace_buf = fnic_trace_get_buf(); \
+ if (trace_buf) { \
+ if (sizeof(unsigned long) < 8) { \
+ trace_buf->timestamp.low = jiffies; \
+ trace_buf->fnaddr.low = (u32)(unsigned long)_fn; \
+ } else { \
+ trace_buf->timestamp.val = jiffies; \
+ trace_buf->fnaddr.val = (u64)(unsigned long)_fn; \
+ } \
+ trace_buf->host_no = _hn; \
+ trace_buf->tag = _t; \
+ trace_buf->data[0] = (u64)(unsigned long)_a; \
+ trace_buf->data[1] = (u64)(unsigned long)_b; \
+ trace_buf->data[2] = (u64)(unsigned long)_c; \
+ trace_buf->data[3] = (u64)(unsigned long)_d; \
+ trace_buf->data[4] = (u64)(unsigned long)_e; \
+ } \
+ }
+
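For reference, callers pass the enclosing function's symbol as _fn (later resolved by sprint_symbol() in fnic_get_trace_data()), the host number, the tag, and up to five 64-bit payload words; the call added in fnic_queuecommand_lck() earlier in this patch is a representative invocation:

	/* As used in fnic_queuecommand_lck() above; sc is the struct scsi_cmnd */
	FNIC_TRACE(fnic_queuecommand, sc->device->host->host_no,
		   sc->request->tag, sc, 0, 0, 0,
		   (((u64)CMD_FLAGS(sc) << 32) | CMD_STATE(sc)));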
+fnic_trace_data_t *fnic_trace_get_buf(void);
+int fnic_get_trace_data(fnic_dbgfs_t *);
+int fnic_trace_buf_init(void);
+void fnic_trace_free(void);
+int fnic_trace_debugfs_init(void);
+void fnic_trace_debugfs_terminate(void);
+
+#endif
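The debugfs plumbing itself lives in fnic_debugfs.c (listed in the diffstat but not shown in this excerpt). Given the simple_read_from_buffer() declaration above and the fnic_dbgfs_t layout, a read handler would plausibly look like the following sketch; the handler name and the open-time buffer fill are assumptions, not the actual fnic_debugfs.c code:

	/* Hedged sketch of a debugfs read path; only fnic_get_trace_data() and
	 * simple_read_from_buffer() are taken from this patch. */
	static ssize_t example_trace_read(struct file *file, char __user *ubuf,
					  size_t count, loff_t *ppos)
	{
		/* Assumed: open() allocated the fnic_dbgfs_t and ran
		 * fnic_get_trace_data() to fill buffer/buffer_len. */
		fnic_dbgfs_t *prt = file->private_data;

		return simple_read_from_buffer(ubuf, count, ppos,
					       prt->buffer, prt->buffer_len);
	}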
diff --git a/drivers/scsi/g_NCR5380.c b/drivers/scsi/g_NCR5380.c
index 1a5954f0915a..5041f925c191 100644
--- a/drivers/scsi/g_NCR5380.c
+++ b/drivers/scsi/g_NCR5380.c
@@ -939,7 +939,7 @@ module_param(dtc_3181e, int, 0);
MODULE_LICENSE("GPL");
#ifndef SCSI_G_NCR5380_MEM
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{
ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('D', 'T', 'C'), ISAPNP_FUNCTION(0x436e),
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 5d72274c507f..59bceac51a4c 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -590,7 +590,7 @@ static struct pci_driver gdth_pci_driver = {
.remove = gdth_pci_remove_one,
};
-static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
+static void gdth_pci_remove_one(struct pci_dev *pdev)
{
gdth_ha_str *ha = pci_get_drvdata(pdev);
@@ -602,8 +602,8 @@ static void __devexit gdth_pci_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static int __devinit gdth_pci_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int gdth_pci_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
u16 vendor = pdev->vendor;
u16 device = pdev->device;
@@ -855,8 +855,8 @@ static int __init gdth_init_isa(u32 bios_adr,gdth_ha_str *ha)
#endif /* CONFIG_ISA */
#ifdef CONFIG_PCI
-static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
- gdth_ha_str *ha)
+static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
+ gdth_ha_str *ha)
{
register gdt6_dpram_str __iomem *dp6_ptr;
register gdt6c_dpram_str __iomem *dp6c_ptr;
@@ -1107,14 +1107,8 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
pci_read_config_word(pdev, PCI_COMMAND, &command);
command |= 6;
pci_write_config_word(pdev, PCI_COMMAND, command);
- if (pci_resource_start(pdev, 8) == 1UL)
- pci_resource_start(pdev, 8) = 0UL;
- i = 0xFEFF0001UL;
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
- gdth_delay(1);
- pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
- pci_resource_start(pdev, 8));
-
+ gdth_delay(1);
+
dp6m_ptr = ha->brd;
/* Ensure that it is safe to access the non HW portions of DPMEM.
@@ -1239,7 +1233,7 @@ static int __devinit gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
/* controller protocol functions */
-static void __devinit gdth_enable_int(gdth_ha_str *ha)
+static void gdth_enable_int(gdth_ha_str *ha)
{
unsigned long flags;
gdt2_dpram_str __iomem *dp2_ptr;
@@ -1555,7 +1549,7 @@ static int gdth_internal_cmd(gdth_ha_str *ha, u8 service, u16 opcode,
/* search for devices */
-static int __devinit gdth_search_drives(gdth_ha_str *ha)
+static int gdth_search_drives(gdth_ha_str *ha)
{
u16 cdev_cnt, i;
int ok;
@@ -4959,8 +4953,7 @@ static int __init gdth_eisa_probe_one(u16 eisa_slot)
#endif /* CONFIG_EISA */
#ifdef CONFIG_PCI
-static int __devinit gdth_pci_probe_one(gdth_pci_str *pcistr,
- gdth_ha_str **ha_out)
+static int gdth_pci_probe_one(gdth_pci_str *pcistr, gdth_ha_str **ha_out)
{
struct Scsi_Host *shp;
gdth_ha_str *ha;
diff --git a/drivers/scsi/gvp11.c b/drivers/scsi/gvp11.c
index 488fbc648656..dbe4cc6b9f8b 100644
--- a/drivers/scsi/gvp11.c
+++ b/drivers/scsi/gvp11.c
@@ -204,7 +204,7 @@ static struct scsi_host_template gvp11_scsi_template = {
.use_clustering = DISABLE_CLUSTERING
};
-static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
+static int check_wd33c93(struct gvp11_scsiregs *regs)
{
#ifdef CHECK_WD33C93
volatile unsigned char *sasr_3393, *scmd_3393;
@@ -284,8 +284,7 @@ static int __devinit check_wd33c93(struct gvp11_scsiregs *regs)
return 0;
}
-static int __devinit gvp11_probe(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int gvp11_probe(struct zorro_dev *z, const struct zorro_device_id *ent)
{
struct Scsi_Host *instance;
unsigned long address;
@@ -380,7 +379,7 @@ fail_check_or_alloc:
return error;
}
-static void __devexit gvp11_remove(struct zorro_dev *z)
+static void gvp11_remove(struct zorro_dev *z)
{
struct Scsi_Host *instance = zorro_get_drvdata(z);
struct gvp11_hostdata *hdata = shost_priv(instance);
@@ -398,7 +397,7 @@ static void __devexit gvp11_remove(struct zorro_dev *z)
* SERIES I though).
*/
-static struct zorro_device_id gvp11_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id gvp11_zorro_tbl[] = {
{ ZORRO_PROD_GVP_COMBO_030_R3_SCSI, ~0x00ffffff },
{ ZORRO_PROD_GVP_SERIES_II, ~0x00ffffff },
{ ZORRO_PROD_GVP_GFORCE_030_SCSI, ~0x01ffffff },
@@ -414,7 +413,7 @@ static struct zorro_driver gvp11_driver = {
.name = "gvp11",
.id_table = gvp11_zorro_tbl,
.probe = gvp11_probe,
- .remove = __devexit_p(gvp11_remove),
+ .remove = gvp11_remove,
};
static int __init gvp11_init(void)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 4217e49aea46..7f4f790a3d71 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -165,7 +165,7 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c);
static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);
static struct CommandList *cmd_alloc(struct ctlr_info *h);
static struct CommandList *cmd_special_alloc(struct ctlr_info *h);
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type);
@@ -189,16 +189,16 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,
/* performant mode helper functions */
static void calc_bucket_map(int *bucket, int num_buckets,
int nsgs, int *bucket_map);
-static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);
static inline u32 next_command(struct ctlr_info *h, u8 q);
-static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset);
-static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar);
-static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready);
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset);
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar);
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready);
static inline void finish_cmd(struct CommandList *c);
#define BOARD_NOT_READY 0
#define BOARD_READY 1
@@ -1131,7 +1131,7 @@ clean:
return -ENOMEM;
}
-static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+static int hpsa_map_sg_chain_block(struct ctlr_info *h,
struct CommandList *c)
{
struct SGDescriptor *chain_sg, *chain_block;
@@ -1144,8 +1144,15 @@ static void hpsa_map_sg_chain_block(struct ctlr_info *h,
(c->Header.SGTotal - h->max_cmd_sg_entries);
temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
PCI_DMA_TODEVICE);
+ if (dma_mapping_error(&h->pdev->dev, temp64)) {
+ /* prevent subsequent unmapping */
+ chain_sg->Addr.lower = 0;
+ chain_sg->Addr.upper = 0;
+ return -1;
+ }
chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+ return 0;
}
static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
@@ -1390,7 +1397,7 @@ static void hpsa_pci_unmap(struct pci_dev *pdev,
}
}
-static void hpsa_map_one(struct pci_dev *pdev,
+static int hpsa_map_one(struct pci_dev *pdev,
struct CommandList *cp,
unsigned char *buf,
size_t buflen,
@@ -1401,10 +1408,16 @@ static void hpsa_map_one(struct pci_dev *pdev,
if (buflen == 0 || data_direction == PCI_DMA_NONE) {
cp->Header.SGList = 0;
cp->Header.SGTotal = 0;
- return;
+ return 0;
}
addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction);
+ if (dma_mapping_error(&pdev->dev, addr64)) {
+ /* Prevent subsequent unmap of something never mapped */
+ cp->Header.SGList = 0;
+ cp->Header.SGTotal = 0;
+ return -1;
+ }
cp->SG[0].Addr.lower =
(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
cp->SG[0].Addr.upper =
@@ -1412,6 +1425,7 @@ static void hpsa_map_one(struct pci_dev *pdev,
cp->SG[0].Len = buflen;
cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */
cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */
+ return 0;
}
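Both hpsa mapping helpers above now return int instead of void, because pci_map_single() can fail (under an IOMMU, or when swiotlb bounce buffers run out) and a command must never be submitted carrying a garbage bus address. A condensed sketch of the pattern, assuming a hypothetical map_buf() helper over the driver's SGDescriptor:

static int map_buf(struct pci_dev *pdev, struct SGDescriptor *desc,
                   void *buf, size_t len, int dir)
{
        dma_addr_t addr = pci_map_single(pdev, buf, len, dir);

        if (dma_mapping_error(&pdev->dev, addr)) {
                desc->Addr.lower = 0;   /* keep a later unmap path harmless */
                desc->Addr.upper = 0;
                return -1;
        }
        desc->Addr.lower = lower_32_bits(addr);
        desc->Addr.upper = upper_32_bits(addr);
        desc->Len = len;
        return 0;
}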
static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,
@@ -1540,13 +1554,18 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize,
+ page, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
ei = c->err_info;
if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) {
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
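With fill_cmd() now returning int, callers that map a data buffer must check the result and leave through a cleanup label, while buffer-less TYPE_MSG callers cast the result to void with a comment saying why failure is impossible. The checking style, condensed into a hypothetical wrapper (not code from this patch):

static int example_inquiry(struct ctlr_info *h, struct CommandList *c,
                           void *buf, size_t bufsize, unsigned char *addr)
{
        int rc = 0;

        if (fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, 0, addr, TYPE_CMD)) {
                rc = -1;        /* DMA mapping failed; do not submit */
                goto out;
        }
        hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
out:
        cmd_special_free(h, c); /* freed on success and failure alike */
        return rc;
}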
@@ -1564,7 +1583,9 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)
return -ENOMEM;
}
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no data buffer to map. */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h,
+ NULL, 0, 0, scsi3addr, TYPE_MSG);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -1631,8 +1652,11 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
}
/* address the controller */
memset(scsi3addr, 0, sizeof(scsi3addr));
- fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
- buf, bufsize, 0, scsi3addr, TYPE_CMD);
+ if (fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h,
+ buf, bufsize, 0, scsi3addr, TYPE_CMD)) {
+ rc = -1;
+ goto out;
+ }
if (extended_response)
c->Request.CDB[1] = extended_response;
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);
@@ -1642,6 +1666,7 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,
hpsa_scsi_interpret_error(c);
rc = -1;
}
+out:
cmd_special_free(h, c);
return rc;
}
@@ -2105,7 +2130,10 @@ static int hpsa_scatter_gather(struct ctlr_info *h,
if (chained) {
cp->Header.SGList = h->max_cmd_sg_entries;
cp->Header.SGTotal = (u16) (use_sg + 1);
- hpsa_map_sg_chain_block(h, cp);
+ if (hpsa_map_sg_chain_block(h, cp)) {
+ scsi_dma_unmap(cmd);
+ return -1;
+ }
return 0;
}
@@ -2353,8 +2381,9 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,
if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS)
waittime = waittime * 2;
- /* Send the Test Unit Ready */
- fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD);
+ /* Send the Test Unit Ready; fill_cmd can't fail, no mapping */
+ (void) fill_cmd(c, TEST_UNIT_READY, h,
+ NULL, 0, 0, lunaddr, TYPE_CMD);
hpsa_scsi_do_simple_cmd_core(h, c);
/* no unmap needed here because no data xfer. */
@@ -2439,7 +2468,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,
return -ENOMEM;
}
- fill_cmd(c, HPSA_ABORT_MSG, h, abort, 0, 0, scsi3addr, TYPE_MSG);
+ /* fill_cmd can't fail here, no buffer to map */
+ (void) fill_cmd(c, HPSA_ABORT_MSG, h, abort,
+ 0, 0, scsi3addr, TYPE_MSG);
if (swizzle)
swizzle_abort_tag(&c->Request.CDB[4]);
hpsa_scsi_do_simple_cmd_core(h, c);
@@ -2928,6 +2959,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
struct CommandList *c;
char *buff = NULL;
union u64bit temp64;
+ int rc = 0;
if (!argp)
return -EINVAL;
@@ -2947,8 +2979,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
/* Copy the data into the buffer we created */
if (copy_from_user(buff, iocommand.buf,
iocommand.buf_size)) {
- kfree(buff);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out_kfree;
}
} else {
memset(buff, 0, iocommand.buf_size);
@@ -2956,8 +2988,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
}
c = cmd_special_alloc(h);
if (c == NULL) {
- kfree(buff);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto out_kfree;
}
/* Fill in the command type */
c->cmd_type = CMD_IOCTL_PEND;
@@ -2982,6 +3014,13 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
if (iocommand.buf_size > 0) {
temp64.val = pci_map_single(h->pdev, buff,
iocommand.buf_size, PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[0].Addr.lower = 0;
+ c->SG[0].Addr.upper = 0;
+ c->SG[0].Len = 0;
+ rc = -ENOMEM;
+ goto out;
+ }
c->SG[0].Addr.lower = temp64.val32.lower;
c->SG[0].Addr.upper = temp64.val32.upper;
c->SG[0].Len = iocommand.buf_size;
@@ -2996,22 +3035,22 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
memcpy(&iocommand.error_info, c->err_info,
sizeof(iocommand.error_info));
if (copy_to_user(argp, &iocommand, sizeof(iocommand))) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
if (iocommand.Request.Type.Direction == XFER_READ &&
iocommand.buf_size > 0) {
/* Copy the data out of the buffer we created */
if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
- kfree(buff);
- cmd_special_free(h, c);
- return -EFAULT;
+ rc = -EFAULT;
+ goto out;
}
}
- kfree(buff);
+out:
cmd_special_free(h, c);
- return 0;
+out_kfree:
+ kfree(buff);
+ return rc;
}
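The passthru ioctl above is reworked into the kernel's layered-exit idiom: one rc variable, and labels ordered so each failure jumps past exactly the cleanups for resources it never acquired. The same shape boiled down, with do_work() as a hypothetical placeholder for the command setup and submission:

static int example_ioctl(struct ctlr_info *h, char *buff)
{
        struct CommandList *c;
        int rc = 0;

        c = cmd_special_alloc(h);
        if (c == NULL) {
                rc = -ENOMEM;
                goto out_kfree;         /* buff exists, c does not yet */
        }
        if (do_work(h, c, buff)) {      /* hypothetical helper */
                rc = -EFAULT;
                goto out;               /* both resources are held */
        }
out:
        cmd_special_free(h, c);
out_kfree:
        kfree(buff);
        return rc;
}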
static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
@@ -3103,6 +3142,15 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
for (i = 0; i < sg_used; i++) {
temp64.val = pci_map_single(h->pdev, buff[i],
buff_size[i], PCI_DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(&h->pdev->dev, temp64.val)) {
+ c->SG[i].Addr.lower = 0;
+ c->SG[i].Addr.upper = 0;
+ c->SG[i].Len = 0;
+ hpsa_pci_unmap(h->pdev, c, i,
+ PCI_DMA_BIDIRECTIONAL);
+ status = -ENOMEM;
+ goto cleanup1;
+ }
c->SG[i].Addr.lower = temp64.val32.lower;
c->SG[i].Addr.upper = temp64.val32.upper;
c->SG[i].Len = buff_size[i];
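Note the rollback rule in this hunk: when mapping entry i fails, hpsa_pci_unmap() is called with count i, so exactly the i entries already mapped are unmapped and the failed one is never touched. In outline, as a hypothetical helper rather than the driver's own function:

static int map_user_sg(struct ctlr_info *h, struct CommandList *c,
                       u8 *buff[], size_t buff_size[], int sg_used)
{
        dma_addr_t addr;
        int i;

        for (i = 0; i < sg_used; i++) {
                addr = pci_map_single(h->pdev, buff[i], buff_size[i],
                                      PCI_DMA_BIDIRECTIONAL);
                if (dma_mapping_error(&h->pdev->dev, addr)) {
                        /* unwind only the i entries already mapped */
                        hpsa_pci_unmap(h->pdev, c, i, PCI_DMA_BIDIRECTIONAL);
                        return -ENOMEM;
                }
                c->SG[i].Addr.lower = lower_32_bits(addr);
                c->SG[i].Addr.upper = upper_32_bits(addr);
                c->SG[i].Len = buff_size[i];
        }
        return 0;
}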
@@ -3182,15 +3230,16 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
}
}
-static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
- unsigned char *scsi3addr, u8 reset_type)
+static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,
+ u8 reset_type)
{
struct CommandList *c;
c = cmd_alloc(h);
if (!c)
return -ENOMEM;
- fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+ /* fill_cmd can't fail here, no data buffer to map */
+ (void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
RAID_CTLR_LUNID, TYPE_MSG);
c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
c->waiting = NULL;
@@ -3202,7 +3251,7 @@ static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
return 0;
}
-static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
+static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
int cmd_type)
{
@@ -3271,7 +3320,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
dev_warn(&h->pdev->dev, "unknown command 0x%02x\n", cmd);
BUG();
- return;
+ return -1;
}
} else if (cmd_type == TYPE_MSG) {
switch (cmd) {
@@ -3343,10 +3392,9 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
default:
pci_dir = PCI_DMA_BIDIRECTIONAL;
}
-
- hpsa_map_one(h->pdev, c, buff, size, pci_dir);
-
- return;
+ if (hpsa_map_one(h->pdev, c, buff, size, pci_dir))
+ return -1;
+ return 0;
}
/*
@@ -3606,8 +3654,8 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *queue)
* in simple mode, not performant mode due to the tag lookup.
* We only ever use this immediately after a controller reset.
*/
-static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
- unsigned char type)
+static int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
+ unsigned char type)
{
struct Command {
struct CommandListHeader CommandHeader;
@@ -3756,14 +3804,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
return 0;
}
-static __devinit void init_driver_version(char *driver_version, int len)
+static void init_driver_version(char *driver_version, int len)
{
memset(driver_version, 0, len);
strncpy(driver_version, HPSA " " HPSA_DRIVER_VERSION, len - 1);
}
-static __devinit int write_driver_ver_to_cfgtable(
- struct CfgTable __iomem *cfgtable)
+static int write_driver_ver_to_cfgtable(struct CfgTable __iomem *cfgtable)
{
char *driver_version;
int i, size = sizeof(cfgtable->driver_version);
@@ -3779,8 +3826,8 @@ static __devinit int write_driver_ver_to_cfgtable(
return 0;
}
-static __devinit void read_driver_ver_from_cfgtable(
- struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
+static void read_driver_ver_from_cfgtable(struct CfgTable __iomem *cfgtable,
+ unsigned char *driver_ver)
{
int i;
@@ -3788,8 +3835,7 @@ static __devinit void read_driver_ver_from_cfgtable(
driver_ver[i] = readb(&cfgtable->driver_version[i]);
}
-static __devinit int controller_reset_failed(
- struct CfgTable __iomem *cfgtable)
+static int controller_reset_failed(struct CfgTable __iomem *cfgtable)
{
char *driver_ver, *old_driver_ver;
@@ -3812,7 +3858,7 @@ static __devinit int controller_reset_failed(
/* This does a hard reset of the controller using PCI power management
* states or using the doorbell register.
*/
-static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
+static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4029,7 +4075,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr)
* controllers that are capable. If not, we use IO-APIC mode.
*/
-static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
+static void hpsa_interrupt_mode(struct ctlr_info *h)
{
#ifdef CONFIG_PCI_MSI
int err, i;
@@ -4077,7 +4123,7 @@ default_int_mode:
h->intr[h->intr_mode] = h->pdev->irq;
}
-static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
+static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
{
int i;
u32 subsystem_vendor_id, subsystem_device_id;
@@ -4101,8 +4147,8 @@ static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
return ARRAY_SIZE(products) - 1; /* generic unknown smart array */
}
-static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
- unsigned long *memory_bar)
+static int hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
+ unsigned long *memory_bar)
{
int i;
@@ -4118,8 +4164,8 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
- void __iomem *vaddr, int wait_for_ready)
+static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,
+ int wait_for_ready)
{
int i, iterations;
u32 scratchpad;
@@ -4143,9 +4189,9 @@ static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
return -ENODEV;
}
-static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
- void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index,
- u64 *cfg_offset)
+static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr,
+ u32 *cfg_base_addr, u64 *cfg_base_addr_index,
+ u64 *cfg_offset)
{
*cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET);
*cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET);
@@ -4158,7 +4204,7 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
return 0;
}
-static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
+static int hpsa_find_cfgtables(struct ctlr_info *h)
{
u64 cfg_offset;
u32 cfg_base_addr;
@@ -4187,7 +4233,7 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
return 0;
}
-static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
+static void hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
{
h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
@@ -4208,7 +4254,7 @@ static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
* max commands, max SG elements without chaining, and with chaining,
* SG chain block size, etc.
*/
-static void __devinit hpsa_find_board_params(struct ctlr_info *h)
+static void hpsa_find_board_params(struct ctlr_info *h)
{
hpsa_get_max_perf_mode_cmds(h);
h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */
@@ -4266,7 +4312,7 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);
}
-static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
+static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
{
int i;
u32 doorbell_value;
@@ -4287,7 +4333,7 @@ static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
}
}
-static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
+static int hpsa_enter_simple_mode(struct ctlr_info *h)
{
u32 trans_support;
@@ -4310,7 +4356,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
return 0;
}
-static int __devinit hpsa_pci_init(struct ctlr_info *h)
+static int hpsa_pci_init(struct ctlr_info *h)
{
int prod_index, err;
@@ -4378,7 +4424,7 @@ err_out_free_res:
return err;
}
-static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
+static void hpsa_hba_inquiry(struct ctlr_info *h)
{
int rc;
@@ -4394,7 +4440,7 @@ static void __devinit hpsa_hba_inquiry(struct ctlr_info *h)
}
}
-static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
+static int hpsa_init_reset_devices(struct pci_dev *pdev)
{
int rc, i;
@@ -4426,7 +4472,7 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
return 0;
}
-static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
{
h->cmd_pool_bits = kzalloc(
DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
@@ -4499,7 +4545,7 @@ static int hpsa_request_irq(struct ctlr_info *h,
return 0;
}
-static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
+static int hpsa_kdump_soft_reset(struct ctlr_info *h)
{
if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
HPSA_RESET_TYPE_CONTROLLER)) {
@@ -4713,8 +4759,7 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
spin_unlock_irqrestore(&lockup_detector_lock, flags);
}
-static int __devinit hpsa_init_one(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int dac, rc;
struct ctlr_info *h;
@@ -4885,10 +4930,13 @@ static void hpsa_flush_cache(struct ctlr_info *h)
dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
goto out_of_memory;
}
- fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
- RAID_CTLR_LUNID, TYPE_CMD);
+ if (fill_cmd(c, HPSA_CACHE_FLUSH, h, flush_buf, 4, 0,
+ RAID_CTLR_LUNID, TYPE_CMD)) {
+ goto out;
+ }
hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_TODEVICE);
if (c->err_info->CommandStatus != 0)
+out:
dev_warn(&h->pdev->dev,
"error flushing cache on controller\n");
cmd_special_free(h, c);
@@ -4910,7 +4958,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
hpsa_free_irqs_and_disable_msix(h);
}
-static void __devexit hpsa_free_device_info(struct ctlr_info *h)
+static void hpsa_free_device_info(struct ctlr_info *h)
{
int i;
@@ -4918,7 +4966,7 @@ static void __devexit hpsa_free_device_info(struct ctlr_info *h)
kfree(h->dev[i]);
}
-static void __devexit hpsa_remove_one(struct pci_dev *pdev)
+static void hpsa_remove_one(struct pci_dev *pdev)
{
struct ctlr_info *h;
@@ -4966,7 +5014,7 @@ static int hpsa_resume(__attribute__((unused)) struct pci_dev *pdev)
static struct pci_driver hpsa_pci_driver = {
.name = HPSA,
.probe = hpsa_init_one,
- .remove = __devexit_p(hpsa_remove_one),
+ .remove = hpsa_remove_one,
.id_table = hpsa_pci_device_id, /* id_table */
.shutdown = hpsa_shutdown,
.suspend = hpsa_suspend,
@@ -5010,8 +5058,7 @@ static void calc_bucket_map(int bucket[], int num_buckets,
}
}
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
- u32 use_short_tags)
+static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
{
int i;
unsigned long register_value;
@@ -5079,7 +5126,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
h->transMethod = CFGTBL_Trans_Performant;
}
-static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
+static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
{
u32 trans_support;
int i;
diff --git a/drivers/scsi/hptiop.c b/drivers/scsi/hptiop.c
index 192724ed7a32..ee196b363d81 100644
--- a/drivers/scsi/hptiop.c
+++ b/drivers/scsi/hptiop.c
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -42,7 +42,7 @@ MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");
static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
-static const char driver_ver[] = "v1.6 (091225)";
+static const char driver_ver[] = "v1.8";
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
@@ -77,6 +77,11 @@ static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}
+static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
+{
+ return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
+}
+
static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
@@ -230,6 +235,74 @@ static int iop_intr_mv(struct hptiop_hba *hba)
return ret;
}
+static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
+{
+ u32 req_type = _tag & 0xf;
+ struct hpt_iop_request_scsi_command *req;
+
+ switch (req_type) {
+ case IOP_REQUEST_TYPE_GET_CONFIG:
+ case IOP_REQUEST_TYPE_SET_CONFIG:
+ hba->msg_done = 1;
+ break;
+
+ case IOP_REQUEST_TYPE_SCSI_COMMAND:
+ req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
+ if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
+ req->header.result = IOP_RESULT_SUCCESS;
+ hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int iop_intr_mvfrey(struct hptiop_hba *hba)
+{
+ u32 _tag, status, cptr, cur_rptr;
+ int ret = 0;
+
+ if (hba->initialized)
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
+ if (status & CPU_TO_F0_DRBL_MSG_BIT) {
+ u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
+ dprintk("received outbound msg %x\n", msg);
+ hptiop_message_callback(hba, msg);
+ }
+ ret = 1;
+ }
+
+ status = readl(&(hba->u.mvfrey.mu->isr_cause));
+ if (status) {
+ writel(status, &(hba->u.mvfrey.mu->isr_cause));
+ do {
+ cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
+ cur_rptr = hba->u.mvfrey.outlist_rptr;
+ while (cur_rptr != cptr) {
+ cur_rptr++;
+ if (cur_rptr == hba->u.mvfrey.list_count)
+ cur_rptr = 0;
+
+ _tag = hba->u.mvfrey.outlist[cur_rptr].val;
+ BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
+ hptiop_request_callback_mvfrey(hba, _tag);
+ ret = 1;
+ }
+ hba->u.mvfrey.outlist_rptr = cur_rptr;
+ } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
+ }
+
+ if (hba->initialized)
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+
+ return ret;
+}
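iop_intr_mvfrey() above drains a producer/consumer ring: the controller publishes its producer index by DMA into a shadow word in host memory (outlist_cptr), and the driver chases it with a private read pointer, re-reading the shadow until it stops moving. The core of that chase as a standalone sketch, with the tag handler passed in because the real one lives elsewhere in the driver:

static void consume_outlist(__le32 *shadow,
                            struct mvfrey_outlist_entry *outlist,
                            u32 *rptr, u32 list_count,
                            void (*handle_tag)(u32 tag))
{
        u32 cptr = *shadow & 0xff;      /* controller's producer index */

        while (*rptr != cptr) {
                if (++(*rptr) == list_count)
                        *rptr = 0;      /* wrap around the ring */
                handle_tag(outlist[*rptr].val);
        }
}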
+
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
void __iomem *_req, u32 millisec)
{
@@ -272,6 +345,26 @@ static int iop_send_sync_request_mv(struct hptiop_hba *hba,
return -1;
}
+static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
+ u32 size_bits, u32 millisec)
+{
+ struct hpt_iop_request_header *reqhdr =
+ hba->u.mvfrey.internal_req.req_virt;
+ u32 i;
+
+ hba->msg_done = 0;
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
+ hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));
+
+ for (i = 0; i < millisec; i++) {
+ iop_intr_mvfrey(hba);
+ if (hba->msg_done)
+ break;
+ msleep(1);
+ }
+ return hba->msg_done ? 0 : -1;
+}
+
static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
@@ -285,11 +378,18 @@ static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
readl(&hba->u.mv.regs->inbound_doorbell);
}
+static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
+{
+ writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+ readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
+}
+
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
u32 i;
hba->msg_done = 0;
+ hba->ops->disable_intr(hba);
hba->ops->post_msg(hba, msg);
for (i = 0; i < millisec; i++) {
@@ -301,6 +401,7 @@ static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
msleep(1);
}
+ hba->ops->enable_intr(hba);
return hba->msg_done? 0 : -1;
}
@@ -354,6 +455,28 @@ static int iop_get_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_get_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_get_config *config)
+{
+ struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
+
+ if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
+ info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
+ return -1;
+
+ config->interface_version = info->interface_version;
+ config->firmware_version = info->firmware_version;
+ config->max_requests = info->max_requests;
+ config->request_size = info->request_size;
+ config->max_sg_count = info->max_sg_count;
+ config->data_transfer_length = info->data_transfer_length;
+ config->alignment_mask = info->alignment_mask;
+ config->max_devices = info->max_devices;
+ config->sdram_size = info->sdram_size;
+
+ return 0;
+}
+
static int iop_set_config_itl(struct hptiop_hba *hba,
struct hpt_iop_request_set_config *config)
{
@@ -408,6 +531,29 @@ static int iop_set_config_mv(struct hptiop_hba *hba,
return 0;
}
+static int iop_set_config_mvfrey(struct hptiop_hba *hba,
+ struct hpt_iop_request_set_config *config)
+{
+ struct hpt_iop_request_set_config *req =
+ hba->u.mvfrey.internal_req.req_virt;
+
+ memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
+ req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
+ req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
+ req->header.size =
+ cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
+ req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
+ req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
+ req->header.context_hi32 = 0;
+
+ if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
+ dprintk("Set config send cmd failed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
@@ -420,6 +566,13 @@ static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
+ writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
/* enable interrupts */
@@ -502,17 +655,39 @@ static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
return 0;
}
+static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
+ if (hba->u.mvfrey.config == NULL)
+ return -1;
+
+ hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
+ if (hba->u.mvfrey.mu == NULL) {
+ iounmap(hba->u.mvfrey.config);
+ return -1;
+ }
+
+ return 0;
+}
+
static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
iounmap(hba->u.mv.regs);
iounmap(hba->u.mv.mu);
}
+static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
+{
+ iounmap(hba->u.mvfrey.config);
+ iounmap(hba->u.mvfrey.mu);
+}
+
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
dprintk("iop message 0x%x\n", msg);
- if (msg == IOPMU_INBOUND_MSG0_NOP)
+ if (msg == IOPMU_INBOUND_MSG0_NOP ||
+ msg == IOPMU_INBOUND_MSG0_RESET_COMM)
hba->msg_done = 1;
if (!hba->initialized)
@@ -592,6 +767,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
memcpy(scp->sense_buffer, &req->sg_list,
min_t(size_t, SCSI_SENSE_BUFFERSIZE,
le32_to_cpu(req->dataxfer_length)));
+ goto skip_resid;
break;
default:
@@ -599,6 +775,10 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
break;
}
+ scsi_set_resid(scp,
+ scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
+
+skip_resid:
dprintk("scsi_done(%p)\n", scp);
scp->scsi_done(scp);
free_req(hba, &hba->reqs[tag]);
@@ -692,7 +872,8 @@ static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);
scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
- psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg));
+ psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
+ hba->ops->host_phy_flag;
psg[idx].size = cpu_to_le32(sg_dma_len(sg));
psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
cpu_to_le32(1) : 0;
@@ -751,6 +932,78 @@ static void hptiop_post_req_mv(struct hptiop_hba *hba,
MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}
+static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
+ struct hptiop_request *_req)
+{
+ struct hpt_iop_request_header *reqhdr = _req->req_virt;
+ u32 index;
+
+ reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
+ IOP_REQUEST_FLAG_ADDR_BITS |
+ ((_req->req_shifted_phy >> 11) & 0xffff0000));
+ reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
+ (_req->index << 4) | reqhdr->type);
+ reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
+ 0xffffffff);
+
+ hba->u.mvfrey.inlist_wptr++;
+ index = hba->u.mvfrey.inlist_wptr & 0x3fff;
+
+ if (index == hba->u.mvfrey.list_count) {
+ index = 0;
+ hba->u.mvfrey.inlist_wptr &= ~0x3fff;
+ hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
+ }
+
+ hba->u.mvfrey.inlist[index].addr =
+ (dma_addr_t)_req->req_shifted_phy << 5;
+ hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
+ writel(hba->u.mvfrey.inlist_wptr,
+ &(hba->u.mvfrey.mu->inbound_write_ptr));
+ readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
+}
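The inbound write pointer above packs two fields into one value: the low 14 bits index the request list, and CL_POINTER_TOGGLE flips on every wrap, the usual way to let producer and consumer tell a full ring from an empty one when the indices match. Just that arithmetic, isolated as a sketch:

static u32 advance_inlist_wptr(u32 wptr, u32 list_count, u32 *index)
{
        wptr++;
        *index = wptr & 0x3fff;         /* low 14 bits index the ring */
        if (*index == list_count) {
                *index = 0;
                wptr &= ~0x3fff;        /* restart the index field */
                wptr ^= CL_POINTER_TOGGLE; /* lap bit: full vs. empty */
        }
        return wptr;
}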
+
+static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
+static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = hba->u.mvfrey.list_count;
+
+ if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
+ return -1;
+
+ /* wait 100ms for MCU ready */
+ msleep(100);
+
+ writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->inbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->inbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_base_high));
+
+ writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
+ &(hba->u.mvfrey.mu->outbound_shadow_base));
+ writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
+ &(hba->u.mvfrey.mu->outbound_shadow_base_high));
+
+ hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ *hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
+ hba->u.mvfrey.outlist_rptr = list_count - 1;
+ return 0;
+}
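The "(phy >> 16) >> 16" in hptiop_reset_comm_mvfrey() is deliberate: dma_addr_t is only 32 bits on some configurations, shifting a 32-bit value by 32 is undefined behaviour in C, and two 16-bit shifts are always defined (yielding 0 there, the high half elsewhere). A sketch of the helper shape this suggests, where write_base_pair is hypothetical:

static void write_base_pair(dma_addr_t phy,
                            void __iomem *lo, void __iomem *hi)
{
        writel(phy & 0xffffffff, lo);
        writel((phy >> 16) >> 16, hi);  /* defined even for 32-bit dma_addr_t */
}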
+
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
void (*done)(struct scsi_cmnd *))
{
@@ -771,14 +1024,15 @@ static int hptiop_queuecommand_lck(struct scsi_cmnd *scp,
_req->scp = scp;
- dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%x-%x-%x) "
+ dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%d cdb=(%08x-%08x-%08x-%08x) "
"req_index=%d, req=%p\n",
scp,
host->host_no, scp->device->channel,
scp->device->id, scp->device->lun,
- ((u32 *)scp->cmnd)[0],
- ((u32 *)scp->cmnd)[1],
- ((u32 *)scp->cmnd)[2],
+ cpu_to_be32(((u32 *)scp->cmnd)[0]),
+ cpu_to_be32(((u32 *)scp->cmnd)[1]),
+ cpu_to_be32(((u32 *)scp->cmnd)[2]),
+ cpu_to_be32(((u32 *)scp->cmnd)[3]),
_req->index, _req->req_virt);
scp->result = 0;
@@ -933,6 +1187,11 @@ static struct scsi_host_template driver_template = {
.change_queue_depth = hptiop_adjust_disk_queue_depth,
};
+static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
@@ -943,6 +1202,63 @@ static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
return -1;
}
+static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
+{
+ u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
+ char *p;
+ dma_addr_t phy;
+
+ BUG_ON(hba->max_request_size == 0);
+
+ if (list_count == 0) {
+ BUG();
+ return -1;
+ }
+
+ list_count >>= 16;
+
+ hba->u.mvfrey.list_count = list_count;
+ hba->u.mvfrey.internal_mem_size = 0x800 +
+ list_count * sizeof(struct mvfrey_inlist_entry) +
+ list_count * sizeof(struct mvfrey_outlist_entry) +
+ sizeof(int);
+
+ p = dma_alloc_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
+ if (!p)
+ return -1;
+
+ hba->u.mvfrey.internal_req.req_virt = p;
+ hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
+ hba->u.mvfrey.internal_req.scp = NULL;
+ hba->u.mvfrey.internal_req.next = NULL;
+
+ p += 0x800;
+ phy += 0x800;
+
+ hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
+ hba->u.mvfrey.inlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_inlist_entry);
+ phy += list_count * sizeof(struct mvfrey_inlist_entry);
+
+ hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
+ hba->u.mvfrey.outlist_phy = phy;
+
+ p += list_count * sizeof(struct mvfrey_outlist_entry);
+ phy += list_count * sizeof(struct mvfrey_outlist_entry);
+
+ hba->u.mvfrey.outlist_cptr = (__le32 *)p;
+ hba->u.mvfrey.outlist_cptr_phy = phy;
+
+ return 0;
+}
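hptiop_internal_memalloc_mvfrey() carves a single coherent allocation into four regions (the 0x800-byte internal request, the inbound list, the outbound list, and the shadow copy pointer), advancing the virtual and bus-address cursors in lockstep so each region knows both of its addresses. The same carve-up reduced to two regions, as a hypothetical helper:

static int carve_two(struct device *dev, u32 n,
                     struct mvfrey_inlist_entry **in, dma_addr_t *in_phy,
                     struct mvfrey_outlist_entry **out, dma_addr_t *out_phy)
{
        size_t insz = n * sizeof(struct mvfrey_inlist_entry);
        size_t outsz = n * sizeof(struct mvfrey_outlist_entry);
        dma_addr_t phy;
        char *p = dma_alloc_coherent(dev, insz + outsz, &phy, GFP_KERNEL);

        if (!p)
                return -1;
        *in = (struct mvfrey_inlist_entry *)p;  /* cursors move together */
        *in_phy = phy;
        *out = (struct mvfrey_outlist_entry *)(p + insz);
        *out_phy = phy + insz;
        return 0;
}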
+
+static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
+{
+ return 0;
+}
+
static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
if (hba->u.mv.internal_req) {
@@ -953,8 +1269,20 @@ static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
return -1;
}
-static int __devinit hptiop_probe(struct pci_dev *pcidev,
- const struct pci_device_id *id)
+static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
+{
+ if (hba->u.mvfrey.internal_req.req_virt) {
+ dma_free_coherent(&hba->pcidev->dev,
+ hba->u.mvfrey.internal_mem_size,
+ hba->u.mvfrey.internal_req.req_virt,
+ (dma_addr_t)
+ hba->u.mvfrey.internal_req.req_shifted_phy << 5);
+ return 0;
+ } else
+ return -1;
+}
+
+static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
struct Scsi_Host *host = NULL;
struct hptiop_hba *hba;
@@ -1027,7 +1355,7 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto unmap_pci_bar;
}
- if (hba->ops->internal_memalloc) {
+ if (hba->ops->family == MV_BASED_IOP) {
if (hba->ops->internal_memalloc(hba)) {
printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
hba->host->host_no);
@@ -1050,6 +1378,19 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->interface_version = le32_to_cpu(iop_config.interface_version);
hba->sdram_size = le32_to_cpu(iop_config.sdram_size);
+ if (hba->ops->family == MVFREY_BASED_IOP) {
+ if (hba->ops->internal_memalloc(hba)) {
+ printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ if (hba->ops->reset_comm(hba)) {
+ printk(KERN_ERR "scsi%d: reset comm failed\n",
+ hba->host->host_no);
+ goto unmap_pci_bar;
+ }
+ }
+
if (hba->firmware_version > 0x01020000 ||
hba->interface_version > 0x01020000)
hba->iopintf_v2 = 1;
@@ -1104,14 +1445,13 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
hba->dma_coherent = start_virt;
hba->dma_coherent_handle = start_phy;
- if ((start_phy & 0x1f) != 0)
- {
+ if ((start_phy & 0x1f) != 0) {
offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
start_phy += offset;
start_virt += offset;
}
- hba->req_list = start_virt;
+ hba->req_list = NULL;
for (i = 0; i < hba->max_requests; i++) {
hba->reqs[i].next = NULL;
hba->reqs[i].req_virt = start_virt;
@@ -1132,7 +1472,6 @@ static int __devinit hptiop_probe(struct pci_dev *pcidev,
goto free_request_mem;
}
-
scsi_scan_host(host);
dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
@@ -1147,8 +1486,7 @@ free_request_irq:
free_irq(hba->pcidev->irq, hba);
unmap_pci_bar:
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1198,6 +1536,16 @@ static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
readl(&hba->u.mv.regs->outbound_intmask);
}
+static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
+{
+ writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
+ readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
+ writel(0, &(hba->u.mvfrey.mu->isr_enable));
+ readl(&(hba->u.mvfrey.mu->isr_enable));
+ writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
+ readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
+}
+
static void hptiop_remove(struct pci_dev *pcidev)
{
struct Scsi_Host *host = pci_get_drvdata(pcidev);
@@ -1216,8 +1564,7 @@ static void hptiop_remove(struct pci_dev *pcidev)
hba->dma_coherent,
hba->dma_coherent_handle);
- if (hba->ops->internal_memfree)
- hba->ops->internal_memfree(hba);
+ hba->ops->internal_memfree(hba);
hba->ops->unmap_pci_bar(hba);
@@ -1229,9 +1576,10 @@ static void hptiop_remove(struct pci_dev *pcidev)
}
static struct hptiop_adapter_ops hptiop_itl_ops = {
+ .family = INTEL_BASED_IOP,
.iop_wait_ready = iop_wait_ready_itl,
- .internal_memalloc = NULL,
- .internal_memfree = NULL,
+ .internal_memalloc = hptiop_internal_memalloc_itl,
+ .internal_memfree = hptiop_internal_memfree_itl,
.map_pci_bar = hptiop_map_pci_bar_itl,
.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
.enable_intr = hptiop_enable_intr_itl,
@@ -1242,9 +1590,12 @@ static struct hptiop_adapter_ops hptiop_itl_ops = {
.post_msg = hptiop_post_msg_itl,
.post_req = hptiop_post_req_itl,
.hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_itl,
+ .host_phy_flag = cpu_to_le64(0),
};
static struct hptiop_adapter_ops hptiop_mv_ops = {
+ .family = MV_BASED_IOP,
.iop_wait_ready = iop_wait_ready_mv,
.internal_memalloc = hptiop_internal_memalloc_mv,
.internal_memfree = hptiop_internal_memfree_mv,
@@ -1258,6 +1609,27 @@ static struct hptiop_adapter_ops hptiop_mv_ops = {
.post_msg = hptiop_post_msg_mv,
.post_req = hptiop_post_req_mv,
.hw_dma_bit_mask = 33,
+ .reset_comm = hptiop_reset_comm_mv,
+ .host_phy_flag = cpu_to_le64(0),
+};
+
+static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
+ .family = MVFREY_BASED_IOP,
+ .iop_wait_ready = iop_wait_ready_mvfrey,
+ .internal_memalloc = hptiop_internal_memalloc_mvfrey,
+ .internal_memfree = hptiop_internal_memfree_mvfrey,
+ .map_pci_bar = hptiop_map_pci_bar_mvfrey,
+ .unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
+ .enable_intr = hptiop_enable_intr_mvfrey,
+ .disable_intr = hptiop_disable_intr_mvfrey,
+ .get_config = iop_get_config_mvfrey,
+ .set_config = iop_set_config_mvfrey,
+ .iop_intr = iop_intr_mvfrey,
+ .post_msg = hptiop_post_msg_mvfrey,
+ .post_req = hptiop_post_req_mvfrey,
+ .hw_dma_bit_mask = 64,
+ .reset_comm = hptiop_reset_comm_mvfrey,
+ .host_phy_flag = cpu_to_le64(1),
};
static struct pci_device_id hptiop_id_table[] = {
@@ -1283,6 +1655,8 @@ static struct pci_device_id hptiop_id_table[] = {
{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
+ { PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
+ { PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
{},
};
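The new 0x4520/0x4522 entries reach hptiop_mvfrey_ops through the standard PCI driver_data trick: the id table stores the ops pointer as a kernel_ulong_t, and probe casts it back, so a single probe path serves all three families and branches on ops->family only where the bring-up order differs. The retrieval step, in isolation:

static struct hptiop_adapter_ops *ops_of(const struct pci_device_id *id)
{
        /* hptiop_id_table stores &hptiop_*_ops in .driver_data */
        return (struct hptiop_adapter_ops *)id->driver_data;
}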
diff --git a/drivers/scsi/hptiop.h b/drivers/scsi/hptiop.h
index baa648d87fde..020619d60b08 100644
--- a/drivers/scsi/hptiop.h
+++ b/drivers/scsi/hptiop.h
@@ -1,6 +1,6 @@
/*
* HighPoint RR3xxx/4xxx controller driver for Linux
- * Copyright (C) 2006-2009 HighPoint Technologies, Inc. All Rights Reserved.
+ * Copyright (C) 2006-2012 HighPoint Technologies, Inc. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -75,6 +75,45 @@ struct hpt_iopmv_regs {
__le32 outbound_intmask;
};
+#pragma pack(1)
+struct hpt_iopmu_mvfrey {
+ __le32 reserved0[(0x4000 - 0) / 4];
+ __le32 inbound_base;
+ __le32 inbound_base_high;
+ __le32 reserved1[(0x4018 - 0x4008) / 4];
+ __le32 inbound_write_ptr;
+ __le32 reserved2[(0x402c - 0x401c) / 4];
+ __le32 inbound_conf_ctl;
+ __le32 reserved3[(0x4050 - 0x4030) / 4];
+ __le32 outbound_base;
+ __le32 outbound_base_high;
+ __le32 outbound_shadow_base;
+ __le32 outbound_shadow_base_high;
+ __le32 reserved4[(0x4088 - 0x4060) / 4];
+ __le32 isr_cause;
+ __le32 isr_enable;
+ __le32 reserved5[(0x1020c - 0x4090) / 4];
+ __le32 pcie_f0_int_enable;
+ __le32 reserved6[(0x10400 - 0x10210) / 4];
+ __le32 f0_to_cpu_msg_a;
+ __le32 reserved7[(0x10420 - 0x10404) / 4];
+ __le32 cpu_to_f0_msg_a;
+ __le32 reserved8[(0x10480 - 0x10424) / 4];
+ __le32 f0_doorbell;
+ __le32 f0_doorbell_enable;
+};
+
+struct mvfrey_inlist_entry {
+ dma_addr_t addr;
+ __le32 intrfc_len;
+ __le32 reserved;
+};
+
+struct mvfrey_outlist_entry {
+ __le32 val;
+};
+#pragma pack()
+
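The reserved arrays in hpt_iopmu_mvfrey are sized from the difference between the register offsets they bridge, so the struct layout documents itself against the hardware manual and a miscounted gap shows up as a wrong offset rather than a silent shift. The idiom in isolation, with illustrative offsets:

#pragma pack(1)
struct regs_sketch {
        __le32 reg_a;                           /* offset 0x10 */
        __le32 reserved[(0x20 - 0x14) / 4];     /* pad 0x14..0x20 */
        __le32 reg_b;                           /* offset 0x20 */
};
#pragma pack()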
#define MVIOP_MU_QUEUE_ADDR_HOST_MASK (~(0x1full))
#define MVIOP_MU_QUEUE_ADDR_HOST_BIT 4
@@ -87,6 +126,9 @@ struct hpt_iopmv_regs {
#define MVIOP_MU_OUTBOUND_INT_MSG 1
#define MVIOP_MU_OUTBOUND_INT_POSTQUEUE 2
+#define CL_POINTER_TOGGLE 0x00004000
+#define CPU_TO_F0_DRBL_MSG_BIT 0x02000000
+
enum hpt_iopmu_message {
/* host-to-iop messages */
IOPMU_INBOUND_MSG0_NOP = 0,
@@ -95,6 +137,7 @@ enum hpt_iopmu_message {
IOPMU_INBOUND_MSG0_SHUTDOWN,
IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK,
IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK,
+ IOPMU_INBOUND_MSG0_RESET_COMM,
IOPMU_INBOUND_MSG0_MAX = 0xff,
/* iop-to-host messages */
IOPMU_OUTBOUND_MSG0_REGISTER_DEVICE_0 = 0x100,
@@ -118,6 +161,7 @@ struct hpt_iop_request_header {
#define IOP_REQUEST_FLAG_BIST_REQUEST 2
#define IOP_REQUEST_FLAG_REMAPPED 4
#define IOP_REQUEST_FLAG_OUTPUT_CONTEXT 8
+#define IOP_REQUEST_FLAG_ADDR_BITS 0x40 /* flags[31:16] is phy_addr[47:32] */
enum hpt_iop_request_type {
IOP_REQUEST_TYPE_GET_CONFIG = 0,
@@ -223,6 +267,13 @@ struct hpt_scsi_pointer {
#define HPT_SCP(scp) ((struct hpt_scsi_pointer *)&(scp)->SCp)
+enum hptiop_family {
+ UNKNOWN_BASED_IOP,
+ INTEL_BASED_IOP,
+ MV_BASED_IOP,
+ MVFREY_BASED_IOP
+};
+
struct hptiop_hba {
struct hptiop_adapter_ops *ops;
union {
@@ -236,6 +287,22 @@ struct hptiop_hba {
void *internal_req;
dma_addr_t internal_req_phy;
} mv;
+ struct {
+ struct hpt_iop_request_get_config __iomem *config;
+ struct hpt_iopmu_mvfrey __iomem *mu;
+
+ int internal_mem_size;
+ struct hptiop_request internal_req;
+ int list_count;
+ struct mvfrey_inlist_entry *inlist;
+ dma_addr_t inlist_phy;
+ __le32 inlist_wptr;
+ struct mvfrey_outlist_entry *outlist;
+ dma_addr_t outlist_phy;
+ __le32 *outlist_cptr; /* copy pointer shadow */
+ dma_addr_t outlist_cptr_phy;
+ __le32 outlist_rptr;
+ } mvfrey;
} u;
struct Scsi_Host *host;
@@ -283,6 +350,7 @@ struct hpt_ioctl_k {
};
struct hptiop_adapter_ops {
+ enum hptiop_family family;
int (*iop_wait_ready)(struct hptiop_hba *hba, u32 millisec);
int (*internal_memalloc)(struct hptiop_hba *hba);
int (*internal_memfree)(struct hptiop_hba *hba);
@@ -298,6 +366,8 @@ struct hptiop_adapter_ops {
void (*post_msg)(struct hptiop_hba *hba, u32 msg);
void (*post_req)(struct hptiop_hba *hba, struct hptiop_request *_req);
int hw_dma_bit_mask;
+ int (*reset_comm)(struct hptiop_hba *hba);
+ __le64 host_phy_flag;
};
#define HPT_IOCTL_RESULT_OK 0
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 5e8d51bd03de..cc82d0f322b6 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4905,7 +4905,7 @@ static unsigned long ibmvfc_get_desired_dma(struct vio_dev *vdev)
return pool_dma + ((512 * 1024) * driver_template.cmd_per_lun);
}
-static struct vio_device_id ibmvfc_device_table[] __devinitdata = {
+static struct vio_device_id ibmvfc_device_table[] = {
{"fcp", "IBM,vfc-client"},
{ "", "" }
};
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index ef9a54c7da67..a044f593e8b9 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -2362,7 +2362,7 @@ static int ibmvscsi_resume(struct device *dev)
* ibmvscsi_device_table: Used by vio.c to match devices in the device tree we
* support.
*/
-static struct vio_device_id ibmvscsi_device_table[] __devinitdata = {
+static struct vio_device_id ibmvscsi_device_table[] = {
{"vscsi", "IBM,v-scsi"},
{ "", "" }
};
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c
index aa7ed81e9237..bf9eca845166 100644
--- a/drivers/scsi/ibmvscsi/ibmvstgt.c
+++ b/drivers/scsi/ibmvscsi/ibmvstgt.c
@@ -907,7 +907,7 @@ static int ibmvstgt_remove(struct vio_dev *dev)
return 0;
}
-static struct vio_device_id ibmvstgt_device_table[] __devinitdata = {
+static struct vio_device_id ibmvstgt_device_table[] = {
{"v-scsi-host", "IBM,v-scsi-host"},
{"",""}
};
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index dd741bcd6ccd..280d5af113d1 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -2992,7 +2992,7 @@ static struct pci_driver initio_pci_driver = {
.name = "initio",
.id_table = initio_pci_tbl,
.probe = initio_probe_one,
- .remove = __devexit_p(initio_remove_one),
+ .remove = initio_remove_one,
};
static int __init initio_init_driver(void)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fe6029f4df16..f328089a1060 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -98,6 +98,7 @@ static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
+static unsigned int ipr_number_of_msix = 2;
static DEFINE_SPINLOCK(ipr_driver_lock);
/* This table describes the differences between DMA controller chips */
@@ -107,6 +108,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x0022C,
.clr_interrupt_mask_reg = 0x00230,
@@ -131,6 +133,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 100,
.cache_line_size = 0x20,
.clear_isr = 1,
+ .iopoll_weight = 0,
{
.set_interrupt_mask_reg = 0x00288,
.clr_interrupt_mask_reg = 0x0028C,
@@ -155,6 +158,7 @@ static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
.max_cmds = 1000,
.cache_line_size = 0x20,
.clear_isr = 0,
+ .iopoll_weight = 64,
{
.set_interrupt_mask_reg = 0x00010,
.clr_interrupt_mask_reg = 0x00018,
@@ -215,6 +219,8 @@ MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to e
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
"[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
+module_param_named(number_of_msix, ipr_number_of_msix, int, 0);
+MODULE_PARM_DESC(number_of_msix, "Specify the number of MSI-X interrupts to use on capable adapters (1 - 5). (default: 2)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
@@ -549,7 +555,8 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
struct ipr_trace_entry *trace_entry;
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
- trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
+ trace_entry = &ioa_cfg->trace[atomic_add_return
+ (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES];
trace_entry->time = jiffies;
trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
trace_entry->type = type;
@@ -560,6 +567,7 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
trace_entry->u.add_data = add_data;
+ wmb();
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while (0)
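The trace hunk above replaces an unlocked trace_index++ with atomic_add_return(), which hands every CPU a unique ticket; the modulo folds the ticket into the ring, and the added wmb() orders the entry's fields before later readers see them. The slot claim in isolation:

static struct ipr_trace_entry *claim_slot(atomic_t *idx,
                                          struct ipr_trace_entry *ring)
{
        /* each caller gets a distinct ticket, wrapped into the ring */
        return &ring[atomic_add_return(1, idx) % IPR_NUM_TRACE_ENTRIES];
}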
@@ -595,8 +603,11 @@ static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
dma_addr_t dma_addr = ipr_cmd->dma_addr;
+ int hrrq_id;
+ hrrq_id = ioarcb->cmd_pkt.hrrq_id;
memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
+ ioarcb->cmd_pkt.hrrq_id = hrrq_id;
ioarcb->data_transfer_length = 0;
ioarcb->read_data_transfer_length = 0;
ioarcb->ioadl_len = 0;
@@ -646,12 +657,16 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
* pointer to ipr command struct
**/
static
-struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
+struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_hrr_queue *hrrq)
{
- struct ipr_cmnd *ipr_cmd;
+ struct ipr_cmnd *ipr_cmd = NULL;
+
+ if (likely(!list_empty(&hrrq->hrrq_free_q))) {
+ ipr_cmd = list_entry(hrrq->hrrq_free_q.next,
+ struct ipr_cmnd, queue);
+ list_del(&ipr_cmd->queue);
+ }
- ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
- list_del(&ipr_cmd->queue);
return ipr_cmd;
}
@@ -666,7 +681,8 @@ struct ipr_cmnd *__ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
- struct ipr_cmnd *ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
+ struct ipr_cmnd *ipr_cmd =
+ __ipr_get_free_ipr_cmnd(&ioa_cfg->hrrq[IPR_INIT_HRRQ]);
ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
return ipr_cmd;
}
@@ -686,9 +702,15 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
u32 clr_ints)
{
volatile u32 int_reg;
+ int i;
/* Stop new interrupts */
- ioa_cfg->allow_interrupts = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -761,13 +783,12 @@ static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
**/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ata_queued_cmd *qc = ipr_cmd->qc;
struct ipr_sata_port *sata_port = qc->ap->private_data;
qc->err_mask |= AC_ERR_OTHER;
sata_port->ioasa.status |= ATA_BUSY;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
ata_qc_complete(qc);
}
@@ -783,14 +804,13 @@ static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
**/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
scsi_cmd->result |= (DID_ERROR << 16);
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -805,24 +825,32 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd, *temp;
+ struct ipr_hrr_queue *hrrq;
ENTER;
- list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
- list_del(&ipr_cmd->queue);
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry_safe(ipr_cmd,
+ temp, &hrrq->hrrq_pending_q, queue) {
+ list_del(&ipr_cmd->queue);
- ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
- ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+ ipr_cmd->s.ioasa.hdr.ioasc =
+ cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+ ipr_cmd->s.ioasa.hdr.ilid =
+ cpu_to_be32(IPR_DRIVER_ILID);
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- else if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ else if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
- del_timer(&ipr_cmd->timer);
- ipr_cmd->done(ipr_cmd);
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH,
+ IPR_IOASC_IOA_WAS_RESET);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->done(ipr_cmd);
+ }
+ spin_unlock(&hrrq->_lock);
}
-
LEAVE;
}
@@ -872,9 +900,7 @@ static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
void (*done) (struct ipr_cmnd *),
void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = done;
@@ -975,6 +1001,14 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
spin_lock_irq(ioa_cfg->host->host_lock);
}
+static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
+{
+ if (ioa_cfg->hrrq_num == 1)
+ return 0;
+ else
+ return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1;
+}
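ipr_get_hrrq_index() reserves queue 0 for initialization and internal commands and spreads everything else round-robin over the remaining queues. The same policy as a generic helper:

static int pick_queue(atomic_t *cursor, int nqueues)
{
        if (nqueues == 1)
                return 0;       /* only the reserved queue exists */
        return (atomic_add_return(1, cursor) % (nqueues - 1)) + 1;
}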
+
/**
* ipr_send_hcam - Send an HCAM to the adapter.
* @ioa_cfg: ioa config struct
@@ -994,9 +1028,9 @@ static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
- if (ioa_cfg->allow_cmds) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
ipr_cmd->u.hostrcb = hostrcb;
@@ -1166,14 +1200,15 @@ static int ipr_is_same_device(struct ipr_resource_entry *res,
}
/**
- * ipr_format_res_path - Format the resource path for printing.
+ * __ipr_format_res_path - Format the resource path for printing.
* @res_path: resource path
* @buf: buffer
+ * @len: length of buffer provided
*
* Return value:
* pointer to buffer
**/
-static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
+static char *__ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
int i;
char *p = buffer;
@@ -1187,6 +1222,27 @@ static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
}
/**
+ * ipr_format_res_path - Format the resource path for printing.
+ * @ioa_cfg: ioa config struct
+ * @res_path: resource path
+ * @buf: buffer
+ * @len: length of buffer provided
+ *
+ * Return value:
+ * pointer to buffer
+ **/
+static char *ipr_format_res_path(struct ipr_ioa_cfg *ioa_cfg,
+ u8 *res_path, char *buffer, int len)
+{
+ char *p = buffer;
+
+ *p = '\0';
+ p += snprintf(p, buffer + len - p, "%d/", ioa_cfg->host->host_no);
+ __ipr_format_res_path(res_path, p, len - (p - buffer));
+ return buffer;
+}
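ipr_format_res_path() prepends the host number and hands the rest of the buffer to __ipr_format_res_path(), tracking the space left by pointer arithmetic. A self-contained sketch of the idiom; it uses scnprintf(), whose return value is the number of characters actually written, which sidesteps snprintf()'s would-have-written return on truncation:

static void format_two(char *buffer, size_t len, int host_no, const char *tail)
{
        char *p = buffer;

        p += scnprintf(p, len, "%d/", host_no);
        scnprintf(p, len - (p - buffer), "%s", tail);   /* space remaining */
}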
+
+/**
* ipr_update_res_entry - Update the resource entry.
* @res: resource entry struct
* @cfgtew: config table entry wrapper struct
@@ -1226,8 +1282,8 @@ static void ipr_update_res_entry(struct ipr_resource_entry *res,
if (res->sdev && new_path)
sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(res->ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
} else {
res->flags = cfgtew->u.cfgte->flags;
if (res->flags & IPR_IS_IOA_RESOURCE)
@@ -1363,7 +1419,7 @@ static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ioasc) {
if (ioasc != IPR_IOASC_IOA_WAS_RESET)
@@ -1613,8 +1669,8 @@ static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err_separator;
ipr_err("Device %d : %s", i + 1,
- ipr_format_res_path(dev_entry->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(dev_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_log_ext_vpd(&dev_entry->vpd);
ipr_err("-----New Device Information-----\n");
@@ -1960,14 +2016,16 @@ static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
path_active_desc[i].desc, path_state_desc[j].desc,
- ipr_format_res_path(fabric->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ fabric->res_path,
+ buffer, sizeof(buffer)));
return;
}
}
ipr_err("Path state=%02X Resource Path=%s\n", path_state,
- ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(hostrcb->ioa_cfg, fabric->res_path,
+ buffer, sizeof(buffer)));
}
static const struct {
@@ -2108,18 +2166,20 @@ static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
path_status_desc[j].desc, path_type_desc[i].desc,
- ipr_format_res_path(cfg->res_path, buffer,
- sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]),
+ be32_to_cpu(cfg->wwid[1]));
return;
}
}
ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
"WWN=%08X%08X\n", cfg->type_status,
- ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
- link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
- be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+ ipr_format_res_path(hostrcb->ioa_cfg,
+ cfg->res_path, buffer, sizeof(buffer)),
+ link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+ be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
}
/**
@@ -2182,7 +2242,8 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("RAID %s Array Configuration: %s\n",
error->protection_level,
- ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, error->last_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
@@ -2203,11 +2264,12 @@ static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
ipr_err("Array Member %d:\n", i);
ipr_log_ext_vpd(&array_entry->vpd);
ipr_err("Current Location: %s\n",
- ipr_format_res_path(array_entry->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg, array_entry->res_path,
+ buffer, sizeof(buffer)));
ipr_err("Expected Location: %s\n",
- ipr_format_res_path(array_entry->expected_res_path,
- buffer, sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ array_entry->expected_res_path,
+ buffer, sizeof(buffer)));
ipr_err_separator;
}
@@ -2409,7 +2471,7 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
list_del(&hostrcb->queue);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (!ioasc) {
ipr_handle_log_data(ioa_cfg, hostrcb);
@@ -2491,36 +2553,6 @@ static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
}
/**
- * ipr_reset_reload - Reset/Reload the IOA
- * @ioa_cfg: ioa config struct
- * @shutdown_type: shutdown type
- *
- * This function resets the adapter and re-initializes it.
- * This function assumes that all new host commands have been stopped.
- * Return value:
- * SUCCESS / FAILED
- **/
-static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
- enum ipr_shutdown_type shutdown_type)
-{
- if (!ioa_cfg->in_reset_reload)
- ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
- spin_lock_irq(ioa_cfg->host->host_lock);
-
- /* If we got hit with a host reset while we were already resetting
- the adapter for some reason, and the reset failed. */
- if (ioa_cfg->ioa_is_dead) {
- ipr_trace;
- return FAILED;
- }
-
- return SUCCESS;
-}
-
-/**
* ipr_find_ses_entry - Find matching SES in SES table
* @res: resource entry struct of SES
*
@@ -3153,7 +3185,8 @@ static void ipr_worker_thread(struct work_struct *work)
restart:
do {
did_work = 0;
- if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds ||
+ !ioa_cfg->allow_ml_add_del) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
return;
}
@@ -3401,7 +3434,7 @@ static ssize_t ipr_show_adapter_state(struct device *dev,
int len;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
len = snprintf(buf, PAGE_SIZE, "offline\n");
else
len = snprintf(buf, PAGE_SIZE, "online\n");
@@ -3427,14 +3460,20 @@ static ssize_t ipr_store_adapter_state(struct device *dev,
struct Scsi_Host *shost = class_to_shost(dev);
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
unsigned long lock_flags;
- int result = count;
+ int result = count, i;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
- ioa_cfg->ioa_is_dead = 0;
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead &&
+ !strncmp(buf, "online", 6)) {
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ioa_cfg->reset_retries = 0;
ioa_cfg->in_ioa_bringdown = 0;
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
@@ -3494,6 +3533,95 @@ static struct device_attribute ipr_ioa_reset_attr = {
.store = ipr_store_reset_adapter
};
+static int ipr_iopoll(struct blk_iopoll *iop, int budget);
+/**
+ * ipr_show_iopoll_weight - Show ipr polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_iopoll_weight(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long lock_flags = 0;
+ int len;
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->iopoll_weight);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return len;
+}
+
+/**
+ * ipr_store_iopoll_weight - Change the adapter's polling mode
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ * number of bytes consumed on success / error code on failure
+ **/
+static ssize_t ipr_store_iopoll_weight(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
+ unsigned long user_iopoll_weight;
+ unsigned long lock_flags = 0;
+ int i;
+
+ if (!ioa_cfg->sis64) {
+ dev_info(&ioa_cfg->pdev->dev, "blk-iopoll not supported on this adapter\n");
+ return -EINVAL;
+ }
+ if (kstrtoul(buf, 10, &user_iopoll_weight))
+ return -EINVAL;
+
+ if (user_iopoll_weight > 256) {
+ dev_info(&ioa_cfg->pdev->dev, "Invalid blk-iopoll weight. It must be less than 256\n");
+ return -EINVAL;
+ }
+
+ if (user_iopoll_weight == ioa_cfg->iopoll_weight) {
+ dev_info(&ioa_cfg->pdev->dev, "Current blk-iopoll weight has the same weight\n");
+ return strlen(buf);
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
+ spin_lock_irqsave(shost->host_lock, lock_flags);
+ ioa_cfg->iopoll_weight = user_iopoll_weight;
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
+
+ return strlen(buf);
+}
+
+static struct device_attribute ipr_iopoll_weight_attr = {
+ .attr = {
+ .name = "iopoll_weight",
+ .mode = S_IRUGO | S_IWUSR,
+ },
+ .show = ipr_show_iopoll_weight,
+ .store = ipr_store_iopoll_weight
+};
+
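/*
 * Sketch (not part of this patch): hedged usage example for the new
 * iopoll_weight attribute. The path follows the usual scsi_host sysfs
 * layout, but the host number is hypothetical, and "64" is just a sample
 * weight (0 disables polling; values above 256 are rejected by the store
 * handler above).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/class/scsi_host/host0/iopoll_weight";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "64", 2) != 2)	/* kstrtoul() parses the string */
		perror("write");
	close(fd);
	return 0;
}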
/**
* ipr_alloc_ucode_buffer - Allocates a microcode download buffer
* @buf_len: buffer length
@@ -3862,6 +3990,7 @@ static struct device_attribute *ipr_ioa_attrs[] = {
&ipr_ioa_reset_attr,
&ipr_update_fw_attr,
&ipr_ioa_fw_type_attr,
+ &ipr_iopoll_weight_attr,
NULL,
};
@@ -4014,7 +4143,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
ioa_cfg->dump = dump;
ioa_cfg->sdt_state = WAIT_FOR_DUMP;
- if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead && !ioa_cfg->dump_taken) {
ioa_cfg->dump_taken = 1;
schedule_work(&ioa_cfg->work_q);
}
@@ -4227,8 +4356,8 @@ static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribut
res = (struct ipr_resource_entry *)sdev->hostdata;
if (res && ioa_cfg->sis64)
len = snprintf(buf, PAGE_SIZE, "%s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ __ipr_format_res_path(res->res_path, buffer,
+ sizeof(buffer)));
else if (res)
len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
res->bus, res->target, res->lun);
@@ -4556,8 +4685,8 @@ static int ipr_slave_configure(struct scsi_device *sdev)
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
if (ioa_cfg->sis64)
sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
- ipr_format_res_path(res->res_path, buffer,
- sizeof(buffer)));
+ ipr_format_res_path(ioa_cfg,
+ res->res_path, buffer, sizeof(buffer)));
return 0;
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -4638,22 +4767,18 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
-/**
- * ipr_eh_host_reset - Reset the host adapter
- * @scsi_cmd: scsi command struct
- *
- * Return value:
- * SUCCESS / FAILED
- **/
-static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
+static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ unsigned long lock_flags = 0;
+ int rc = SUCCESS;
ENTER;
- ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->in_reset_reload) {
+ ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
dev_err(&ioa_cfg->pdev->dev,
"Adapter being reset as a result of error recovery.\n");
@@ -4661,20 +4786,19 @@ static int __ipr_eh_host_reset(struct scsi_cmnd *scsi_cmd)
ioa_cfg->sdt_state = GET_DUMP;
}
- rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
-
- LEAVE;
- return rc;
-}
-
-static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
-{
- int rc;
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- spin_lock_irq(cmd->device->host->host_lock);
- rc = __ipr_eh_host_reset(cmd);
- spin_unlock_irq(cmd->device->host->host_lock);
+ /* If a host reset arrived while we were already resetting the
+ adapter, the earlier reset may have failed. */
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
+ ipr_trace;
+ rc = FAILED;
+ }
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ LEAVE;
return rc;
}
@@ -4723,7 +4847,7 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
@@ -4793,6 +4917,7 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
struct ipr_resource_entry *res;
struct ata_port *ap;
int rc = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
@@ -4808,22 +4933,26 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
*/
if (ioa_cfg->in_reset_reload)
return FAILED;
- if (ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- if (ipr_cmd->scsi_cmd)
- ipr_cmd->done = ipr_scsi_eh_done;
- if (ipr_cmd->qc)
- ipr_cmd->done = ipr_sata_eh_done;
- if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
- ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
- ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
+ if (ipr_cmd->scsi_cmd)
+ ipr_cmd->done = ipr_scsi_eh_done;
+ if (ipr_cmd->qc)
+ ipr_cmd->done = ipr_sata_eh_done;
+ if (ipr_cmd->qc &&
+ !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
+ ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
+ ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
+ }
}
}
+ spin_unlock(&hrrq->_lock);
}
-
res->resetting_device = 1;
scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
@@ -4833,11 +4962,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
ata_std_error_handler(ap);
spin_lock_irq(scsi_cmd->device->host->host_lock);
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
- rc = -EIO;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd,
+ &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->ioarcb.res_handle ==
+ res->res_handle) {
+ rc = -EIO;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
} else
rc = ipr_device_reset(ioa_cfg, res);
@@ -4890,7 +5025,7 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
else
ipr_cmd->sibling->done(ipr_cmd->sibling);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
LEAVE;
}
@@ -4951,6 +5086,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
struct ipr_cmd_pkt *cmd_pkt;
u32 ioasc, int_reg;
int op_found = 0;
+ struct ipr_hrr_queue *hrrq;
ENTER;
ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
@@ -4960,7 +5096,8 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
* This will force the mid-layer to call ipr_eh_host_reset,
* which will then go to sleep and wait for the reset to complete
*/
- if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
+ if (ioa_cfg->in_reset_reload ||
+ ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return FAILED;
if (!res)
return FAILED;
@@ -4975,12 +5112,16 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
if (!ipr_is_gscsi(res))
return FAILED;
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->scsi_cmd == scsi_cmd) {
- ipr_cmd->done = ipr_scsi_eh_done;
- op_found = 1;
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->scsi_cmd == scsi_cmd) {
+ ipr_cmd->done = ipr_scsi_eh_done;
+ op_found = 1;
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
if (!op_found)
@@ -5007,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -5099,6 +5240,9 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
} else {
if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ else if (int_reg & IPR_PCII_NO_HOST_RRQ)
+ dev_err(&ioa_cfg->pdev->dev,
+ "No Host RRQ. 0x%08X\n", int_reg);
else
dev_err(&ioa_cfg->pdev->dev,
"Permanent IOA failure. 0x%08X\n", int_reg);
@@ -5121,10 +5265,10 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* none
**/
-static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg, u16 number)
{
ioa_cfg->errors_logged++;
- dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+ dev_err(&ioa_cfg->pdev->dev, "%s %d\n", msg, number);
if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
ioa_cfg->sdt_state = GET_DUMP;
@@ -5132,6 +5276,83 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
}
+static int ipr_process_hrrq(struct ipr_hrr_queue *hrr_queue, int budget,
+ struct list_head *doneq)
+{
+ u32 ioasc;
+ u16 cmd_index;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_ioa_cfg *ioa_cfg = hrr_queue->ioa_cfg;
+ int num_hrrq = 0;
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrr_queue->allow_interrupts)
+ return 0;
+
+ while ((be32_to_cpu(*hrr_queue->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrr_queue->toggle_bit) {
+
+ cmd_index = (be32_to_cpu(*hrr_queue->hrrq_curr) &
+ IPR_HRRQ_REQ_RESP_HANDLE_MASK) >>
+ IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
+
+ if (unlikely(cmd_index > hrr_queue->max_cmd_id ||
+ cmd_index < hrr_queue->min_cmd_id)) {
+ ipr_isr_eh(ioa_cfg,
+ "Invalid response handle from IOA: ",
+ cmd_index);
+ break;
+ }
+
+ ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
+ ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+
+ ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
+
+ list_move_tail(&ipr_cmd->queue, doneq);
+
+ if (hrr_queue->hrrq_curr < hrr_queue->hrrq_end) {
+ hrr_queue->hrrq_curr++;
+ } else {
+ hrr_queue->hrrq_curr = hrr_queue->hrrq_start;
+ hrr_queue->toggle_bit ^= 1u;
+ }
+ num_hrrq++;
+ if (budget > 0 && num_hrrq >= budget)
+ break;
+ }
+
+ return num_hrrq;
+}
+
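/*
 * Sketch (not part of this patch): a self-contained model of the toggle-bit
 * protocol ipr_process_hrrq() walks above, with made-up sizes. An HRRQ
 * entry is "new" only while its toggle bit matches the host's expected
 * value; the host flips its expectation on wrap-around, so entries left
 * over from the previous lap never match.
 */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	4
#define TOGGLE_BIT	0x1u	/* stand-in for IPR_HRRQ_TOGGLE_BIT */

static unsigned int consume(const uint32_t *ring, unsigned int *curr,
			    uint32_t *toggle, int budget)
{
	unsigned int done = 0;

	while ((ring[*curr] & TOGGLE_BIT) == *toggle) {
		printf("entry %u ready\n", *curr);
		if (*curr < RING_SIZE - 1) {
			(*curr)++;
		} else {
			*curr = 0;	/* wrapped around ... */
			*toggle ^= 1u;	/* ... so expect the other bit */
		}
		done++;
		if (budget > 0 && done >= (unsigned int)budget)
			break;
	}
	return done;
}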
+static int ipr_iopoll(struct blk_iopoll *iop, int budget)
+{
+ struct ipr_ioa_cfg *ioa_cfg;
+ struct ipr_hrr_queue *hrrq;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ unsigned long hrrq_flags;
+ int completed_ops;
+ LIST_HEAD(doneq);
+
+ hrrq = container_of(iop, struct ipr_hrr_queue, iopoll);
+ ioa_cfg = hrrq->ioa_cfg;
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ completed_ops = ipr_process_hrrq(hrrq, budget, &doneq);
+
+ if (completed_ops < budget)
+ blk_iopoll_complete(iop);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
+
+ return completed_ops;
+}
+
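/*
 * Sketch (not part of this patch): the blk-iopoll contract ipr_iopoll()
 * follows, reduced to its skeleton. process_completions() is a hypothetical
 * stand-in: a poll callback may consume at most 'budget' completions and
 * calls blk_iopoll_complete() only when it drained below budget, which
 * re-arms the interrupt-driven path; otherwise the softirq reschedules it.
 */
static int sketch_poll(struct blk_iopoll *iop, int budget)
{
	int done = process_completions(iop, budget);	/* hypothetical */

	if (done < budget)
		blk_iopoll_complete(iop);	/* drained: back to IRQ mode */
	return done;
}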
/**
* ipr_isr - Interrupt service routine
* @irq: irq number
@@ -5142,78 +5363,48 @@ static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
**/
static irqreturn_t ipr_isr(int irq, void *devp)
{
- struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
- unsigned long lock_flags = 0;
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
u32 int_reg = 0;
- u32 ioasc;
- u16 cmd_index;
int num_hrrq = 0;
int irq_none = 0;
struct ipr_cmnd *ipr_cmd, *temp;
irqreturn_t rc = IRQ_NONE;
LIST_HEAD(doneq);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/* If interrupts are disabled, ignore the interrupt */
- if (!ioa_cfg->allow_interrupts) {
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return IRQ_NONE;
}
while (1) {
- ipr_cmd = NULL;
-
- while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
- ioa_cfg->toggle_bit) {
-
- cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
- IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
-
- if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
- ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
- rc = IRQ_HANDLED;
- goto unlock_out;
- }
-
- ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
-
- ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
-
- ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
-
- list_move_tail(&ipr_cmd->queue, &doneq);
-
- rc = IRQ_HANDLED;
-
- if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
- ioa_cfg->hrrq_curr++;
- } else {
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit ^= 1u;
- }
- }
+ if (ipr_process_hrrq(hrrq, -1, &doneq)) {
+ rc = IRQ_HANDLED;
- if (ipr_cmd && !ioa_cfg->clear_isr)
- break;
+ if (!ioa_cfg->clear_isr)
+ break;
- if (ipr_cmd != NULL) {
/* Clear the PCI interrupt */
num_hrrq = 0;
do {
- writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+ writel(IPR_PCII_HRRQ_UPDATED,
+ ioa_cfg->regs.clr_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
- num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+ num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
} else if (rc == IRQ_NONE && irq_none == 0) {
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
irq_none++;
} else if (num_hrrq == IPR_MAX_HRRQ_RETRIES &&
int_reg & IPR_PCII_HRRQ_UPDATED) {
- ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+ ipr_isr_eh(ioa_cfg,
+ "Error clearing HRRQ: ", num_hrrq);
rc = IRQ_HANDLED;
- goto unlock_out;
+ break;
} else
break;
}
@@ -5221,14 +5412,64 @@ static irqreturn_t ipr_isr(int irq, void *devp)
if (unlikely(rc == IRQ_NONE))
rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
-unlock_out:
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
list_del(&ipr_cmd->queue);
del_timer(&ipr_cmd->timer);
ipr_cmd->fast_done(ipr_cmd);
}
+ return rc;
+}
+
+/**
+ * ipr_isr_mhrrq - Interrupt service routine
+ * @irq: irq number
+ * @devp: pointer to ioa config struct
+ *
+ * Return value:
+ * IRQ_NONE / IRQ_HANDLED
+ **/
+static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
+{
+ struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
+ struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
+ unsigned long hrrq_flags = 0;
+ struct ipr_cmnd *ipr_cmd, *temp;
+ irqreturn_t rc = IRQ_NONE;
+ LIST_HEAD(doneq);
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+
+ /* If interrupts are disabled, ignore the interrupt */
+ if (!hrrq->allow_interrupts) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_NONE;
+ }
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit) {
+ if (!blk_iopoll_sched_prep(&hrrq->iopoll))
+ blk_iopoll_sched(&hrrq->iopoll);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return IRQ_HANDLED;
+ }
+ } else {
+ if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
+ hrrq->toggle_bit)
+ if (ipr_process_hrrq(hrrq, -1, &doneq))
+ rc = IRQ_HANDLED;
+ }
+
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
+ list_del(&ipr_cmd->queue);
+ del_timer(&ipr_cmd->timer);
+ ipr_cmd->fast_done(ipr_cmd);
+ }
return rc;
}
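/*
 * Sketch (not part of this patch): the interrupt-to-iopoll handoff used in
 * ipr_isr_mhrrq() above, in isolation. blk_iopoll_sched_prep() returns
 * nonzero when the instance is disabled or already scheduled, so a burst of
 * interrupts queues the softirq at most once while the poll loop runs.
 */
static void sketch_irq_handoff(struct blk_iopoll *iop)
{
	if (!blk_iopoll_sched_prep(iop))
		blk_iopoll_sched(iop);	/* poll callback runs in softirq */
}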
@@ -5388,7 +5629,6 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
{
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
@@ -5406,7 +5646,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
res->in_erp = 0;
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5790,7 +6030,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
}
scsi_dma_unmap(ipr_cmd->scsi_cmd);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
}
@@ -5809,21 +6049,21 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
- unsigned long lock_flags;
+ unsigned long hrrq_flags;
scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
scsi_dma_unmap(scsi_cmd);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
} else {
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags);
ipr_erp_start(ioa_cfg, ipr_cmd);
- spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags);
}
}
@@ -5846,22 +6086,34 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
struct ipr_resource_entry *res;
struct ipr_ioarcb *ioarcb;
struct ipr_cmnd *ipr_cmd;
- unsigned long lock_flags;
+ unsigned long hrrq_flags, lock_flags;
int rc;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
- spin_lock_irqsave(shost->host_lock, lock_flags);
scsi_cmd->result = (DID_OK << 16);
res = scsi_cmd->device->hostdata;
+ if (res && ipr_is_gata(res) && res->sata_port) {
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ return rc;
+ }
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
/*
* We are currently blocking all devices due to a host reset
* We have told the host to stop giving us new requests, but
* ERP ops don't count. FIXME
*/
- if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(!hrrq->allow_cmds && !hrrq->ioa_is_dead && !hrrq->removing_ioa)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return SCSI_MLQUEUE_HOST_BUSY;
}
@@ -5869,19 +6121,17 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
* FIXME - Create scsi_set_host_offline interface
* and the ioa_is_dead check can be removed
*/
- if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead || hrrq->removing_ioa || !res)) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
goto err_nodev;
}
- if (ipr_is_gata(res) && res->sata_port) {
- rc = ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
- return rc;
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
-
- ipr_cmd = __ipr_get_free_ipr_cmnd(ioa_cfg);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
ipr_init_ipr_cmnd(ipr_cmd, ipr_scsi_done);
ioarcb = &ipr_cmd->ioarcb;
@@ -5902,26 +6152,27 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
}
if (scsi_cmd->cmnd[0] >= 0xC0 &&
- (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
+ (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE)) {
ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ }
if (ioa_cfg->sis64)
rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
else
rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
- spin_lock_irqsave(shost->host_lock, lock_flags);
- if (unlikely(rc || (!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
+ if (unlikely(rc || (!hrrq->allow_cmds && !hrrq->ioa_is_dead))) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
if (!rc)
scsi_dma_unmap(scsi_cmd);
return SCSI_MLQUEUE_HOST_BUSY;
}
- if (unlikely(ioa_cfg->ioa_is_dead)) {
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
scsi_dma_unmap(scsi_cmd);
goto err_nodev;
}
@@ -5931,18 +6182,18 @@ static int ipr_queuecommand(struct Scsi_Host *shost,
ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
res->needs_sync_complete = 0;
}
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_pending_q);
ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
ipr_send_command(ipr_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
err_nodev:
- spin_lock_irqsave(shost->host_lock, lock_flags);
+ spin_lock_irqsave(hrrq->lock, hrrq_flags);
memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
scsi_cmd->result = (DID_NO_CONNECT << 16);
scsi_cmd->scsi_done(scsi_cmd);
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
+ spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
return 0;
}
@@ -6040,7 +6291,7 @@ static void ipr_ata_phy_reset(struct ata_port *ap)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
goto out_unlock;
rc = ipr_device_reset(ioa_cfg, res);
@@ -6071,6 +6322,7 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
struct ipr_sata_port *sata_port = qc->ap->private_data;
struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
unsigned long flags;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
@@ -6080,11 +6332,15 @@ static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
}
- list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
- if (ipr_cmd->qc == qc) {
- ipr_device_reset(ioa_cfg, sata_port->res);
- break;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (ipr_cmd->qc == qc) {
+ ipr_device_reset(ioa_cfg, sata_port->res);
+ break;
+ }
}
+ spin_unlock(&hrrq->_lock);
}
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -6133,6 +6389,7 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
struct ipr_resource_entry *res = sata_port->res;
u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
+ spin_lock(&ipr_cmd->hrrq->_lock);
if (ipr_cmd->ioa_cfg->sis64)
memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
sizeof(struct ipr_ioasa_gata));
@@ -6148,7 +6405,8 @@ static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
else
qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
ata_qc_complete(qc);
}
@@ -6244,6 +6502,48 @@ static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
}
/**
+ * ipr_qc_defer - Get a free ipr_cmd
+ * @qc: queued command
+ *
+ * Return value:
+ * 0 if success
+ **/
+static int ipr_qc_defer(struct ata_queued_cmd *qc)
+{
+ struct ata_port *ap = qc->ap;
+ struct ipr_sata_port *sata_port = ap->private_data;
+ struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
+ struct ipr_cmnd *ipr_cmd;
+ struct ipr_hrr_queue *hrrq;
+ int hrrq_id;
+
+ hrrq_id = ipr_get_hrrq_index(ioa_cfg);
+ hrrq = &ioa_cfg->hrrq[hrrq_id];
+
+ qc->lldd_task = NULL;
+ spin_lock(&hrrq->_lock);
+ if (unlikely(hrrq->ioa_is_dead)) {
+ spin_unlock(&hrrq->_lock);
+ return 0;
+ }
+
+ if (unlikely(!hrrq->allow_cmds)) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ ipr_cmd = __ipr_get_free_ipr_cmnd(hrrq);
+ if (ipr_cmd == NULL) {
+ spin_unlock(&hrrq->_lock);
+ return ATA_DEFER_LINK;
+ }
+
+ qc->lldd_task = ipr_cmd;
+ spin_unlock(&hrrq->_lock);
+ return 0;
+}
+
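/*
 * Annotation (not part of this patch): the reserve-then-consume split above
 * follows the usual libata ->qc_defer contract. The command block is
 * reserved early, under only the hrrq lock, and stashed in qc->lldd_task
 * for ipr_qc_issue() to pick up; returning ATA_DEFER_LINK asks libata to
 * retry the command later, while the ioa_is_dead case returns 0 so the
 * issue path can fail the command itself.
 */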
+/**
* ipr_qc_issue - Issue a SATA qc to a device
* @qc: queued command
*
@@ -6260,10 +6560,23 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
struct ipr_ioarcb *ioarcb;
struct ipr_ioarcb_ata_regs *regs;
- if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
+ if (qc->lldd_task == NULL)
+ ipr_qc_defer(qc);
+
+ ipr_cmd = qc->lldd_task;
+ if (ipr_cmd == NULL)
return AC_ERR_SYSTEM;
- ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+ qc->lldd_task = NULL;
+ spin_lock(&ipr_cmd->hrrq->_lock);
+ if (unlikely(!ipr_cmd->hrrq->allow_cmds ||
+ ipr_cmd->hrrq->ioa_is_dead)) {
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
+ return AC_ERR_SYSTEM;
+ }
+
+ ipr_init_ipr_cmnd(ipr_cmd, ipr_lock_and_done);
ioarcb = &ipr_cmd->ioarcb;
if (ioa_cfg->sis64) {
@@ -6275,7 +6588,7 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
memset(regs, 0, sizeof(*regs));
ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->qc = qc;
ipr_cmd->done = ipr_sata_done;
ipr_cmd->ioarcb.res_handle = res->res_handle;
@@ -6315,10 +6628,12 @@ static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
default:
WARN_ON(1);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return AC_ERR_INVALID;
}
ipr_send_command(ipr_cmd);
+ spin_unlock(&ipr_cmd->hrrq->_lock);
return 0;
}
@@ -6357,6 +6672,7 @@ static struct ata_port_operations ipr_sata_ops = {
.hardreset = ipr_sata_reset,
.post_internal_cmd = ipr_ata_post_internal,
.qc_prep = ata_noop_qc_prep,
+ .qc_defer = ipr_qc_defer,
.qc_issue = ipr_qc_issue,
.qc_fill_rtf = ipr_qc_fill_rtf,
.port_start = ata_sas_port_start,
@@ -6425,14 +6741,17 @@ static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
ENTER;
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ ipr_trace;
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
+
ioa_cfg->in_reset_reload = 0;
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
-
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -6454,11 +6773,16 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_resource_entry *res;
struct ipr_hostrcb *hostrcb, *temp;
- int i = 0;
+ int i = 0, j;
ENTER;
ioa_cfg->in_reset_reload = 0;
- ioa_cfg->allow_cmds = 1;
+ for (j = 0; j < ioa_cfg->hrrq_num; j++) {
+ spin_lock(&ioa_cfg->hrrq[j]._lock);
+ ioa_cfg->hrrq[j].allow_cmds = 1;
+ spin_unlock(&ioa_cfg->hrrq[j]._lock);
+ }
+ wmb();
ioa_cfg->reset_cmd = NULL;
ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
@@ -6482,14 +6806,14 @@ static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
ioa_cfg->reset_retries = 0;
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
wake_up_all(&ioa_cfg->reset_wait_q);
spin_unlock(ioa_cfg->host->host_lock);
scsi_unblock_requests(ioa_cfg->host);
spin_lock(ioa_cfg->host->host_lock);
- if (!ioa_cfg->allow_cmds)
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
scsi_block_requests(ioa_cfg->host);
LEAVE;
@@ -6560,9 +6884,11 @@ static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
if (!ioa_cfg->sis64)
ipr_cmd->job_step = ipr_set_supported_devs;
+ LEAVE;
return IPR_RC_JOB_RETURN;
}
+ LEAVE;
return IPR_RC_JOB_CONTINUE;
}
@@ -6820,7 +7146,7 @@ static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
return IPR_RC_JOB_RETURN;
}
@@ -7278,46 +7604,71 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
{
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+ struct ipr_hrr_queue *hrrq;
ENTER;
+ ipr_cmd->job_step = ipr_ioafp_std_inquiry;
dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
- ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
- ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+ if (ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num) {
+ hrrq = &ioa_cfg->hrrq[ioa_cfg->identify_hrrq_index];
- ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
- if (ioa_cfg->sis64)
- ioarcb->cmd_pkt.cdb[1] = 0x1;
- ioarcb->cmd_pkt.cdb[2] =
- ((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
- ioarcb->cmd_pkt.cdb[3] =
- ((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
- ioarcb->cmd_pkt.cdb[4] =
- ((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[5] =
- ((u64) ioa_cfg->host_rrq_dma) & 0xff;
- ioarcb->cmd_pkt.cdb[7] =
- ((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
- ioarcb->cmd_pkt.cdb[8] =
- (sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
+ ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
+ ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
- if (ioa_cfg->sis64) {
- ioarcb->cmd_pkt.cdb[10] =
- ((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
- ioarcb->cmd_pkt.cdb[11] =
- ((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
- ioarcb->cmd_pkt.cdb[12] =
- ((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
- ioarcb->cmd_pkt.cdb[13] =
- ((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
- }
+ ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+ if (ioa_cfg->sis64)
+ ioarcb->cmd_pkt.cdb[1] = 0x1;
- ipr_cmd->job_step = ipr_ioafp_std_inquiry;
+ if (ioa_cfg->nvectors == 1)
+ ioarcb->cmd_pkt.cdb[1] &= ~IPR_ID_HRRQ_SELE_ENABLE;
+ else
+ ioarcb->cmd_pkt.cdb[1] |= IPR_ID_HRRQ_SELE_ENABLE;
+
+ ioarcb->cmd_pkt.cdb[2] =
+ ((u64) hrrq->host_rrq_dma >> 24) & 0xff;
+ ioarcb->cmd_pkt.cdb[3] =
+ ((u64) hrrq->host_rrq_dma >> 16) & 0xff;
+ ioarcb->cmd_pkt.cdb[4] =
+ ((u64) hrrq->host_rrq_dma >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[5] =
+ ((u64) hrrq->host_rrq_dma) & 0xff;
+ ioarcb->cmd_pkt.cdb[7] =
+ ((sizeof(u32) * hrrq->size) >> 8) & 0xff;
+ ioarcb->cmd_pkt.cdb[8] =
+ (sizeof(u32) * hrrq->size) & 0xff;
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[9] =
+ ioa_cfg->identify_hrrq_index;
- ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
+ if (ioa_cfg->sis64) {
+ ioarcb->cmd_pkt.cdb[10] =
+ ((u64) hrrq->host_rrq_dma >> 56) & 0xff;
+ ioarcb->cmd_pkt.cdb[11] =
+ ((u64) hrrq->host_rrq_dma >> 48) & 0xff;
+ ioarcb->cmd_pkt.cdb[12] =
+ ((u64) hrrq->host_rrq_dma >> 40) & 0xff;
+ ioarcb->cmd_pkt.cdb[13] =
+ ((u64) hrrq->host_rrq_dma >> 32) & 0xff;
+ }
+
+ if (ioarcb->cmd_pkt.cdb[1] & IPR_ID_HRRQ_SELE_ENABLE)
+ ioarcb->cmd_pkt.cdb[14] =
+ ioa_cfg->identify_hrrq_index;
+
+ ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
+ IPR_INTERNAL_TIMEOUT);
+
+ if (++ioa_cfg->identify_hrrq_index < ioa_cfg->hrrq_num)
+ ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+
+ LEAVE;
+ return IPR_RC_JOB_RETURN;
+ }
LEAVE;
- return IPR_RC_JOB_RETURN;
+ return IPR_RC_JOB_CONTINUE;
}
/**
@@ -7365,7 +7716,9 @@ static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
unsigned long timeout)
{
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ ENTER;
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.data = (unsigned long) ipr_cmd;
@@ -7383,13 +7736,26 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
**/
static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
{
- memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
+ struct ipr_hrr_queue *hrrq;
- /* Initialize Host RRQ pointers */
- ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
- ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
- ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
- ioa_cfg->toggle_bit = 1;
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock(&hrrq->_lock);
+ memset(hrrq->host_rrq, 0, sizeof(u32) * hrrq->size);
+
+ /* Initialize Host RRQ pointers */
+ hrrq->hrrq_start = hrrq->host_rrq;
+ hrrq->hrrq_end = &hrrq->host_rrq[hrrq->size - 1];
+ hrrq->hrrq_curr = hrrq->hrrq_start;
+ hrrq->toggle_bit = 1;
+ spin_unlock(&hrrq->_lock);
+ }
+ wmb();
+
+ ioa_cfg->identify_hrrq_index = 0;
+ if (ioa_cfg->hrrq_num == 1)
+ atomic_set(&ioa_cfg->hrrq_index, 0);
+ else
+ atomic_set(&ioa_cfg->hrrq_index, 1);
/* Zero out config table */
memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
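/*
 * Sketch (not part of this patch): ipr_get_hrrq_index() is referenced by
 * the queuecommand and qc_defer paths but sits outside this hunk. Below is
 * a plausible round-robin helper consistent with the initialization above,
 * where HRRQ 0 is reserved for internal commands whenever more than one
 * queue exists; treat the body as an assumption, not the driver's actual
 * implementation.
 */
static int sketch_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
{
	if (ioa_cfg->hrrq_num == 1)
		return 0;
	/* rotate over queues 1..hrrq_num-1, skipping the internal queue */
	return (atomic_add_return(1, &ioa_cfg->hrrq_index) %
		(ioa_cfg->hrrq_num - 1)) + 1;
}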
@@ -7446,7 +7812,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
return IPR_RC_JOB_RETURN;
}
@@ -7466,12 +7833,18 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
volatile u32 int_reg;
volatile u64 maskval;
+ int i;
ENTER;
ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
ipr_init_ioa_mem(ioa_cfg);
- ioa_cfg->allow_interrupts = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
@@ -7511,7 +7884,7 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
LEAVE;
return IPR_RC_JOB_RETURN;
@@ -8030,7 +8403,8 @@ static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
int rc = IPR_RC_JOB_CONTINUE;
ENTER;
- if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
+ if (shutdown_type != IPR_SHUTDOWN_NONE &&
+ !ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
@@ -8078,7 +8452,8 @@ static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
* We are doing nested adapter resets and this is
* not the current reset job.
*/
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue,
+ &ipr_cmd->hrrq->hrrq_free_q);
return;
}
@@ -8113,10 +8488,17 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
struct ipr_cmnd *ipr_cmd;
+ int i;
ioa_cfg->in_reset_reload = 1;
- ioa_cfg->allow_cmds = 0;
- scsi_block_requests(ioa_cfg->host);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+ scsi_block_requests(ioa_cfg->host);
ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
ioa_cfg->reset_cmd = ipr_cmd;
@@ -8141,7 +8523,9 @@ static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
enum ipr_shutdown_type shutdown_type)
{
- if (ioa_cfg->ioa_is_dead)
+ int i;
+
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead)
return;
if (ioa_cfg->in_reset_reload) {
@@ -8156,7 +8540,12 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
"IOA taken offline - error recovery failed\n");
ioa_cfg->reset_retries = 0;
- ioa_cfg->ioa_is_dead = 1;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].ioa_is_dead = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
if (ioa_cfg->in_ioa_bringdown) {
ioa_cfg->reset_cmd = NULL;
@@ -8164,9 +8553,11 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
ipr_fail_all_ops(ioa_cfg);
wake_up_all(&ioa_cfg->reset_wait_q);
- spin_unlock_irq(ioa_cfg->host->host_lock);
- scsi_unblock_requests(ioa_cfg->host);
- spin_lock_irq(ioa_cfg->host->host_lock);
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+ spin_unlock_irq(ioa_cfg->host->host_lock);
+ scsi_unblock_requests(ioa_cfg->host);
+ spin_lock_irq(ioa_cfg->host->host_lock);
+ }
return;
} else {
ioa_cfg->in_ioa_bringdown = 1;
@@ -8188,9 +8579,17 @@ static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
*/
static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
{
+ struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+ int i;
+
/* Disallow new interrupts, avoid loop */
- ipr_cmd->ioa_cfg->allow_interrupts = 0;
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_interrupts = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
ipr_cmd->done = ipr_reset_ioa_job;
return IPR_RC_JOB_RETURN;
}
@@ -8247,13 +8646,19 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
{
unsigned long flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
ioa_cfg->sdt_state = ABORT_DUMP;
ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
ioa_cfg->in_ioa_bringdown = 1;
- ioa_cfg->allow_cmds = 0;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].allow_cmds = 0;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
}
@@ -8296,7 +8701,7 @@ static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
* Return value:
* 0 on success / -EIO on failure
**/
-static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
{
int rc = 0;
unsigned long host_lock_flags = 0;
@@ -8310,12 +8715,11 @@ static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
} else
_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
IPR_SHUTDOWN_NONE);
-
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
- if (ioa_cfg->ioa_is_dead) {
+ if (ioa_cfg->hrrq[IPR_INIT_HRRQ].ioa_is_dead) {
rc = -EIO;
} else if (ipr_invalid_adapter(ioa_cfg)) {
if (!ipr_testmode)
@@ -8376,8 +8780,13 @@ static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
ipr_free_cmd_blks(ioa_cfg);
- pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+
+ for (i = 0; i < ioa_cfg->hrrq_num; i++)
+ pci_free_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+
pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
@@ -8408,8 +8817,23 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
struct pci_dev *pdev = ioa_cfg->pdev;
ENTER;
- free_irq(pdev->irq, ioa_cfg);
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ int i;
+ for (i = 0; i < ioa_cfg->nvectors; i++)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ } else
+ free_irq(pdev->irq, &ioa_cfg->hrrq[0]);
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ pci_disable_msi(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ pci_disable_msix(pdev);
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ }
+
iounmap(ioa_cfg->hdw_dma_regs);
pci_release_regions(pdev);
ipr_free_mem(ioa_cfg);
@@ -8425,12 +8849,12 @@ static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* 0 on success / -ENOMEM on allocation failure
**/
-static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_cmnd *ipr_cmd;
struct ipr_ioarcb *ioarcb;
dma_addr_t dma_addr;
- int i;
+ int i, entries_each_hrrq, hrrq_id = 0;
ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
sizeof(struct ipr_cmnd), 512, 0);
@@ -8446,6 +8870,41 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
return -ENOMEM;
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ if (ioa_cfg->hrrq_num > 1) {
+ if (i == 0) {
+ entries_each_hrrq = IPR_NUM_INTERNAL_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (entries_each_hrrq - 1);
+ } else {
+ entries_each_hrrq =
+ IPR_NUM_BASE_CMD_BLKS /
+ (ioa_cfg->hrrq_num - 1);
+ ioa_cfg->hrrq[i].min_cmd_id =
+ IPR_NUM_INTERNAL_CMD_BLKS +
+ (i - 1) * entries_each_hrrq;
+ ioa_cfg->hrrq[i].max_cmd_id =
+ (IPR_NUM_INTERNAL_CMD_BLKS +
+ i * entries_each_hrrq - 1);
+ }
+ } else {
+ entries_each_hrrq = IPR_NUM_CMD_BLKS;
+ ioa_cfg->hrrq[i].min_cmd_id = 0;
+ ioa_cfg->hrrq[i].max_cmd_id = (entries_each_hrrq - 1);
+ }
+ ioa_cfg->hrrq[i].size = entries_each_hrrq;
+ }
+
+ BUG_ON(ioa_cfg->hrrq_num == 0);
+
+ i = IPR_NUM_CMD_BLKS -
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id - 1;
+ if (i > 0) {
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].size += i;
+ ioa_cfg->hrrq[ioa_cfg->hrrq_num - 1].max_cmd_id += i;
+ }
+
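/*
 * Worked example (not part of this patch), with the hypothetical values
 * IPR_NUM_INTERNAL_CMD_BLKS = 5, IPR_NUM_BASE_CMD_BLKS = 95 (so
 * IPR_NUM_CMD_BLKS = 100) and hrrq_num = 4: HRRQ 0 gets ids 0..4 for
 * internal commands; HRRQs 1-3 each get 95 / 3 = 31 entries (5..35,
 * 36..66, 67..97); the 100 - 98 = 2 ids lost to integer division are then
 * folded into the last queue, stretching it to 67..99, so every command
 * block is owned by exactly one queue.
 */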
for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
@@ -8484,7 +8943,11 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
ipr_cmd->sense_buffer_dma = dma_addr +
offsetof(struct ipr_cmnd, sense_buffer);
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ ipr_cmd->ioarcb.cmd_pkt.hrrq_id = hrrq_id;
+ ipr_cmd->hrrq = &ioa_cfg->hrrq[hrrq_id];
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ if (i >= ioa_cfg->hrrq[hrrq_id].max_cmd_id)
+ hrrq_id++;
}
return 0;
@@ -8497,7 +8960,7 @@ static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* 0 on success / non-zero for error
**/
-static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
+static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
{
struct pci_dev *pdev = ioa_cfg->pdev;
int i, rc = -ENOMEM;
@@ -8516,6 +8979,10 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+
+ if (!ioa_cfg->target_ids || !ioa_cfg->array_ids ||
+ !ioa_cfg->vset_ids)
+ goto out_free_res_entries;
}
for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
@@ -8530,15 +8997,34 @@ static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
if (!ioa_cfg->vpd_cbs)
goto out_free_res_entries;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_free_q);
+ INIT_LIST_HEAD(&ioa_cfg->hrrq[i].hrrq_pending_q);
+ spin_lock_init(&ioa_cfg->hrrq[i]._lock);
+ if (i == 0)
+ ioa_cfg->hrrq[i].lock = ioa_cfg->host->host_lock;
+ else
+ ioa_cfg->hrrq[i].lock = &ioa_cfg->hrrq[i]._lock;
+ }
+
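/*
 * Annotation (not part of this patch): HRRQ 0's public 'lock' aliases the
 * SCSI midlayer's host_lock, so legacy single-queue paths that still take
 * host_lock keep serializing correctly against queue 0, while the extra
 * queues get private locks; _lock is initialized for every queue so
 * for_each_hrrq() walkers can take spin_lock(&hrrq->_lock) uniformly.
 */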
if (ipr_alloc_cmd_blks(ioa_cfg))
goto out_free_vpd_cbs;
- ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
- sizeof(u32) * IPR_NUM_CMD_BLKS,
- &ioa_cfg->host_rrq_dma);
-
- if (!ioa_cfg->host_rrq)
- goto out_ipr_free_cmd_blocks;
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ ioa_cfg->hrrq[i].host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ &ioa_cfg->hrrq[i].host_rrq_dma);
+
+ if (!ioa_cfg->hrrq[i].host_rrq) {
+ while (--i >= 0)
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ goto out_ipr_free_cmd_blocks;
+ }
+ ioa_cfg->hrrq[i].ioa_cfg = ioa_cfg;
+ }
ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
ioa_cfg->cfg_table_size,
@@ -8582,8 +9068,12 @@ out_free_hostrcb_dma:
ioa_cfg->u.cfg_table,
ioa_cfg->cfg_table_dma);
out_free_host_rrq:
- pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
- ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ pci_free_consistent(pdev,
+ sizeof(u32) * ioa_cfg->hrrq[i].size,
+ ioa_cfg->hrrq[i].host_rrq,
+ ioa_cfg->hrrq[i].host_rrq_dma);
+ }
out_ipr_free_cmd_blocks:
ipr_free_cmd_blks(ioa_cfg);
out_free_vpd_cbs:
@@ -8591,6 +9081,9 @@ out_free_vpd_cbs:
ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
out_free_res_entries:
kfree(ioa_cfg->res_entries);
+ kfree(ioa_cfg->target_ids);
+ kfree(ioa_cfg->array_ids);
+ kfree(ioa_cfg->vset_ids);
goto out;
}
@@ -8601,7 +9094,7 @@ out_free_res_entries:
* Return value:
* none
**/
-static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
+static void ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
{
int i;
@@ -8625,8 +9118,8 @@ static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
* Return value:
* none
**/
-static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
- struct Scsi_Host *host, struct pci_dev *pdev)
+static void ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
+ struct Scsi_Host *host, struct pci_dev *pdev)
{
const struct ipr_interrupt_offsets *p;
struct ipr_interrupts *t;
@@ -8638,15 +9131,11 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->doorbell = IPR_DOORBELL;
sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
- sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
- sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
- INIT_LIST_HEAD(&ioa_cfg->free_q);
- INIT_LIST_HEAD(&ioa_cfg->pending_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
INIT_LIST_HEAD(&ioa_cfg->free_res_q);
@@ -8712,7 +9201,7 @@ static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
* Return value:
* ptr to chip information on success / NULL on failure
**/
-static const struct ipr_chip_t * __devinit
+static const struct ipr_chip_t *
ipr_get_chip_info(const struct pci_device_id *dev_id)
{
int i;
@@ -8724,6 +9213,88 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
return NULL;
}
+static int ipr_enable_msix(struct ipr_ioa_cfg *ioa_cfg)
+{
+ struct msix_entry entries[IPR_MAX_MSIX_VECTORS];
+ int i, err, vectors;
+
+ for (i = 0; i < ARRAY_SIZE(entries); ++i)
+ entries[i].entry = i;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msix(ioa_cfg->pdev, entries, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msix(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = entries[i].vector;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
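/*
 * Annotation (not part of this patch): in this kernel generation,
 * pci_enable_msix() returns 0 on success, a negative errno on hard failure,
 * or a positive count of vectors the device can actually provide; the loop
 * above simply retries with that smaller count. For example, requesting 16
 * on hardware exposing 8: pci_enable_msix(pdev, entries, 16) returns 8,
 * then pci_enable_msix(pdev, entries, 8) returns 0.
 * pci_enable_msi_block() below follows the same convention.
 */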
+static int ipr_enable_msi(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, err, vectors;
+
+ vectors = ipr_number_of_msix;
+
+ while ((err = pci_enable_msi_block(ioa_cfg->pdev, vectors)) > 0)
+ vectors = err;
+
+ if (err < 0) {
+ pci_disable_msi(ioa_cfg->pdev);
+ return err;
+ }
+
+ if (!err) {
+ for (i = 0; i < vectors; i++)
+ ioa_cfg->vectors_info[i].vec = ioa_cfg->pdev->irq + i;
+ ioa_cfg->nvectors = vectors;
+ }
+
+ return err;
+}
+
+static void name_msi_vectors(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int vec_idx, n = sizeof(ioa_cfg->vectors_info[0].desc) - 1;
+
+ for (vec_idx = 0; vec_idx < ioa_cfg->nvectors; vec_idx++) {
+ snprintf(ioa_cfg->vectors_info[vec_idx].desc, n,
+ "host%d-%d", ioa_cfg->host->host_no, vec_idx);
+ /* snprintf() NUL-terminates within n; pin the last byte anyway */
+ ioa_cfg->vectors_info[vec_idx].desc[n] = '\0';
+ }
+}
+
+static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg)
+{
+ int i, rc;
+
+ for (i = 1; i < ioa_cfg->nvectors; i++) {
+ rc = request_irq(ioa_cfg->vectors_info[i].vec,
+ ipr_isr_mhrrq,
+ 0,
+ ioa_cfg->vectors_info[i].desc,
+ &ioa_cfg->hrrq[i]);
+ if (rc) {
+ while (--i >= 0)
+ free_irq(ioa_cfg->vectors_info[i].vec,
+ &ioa_cfg->hrrq[i]);
+ return rc;
+ }
+ }
+ return 0;
+}
+
/**
* ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
* @pdev: PCI device struct
@@ -8734,12 +9305,13 @@ ipr_get_chip_info(const struct pci_device_id *dev_id)
* Return value:
* 0 on success / non-zero on failure
**/
-static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
+static irqreturn_t ipr_test_intr(int irq, void *devp)
{
struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
unsigned long lock_flags = 0;
irqreturn_t rc = IRQ_HANDLED;
+ dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ioa_cfg->msi_received = 1;
@@ -8761,8 +9333,7 @@ static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
- struct pci_dev *pdev)
+static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
{
int rc;
volatile u32 int_reg;
@@ -8788,9 +9359,9 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
if (!ioa_cfg->msi_received) {
/* MSI test failed */
dev_info(&pdev->dev, "MSI test failed. Falling back to LSI.\n");
@@ -8807,16 +9378,15 @@ static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
return rc;
}
-/**
- * ipr_probe_ioa - Allocates memory and does first stage of initialization
+/**
+ * ipr_probe_ioa - Allocates memory and does first stage of initialization
* @pdev: PCI device struct
* @dev_id: PCI device id struct
*
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
+static int ipr_probe_ioa(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
struct Scsi_Host *host;
@@ -8824,6 +9394,7 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
void __iomem *ipr_regs;
int rc = PCIBIOS_SUCCESSFUL;
volatile u32 mask, uproc, interrupts;
+ unsigned long lock_flags;
ENTER;
@@ -8919,17 +9490,56 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
goto cleanup_nomem;
}
- /* Enable MSI style interrupts if they are supported. */
- if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
+ if (ipr_number_of_msix > IPR_MAX_MSIX_VECTORS) {
+ dev_err(&pdev->dev, "The max number of MSIX is %d\n",
+ IPR_MAX_MSIX_VECTORS);
+ ipr_number_of_msix = IPR_MAX_MSIX_VECTORS;
+ }
+
+ if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msix(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSIX;
+ else if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI &&
+ ipr_enable_msi(ioa_cfg) == 0)
+ ioa_cfg->intr_flag = IPR_USE_MSI;
+ else {
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ }
+
+ if (ioa_cfg->intr_flag == IPR_USE_MSI ||
+ ioa_cfg->intr_flag == IPR_USE_MSIX) {
rc = ipr_test_msi(ioa_cfg, pdev);
- if (rc == -EOPNOTSUPP)
- pci_disable_msi(pdev);
+ if (rc == -EOPNOTSUPP) {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSI;
+ pci_disable_msi(pdev);
+ } else if (ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ ioa_cfg->intr_flag &= ~IPR_USE_MSIX;
+ pci_disable_msix(pdev);
+ }
+
+ ioa_cfg->intr_flag = IPR_USE_LSI;
+ ioa_cfg->nvectors = 1;
+ }
else if (rc)
goto out_msi_disable;
- else
- dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
- } else if (ipr_debug)
- dev_info(&pdev->dev, "Cannot enable MSI.\n");
+ else {
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ dev_info(&pdev->dev,
+ "Request for %d MSIs succeeded with starting IRQ: %d\n",
+ ioa_cfg->nvectors, pdev->irq);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ dev_info(&pdev->dev,
+ "Request for %d MSIXs succeeded.",
+ ioa_cfg->nvectors);
+ }
+ }
+
+ ioa_cfg->hrrq_num = min3(ioa_cfg->nvectors,
+ (unsigned int)num_online_cpus(),
+ (unsigned int)IPR_MAX_HRRQ_NUM);
/* Save away PCI config space for use following IOA reset */
rc = pci_save_state(pdev);
@@ -8976,11 +9586,24 @@ static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
ioa_cfg->ioa_unit_checked = 1;
+ spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
- rc = request_irq(pdev->irq, ipr_isr,
- ioa_cfg->msi_received ? 0 : IRQF_SHARED,
- IPR_NAME, ioa_cfg);
+ spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI
+ || ioa_cfg->intr_flag == IPR_USE_MSIX) {
+ name_msi_vectors(ioa_cfg);
+ rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_isr,
+ 0,
+ ioa_cfg->vectors_info[0].desc,
+ &ioa_cfg->hrrq[0]);
+ if (!rc)
+ rc = ipr_request_other_msi_irqs(ioa_cfg);
+ } else {
+ rc = request_irq(pdev->irq, ipr_isr,
+ IRQF_SHARED,
+ IPR_NAME, &ioa_cfg->hrrq[0]);
+ }
if (rc) {
dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
pdev->irq, rc);
@@ -9005,7 +9628,10 @@ out:
cleanup_nolog:
ipr_free_mem(ioa_cfg);
out_msi_disable:
- pci_disable_msi(pdev);
+ if (ioa_cfg->intr_flag == IPR_USE_MSI)
+ pci_disable_msi(pdev);
+ else if (ioa_cfg->intr_flag == IPR_USE_MSIX)
+ pci_disable_msix(pdev);
cleanup_nomem:
iounmap(ipr_regs);
out_release_regions:
@@ -9075,6 +9701,7 @@ static void __ipr_remove(struct pci_dev *pdev)
{
unsigned long host_lock_flags = 0;
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
+ int i;
ENTER;
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9084,6 +9711,12 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
}
+ for (i = 0; i < ioa_cfg->hrrq_num; i++) {
+ spin_lock(&ioa_cfg->hrrq[i]._lock);
+ ioa_cfg->hrrq[i].removing_ioa = 1;
+ spin_unlock(&ioa_cfg->hrrq[i]._lock);
+ }
+ wmb();
ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
@@ -9113,7 +9746,7 @@ static void __ipr_remove(struct pci_dev *pdev)
* Return value:
* none
**/
-static void __devexit ipr_remove(struct pci_dev *pdev)
+static void ipr_remove(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
@@ -9136,11 +9769,10 @@ static void __devexit ipr_remove(struct pci_dev *pdev)
* Return value:
* 0 on success / non-zero on failure
**/
-static int __devinit ipr_probe(struct pci_dev *pdev,
- const struct pci_device_id *dev_id)
+static int ipr_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
{
struct ipr_ioa_cfg *ioa_cfg;
- int rc;
+ int rc, i;
rc = ipr_probe_ioa(pdev, dev_id);
@@ -9187,6 +9819,17 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
ioa_cfg->allow_ml_add_del = 1;
ioa_cfg->host->max_channel = IPR_VSET_BUS;
+ ioa_cfg->iopoll_weight = ioa_cfg->chip_cfg->iopoll_weight;
+
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ for (i = 1; i < ioa_cfg->hrrq_num; i++) {
+ blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
+ ioa_cfg->iopoll_weight, ipr_iopoll);
+ blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
+ }
+ }
+
schedule_work(&ioa_cfg->work_q);
return 0;
}
@@ -9205,8 +9848,16 @@ static void ipr_shutdown(struct pci_dev *pdev)
{
struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
unsigned long lock_flags = 0;
+ int i;
spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+ if (blk_iopoll_enabled && ioa_cfg->iopoll_weight &&
+ ioa_cfg->sis64 && ioa_cfg->nvectors > 1) {
+ ioa_cfg->iopoll_weight = 0;
+ for (i = 1; i < ioa_cfg->hrrq_num; i++)
+ blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
+ }
+
while (ioa_cfg->in_reset_reload) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
@@ -9218,7 +9869,7 @@ static void ipr_shutdown(struct pci_dev *pdev)
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
}
-static struct pci_device_id ipr_pci_table[] __devinitdata = {
+static struct pci_device_id ipr_pci_table[] = {
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
@@ -9279,6 +9930,8 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C0, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C3, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
@@ -9292,6 +9945,14 @@ static struct pci_device_id ipr_pci_table[] __devinitdata = {
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C8, 0, 0, 0 },
{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D5, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D6, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D7, 0, 0, 0 },
+ { PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROCODILE,
+ PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57D8, 0, 0, 0 },
{ }
};
MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -9305,7 +9966,7 @@ static struct pci_driver ipr_driver = {
.name = IPR_NAME,
.id_table = ipr_pci_table,
.probe = ipr_probe,
- .remove = __devexit_p(ipr_remove),
+ .remove = ipr_remove,
.shutdown = ipr_shutdown,
.err_handler = &ipr_err_handler,
};
@@ -9318,9 +9979,7 @@ static struct pci_driver ipr_driver = {
**/
static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
{
- struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
- list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+ list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
/**
@@ -9342,7 +10001,7 @@ static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
- if (!ioa_cfg->allow_cmds) {
+ if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
continue;
}
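
The probe and shutdown hunks above pair blk_iopoll setup with teardown for every secondary HRRQ. A condensed sketch of that lifecycle, assuming the old linux/blk-iopoll.h interface; this is an illustrative fragment using the patch's identifiers, not a verbatim excerpt:

	#include <linux/blk-iopoll.h>

	/* probe: one iopoll context per secondary HRRQ; queue 0 stays in
	 * plain interrupt mode */
	for (i = 1; i < ioa_cfg->hrrq_num; i++) {
		blk_iopoll_init(&ioa_cfg->hrrq[i].iopoll,
				ioa_cfg->iopoll_weight, ipr_iopoll);
		blk_iopoll_enable(&ioa_cfg->hrrq[i].iopoll);
	}

	/* shutdown: zero the weight first so the ISR stops scheduling
	 * iopoll work, then disable each context */
	ioa_cfg->iopoll_weight = 0;
	for (i = 1; i < ioa_cfg->hrrq_num; i++)
		blk_iopoll_disable(&ioa_cfg->hrrq[i].iopoll);
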
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index c8a137f83bb1..21a6ff1ed5c6 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -32,14 +32,15 @@
#include <linux/libata.h>
#include <linux/list.h>
#include <linux/kref.h>
+#include <linux/blk-iopoll.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
/*
* Literals
*/
-#define IPR_DRIVER_VERSION "2.5.4"
-#define IPR_DRIVER_DATE "(July 11, 2012)"
+#define IPR_DRIVER_VERSION "2.6.0"
+#define IPR_DRIVER_DATE "(November 16, 2012)"
/*
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -82,6 +83,7 @@
#define IPR_SUBS_DEV_ID_57B4 0x033B
#define IPR_SUBS_DEV_ID_57B2 0x035F
+#define IPR_SUBS_DEV_ID_57C0 0x0352
#define IPR_SUBS_DEV_ID_57C3 0x0353
#define IPR_SUBS_DEV_ID_57C4 0x0354
#define IPR_SUBS_DEV_ID_57C6 0x0357
@@ -94,6 +96,10 @@
#define IPR_SUBS_DEV_ID_574D 0x0356
#define IPR_SUBS_DEV_ID_57C8 0x035D
+#define IPR_SUBS_DEV_ID_57D5 0x03FB
+#define IPR_SUBS_DEV_ID_57D6 0x03FC
+#define IPR_SUBS_DEV_ID_57D7 0x03FF
+#define IPR_SUBS_DEV_ID_57D8 0x03FE
#define IPR_NAME "ipr"
/*
@@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
* Misc literals
*/
#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
+#define IPR_MAX_MSIX_VECTORS 0x5
+#define IPR_MAX_HRRQ_NUM 0x10
+#define IPR_INIT_HRRQ 0x0
/*
* Adapter interface types
@@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
__be64 dev_id;
__be64 lun;
__be64 lun_wwn[2];
-#define IPR_MAX_RES_PATH_LENGTH 24
+#define IPR_MAX_RES_PATH_LENGTH 48
__be64 res_path;
struct ipr_std_inq_data std_inq_data;
u8 reserved2[4];
@@ -459,9 +468,40 @@ struct ipr_supported_device {
u8 reserved2[16];
}__attribute__((packed, aligned (4)));
+struct ipr_hrr_queue {
+ struct ipr_ioa_cfg *ioa_cfg;
+ __be32 *host_rrq;
+ dma_addr_t host_rrq_dma;
+#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
+#define IPR_HRRQ_RESP_BIT_SET 0x00000002
+#define IPR_HRRQ_TOGGLE_BIT 0x00000001
+#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
+#define IPR_ID_HRRQ_SELE_ENABLE 0x02
+ volatile __be32 *hrrq_start;
+ volatile __be32 *hrrq_end;
+ volatile __be32 *hrrq_curr;
+
+ struct list_head hrrq_free_q;
+ struct list_head hrrq_pending_q;
+ spinlock_t _lock;
+ spinlock_t *lock;
+
+ volatile u32 toggle_bit;
+ u32 size;
+ u32 min_cmd_id;
+ u32 max_cmd_id;
+ u8 allow_interrupts:1;
+ u8 ioa_is_dead:1;
+ u8 allow_cmds:1;
+ u8 removing_ioa:1;
+
+ struct blk_iopoll iopoll;
+};
+
/* Command packet structure */
struct ipr_cmd_pkt {
- __be16 reserved; /* Reserved by IOA */
+ u8 reserved; /* Reserved by IOA */
+ u8 hrrq_id;
u8 request_type;
#define IPR_RQTYPE_SCSICDB 0x00
#define IPR_RQTYPE_IOACMD 0x01
@@ -1022,6 +1062,10 @@ struct ipr_hostrcb64_fabric_desc {
struct ipr_hostrcb64_config_element elem[1];
}__attribute__((packed, aligned (8)));
+#define for_each_hrrq(hrrq, ioa_cfg) \
+ for (hrrq = (ioa_cfg)->hrrq; \
+ hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
+
#define for_each_fabric_cfg(fabric, cfg) \
for (cfg = (fabric)->elem; \
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -1308,6 +1352,7 @@ struct ipr_chip_cfg_t {
u16 max_cmds;
u8 cache_line_size;
u8 clear_isr;
+ u32 iopoll_weight;
struct ipr_interrupt_offsets regs;
};
@@ -1317,6 +1362,7 @@ struct ipr_chip_t {
u16 intr_type;
#define IPR_USE_LSI 0x00
#define IPR_USE_MSI 0x01
+#define IPR_USE_MSIX 0x02
u16 sis_type;
#define IPR_SIS32 0x00
#define IPR_SIS64 0x01
@@ -1375,13 +1421,10 @@ struct ipr_ioa_cfg {
struct list_head queue;
- u8 allow_interrupts:1;
u8 in_reset_reload:1;
u8 in_ioa_bringdown:1;
u8 ioa_unit_checked:1;
- u8 ioa_is_dead:1;
u8 dump_taken:1;
- u8 allow_cmds:1;
u8 allow_ml_add_del:1;
u8 needs_hard_reset:1;
u8 dual_raid:1;
@@ -1413,21 +1456,7 @@ struct ipr_ioa_cfg {
char trace_start[8];
#define IPR_TRACE_START_LABEL "trace"
struct ipr_trace_entry *trace;
- u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
-
- /*
- * Queue for free command blocks
- */
- char ipr_free_label[8];
-#define IPR_FREEQ_LABEL "free-q"
- struct list_head free_q;
-
- /*
- * Queue for command blocks outstanding to the adapter
- */
- char ipr_pending_label[8];
-#define IPR_PENDQ_LABEL "pend-q"
- struct list_head pending_q;
+ atomic_t trace_index;
char cfg_table_start[8];
#define IPR_CFG_TBL_START "cfg"
@@ -1452,16 +1481,10 @@ struct ipr_ioa_cfg {
struct list_head hostrcb_free_q;
struct list_head hostrcb_pending_q;
- __be32 *host_rrq;
- dma_addr_t host_rrq_dma;
-#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
-#define IPR_HRRQ_RESP_BIT_SET 0x00000002
-#define IPR_HRRQ_TOGGLE_BIT 0x00000001
-#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
- volatile __be32 *hrrq_start;
- volatile __be32 *hrrq_end;
- volatile __be32 *hrrq_curr;
- volatile u32 toggle_bit;
+ struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
+ u32 hrrq_num;
+ atomic_t hrrq_index;
+ u16 identify_hrrq_index;
struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
@@ -1507,6 +1530,17 @@ struct ipr_ioa_cfg {
u32 max_cmds;
struct ipr_cmnd **ipr_cmnd_list;
dma_addr_t *ipr_cmnd_list_dma;
+
+ u16 intr_flag;
+ unsigned int nvectors;
+
+ struct {
+ unsigned short vec;
+ char desc[22];
+ } vectors_info[IPR_MAX_MSIX_VECTORS];
+
+ u32 iopoll_weight;
+
}; /* struct ipr_ioa_cfg */
struct ipr_cmnd {
@@ -1544,6 +1578,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
@@ -1717,7 +1752,8 @@ struct ipr_ucode_image_header {
if (ipr_is_device(hostrcb)) { \
if ((hostrcb)->ioa_cfg->sis64) { \
printk(KERN_ERR IPR_NAME ": %s: " fmt, \
- ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
+ ipr_format_res_path(hostrcb->ioa_cfg, \
+ hostrcb->hcam.u.error64.fd_res_path, \
hostrcb->rp_buffer, \
sizeof(hostrcb->rp_buffer)), \
__VA_ARGS__); \
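
The new for_each_hrrq() iterator and the per-queue _lock replace the single ioa_cfg-wide allow_cmds/ioa_is_dead bits removed above. A minimal usage sketch, mirroring the __ipr_remove() hunk earlier in this patch (illustrative fragment):

	struct ipr_hrr_queue *hrrq;

	for_each_hrrq(hrrq, ioa_cfg) {
		spin_lock(&hrrq->_lock);
		hrrq->removing_ioa = 1;	/* the flag now lives per queue */
		spin_unlock(&hrrq->_lock);
	}
	wmb();	/* publish the per-queue flags before bringing the IOA down */
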
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index b6d7a5c2fc94..9aa86a315a08 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -389,14 +389,14 @@ MODULE_DEVICE_TABLE( pci, ips_pci_table );
static char ips_hot_plug_name[] = "ips";
-static int __devinit ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
-static void __devexit ips_remove_device(struct pci_dev *pci_dev);
+static int ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent);
+static void ips_remove_device(struct pci_dev *pci_dev);
static struct pci_driver ips_pci_driver = {
.name = ips_hot_plug_name,
.id_table = ips_pci_table,
.probe = ips_insert_device,
- .remove = __devexit_p(ips_remove_device),
+ .remove = ips_remove_device,
};
@@ -6837,7 +6837,7 @@ err_out_sh:
/* Routine Description: */
/* Remove one Adapter ( Hot Plugging ) */
/*---------------------------------------------------------------------------*/
-static void __devexit
+static void
ips_remove_device(struct pci_dev *pci_dev)
{
struct Scsi_Host *sh = pci_get_drvdata(pci_dev);
@@ -6898,7 +6898,7 @@ module_exit(ips_module_exit);
/* Return Value: */
/* 0 if Successful, else non-zero */
/*---------------------------------------------------------------------------*/
-static int __devinit
+static int
ips_insert_device(struct pci_dev *pci_dev, const struct pci_device_id *ent)
{
int index = -1;
diff --git a/drivers/scsi/isci/init.c b/drivers/scsi/isci/init.c
index b74050b95d6a..d73fdcfeb45a 100644
--- a/drivers/scsi/isci/init.c
+++ b/drivers/scsi/isci/init.c
@@ -282,7 +282,7 @@ static void isci_unregister(struct isci_host *isci_host)
scsi_host_put(shost);
}
-static int __devinit isci_pci_init(struct pci_dev *pdev)
+static int isci_pci_init(struct pci_dev *pdev)
{
int err, bar_num, bar_mask = 0;
void __iomem * const *iomap;
@@ -616,7 +616,7 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
return NULL;
}
-static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int isci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct isci_pci_info *pci_info;
int err, i;
@@ -709,7 +709,7 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
return err;
}
-static void __devexit isci_pci_remove(struct pci_dev *pdev)
+static void isci_pci_remove(struct pci_dev *pdev)
{
struct isci_host *ihost;
int i;
@@ -778,7 +778,7 @@ static struct pci_driver isci_pci_driver = {
.name = DRV_NAME,
.id_table = isci_id_table,
.probe = isci_pci_probe,
- .remove = __devexit_p(isci_pci_remove),
+ .remove = isci_pci_remove,
#ifdef CONFIG_PM
.driver.pm = &isci_pm_ops,
#endif
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 27cfb0cb186c..69efbf12b299 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -129,7 +129,7 @@ static const struct esp_driver_ops jazz_esp_ops = {
.dma_error = jazz_esp_dma_error,
};
-static int __devinit esp_jazz_probe(struct platform_device *dev)
+static int esp_jazz_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -201,7 +201,7 @@ fail:
return err;
}
-static int __devexit esp_jazz_remove(struct platform_device *dev)
+static int esp_jazz_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -223,7 +223,7 @@ MODULE_ALIAS("platform:jazz_esp");
static struct platform_driver esp_jazz_driver = {
.probe = esp_jazz_probe,
- .remove = __devexit_p(esp_jazz_remove),
+ .remove = esp_jazz_remove,
.driver = {
.name = "jazz_esp",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/lasi700.c b/drivers/scsi/lasi700.c
index 23880f8fe7e4..5c4ded997265 100644
--- a/drivers/scsi/lasi700.c
+++ b/drivers/scsi/lasi700.c
@@ -168,7 +168,7 @@ static struct parisc_driver lasi700_driver = {
.name = "lasi_scsi",
.id_table = lasi700_ids,
.probe = lasi700_probe,
- .remove = __devexit_p(lasi700_driver_remove),
+ .remove = lasi700_driver_remove,
};
static int __init
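
The ips, isci, jazz_esp, and lasi700 hunks above (and the earlier ipr ones) are all the same mechanical conversion: with CONFIG_HOTPLUG gone, __devinit/__devexit/__devinitdata no longer place probe/remove code in discardable sections, so the annotations and the __devexit_p() wrapper are dropped. The resulting shape, sketched with hypothetical names:

	static int example_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id); /* was __devinit */
	static void example_remove(struct pci_dev *pdev);	  /* was __devexit */

	static struct pci_driver example_driver = {
		.name		= "example",
		.id_table	= example_pci_table,	/* was __devinitdata */
		.probe		= example_probe,
		.remove		= example_remove, /* was __devexit_p(example_remove) */
	};
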
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index efc6e72f09f3..aec2e0da5016 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -1800,7 +1800,7 @@ out:
* @dev:domain device to be detect.
* @src_dev: the device which originated BROADCAST(CHANGE).
*
- * Add self-configuration expander suport. Suppose two expander cascading,
+ * Add self-configuration expander support. Suppose two expander cascading,
* when the first level expander is self-configuring, hotplug the disks in
* second level expander, BROADCAST(CHANGE) will not only be originated
* in the second level expander, but also be originated in the first level
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 69b59935b53f..7706c99ec8bb 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -466,11 +466,13 @@ enum intr_type_t {
MSIX,
};
+#define LPFC_CT_CTX_MAX 64
struct unsol_rcv_ct_ctx {
uint32_t ctxt_id;
uint32_t SID;
- uint32_t flags;
-#define UNSOL_VALID 0x00000001
+ uint32_t valid;
+#define UNSOL_INVALID 0
+#define UNSOL_VALID 1
uint16_t oxid;
uint16_t rxid;
};
@@ -689,6 +691,7 @@ struct lpfc_hba {
#define LPFC_FCF_PRIORITY 2 /* Priority fcf failover */
uint32_t cfg_fcf_failover_policy;
uint32_t cfg_fcp_io_sched;
+ uint32_t cfg_fcp2_no_tgt_reset;
uint32_t cfg_cr_delay;
uint32_t cfg_cr_count;
uint32_t cfg_multi_ring_support;
@@ -714,6 +717,7 @@ struct lpfc_hba {
uint32_t cfg_log_verbose;
uint32_t cfg_aer_support;
uint32_t cfg_sriov_nr_virtfn;
+ uint32_t cfg_request_firmware_upgrade;
uint32_t cfg_iocb_cnt;
uint32_t cfg_suppress_link_up;
#define LPFC_INITIALIZE_LINK 0 /* do normal init_link mbox */
@@ -748,6 +752,15 @@ struct lpfc_hba {
void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
PCI BAR2 */
+ void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
+ PCI BAR0 with dual-ULP support */
+ void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
+ PCI BAR2 with dual-ULP support */
+ void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
+ PCI BAR4 with dual-ULP support */
+#define PCI_64BIT_BAR0 0
+#define PCI_64BIT_BAR2 2
+#define PCI_64BIT_BAR4 4
void __iomem *MBslimaddr; /* virtual address for mbox cmds */
void __iomem *HAregaddr; /* virtual address for host attn reg */
void __iomem *CAregaddr; /* virtual address for chip attn reg */
@@ -936,7 +949,7 @@ struct lpfc_hba {
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
struct list_head ct_ev_waiters;
- struct unsol_rcv_ct_ctx ct_ctx[64];
+ struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
uint32_t ctx_idx;
uint8_t menlo_flag; /* menlo generic flags */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index ad16e54ac383..a364cae9e984 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3618,6 +3618,77 @@ static DEVICE_ATTR(lpfc_sriov_nr_virtfn, S_IRUGO | S_IWUSR,
lpfc_sriov_nr_virtfn_show, lpfc_sriov_nr_virtfn_store);
/**
+ * lpfc_request_firmware_upgrade_store - Request Linux generic firmware upgrade
+ *
+ * @dev: class device that is converted into a Scsi_host.
+ * @attr: device attribute, not used.
+ * @buf: string containing the value "1" to request the upgrade.
+ * @count: unused variable.
+ *
+ * Description:
+ * Requests a Linux generic firmware upgrade through sysfs; only the value
+ * 1 is accepted, and the upgrade itself is carried out by
+ * lpfc_sli4_request_firmware_update() in RUN_FW_UPGRADE mode.
+ *
+ * Returns:
+ * length of the buf on success if val is 1 and the upgrade request is
+ * accepted.
+ * -EINVAL if buf does not parse to the value 1.
+ * -EPERM if the firmware upgrade request fails.
+ **/
+static ssize_t
+lpfc_request_firmware_upgrade_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(dev);
+ struct lpfc_vport *vport = (struct lpfc_vport *)shost->hostdata;
+ struct lpfc_hba *phba = vport->phba;
+ int val = 0, rc = -EINVAL;
+
+ /* Sanity check on user data */
+ if (!isdigit(buf[0]))
+ return -EINVAL;
+ if (sscanf(buf, "%i", &val) != 1)
+ return -EINVAL;
+ if (val != 1)
+ return -EINVAL;
+
+ rc = lpfc_sli4_request_firmware_update(phba, RUN_FW_UPGRADE);
+ if (rc)
+ rc = -EPERM;
+ else
+ rc = strlen(buf);
+ return rc;
+}
+
+static int lpfc_req_fw_upgrade;
+module_param(lpfc_req_fw_upgrade, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(lpfc_req_fw_upgrade, "Enable Linux generic firmware upgrade");
+lpfc_param_show(request_firmware_upgrade)
+
+/**
+ * lpfc_request_firmware_upgrade_init - Enable initial linux generic fw upgrade
+ * @phba: lpfc_hba pointer.
+ * @val: 0 or 1.
+ *
+ * Description:
+ * Set the initial Linux generic firmware upgrade enable or disable flag.
+ *
+ * Returns:
+ * zero if val saved.
+ * -EINVAL val out of range
+ **/
+static int
+lpfc_request_firmware_upgrade_init(struct lpfc_hba *phba, int val)
+{
+ if (val >= 0 && val <= 1) {
+ phba->cfg_request_firmware_upgrade = val;
+ return 0;
+ }
+ return -EINVAL;
+}
+static DEVICE_ATTR(lpfc_req_fw_upgrade, S_IRUGO | S_IWUSR,
+ lpfc_request_firmware_upgrade_show,
+ lpfc_request_firmware_upgrade_store);
+
+/**
* lpfc_fcp_imax_store
*
* @dev: class device that is converted into a Scsi_host.
@@ -3788,6 +3859,16 @@ LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for "
"issuing commands [0] - Round Robin, [1] - Current CPU");
/*
+# lpfc_fcp2_no_tgt_reset: Determine bus reset behavior
+# range is [0,1]. Default value is 0.
+# For [0], bus reset issues target reset to ALL devices
+# For [1], bus reset issues target reset to non-FCP2 devices
+*/
+LPFC_ATTR_RW(fcp2_no_tgt_reset, 0, 0, 1, "Determine bus reset behavior for "
+ "FCP2 devices [0] - issue tgt reset, [1] - no tgt reset");
+
+
+/*
# lpfc_cr_delay & lpfc_cr_count: Default values for I/O coalescing
# cr_delay (msec) or cr_count outstanding commands. cr_delay can take
# value [0,63]. cr_count can take value [1,255]. Default value of cr_delay
@@ -4029,6 +4110,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_scan_down,
&dev_attr_lpfc_link_speed,
&dev_attr_lpfc_fcp_io_sched,
+ &dev_attr_lpfc_fcp2_no_tgt_reset,
&dev_attr_lpfc_cr_delay,
&dev_attr_lpfc_cr_count,
&dev_attr_lpfc_multi_ring_support,
@@ -4069,6 +4151,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
&dev_attr_lpfc_aer_support,
&dev_attr_lpfc_aer_state_cleanup,
&dev_attr_lpfc_sriov_nr_virtfn,
+ &dev_attr_lpfc_req_fw_upgrade,
&dev_attr_lpfc_suppress_link_up,
&dev_attr_lpfc_iocb_cnt,
&dev_attr_iocb_hw,
@@ -5019,6 +5102,7 @@ void
lpfc_get_cfgparam(struct lpfc_hba *phba)
{
lpfc_fcp_io_sched_init(phba, lpfc_fcp_io_sched);
+ lpfc_fcp2_no_tgt_reset_init(phba, lpfc_fcp2_no_tgt_reset);
lpfc_cr_delay_init(phba, lpfc_cr_delay);
lpfc_cr_count_init(phba, lpfc_cr_count);
lpfc_multi_ring_support_init(phba, lpfc_multi_ring_support);
@@ -5051,6 +5135,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
lpfc_aer_support_init(phba, lpfc_aer_support);
lpfc_sriov_nr_virtfn_init(phba, lpfc_sriov_nr_virtfn);
+ lpfc_request_firmware_upgrade_init(phba, lpfc_req_fw_upgrade);
lpfc_suppress_link_up_init(phba, lpfc_suppress_link_up);
lpfc_iocb_cnt_init(phba, lpfc_iocb_cnt);
phba->cfg_enable_dss = 1;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f7368eb80415..32d5683e6181 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
spin_lock_irqsave(&phba->ct_ev_lock, flags);
if (phba->sli_rev == LPFC_SLI_REV4) {
evt_dat->immed_dat = phba->ctx_idx;
- phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+ phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
/* Provide warning for over-run of the ct_ctx array */
- if (phba->ct_ctx[evt_dat->immed_dat].flags &
+ if (phba->ct_ctx[evt_dat->immed_dat].valid ==
UNSOL_VALID)
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
"2717 CT context array entry "
@@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
piocbq->iocb.unsli3.rcvsli3.ox_id;
phba->ct_ctx[evt_dat->immed_dat].SID =
piocbq->iocb.un.rcvels.remoteID;
- phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
+ phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
} else
evt_dat->immed_dat = piocbq->iocb.ulpContext;
@@ -1013,6 +1013,47 @@ error_ct_unsol_exit:
}
/**
+ * lpfc_bsg_ct_unsol_abort - handle CT abort to management plane
+ * @phba: Pointer to HBA context object.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function handles an abort of a CT command toward the management
+ * plane for an SLI4 port.
+ *
+ * If a pending context for a CT command to the management plane is present,
+ * this function clears it and returns 1 (handled); otherwise, it returns 0
+ * to indicate that no such context exists.
+ **/
+int
+lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
+{
+ struct fc_frame_header fc_hdr;
+ struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
+ int ctx_idx, handled = 0;
+ uint16_t oxid, rxid;
+ uint32_t sid;
+
+ memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
+ sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
+ oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
+ rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
+
+ for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
+ if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
+ continue;
+ if (phba->ct_ctx[ctx_idx].rxid != rxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].oxid != oxid)
+ continue;
+ if (phba->ct_ctx[ctx_idx].SID != sid)
+ continue;
+ phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
+ handled = 1;
+ }
+ return handled;
+}
+
+/**
* lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
* @job: SET_EVENT fc_bsg_job
**/
@@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
icmd->ulpClass = CLASS3;
if (phba->sli_rev == LPFC_SLI_REV4) {
/* Do not issue unsol response if oxid not marked as valid */
- if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
+ if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
rc = IOCB_ERROR;
goto issue_ct_rsp_exit;
}
@@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
/* The exchange is done, mark the entry as invalid */
- phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
+ phba->ct_ctx[tag].valid = UNSOL_INVALID;
} else
icmd->ulpContext = (ushort) tag;
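
Taken together, the lpfc_bsg.c hunks turn ct_ctx[].valid into a simple two-state lifecycle in place of the old flags bitmask; condensed from the receive, response, and abort paths above (illustrative fragment):

	/* unsolicited receive: claim the next slot and mark it valid */
	evt_dat->immed_dat = phba->ctx_idx;
	phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
	phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;

	/* response (or abort): refuse stale tags, invalidate once consumed */
	if (phba->ct_ctx[tag].valid != UNSOL_VALID)
		return IOCB_ERROR;
	phba->ct_ctx[tag].valid = UNSOL_INVALID;
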
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 4380a44000bc..76ca65dae781 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
-void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
- struct lpfc_iocbq *);
+int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
void lpfc_fdmi_tmo(unsigned long);
@@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
int lpfc_bsg_timeout(struct fc_bsg_job *);
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
+int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
struct lpfc_iocbq *);
struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
@@ -468,3 +468,4 @@ void lpfc_sli4_node_prep(struct lpfc_hba *);
int lpfc_sli4_xri_sgl_update(struct lpfc_hba *);
void lpfc_free_sgl_list(struct lpfc_hba *, struct list_head *);
uint32_t lpfc_sli_port_speed_get(struct lpfc_hba *);
+int lpfc_sli4_request_firmware_update(struct lpfc_hba *, uint8_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 7ffabb7e3afa..7bff3a19af56 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
}
/**
- * lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
+ * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
* @phba: Pointer to HBA context object.
- * @pring: Pointer to the driver internal I/O ring.
- * @piocbq: Pointer to the IOCBQ.
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
*
- * This function serves as the default handler for the sli4 unsolicited
- * abort event. It shall be invoked when there is no application interface
- * registered unsolicited abort handler. This handler does nothing but
- * just simply releases the dma buffer used by the unsol abort event.
+ * This function serves as the upper level protocol abort handler for CT
+ * protocol.
+ *
+ * Return 1 if abort has been handled, 0 otherwise.
**/
-void
-lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
- struct lpfc_sli_ring *pring,
- struct lpfc_iocbq *piocbq)
+int
+lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
- IOCB_t *icmd = &piocbq->iocb;
- struct lpfc_dmabuf *bdeBuf;
- uint32_t size;
+ int handled;
- /* Forward abort event to any process registered to receive ct event */
- if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
- return;
+ /* CT upper level goes through BSG */
+ handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
- /* If there is no BDE associated with IOCB, there is nothing to do */
- if (icmd->ulpBdeCount == 0)
- return;
- bdeBuf = piocbq->context2;
- piocbq->context2 = NULL;
- size = icmd->un.cont64[0].tus.f.bdeSize;
- lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
- lpfc_in_buf_free(phba, bdeBuf);
+ return handled;
}
static void
@@ -634,7 +621,7 @@ lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
/* Check for retry */
if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) {
if (irsp->ulpStatus != IOSTAT_LOCAL_REJECT ||
- (irsp->un.ulpWord[4] && IOERR_PARAM_MASK) !=
+ (irsp->un.ulpWord[4] & IOERR_PARAM_MASK) !=
IOERR_NO_RESOURCES)
vport->fc_ns_retry++;
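
The last lpfc_ct.c hunk fixes a logical-vs-bitwise AND typo. With &&, the left-hand expression collapses to 0 or 1, so (assuming IOERR_NO_RESOURCES is neither 0 nor 1) the comparison was always true and the retry count was bumped unconditionally; a minimal illustration:

	uint32_t word4 = irsp->un.ulpWord[4];

	/* buggy: (word4 && MASK) is 0 or 1, never IOERR_NO_RESOURCES */
	if ((word4 && IOERR_PARAM_MASK) != IOERR_NO_RESOURCES)
		vport->fc_ns_retry++;

	/* fixed: bitwise AND actually extracts the error code field */
	if ((word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES)
		vport->fc_ns_retry++;
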
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f19e9b6f9f13..08d156a9094f 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1182,8 +1182,6 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
sp->cmn.w2.r_a_tov = 0;
sp->cmn.virtual_fabric_support = 0;
sp->cls1.classValid = 0;
- sp->cls2.seqDelivery = 1;
- sp->cls3.seqDelivery = 1;
if (sp->cmn.fcphLow < FC_PH3)
sp->cmn.fcphLow = FC_PH3;
if (sp->cmn.fcphHigh < FC_PH3)
@@ -1198,7 +1196,13 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
/* Set the fcfi to the fcfi we registered with */
elsiocb->iocb.ulpContext = phba->fcf.fcfi;
}
+ /* Can't do SLI4 class2 without sequence coalescing support */
+ sp->cls2.classValid = 0;
+ sp->cls2.seqDelivery = 0;
} else {
+ /* Historical, setting sequential-delivery bit for SLI3 */
+ sp->cls2.seqDelivery = (sp->cls2.classValid) ? 1 : 0;
+ sp->cls3.seqDelivery = (sp->cls3.classValid) ? 1 : 0;
if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
sp->cmn.request_multiple_Nport = 1;
/* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
@@ -3118,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
case IOERR_SEQUENCE_TIMEOUT:
case IOERR_INVALID_RPI:
+ if (cmd == ELS_CMD_PLOGI &&
+ did == NameServer_DID) {
+ /* Continue forever if PLOGI to the nameserver fails */
+ maxretry = 0;
+ delay = 100;
+ }
retry = 1;
break;
}
@@ -6513,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
struct lpfc_nodelist *ndlp;
struct ls_rjt stat;
uint32_t *payload;
- uint32_t cmd, did, newnode, rjt_err = 0;
+ uint32_t cmd, did, newnode;
+ uint8_t rjt_exp, rjt_err = 0;
IOCB_t *icmd = &elsiocb->iocb;
if (!vport || !(elsiocb->context2))
@@ -6602,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* If Nport discovery is delayed, reject PLOGIs */
if (vport->fc_flag & FC_DISC_DELAYED) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
if (vport->port_state < LPFC_DISC_AUTH) {
if (!(phba->pport->fc_flag & FC_PT2PT) ||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
/* We get here, and drop thru, if we are PT2PT with
@@ -6644,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
@@ -6657,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
lpfc_send_els_event(vport, ndlp, payload);
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
@@ -6676,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvADISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6689,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPDISC++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb,
@@ -6726,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
phba->fc_stat.elsRcvPRLI++;
if (vport->port_state < LPFC_DISC_AUTH) {
rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_NOTHING_MORE;
break;
}
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
@@ -6809,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (newnode)
lpfc_nlp_put(ndlp);
break;
+ case ELS_CMD_REC:
+ /* receive this due to exchange closed */
+ rjt_err = LSRJT_UNABLE_TPC;
+ rjt_exp = LSEXP_INVALID_OX_RX;
+ break;
default:
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
@@ -6816,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
/* Unsupported ELS command, reject */
rjt_err = LSRJT_CMD_UNSUPPORTED;
+ rjt_exp = LSEXP_NOTHING_MORE;
/* Unknown ELS command <elsCmd> received from NPORT <did> */
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
@@ -6830,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
if (rjt_err) {
memset(&stat, 0, sizeof(stat));
stat.un.b.lsRjtRsnCode = rjt_err;
- stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
+ stat.un.b.lsRjtRsnCodeExp = rjt_exp;
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
NULL);
}
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 7398ca862e97..e8c476031703 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -538,6 +538,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10000000
#define ELS_CMD_TEST 0x11000000
#define ELS_CMD_RRQ 0x12000000
+#define ELS_CMD_REC 0x13000000
#define ELS_CMD_PRLI 0x20100014
#define ELS_CMD_PRLO 0x21100014
#define ELS_CMD_PRLO_ACC 0x02100014
@@ -574,6 +575,7 @@ struct fc_vft_header {
#define ELS_CMD_ECHO 0x10
#define ELS_CMD_TEST 0x11
#define ELS_CMD_RRQ 0x12
+#define ELS_CMD_REC 0x13
#define ELS_CMD_PRLI 0x14001020
#define ELS_CMD_PRLO 0x14001021
#define ELS_CMD_PRLO_ACC 0x14001002
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2cdeb5434fb7..6e93b886cd4d 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -106,6 +106,7 @@ struct lpfc_sli_intf {
#define LPFC_SLI4_MB_WORD_COUNT 64
#define LPFC_MAX_MQ_PAGE 8
+#define LPFC_MAX_WQ_PAGE_V0 4
#define LPFC_MAX_WQ_PAGE 8
#define LPFC_MAX_CQ_PAGE 4
#define LPFC_MAX_EQ_PAGE 8
@@ -703,24 +704,41 @@ struct lpfc_register {
* BAR0. The offsets are the same so the driver must account for
* any base address difference.
*/
-#define LPFC_RQ_DOORBELL 0x00A0
-#define lpfc_rq_doorbell_num_posted_SHIFT 16
-#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
-#define lpfc_rq_doorbell_num_posted_WORD word0
-#define lpfc_rq_doorbell_id_SHIFT 0
-#define lpfc_rq_doorbell_id_MASK 0xFFFF
-#define lpfc_rq_doorbell_id_WORD word0
-
-#define LPFC_WQ_DOORBELL 0x0040
-#define lpfc_wq_doorbell_num_posted_SHIFT 24
-#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
-#define lpfc_wq_doorbell_num_posted_WORD word0
-#define lpfc_wq_doorbell_index_SHIFT 16
-#define lpfc_wq_doorbell_index_MASK 0x00FF
-#define lpfc_wq_doorbell_index_WORD word0
-#define lpfc_wq_doorbell_id_SHIFT 0
-#define lpfc_wq_doorbell_id_MASK 0xFFFF
-#define lpfc_wq_doorbell_id_WORD word0
+#define LPFC_ULP0_RQ_DOORBELL 0x00A0
+#define LPFC_ULP1_RQ_DOORBELL 0x00C0
+#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_rq_db_list_fm_num_posted_WORD word0
+#define lpfc_rq_db_list_fm_index_SHIFT 16
+#define lpfc_rq_db_list_fm_index_MASK 0x00FF
+#define lpfc_rq_db_list_fm_index_WORD word0
+#define lpfc_rq_db_list_fm_id_SHIFT 0
+#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_list_fm_id_WORD word0
+#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_rq_db_ring_fm_num_posted_WORD word0
+#define lpfc_rq_db_ring_fm_id_SHIFT 0
+#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_rq_db_ring_fm_id_WORD word0
+
+#define LPFC_ULP0_WQ_DOORBELL 0x0040
+#define LPFC_ULP1_WQ_DOORBELL 0x0060
+#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
+#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
+#define lpfc_wq_db_list_fm_num_posted_WORD word0
+#define lpfc_wq_db_list_fm_index_SHIFT 16
+#define lpfc_wq_db_list_fm_index_MASK 0x00FF
+#define lpfc_wq_db_list_fm_index_WORD word0
+#define lpfc_wq_db_list_fm_id_SHIFT 0
+#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_list_fm_id_WORD word0
+#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
+#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
+#define lpfc_wq_db_ring_fm_num_posted_WORD word0
+#define lpfc_wq_db_ring_fm_id_SHIFT 0
+#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
+#define lpfc_wq_db_ring_fm_id_WORD word0
#define LPFC_EQCQ_DOORBELL 0x0120
#define lpfc_eqcq_doorbell_se_SHIFT 31
@@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
struct { /* Version 0 Request */
uint32_t word0;
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
-#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
#define lpfc_mbx_wq_create_num_pages_WORD word0
+#define lpfc_mbx_wq_create_dua_SHIFT 8
+#define lpfc_mbx_wq_create_dua_MASK 0x00000001
+#define lpfc_mbx_wq_create_dua_WORD word0
#define lpfc_mbx_wq_create_cq_id_SHIFT 16
#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_cq_id_WORD word0
- struct dma_address page[LPFC_MAX_WQ_PAGE];
+ struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
+ uint32_t word9;
+#define lpfc_mbx_wq_create_bua_SHIFT 0
+#define lpfc_mbx_wq_create_bua_MASK 0x00000001
+#define lpfc_mbx_wq_create_bua_WORD word9
+#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
+#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_wq_create_ulp_num_WORD word9
} request;
struct { /* Version 1 Request */
uint32_t word0; /* Word 0 is the same as in v0 */
@@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
#define lpfc_mbx_wq_create_q_id_SHIFT 0
#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
#define lpfc_mbx_wq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_wq_create_bar_set_SHIFT 0
+#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_bar_set_WORD word2
+#define WQ_PCI_BAR_0_AND_1 0x00
+#define WQ_PCI_BAR_2_AND_3 0x01
+#define WQ_PCI_BAR_4_AND_5 0x02
+#define lpfc_mbx_wq_create_db_format_SHIFT 16
+#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_wq_create_db_format_WORD word2
} response;
} u;
};
@@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
#define lpfc_mbx_rq_create_num_pages_SHIFT 0
#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
#define lpfc_mbx_rq_create_num_pages_WORD word0
+#define lpfc_mbx_rq_create_dua_SHIFT 16
+#define lpfc_mbx_rq_create_dua_MASK 0x00000001
+#define lpfc_mbx_rq_create_dua_WORD word0
+#define lpfc_mbx_rq_create_bqu_SHIFT 17
+#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
+#define lpfc_mbx_rq_create_bqu_WORD word0
+#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
+#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
+#define lpfc_mbx_rq_create_ulp_num_WORD word0
struct rq_context context;
struct dma_address page[LPFC_MAX_WQ_PAGE];
} request;
struct {
uint32_t word0;
-#define lpfc_mbx_rq_create_q_id_SHIFT 0
-#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
-#define lpfc_mbx_rq_create_q_id_WORD word0
+#define lpfc_mbx_rq_create_q_id_SHIFT 0
+#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD word0
+ uint32_t doorbell_offset;
+ uint32_t word2;
+#define lpfc_mbx_rq_create_bar_set_SHIFT 0
+#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_bar_set_WORD word2
+#define lpfc_mbx_rq_create_db_format_SHIFT 16
+#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
+#define lpfc_mbx_rq_create_db_format_WORD word2
} response;
} u;
};
@@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
} u;
};
+struct lpfc_mbx_query_fw_config {
+ struct mbox_header header;
+ struct {
+ uint32_t config_number;
+#define LPFC_FC_FCOE 0x00000007
+ uint32_t asic_revision;
+ uint32_t physical_port;
+ uint32_t function_mode;
+#define LPFC_FCOE_INI_MODE 0x00000040
+#define LPFC_FCOE_TGT_MODE 0x00000080
+#define LPFC_DUA_MODE 0x00000800
+ uint32_t ulp0_mode;
+#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
+#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
+ uint32_t ulp0_nap_words[12];
+ uint32_t ulp1_mode;
+ uint32_t ulp1_nap_words[12];
+ uint32_t function_capabilities;
+ uint32_t cqid_base;
+ uint32_t cqid_tot;
+ uint32_t eqid_base;
+ uint32_t eqid_tot;
+ uint32_t ulp0_nap2_words[2];
+ uint32_t ulp1_nap2_words[2];
+ } rsp;
+};
+
struct lpfc_id_range {
uint32_t word5;
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
@@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
#define lpfc_mbx_redisc_fcf_index_WORD word12
};
-struct lpfc_mbx_query_fw_cfg {
- struct mbox_header header;
- uint32_t config_number;
- uint32_t asic_rev;
- uint32_t phys_port;
- uint32_t function_mode;
-/* firmware Function Mode */
-#define lpfc_function_mode_toe_SHIFT 0
-#define lpfc_function_mode_toe_MASK 0x00000001
-#define lpfc_function_mode_toe_WORD function_mode
-#define lpfc_function_mode_nic_SHIFT 1
-#define lpfc_function_mode_nic_MASK 0x00000001
-#define lpfc_function_mode_nic_WORD function_mode
-#define lpfc_function_mode_rdma_SHIFT 2
-#define lpfc_function_mode_rdma_MASK 0x00000001
-#define lpfc_function_mode_rdma_WORD function_mode
-#define lpfc_function_mode_vm_SHIFT 3
-#define lpfc_function_mode_vm_MASK 0x00000001
-#define lpfc_function_mode_vm_WORD function_mode
-#define lpfc_function_mode_iscsi_i_SHIFT 4
-#define lpfc_function_mode_iscsi_i_MASK 0x00000001
-#define lpfc_function_mode_iscsi_i_WORD function_mode
-#define lpfc_function_mode_iscsi_t_SHIFT 5
-#define lpfc_function_mode_iscsi_t_MASK 0x00000001
-#define lpfc_function_mode_iscsi_t_WORD function_mode
-#define lpfc_function_mode_fcoe_i_SHIFT 6
-#define lpfc_function_mode_fcoe_i_MASK 0x00000001
-#define lpfc_function_mode_fcoe_i_WORD function_mode
-#define lpfc_function_mode_fcoe_t_SHIFT 7
-#define lpfc_function_mode_fcoe_t_MASK 0x00000001
-#define lpfc_function_mode_fcoe_t_WORD function_mode
-#define lpfc_function_mode_dal_SHIFT 8
-#define lpfc_function_mode_dal_MASK 0x00000001
-#define lpfc_function_mode_dal_WORD function_mode
-#define lpfc_function_mode_lro_SHIFT 9
-#define lpfc_function_mode_lro_MASK 0x00000001
-#define lpfc_function_mode_lro_WORD function_mode
-#define lpfc_function_mode_flex10_SHIFT 10
-#define lpfc_function_mode_flex10_MASK 0x00000001
-#define lpfc_function_mode_flex10_WORD function_mode
-#define lpfc_function_mode_ncsi_SHIFT 11
-#define lpfc_function_mode_ncsi_MASK 0x00000001
-#define lpfc_function_mode_ncsi_WORD function_mode
-};
-
/* Status field for embedded SLI_CONFIG mailbox command */
#define STATUS_SUCCESS 0x0
#define STATUS_FAILED 0x1
@@ -2965,7 +3003,7 @@ struct lpfc_mqe {
struct lpfc_mbx_read_config rd_config;
struct lpfc_mbx_request_features req_ftrs;
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
- struct lpfc_mbx_query_fw_cfg query_fw_cfg;
+ struct lpfc_mbx_query_fw_config query_fw_cfg;
struct lpfc_mbx_supp_pages supp_pages;
struct lpfc_mbx_pc_sli4_params sli4_params;
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
@@ -3219,6 +3257,9 @@ struct wqe_common {
#define wqe_dif_SHIFT 0
#define wqe_dif_MASK 0x00000003
#define wqe_dif_WORD word7
+#define LPFC_WQE_DIF_PASSTHRU 1
+#define LPFC_WQE_DIF_STRIP 2
+#define LPFC_WQE_DIF_INSERT 3
#define wqe_ct_SHIFT 2
#define wqe_ct_MASK 0x00000003
#define wqe_ct_WORD word7
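
The _SHIFT/_MASK/_WORD triplets introduced above are consumed through lpfc's bf_get()/bf_set() accessors. Roughly, as a sketch of the idiom rather than a verbatim copy of the driver's macros:

	#define bf_set(name, ptr, value) \
		((ptr)->name##_WORD = (((value) & name##_MASK) << name##_SHIFT) | \
			((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT)))
	#define bf_get(name, ptr) \
		(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

	/* e.g. reading the doorbell format from a WQ_CREATE response: */
	db_format = bf_get(lpfc_mbx_wq_create_db_format, &wq_create->u.response);
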
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 7dc4218d9c4c..26ca2efa976e 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -3854,7 +3854,7 @@ static void
lpfc_sli4_async_sli_evt(struct lpfc_hba *phba, struct lpfc_acqe_sli *acqe_sli)
{
char port_name;
- char message[80];
+ char message[128];
uint8_t status;
struct lpfc_acqe_misconfigured_event *misconfigured;
@@ -6233,9 +6233,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
- phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
+ phba->sli4_hba.conf_regs_memmap_p +
+ LPFC_ULP0_WQ_DOORBELL;
phba->sli4_hba.EQCQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
phba->sli4_hba.MQDBregaddr =
@@ -6289,9 +6291,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
return -ENODEV;
phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_RQ_DOORBELL);
phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
- vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+ vf * LPFC_VFR_PAGE_SIZE +
+ LPFC_ULP0_WQ_DOORBELL);
phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@@ -6987,6 +6991,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
phba->sli4_hba.fcp_wq = NULL;
}
+ if (phba->pci_bar0_memmap_p) {
+ iounmap(phba->pci_bar0_memmap_p);
+ phba->pci_bar0_memmap_p = NULL;
+ }
+ if (phba->pci_bar2_memmap_p) {
+ iounmap(phba->pci_bar2_memmap_p);
+ phba->pci_bar2_memmap_p = NULL;
+ }
+ if (phba->pci_bar4_memmap_p) {
+ iounmap(phba->pci_bar4_memmap_p);
+ phba->pci_bar4_memmap_p = NULL;
+ }
+
/* Release FCP CQ mapping array */
if (phba->sli4_hba.fcp_cq_map != NULL) {
kfree(phba->sli4_hba.fcp_cq_map);
@@ -7050,6 +7067,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
int rc = -ENOMEM;
int fcp_eqidx, fcp_cqidx, fcp_wqidx;
int fcp_cq_index = 0;
+ uint32_t shdr_status, shdr_add_status;
+ union lpfc_sli4_cfg_shdr *shdr;
+ LPFC_MBOXQ_t *mboxq;
+ uint32_t length;
+
+ /* Check for dual-ULP support */
+ mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+ if (!mboxq) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3249 Unable to allocate memory for "
+ "QUERY_FW_CFG mailbox command\n");
+ return -ENOMEM;
+ }
+ length = (sizeof(struct lpfc_mbx_query_fw_config) -
+ sizeof(struct lpfc_sli4_cfg_mhdr));
+ lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+ LPFC_MBOX_OPCODE_QUERY_FW_CFG,
+ length, LPFC_SLI4_MBX_EMBED);
+
+ rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+ shdr = (union lpfc_sli4_cfg_shdr *)
+ &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+ shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ if (shdr_status || shdr_add_status || rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3250 QUERY_FW_CFG mailbox failed with status "
+ "x%x add_status x%x, mbx status x%x\n",
+ shdr_status, shdr_add_status, rc);
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
+ rc = -ENXIO;
+ goto out_error;
+ }
+
+ phba->sli4_hba.fw_func_mode =
+ mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
+ phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
+ phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
+ "ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
+ phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
+
+ if (rc != MBX_TIMEOUT)
+ mempool_free(mboxq, phba->mbox_mem_pool);
/*
* Set up HBA Event Queues (EQs)
@@ -7664,78 +7728,6 @@ out:
}
/**
- * lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
- * @phba: pointer to lpfc hba data structure.
- * @cnt: number of nop mailbox commands to send.
- *
- * This routine is invoked to send a number @cnt of NOP mailbox command and
- * wait for each command to complete.
- *
- * Return: the number of NOP mailbox command completed.
- **/
-static int
-lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
-{
- LPFC_MBOXQ_t *mboxq;
- int length, cmdsent;
- uint32_t mbox_tmo;
- uint32_t rc = 0;
- uint32_t shdr_status, shdr_add_status;
- union lpfc_sli4_cfg_shdr *shdr;
-
- if (cnt == 0) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2518 Requested to send 0 NOP mailbox cmd\n");
- return cnt;
- }
-
- mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
- if (!mboxq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "2519 Unable to allocate memory for issuing "
- "NOP mailbox command\n");
- return 0;
- }
-
- /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
- length = (sizeof(struct lpfc_mbx_nop) -
- sizeof(struct lpfc_sli4_cfg_mhdr));
-
- for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
- lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
- LPFC_MBOX_OPCODE_NOP, length,
- LPFC_SLI4_MBX_EMBED);
- if (!phba->sli4_hba.intr_enable)
- rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
- else {
- mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
- rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
- }
- if (rc == MBX_TIMEOUT)
- break;
- /* Check return status */
- shdr = (union lpfc_sli4_cfg_shdr *)
- &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
- shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
- shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
- &shdr->response);
- if (shdr_status || shdr_add_status || rc) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
- "2520 NOP mailbox command failed "
- "status x%x add_status x%x mbx "
- "status x%x\n", shdr_status,
- shdr_add_status, rc);
- break;
- }
- }
-
- if (rc != MBX_TIMEOUT)
- mempool_free(mboxq, phba->mbox_mem_pool);
-
- return cmdsent;
-}
-
-/**
* lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
* @phba: pointer to lpfc hba data structure.
*
@@ -8503,37 +8495,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
}
/**
- * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
- * @phba: pointer to lpfc hba data structure.
- *
- * This routine is invoked to unset the HBA device initialization steps to
- * a device with SLI-4 interface spec.
- **/
-static void
-lpfc_sli4_unset_hba(struct lpfc_hba *phba)
-{
- struct lpfc_vport *vport = phba->pport;
- struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
-
- spin_lock_irq(shost->host_lock);
- vport->load_flag |= FC_UNLOADING;
- spin_unlock_irq(shost->host_lock);
-
- phba->pport->work_port_events = 0;
-
- /* Stop the SLI4 device port */
- lpfc_stop_port(phba);
-
- lpfc_sli4_disable_intr(phba);
-
- /* Reset SLI4 HBA FCoE function */
- lpfc_pci_function_reset(phba);
- lpfc_sli4_queue_destroy(phba);
-
- return;
-}
-
-/**
* lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
* @phba: Pointer to HBA context object.
*
@@ -8813,7 +8774,7 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct lpfc_hba *phba;
@@ -8980,7 +8941,7 @@ out_free_phba:
* removed from PCI bus, it performs all the necessary cleanup for the HBA
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one_s3(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -9450,7 +9411,7 @@ lpfc_write_firmware(const struct firmware *fw, void *context)
struct lpfc_dmabuf *dmabuf, *next;
uint32_t offset = 0, temp_offset = 0;
- /* It can be null, sanity check */
+ /* It can be null in no-wait mode, sanity check */
if (!fw) {
rc = -ENXIO;
goto out;
@@ -9528,11 +9489,48 @@ release_out:
release_firmware(fw);
out:
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "3024 Firmware update done: %d.", rc);
+ "3024 Firmware update done: %d.\n", rc);
return;
}
/**
+ * lpfc_sli4_request_firmware_update - Request linux generic firmware upgrade
+ * @phba: pointer to lpfc hba data structure.
+ * @fw_upgrade: INT_FW_UPGRADE for an asynchronous (no-wait) upgrade, or
+ * RUN_FW_UPGRADE for a synchronous, user-requested upgrade.
+ *
+ * This routine is called to perform a Linux generic firmware upgrade on a
+ * device that supports this feature.
+ **/
+int
+lpfc_sli4_request_firmware_update(struct lpfc_hba *phba, uint8_t fw_upgrade)
+{
+ uint8_t file_name[ELX_MODEL_NAME_SIZE];
+ int ret;
+ const struct firmware *fw;
+
+ /* Only supported on SLI4 interface type 2 for now */
+ if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
+ LPFC_SLI_INTF_IF_TYPE_2)
+ return -EPERM;
+
+ snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp", phba->ModelName);
+
+ if (fw_upgrade == INT_FW_UPGRADE) {
+ ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
+ file_name, &phba->pcidev->dev,
+ GFP_KERNEL, (void *)phba,
+ lpfc_write_firmware);
+ } else if (fw_upgrade == RUN_FW_UPGRADE) {
+ ret = request_firmware(&fw, file_name, &phba->pcidev->dev);
+ if (!ret)
+ lpfc_write_firmware(fw, (void *)phba);
+ } else {
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/**
* lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
* @pdev: pointer to PCI device
* @pid: pointer to PCI device identifier
@@ -9550,7 +9548,7 @@ out:
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
{
struct lpfc_hba *phba;
@@ -9558,9 +9556,7 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
struct Scsi_Host *shost = NULL;
int error, ret;
uint32_t cfg_mode, intr_mode;
- int mcnt;
int adjusted_fcp_io_channel;
- uint8_t file_name[ELX_MODEL_NAME_SIZE];
/* Allocate memory for HBA structure */
phba = lpfc_hba_alloc(pdev);
@@ -9648,71 +9644,41 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
/* Now, trying to enable interrupt and bring up the device */
cfg_mode = phba->cfg_use_msi;
- while (true) {
- /* Put device to a known state before enabling interrupt */
- lpfc_stop_port(phba);
- /* Configure and enable interrupt */
- intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
- if (intr_mode == LPFC_INTR_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0426 Failed to enable interrupt.\n");
- error = -ENODEV;
- goto out_free_sysfs_attr;
- }
- /* Default to single EQ for non-MSI-X */
- if (phba->intr_type != MSIX)
- adjusted_fcp_io_channel = 1;
- else
- adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
- phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
- /* Set up SLI-4 HBA */
- if (lpfc_sli4_hba_setup(phba)) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "1421 Failed to set up hba\n");
- error = -ENODEV;
- goto out_disable_intr;
- }
- /* Send NOP mbx cmds for non-INTx mode active interrupt test */
- if (intr_mode != 0)
- mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
- LPFC_ACT_INTR_CNT);
-
- /* Check active interrupts received only for MSI/MSI-X */
- if (intr_mode == 0 ||
- phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
- /* Log the current active interrupt mode */
- phba->intr_mode = intr_mode;
- lpfc_log_intr_mode(phba, intr_mode);
- break;
- }
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "0451 Configure interrupt mode (%d) "
- "failed active interrupt test.\n",
- intr_mode);
- /* Unset the previous SLI-4 HBA setup. */
- /*
- * TODO: Is this operation compatible with IF TYPE 2
- * devices? All port state is deleted and cleared.
- */
- lpfc_sli4_unset_hba(phba);
- /* Try next level of interrupt mode */
- cfg_mode = --intr_mode;
+ /* Put device to a known state before enabling interrupt */
+ lpfc_stop_port(phba);
+ /* Configure and enable interrupt */
+ intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+ if (intr_mode == LPFC_INTR_ERROR) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0426 Failed to enable interrupt.\n");
+ error = -ENODEV;
+ goto out_free_sysfs_attr;
}
+ /* Default to single EQ for non-MSI-X */
+ if (phba->intr_type != MSIX)
+ adjusted_fcp_io_channel = 1;
+ else
+ adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
+ phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
+ /* Set up SLI-4 HBA */
+ if (lpfc_sli4_hba_setup(phba)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "1421 Failed to set up hba\n");
+ error = -ENODEV;
+ goto out_disable_intr;
+ }
+
+ /* Log the current active interrupt mode */
+ phba->intr_mode = intr_mode;
+ lpfc_log_intr_mode(phba, intr_mode);
/* Perform post initialization setup */
lpfc_post_init_setup(phba);
- /* check for firmware upgrade or downgrade (if_type 2 only) */
- if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
- LPFC_SLI_INTF_IF_TYPE_2) {
- snprintf(file_name, ELX_MODEL_NAME_SIZE, "%s.grp",
- phba->ModelName);
- ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
- file_name, &phba->pcidev->dev,
- GFP_KERNEL, (void *)phba,
- lpfc_write_firmware);
- }
+ /* check for firmware upgrade or downgrade */
+ if (phba->cfg_request_firmware_upgrade)
+ ret = lpfc_sli4_request_firmware_update(phba, INT_FW_UPGRADE);
/* Check if there are static vports to be created. */
lpfc_create_static_vport(phba);
@@ -9750,7 +9716,7 @@ out_free_phba:
* removed from PCI bus, it performs all the necessary cleanup for the HBA
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one_s4(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -10176,7 +10142,7 @@ lpfc_io_resume_s4(struct pci_dev *pdev)
* 0 - driver can claim the device
* negative value - driver can not claim the device
**/
-static int __devinit
+static int
lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
{
int rc;
@@ -10204,7 +10170,7 @@ lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
* remove routine, which will perform all the necessary cleanup for the
* device to be removed from the PCI subsystem properly.
**/
-static void __devexit
+static void
lpfc_pci_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -10546,7 +10512,7 @@ static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
- .remove = __devexit_p(lpfc_pci_remove_one),
+ .remove = lpfc_pci_remove_one,
.suspend = lpfc_pci_suspend_one,
.resume = lpfc_pci_resume_one,
.err_handler = &lpfc_err_handler,
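The new lpfc_sli4_request_firmware_update() above wraps both kernel firmware-loading paths: request_firmware_nowait() for the internal upgrade kicked off at probe time (whose callback may then see a NULL firmware pointer, which is what the amended comment in lpfc_write_firmware() refers to), and the blocking request_firmware() for an explicitly run upgrade. A minimal sketch of the same two-path pattern, assuming hypothetical names (my_fw_callback, my_request_fw, "my_dev.grp"):

#include <linux/firmware.h>
#include <linux/device.h>
#include <linux/module.h>

/* Completion callback for the no-wait path; fw may be NULL when the
 * image could not be found, so the callback must check for that.
 */
static void my_fw_callback(const struct firmware *fw, void *context)
{
	if (!fw)
		return;			/* no firmware available */
	/* ... program fw->data (fw->size bytes) into the device ... */
	release_firmware(fw);
}

static int my_request_fw(struct device *dev, bool nowait)
{
	const struct firmware *fw;
	int ret;

	if (nowait)
		/* asynchronous: returns at once, callback runs later */
		return request_firmware_nowait(THIS_MODULE,
					       FW_ACTION_HOTPLUG,
					       "my_dev.grp", dev, GFP_KERNEL,
					       dev, my_fw_callback);

	/* synchronous: blocks until user space supplies the image */
	ret = request_firmware(&fw, "my_dev.grp", dev);
	if (!ret)
		my_fw_callback(fw, dev);
	return ret;
}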
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index d8fadcb2db73..46128c679202 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1115,6 +1115,13 @@ out:
"0261 Cannot Register NameServer login\n");
}
+ /*
+ * In case the node reference count does not go to zero, ensure that
+ * the stale state for the node is not processed.
+ */
+
+ ndlp->nlp_prev_state = ndlp->nlp_state;
+ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
spin_unlock_irq(shost->host_lock);
@@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
{
struct lpfc_iocbq *cmdiocb, *rspiocb;
IOCB_t *irsp;
+ struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
cmdiocb = (struct lpfc_iocbq *) arg;
rspiocb = cmdiocb->context_un.rsp_iocb;
irsp = &rspiocb->iocb;
if (irsp->ulpStatus) {
+ spin_lock_irq(shost->host_lock);
ndlp->nlp_flag |= NLP_DEFER_RM;
+ spin_unlock_irq(shost->host_lock);
return NLP_STE_FREED_NODE;
}
return ndlp->nlp_state;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7f45ac9964a9..98af07c6e300 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -288,6 +288,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
}
/**
+ * lpfc_change_queue_type() - Change a device's scsi tag queuing type
+ * @sdev: Pointer the scsi device whose queue depth is to change
+ * @tag_type: Identifier for queue tag type
+ */
+static int
+lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+/**
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
* @phba: The Hba for which this call is being executed.
*
@@ -3227,6 +3247,21 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
}
}
+ switch (scsi_get_prot_op(scsi_cmnd)) {
+ case SCSI_PROT_WRITE_STRIP:
+ case SCSI_PROT_READ_STRIP:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_STRIP;
+ break;
+ case SCSI_PROT_WRITE_INSERT:
+ case SCSI_PROT_READ_INSERT:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_INSERT;
+ break;
+ case SCSI_PROT_WRITE_PASS:
+ case SCSI_PROT_READ_PASS:
+ lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF_PASS;
+ break;
+ }
+
fcpdl = lpfc_bg_scsi_adjust_dl(phba, lpfc_cmd);
fcp_cmnd->fcpDl = be32_to_cpu(fcpdl);
@@ -3236,7 +3271,6 @@ lpfc_bg_scsi_prep_dma_buf_s4(struct lpfc_hba *phba,
* we need to set word 4 of IOCB here
*/
iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
- lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_DIF;
return 0;
err:
@@ -3958,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
break;
}
} else
- fcp_cmnd->fcpCntl1 = 0;
+ fcp_cmnd->fcpCntl1 = SIMPLE_Q;
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
@@ -4914,6 +4948,9 @@ lpfc_bus_reset_handler(struct scsi_cmnd *cmnd)
list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
if (!NLP_CHK_NODE_ACT(ndlp))
continue;
+ if (vport->phba->cfg_fcp2_no_tgt_reset &&
+ (ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE))
+ continue;
if (ndlp->nlp_state == NLP_STE_MAPPED_NODE &&
ndlp->nlp_sid == i &&
ndlp->rport) {
@@ -5133,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
struct scsi_host_template lpfc_vport_template = {
@@ -5155,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = lpfc_change_queue_depth,
+ .change_queue_type = lpfc_change_queue_type,
};
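The lpfc_change_queue_type() hook wired into both host templates above is invoked by the SCSI midlayer when user space changes a device's queue type (for instance via sysfs); the contract is to return the tag type actually in effect, which must be 0 for devices without tagged-queuing support. The same contract as a standalone sketch (my_sdev_set_tagging is a hypothetical name):

#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>

static int my_sdev_set_tagging(struct scsi_device *sdev, int tag_type)
{
	if (!sdev->tagged_supported)
		return 0;			/* untagged devices only */

	scsi_set_tag_type(sdev, tag_type);
	if (tag_type)
		scsi_activate_tcq(sdev, sdev->queue_depth);
	else
		scsi_deactivate_tcq(sdev, sdev->queue_depth);
	return tag_type;			/* type now in effect */
}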
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index d7f3313ef886..74b67d98e952 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
/* Ring Doorbell */
doorbell.word0 = 0;
- bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
- bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
- bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
- writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+ if (q->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
+ bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
+ } else if (q->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
+ bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, q->db_regaddr);
return 0;
}
@@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
/* Ring The Header Receive Queue Doorbell */
if (!(hq->host_index % hq->entry_repost)) {
doorbell.word0 = 0;
- bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
- hq->entry_repost);
- bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
- writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+ if (hq->db_format == LPFC_DB_RING_FORMAT) {
+ bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
+ } else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
+ bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
+ hq->entry_repost);
+ bf_set(lpfc_rq_db_list_fm_index, &doorbell,
+ hq->host_index);
+ bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
+ } else {
+ return -EINVAL;
+ }
+ writel(doorbell.word0, hq->db_regaddr);
}
return put_index;
}
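Both doorbell routines above now follow the same shape: pack whatever fields the queue's doorbell format requires into a single 32-bit word via bf_set(), then issue one writel() to the per-queue db_regaddr instead of a global register. A generic sketch of the packing step, with an invented field layout for illustration (the real SLI4 shifts and masks live in the lpfc register headers):

#include <stdint.h>

/* Pack a value into a word given its shift and mask. */
#define DB_FIELD(val, shift, mask)	(((uint32_t)(val) & (mask)) << (shift))

static uint32_t build_list_format_doorbell(uint16_t qid, uint16_t index,
					   uint8_t num_posted)
{
	uint32_t word0 = 0;

	word0 |= DB_FIELD(num_posted, 24, 0xff);   /* entries just posted */
	word0 |= DB_FIELD(index,      16, 0xff);   /* host index hint */
	word0 |= DB_FIELD(qid,         0, 0xffff); /* queue to ring */
	return word0;			/* caller writel()s this once */
}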
@@ -4939,7 +4956,7 @@ out_free_mboxq:
static void
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
{
- uint8_t fcp_eqidx;
+ int fcp_eqidx;
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
@@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
}
/* RPIs. */
count = phba->sli4_hba.max_cfg_param.max_rpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3279 Invalid provisioning of "
+ "rpi:%d\n", count);
+ rc = -EINVAL;
+ goto err_exit;
+ }
base = phba->sli4_hba.max_cfg_param.rpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.rpi_bmask = kzalloc(longs *
@@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VPIs. */
count = phba->sli4_hba.max_cfg_param.max_vpi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3280 Invalid provisioning of "
+ "vpi:%d\n", count);
+ rc = -EINVAL;
+ goto free_rpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vpi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->vpi_bmask = kzalloc(longs *
@@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* XRIs. */
count = phba->sli4_hba.max_cfg_param.max_xri;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3281 Invalid provisioning of "
+ "xri:%d\n", count);
+ rc = -EINVAL;
+ goto free_vpi_ids;
+ }
base = phba->sli4_hba.max_cfg_param.xri_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.xri_bmask = kzalloc(longs *
@@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
/* VFIs. */
count = phba->sli4_hba.max_cfg_param.max_vfi;
+ if (count <= 0) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+ "3282 Invalid provisioning of "
+ "vfi:%d\n", count);
+ rc = -EINVAL;
+ goto free_xri_ids;
+ }
base = phba->sli4_hba.max_cfg_param.vfi_base;
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
phba->sli4_hba.vfi_bmask = kzalloc(longs *
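Each of the rpi/vpi/xri/vfi blocks above, now guarded against a non-positive count, sizes its bitmask the same way: round the resource count up to whole longs and zero-allocate them. The idiom in isolation, as a sketch:

#include <linux/bitops.h>
#include <linux/slab.h>

static unsigned long *alloc_id_bitmap(int count)
{
	int longs;

	if (count <= 0)			/* the guard the patch adds */
		return NULL;

	/* round up so every resource id gets a bit */
	longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
	return kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
}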
@@ -6599,7 +6644,7 @@ static int
lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
uint32_t flag)
{
- MAILBOX_t *mb;
+ MAILBOX_t *mbx;
struct lpfc_sli *psli = &phba->sli;
uint32_t status, evtctr;
uint32_t ha_copy, hc_copy;
@@ -6653,7 +6698,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
psli = &phba->sli;
- mb = &pmbox->u.mb;
+ mbx = &pmbox->u.mb;
status = MBX_SUCCESS;
if (phba->link_state == LPFC_HBA_ERROR) {
@@ -6668,7 +6713,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
goto out_not_finished;
}
- if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
+ if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
!(hc_copy & HC_MBINT_ENA)) {
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -6722,7 +6767,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0308 Mbox cmd issue - BUSY Data: "
"x%x x%x x%x x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0xffffff,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
psli->slistat.mbox_busy++;
@@ -6732,15 +6777,15 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Bsy vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Bsy: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
return MBX_BUSY;
@@ -6751,7 +6796,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* If we are not polling, we MUST be in SLI2 mode */
if (flag != MBX_POLL) {
if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
- (mb->mbxCommand != MBX_KILL_BOARD)) {
+ (mbx->mbxCommand != MBX_KILL_BOARD)) {
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
/* Mbox command <mbxCommand> cannot issue */
@@ -6773,23 +6818,23 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
"x%x\n",
pmbox->vport ? pmbox->vport->vpi : 0,
- mb->mbxCommand, phba->pport->port_state,
+ mbx->mbxCommand, phba->pport->port_state,
psli->sli_flag, flag);
- if (mb->mbxCommand != MBX_HEARTBEAT) {
+ if (mbx->mbxCommand != MBX_HEARTBEAT) {
if (pmbox->vport) {
lpfc_debugfs_disc_trc(pmbox->vport,
LPFC_DISC_TRC_MBOX_VPORT,
"MBOX Send vport: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
else {
lpfc_debugfs_disc_trc(phba->pport,
LPFC_DISC_TRC_MBOX,
"MBOX Send: cmd:x%x mb:x%x x%x",
- (uint32_t)mb->mbxCommand,
- mb->un.varWords[0], mb->un.varWords[1]);
+ (uint32_t)mbx->mbxCommand,
+ mbx->un.varWords[0], mbx->un.varWords[1]);
}
}
@@ -6797,12 +6842,12 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
evtctr = psli->slistat.mbox_event;
/* next set own bit for the adapter and copy over command word */
- mb->mbxOwner = OWN_CHIP;
+ mbx->mbxOwner = OWN_CHIP;
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= (uint8_t *)phba->mbox_ext
- (uint8_t *)phba->mbox;
}
@@ -6814,11 +6859,11 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->in_ext_byte_len);
}
/* Copy command data to host SLIM area */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
} else {
/* Populate mbox extension offset word. */
if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
- *(((uint32_t *)mb) + pmbox->mbox_offset_word)
+ *(((uint32_t *)mbx) + pmbox->mbox_offset_word)
= MAILBOX_HBA_EXT_OFFSET;
/* Copy the mailbox extension data */
@@ -6828,24 +6873,24 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
pmbox->context2, pmbox->in_ext_byte_len);
}
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* copy command data into host mbox for cmpl */
- lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
}
/* First copy mbox command data to HBA SLIM, skip past first
word */
to_slim = phba->MBslimaddr + sizeof (uint32_t);
- lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
+ lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
MAILBOX_CMD_SIZE - sizeof (uint32_t));
/* Next copy over first word, with mbxOwner set */
- ldata = *((uint32_t *)mb);
+ ldata = *((uint32_t *)mbx);
to_slim = phba->MBslimaddr;
writel(ldata, to_slim);
readl(to_slim); /* flush */
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
/* switch over to host mailbox */
psli->sli_flag |= LPFC_SLI_ACTIVE;
}
@@ -6920,7 +6965,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
/* First copy command data */
word0 = *((uint32_t *)phba->mbox);
word0 = le32_to_cpu(word0);
- if (mb->mbxCommand == MBX_CONFIG_PORT) {
+ if (mbx->mbxCommand == MBX_CONFIG_PORT) {
MAILBOX_t *slimmb;
uint32_t slimword0;
/* Check real SLIM for any errors */
@@ -6947,7 +6992,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
if (psli->sli_flag & LPFC_SLI_ACTIVE) {
/* copy results back to user */
- lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
+ lpfc_sli_pcimem_bcopy(phba->mbox, mbx, MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
lpfc_sli_pcimem_bcopy(phba->mbox_ext,
@@ -6956,7 +7001,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
}
} else {
/* First copy command data */
- lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
+ lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
MAILBOX_CMD_SIZE);
/* Copy the mailbox extension data */
if (pmbox->out_ext_byte_len && pmbox->context2) {
@@ -6971,7 +7016,7 @@ lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
readl(phba->HAregaddr); /* flush */
psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
- status = mb->mbxStatus;
+ status = mbx->mbxStatus;
}
spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -8068,10 +8113,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
break;
case CMD_FCP_IREAD64_CR:
@@ -8091,10 +8132,6 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
LPFC_WQE_LENLOC_WORD4);
bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
- if (iocbq->iocb_flag & LPFC_IO_DIF) {
- iocbq->iocb_flag &= ~LPFC_IO_DIF;
- bf_set(wqe_dif, &wqe->generic.wqe_com, 1);
- }
bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
break;
case CMD_FCP_ICMND64_CR:
@@ -8304,6 +8341,14 @@ lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
break;
}
+ if (iocbq->iocb_flag & LPFC_IO_DIF_PASS)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_PASSTHRU);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_STRIP)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_STRIP);
+ else if (iocbq->iocb_flag & LPFC_IO_DIF_INSERT)
+ bf_set(wqe_dif, &wqe->generic.wqe_com, LPFC_WQE_DIF_INSERT);
+ iocbq->iocb_flag &= ~(LPFC_IO_DIF_PASS | LPFC_IO_DIF_STRIP |
+ LPFC_IO_DIF_INSERT);
bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
wqe->generic.wqe_com.abort_tag = abort_tag;
@@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
* This is a continuation of a command (CX), so this
* sglq is on the active list
*/
- sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+ sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
if (!sglq)
return IOCB_ERROR;
}
@@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
pring->prt[3].type = FC_TYPE_CT;
pring->prt[3].lpfc_sli_rcv_unsol_event =
lpfc_ct_unsol_event;
- /* abort unsolicited sequence */
- pring->prt[4].profile = 0; /* Mask 4 */
- pring->prt[4].rctl = FC_RCTL_BA_ABTS;
- pring->prt[4].type = FC_TYPE_BLS;
- pring->prt[4].lpfc_sli_rcv_unsol_event =
- lpfc_sli4_ct_abort_unsol_event;
break;
}
totiocbsize += (pring->sli.sli3.numCiocb *
@@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
struct lpfc_eqe *eqe;
unsigned long iflag;
int ecount = 0;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
@@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
struct lpfc_hba *phba;
irqreturn_t hba_irq_rc;
bool hba_handled = false;
- uint32_t fcp_eqidx;
+ int fcp_eqidx;
/* Get the driver's phba structure from the dev_id */
phba = (struct lpfc_hba *)dev_id;
@@ -12097,6 +12136,54 @@ out_fail:
}
/**
+ * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @pci_barset: PCI BAR set flag.
+ *
+ * This function iomaps the specified PCI BAR set into host memory, if not
+ * already done, and returns the mapped address. The returned address may
+ * be NULL if the mapping failed.
+ */
+static void __iomem *
+lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
+{
+ struct pci_dev *pdev;
+ unsigned long bar_map, bar_map_len;
+
+ if (!phba->pcidev)
+ return NULL;
+ else
+ pdev = phba->pcidev;
+
+ switch (pci_barset) {
+ case WQ_PCI_BAR_0_AND_1:
+ if (!phba->pci_bar0_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
+ phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar0_memmap_p;
+ case WQ_PCI_BAR_2_AND_3:
+ if (!phba->pci_bar2_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
+ phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar2_memmap_p;
+ case WQ_PCI_BAR_4_AND_5:
+ if (!phba->pci_bar4_memmap_p) {
+ bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
+ bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
+ phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
+ }
+ return phba->pci_bar4_memmap_p;
+ default:
+ break;
+ }
+ return NULL;
+}
+
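lpfc_dual_chute_pci_bar_map() caches each ioremap() in the phba so repeated queue-create mailbox commands that land on the same BAR set reuse one mapping. A reduced sketch of that lazy-map idiom (bar_cache stands in for the pci_barX_memmap_p fields):

#include <linux/pci.h>
#include <linux/io.h>

static void __iomem *lazy_bar_map(struct pci_dev *pdev, int bar,
				  void __iomem **bar_cache)
{
	if (!*bar_cache)
		*bar_cache = ioremap(pci_resource_start(pdev, bar),
				     pci_resource_len(pdev, bar));
	return *bar_cache;	/* may be NULL if ioremap failed */
}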
+/**
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
* @phba: HBA structure that indicates port to create a queue on.
* @startq: The starting FCP EQ to modify
@@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
struct dma_address *page;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!wq || !cq)
@@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
cq->queue_id);
bf_set(lpfc_mbox_hdr_version, &shdr->request,
phba->sli4_hba.pc_sli4_params.wqv);
+
if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
wq->entry_count);
@@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
status = -ENXIO;
goto out;
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
+ &wq_create->u.response);
+ if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (wq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3265 WQ[%d] doorbell format not "
+ "supported: x%x\n", wq->queue_id,
+ wq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+ pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
+ &wq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3263 WQ[%d] failed to memmap pci "
+ "barset:x%x\n", wq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+ db_offset = wq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3252 WQ[%d] doorbell offset not "
+ "supported: x%x\n", wq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ wq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3264 WQ[%d]: barset:x%x, offset:x%x\n",
+ wq->queue_id, pci_barset, db_offset);
+ } else {
+ wq->db_format = LPFC_DB_LIST_FORMAT;
+ wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
+ }
wq->type = LPFC_WQ;
wq->assoc_qid = cq->queue_id;
wq->subtype = subtype;
@@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
+ void __iomem *bar_memmap_p;
+ uint32_t db_offset;
+ uint16_t pci_barset;
/* sanity check on queue memory */
if (!hrq || !drq || !cq)
@@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
+
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
@@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
status = -ENXIO;
goto out;
}
+
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
+ hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
+ &rq_create->u.response);
+ if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
+ (hrq->db_format != LPFC_DB_RING_FORMAT)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3262 RQ [%d] doorbell format not "
+ "supported: x%x\n", hrq->queue_id,
+ hrq->db_format);
+ status = -EINVAL;
+ goto out;
+ }
+
+ pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
+ &rq_create->u.response);
+ bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
+ if (!bar_memmap_p) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3269 RQ[%d] failed to memmap pci "
+ "barset:x%x\n", hrq->queue_id,
+ pci_barset);
+ status = -ENOMEM;
+ goto out;
+ }
+
+ db_offset = rq_create->u.response.doorbell_offset;
+ if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
+ (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3270 RQ[%d] doorbell offset not "
+ "supported: x%x\n", hrq->queue_id,
+ db_offset);
+ status = -EINVAL;
+ goto out;
+ }
+ hrq->db_regaddr = bar_memmap_p + db_offset;
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
+ hrq->queue_id, pci_barset, db_offset);
+ } else {
+ hrq->db_format = LPFC_DB_RING_FORMAT;
+ hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
+ }
hrq->type = LPFC_HRQ;
hrq->assoc_qid = cq->queue_id;
hrq->subtype = subtype;
@@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
putPaddrHigh(dmabuf->phys);
}
+ if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
+ bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
/* The IOCTL status is embedded in the mailbox subheader. */
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
@@ -14063,6 +14251,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
}
/**
+ * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
+ * @vport: pointer to a virtual port
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function tries to abort the assembled sequence at the upper level
+ * protocol, described by the information in the basic abort @dmabuf. It
+ * checks whether such a pending context exists at the upper level protocol
+ * and, if so, cleans it up.
+ *
+ * Return
+ * true -- a matching pending context of the sequence was found and
+ * cleaned up at the ulp;
+ * false -- no matching pending context of the sequence was present
+ * at the ulp.
+ **/
+static bool
+lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+ struct lpfc_hba *phba = vport->phba;
+ int handled;
+
+ /* Accepting abort at ulp with SLI4 only */
+ if (phba->sli_rev < LPFC_SLI_REV4)
+ return false;
+
+ /* Let all interested upper level protocols handle the abort */
+ handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
+ if (handled)
+ return true;
+
+ return false;
+}
+
+/**
* lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
* @phba: Pointer to HBA context object.
* @cmd_iocbq: pointer to the command iocbq structure.
@@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
struct lpfc_iocbq *cmd_iocbq,
struct lpfc_iocbq *rsp_iocbq)
{
- if (cmd_iocbq)
+ struct lpfc_nodelist *ndlp;
+
+ if (cmd_iocbq) {
+ ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
+ lpfc_nlp_put(ndlp);
+ lpfc_nlp_not_used(ndlp);
lpfc_sli_release_iocbq(phba, cmd_iocbq);
+ }
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
@@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
* event after aborting the sequence handling.
**/
static void
-lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
- struct fc_frame_header *fc_hdr)
+lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
+ struct fc_frame_header *fc_hdr, bool aborted)
{
+ struct lpfc_hba *phba = vport->phba;
struct lpfc_iocbq *ctiocb = NULL;
struct lpfc_nodelist *ndlp;
uint16_t oxid, rxid, xri, lxri;
@@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
rxid = be16_to_cpu(fc_hdr->fh_rx_id);
- ndlp = lpfc_findnode_did(phba->pport, sid);
+ ndlp = lpfc_findnode_did(vport, sid);
if (!ndlp) {
- lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
- "1268 Find ndlp returned NULL for oxid:x%x "
- "SID:x%x\n", oxid, sid);
- return;
+ ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "1268 Failed to allocate ndlp for "
+ "oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
+ lpfc_nlp_init(vport, ndlp, sid);
+ /* Put ndlp onto pport node list */
+ lpfc_enqueue_node(vport, ndlp);
+ } else if (!NLP_CHK_NODE_ACT(ndlp)) {
+ /* re-setup ndlp without removing from node list */
+ ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
+ if (!ndlp) {
+ lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
+ "3275 Failed to active ndlp found "
+ "for oxid:x%x SID:x%x\n", oxid, sid);
+ return;
+ }
}
/* Allocate buffer for rsp iocb */
@@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
icmd->ulpLe = 1;
icmd->ulpClass = CLASS3;
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
- ctiocb->context1 = ndlp;
+ ctiocb->context1 = lpfc_nlp_get(ndlp);
ctiocb->iocb_cmpl = NULL;
ctiocb->vport = phba->pport;
@@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
if (lxri != NO_XRI)
lpfc_set_rrq_active(phba, ndlp, lxri,
(xri == oxid) ? rxid : oxid, 0);
- /* If the oxid maps to the FCP XRI range or if it is out of range,
- * send a BLS_RJT. The driver no longer has that exchange.
- * Override the IOCB for a BA_RJT.
+ /* For BA_ABTS from exchange responder, if the logical xri with
+ * the oxid maps to the FCP XRI range, the port no longer has
+ * that exchange context, so send a BLS_RJT. Override the IOCB for
+ * a BA_RJT.
+ */
+ if ((fctl & FC_FC_EX_CTX) &&
+ (lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
+ icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
+ bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
+ bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
+ bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
+ }
+
+ /* If BA_ABTS failed to abort a partially assembled receive sequence,
+ * the driver no longer has that exchange, send a BLS_RJT. Override
+ * the IOCB for a BA_RJT.
*/
- if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
- phba->sli4_hba.max_cfg_param.xri_base) ||
- xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
- phba->sli4_hba.max_cfg_param.xri_base)) {
+ if (aborted == false) {
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
@@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
/* Xmit CT abts response on exchange <xid> */
- lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
- "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
+ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+ "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
if (rc == IOCB_ERROR) {
- lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
- "2925 Failed to issue CT ABTS RSP x%x on "
- "xri x%x, Data x%x\n",
- icmd->un.xseq64.w5.hcsw.Rctl, oxid,
- phba->link_state);
+ lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+ "2925 Failed to issue CT ABTS RSP x%x on "
+ "xri x%x, Data x%x\n",
+ icmd->un.xseq64.w5.hcsw.Rctl, oxid,
+ phba->link_state);
+ lpfc_nlp_put(ndlp);
+ ctiocb->context1 = NULL;
lpfc_sli_release_iocbq(phba, ctiocb);
}
}
@@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
struct lpfc_hba *phba = vport->phba;
struct fc_frame_header fc_hdr;
uint32_t fctl;
- bool abts_par;
+ bool aborted;
/* Make a copy of fc_hdr before the dmabuf being released */
memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
if (fctl & FC_FC_EX_CTX) {
- /*
- * ABTS sent by responder to exchange, just free the buffer
- */
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by responder to exchange, no cleanup needed */
+ aborted = true;
} else {
- /*
- * ABTS sent by initiator to exchange, need to do cleanup
- */
- /* Try to abort partially assembled seq */
- abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
-
- /* Send abort to ULP if partially seq abort failed */
- if (abts_par == false)
- lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
- else
- lpfc_in_buf_free(phba, &dmabuf->dbuf);
+ /* ABTS by initiator to exchange, need to do cleanup */
+ aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
+ if (aborted == false)
+ aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
}
- /* Send basic accept (BA_ACC) to the abort requester */
- lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
+ lpfc_in_buf_free(phba, &dmabuf->dbuf);
+
+ /* Respond with BA_ACC or BA_RJT accordingly */
+ lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
}
/**
@@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
{
uint16_t next_fcf_index;
+initial_priority:
/* Search start from next bit of currently registered FCF index */
+ next_fcf_index = phba->fcf.current_rec.fcf_indx;
+
next_priority:
- next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
- LPFC_SLI4_FCF_TBL_INDX_MAX;
+ /* Determine the next fcf index to check */
+ next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
LPFC_SLI4_FCF_TBL_INDX_MAX,
next_fcf_index);
@@ -15337,7 +15589,7 @@ next_priority:
* at that level and continue the selection process.
*/
if (lpfc_check_next_fcf_pri_level(phba))
- goto next_priority;
+ goto initial_priority;
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
"2844 No roundrobin failover FCF available\n");
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
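The round-robin fix above re-seeds the scan from the currently registered FCF index and wraps with a modulo before calling find_next_bit(), rather than blindly advancing by one on every priority retry. The wrap-and-scan idiom in isolation (TBL_MAX and bmask are stand-ins for LPFC_SLI4_FCF_TBL_INDX_MAX and fcf_rr_bmask; the priority-level handling is omitted):

#include <linux/bitops.h>

#define TBL_MAX 64

static int next_rr_index(const unsigned long *bmask, int current_idx)
{
	int next = (current_idx + 1) % TBL_MAX;	/* wrap before the scan */

	next = find_next_bit(bmask, TBL_MAX, next);
	if (next >= TBL_MAX)			/* ran off the end, restart */
		next = find_next_bit(bmask, TBL_MAX, 0);
	if (next >= TBL_MAX)
		return -1;			/* no eligible entry */
	return next;
}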
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index 2f48d000a3b4..9d2e0c6fe334 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -69,7 +69,9 @@ struct lpfc_iocbq {
#define LPFC_USE_FCPWQIDX 0x80 /* Submit to specified FCPWQ index */
#define DSS_SECURITY_OP 0x100 /* security IO */
#define LPFC_IO_ON_TXCMPLQ 0x200 /* The IO is still on the TXCMPLQ */
-#define LPFC_IO_DIF 0x400 /* T10 DIF IO */
+#define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */
+#define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */
+#define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */
#define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */
#define LPFC_FIP_ELS_ID_SHIFT 14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index f44a06a4c6e7..be02b59ea279 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -82,6 +82,9 @@
#define LPFC_FW_RESET_MAXIMUM_WAIT_10MS_CNT 12000
+#define INT_FW_UPGRADE 0
+#define RUN_FW_UPGRADE 1
+
enum lpfc_sli4_queue_type {
LPFC_EQ,
LPFC_GCQ,
@@ -136,6 +139,10 @@ struct lpfc_queue {
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
+ uint16_t db_format;
+#define LPFC_DB_RING_FORMAT 0x01
+#define LPFC_DB_LIST_FORMAT 0x02
+ void __iomem *db_regaddr;
/* For q stats */
uint32_t q_cnt_1;
uint32_t q_cnt_2;
@@ -505,6 +512,10 @@ struct lpfc_sli4_hba {
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+ uint8_t fw_func_mode; /* FW function protocol mode */
+ uint32_t ulp0_mode; /* ULP0 protocol mode */
+ uint32_t ulp1_mode; /* ULP1 protocol mode */
+
/* Setup information for various queue parameters */
int eq_esize;
int eq_ecount;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 0c2149189dda..f3b7795a296b 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/
-#define LPFC_DRIVER_VERSION "8.3.35"
+#define LPFC_DRIVER_VERSION "8.3.37"
#define LPFC_DRIVER_NAME "lpfc"
/* Used for SLI 2/3 */
diff --git a/drivers/scsi/mac_esp.c b/drivers/scsi/mac_esp.c
index 70eb1f79b1ba..994fc5caf036 100644
--- a/drivers/scsi/mac_esp.c
+++ b/drivers/scsi/mac_esp.c
@@ -481,7 +481,7 @@ static struct esp_driver_ops mac_esp_ops = {
.dma_error = mac_esp_dma_error,
};
-static int __devinit esp_mac_probe(struct platform_device *dev)
+static int esp_mac_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -591,7 +591,7 @@ fail:
return err;
}
-static int __devexit esp_mac_remove(struct platform_device *dev)
+static int esp_mac_remove(struct platform_device *dev)
{
struct mac_esp_priv *mep = platform_get_drvdata(dev);
struct esp *esp = mep->esp;
@@ -614,7 +614,7 @@ static int __devexit esp_mac_remove(struct platform_device *dev)
static struct platform_driver esp_mac_driver = {
.probe = esp_mac_probe,
- .remove = __devexit_p(esp_mac_remove),
+ .remove = esp_mac_remove,
.driver = {
.name = DRV_MODULE_NAME,
.owner = THIS_MODULE,
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 76ad72d32c3f..9504ec0ec682 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -4522,7 +4522,7 @@ static struct scsi_host_template megaraid_template = {
.eh_host_reset_handler = megaraid_reset,
};
-static int __devinit
+static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
@@ -4914,7 +4914,7 @@ __megaraid_shutdown(adapter_t *adapter)
mdelay(1000);
}
-static void __devexit
+static void
megaraid_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -5008,7 +5008,7 @@ static struct pci_driver megaraid_pci_driver = {
.name = "megaraid_legacy",
.id_table = megaraid_pci_tbl,
.probe = megaraid_probe_one,
- .remove = __devexit_p(megaraid_remove_one),
+ .remove = megaraid_remove_one,
.shutdown = megaraid_shutdown,
};
diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
index 54b1c5bb310f..e6a1e0b38a19 100644
--- a/drivers/scsi/megaraid/megaraid_mbox.c
+++ b/drivers/scsi/megaraid/megaraid_mbox.c
@@ -305,7 +305,7 @@ static struct pci_driver megaraid_pci_driver = {
.name = "megaraid",
.id_table = pci_id_table_g,
.probe = megaraid_probe_one,
- .remove = __devexit_p(megaraid_detach_one),
+ .remove = megaraid_detach_one,
.shutdown = megaraid_mbox_shutdown,
};
@@ -434,7 +434,7 @@ megaraid_exit(void)
* This routine should be called whenever a new adapter is detected by the
* PCI hotplug subsystem.
*/
-static int __devinit
+static int
megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
adapter_t *adapter;
@@ -735,7 +735,7 @@ megaraid_io_detach(adapter_t *adapter)
* - Allocate memory required for all the commands
* - Use internal library of FW routines, build up complete soft state
*/
-static int __devinit
+static int
megaraid_init_mbox(adapter_t *adapter)
{
struct pci_dev *pdev;
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 16b7a72a70c4..408d2548a748 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
/*
* MegaRAID SAS Driver meta data
*/
-#define MEGASAS_VERSION "06.504.01.00-rc1"
-#define MEGASAS_RELDATE "Oct. 1, 2012"
-#define MEGASAS_EXT_VERSION "Mon. Oct. 1 17:00:00 PDT 2012"
+#define MEGASAS_VERSION "06.506.00.00-rc1"
+#define MEGASAS_RELDATE "Feb. 9, 2013"
+#define MEGASAS_EXT_VERSION "Sat. Feb. 9 17:00:00 PDT 2013"
/*
* Device IDs
@@ -1276,7 +1276,7 @@ struct megasas_evt_detail {
} __attribute__ ((packed));
struct megasas_aen_event {
- struct work_struct hotplug_work;
+ struct delayed_work hotplug_work;
struct megasas_instance *instance;
};
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index d2c5366aff7f..9d53540207ec 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* FILE: megaraid_sas_base.c
- * Version : v06.504.01.00-rc1
+ * Version : v06.506.00.00-rc1
*
* Authors: LSI Corporation
* Sreenivas Bagalkote
@@ -2060,9 +2060,9 @@ megasas_service_aen(struct megasas_instance *instance, struct megasas_cmd *cmd)
} else {
ev->instance = instance;
instance->ev = ev;
- INIT_WORK(&ev->hotplug_work, megasas_aen_polling);
- schedule_delayed_work(
- (struct delayed_work *)&ev->hotplug_work, 0);
+ INIT_DELAYED_WORK(&ev->hotplug_work,
+ megasas_aen_polling);
+ schedule_delayed_work(&ev->hotplug_work, 0);
}
}
}
@@ -3972,8 +3972,8 @@ fail_set_dma_mask:
* @pdev: PCI device structure
* @id: PCI ids of supported hotplugged adapter
*/
-static int __devinit
-megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+static int megasas_probe_one(struct pci_dev *pdev,
+ const struct pci_device_id *id)
{
int rval, pos, i, j;
struct Scsi_Host *host;
@@ -4352,8 +4352,7 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
/* cancel the delayed work if this work still in queue */
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work_sync(
- (struct delayed_work *)&ev->hotplug_work);
+ cancel_delayed_work_sync(&ev->hotplug_work);
instance->ev = NULL;
}
@@ -4526,7 +4525,7 @@ fail_ready_state:
* megasas_detach_one - PCI hot"un"plug entry point
* @pdev: PCI device structure
*/
-static void __devexit megasas_detach_one(struct pci_dev *pdev)
+static void megasas_detach_one(struct pci_dev *pdev)
{
int i;
struct Scsi_Host *host;
@@ -4545,8 +4544,7 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
/* cancel the delayed work if this work still in queue*/
if (instance->ev != NULL) {
struct megasas_aen_event *ev = instance->ev;
- cancel_delayed_work_sync(
- (struct delayed_work *)&ev->hotplug_work);
+ cancel_delayed_work_sync(&ev->hotplug_work);
instance->ev = NULL;
}
@@ -5121,7 +5119,7 @@ static struct pci_driver megasas_pci_driver = {
.name = "megaraid_sas",
.id_table = megasas_pci_table,
.probe = megasas_probe_one,
- .remove = __devexit_p(megasas_detach_one),
+ .remove = megasas_detach_one,
.suspend = megasas_suspend,
.resume = megasas_resume,
.shutdown = megasas_shutdown,
@@ -5190,7 +5188,7 @@ static void
megasas_aen_polling(struct work_struct *work)
{
struct megasas_aen_event *ev =
- container_of(work, struct megasas_aen_event, hotplug_work);
+ container_of(work, struct megasas_aen_event, hotplug_work.work);
struct megasas_instance *instance = ev->instance;
union megasas_evt_class_locale class_locale;
struct Scsi_Host *host;
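The megaraid_sas changes above replace a struct work_struct that was being cast to struct delayed_work with a real struct delayed_work, so the casts disappear and container_of() must name the embedded .work member. The corrected pattern as a minimal sketch (my_event, my_poll_fn and my_arm are hypothetical names):

#include <linux/workqueue.h>

struct my_event {
	struct delayed_work hotplug_work;
	int data;
};

static void my_poll_fn(struct work_struct *work)
{
	/* delayed_work embeds a work_struct as .work, so name it here */
	struct my_event *ev =
		container_of(work, struct my_event, hotplug_work.work);

	/* ... use ev->data ... */
}

static void my_arm(struct my_event *ev)
{
	INIT_DELAYED_WORK(&ev->hotplug_work, my_poll_fn);
	schedule_delayed_work(&ev->hotplug_work, 0);
}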
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 74030aff69ad..a7d56687bfca 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -1206,7 +1206,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
}
io_request->Control |= (0x4 << 26);
- io_request->EEDPBlockSize = MEGASAS_EEDPBLOCKSIZE;
+ io_request->EEDPBlockSize = scp->device->sector_size;
} else {
/* Some drives don't support 16/12 byte CDB's, convert to 10 */
if (((cdb_len == 12) || (cdb_len == 16)) &&
@@ -1511,7 +1511,8 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance,
if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS &&
instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) {
io_request->Function = 0;
- io_request->DevHandle =
+ if (fusion->fast_path_io)
+ io_request->DevHandle =
local_map_ptr->raidMap.devHndlInfo[device_id].curDevHdl;
io_request->RaidContext.timeoutValue =
local_map_ptr->raidMap.fpPdIoTimeoutSec;
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index a7c64f051996..f68a3cd11d5d 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -61,7 +61,6 @@
#define MEGASAS_SCSI_ADDL_CDB_LEN 0x18
#define MEGASAS_RD_WR_PROTECT_CHECK_ALL 0x20
#define MEGASAS_RD_WR_PROTECT_CHECK_NONE 0x60
-#define MEGASAS_EEDPBLOCKSIZE 512
/*
* Raid context flags
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index ffd85c511c8e..bcb23d28b3e8 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
struct task_struct *p;
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
- if (ioc->shost_recovery)
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
goto rearm_timer;
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
@@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
ioc->name, __func__);
+ /* It may be possible that EEH recovery can resolve some PCI bus
+ * failure issues rather than the driver immediately treating the
+ * controller as non-operational and removing the dead ioc
+ * function, so priority is given to EEH recovery here. If it
+ * does not resolve the issue, the mpt2sas driver will consider
+ * the controller non-operational and remove the dead ioc
+ * function.
+ */
+ if (ioc->non_operational_loop++ < 5) {
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
+ flags);
+ goto rearm_timer;
+ }
+
/*
* Call _scsih_flush_pending_cmds callback so that we flush all
* pending commands back to OS. This call is required to avoid
@@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
return; /* don't rearm timer */
}
+ ioc->non_operational_loop = 0;
+
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
FORCE_BIG_HAMMER);
@@ -2007,6 +2023,14 @@ _base_display_intel_branding(struct MPT2SAS_ADAPTER *ioc)
printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
MPT2SAS_INTEL_RMS25KB040_BRANDING);
break;
+ case MPT2SAS_INTEL_RMS25LB040_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB040_BRANDING);
+ break;
+ case MPT2SAS_INTEL_RMS25LB080_SSDID:
+ printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+ MPT2SAS_INTEL_RMS25LB080_BRANDING);
+ break;
default:
break;
}
@@ -4386,6 +4410,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
if (missing_delay[0] != -1 && missing_delay[1] != -1)
_base_update_missing_delay(ioc, missing_delay[0],
missing_delay[1]);
+ ioc->non_operational_loop = 0;
return 0;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 543d8d637479..4caaac13682f 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -165,6 +165,10 @@
"Intel(R) Integrated RAID Module RMS25KB080"
#define MPT2SAS_INTEL_RMS25KB040_BRANDING \
"Intel(R) Integrated RAID Module RMS25KB040"
+#define MPT2SAS_INTEL_RMS25LB040_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB040"
+#define MPT2SAS_INTEL_RMS25LB080_BRANDING \
+ "Intel(R) Integrated RAID Module RMS25LB080"
#define MPT2SAS_INTEL_RMS2LL080_BRANDING \
"Intel Integrated RAID Module RMS2LL080"
#define MPT2SAS_INTEL_RMS2LL040_BRANDING \
@@ -180,6 +184,8 @@
#define MPT2SAS_INTEL_RMS25JB040_SSDID 0x3517
#define MPT2SAS_INTEL_RMS25KB080_SSDID 0x3518
#define MPT2SAS_INTEL_RMS25KB040_SSDID 0x3519
+#define MPT2SAS_INTEL_RMS25LB040_SSDID 0x351A
+#define MPT2SAS_INTEL_RMS25LB080_SSDID 0x351B
#define MPT2SAS_INTEL_RMS2LL080_SSDID 0x350E
#define MPT2SAS_INTEL_RMS2LL040_SSDID 0x350F
#define MPT2SAS_INTEL_RS25GB008_SSDID 0x3000
@@ -835,6 +841,7 @@ struct MPT2SAS_ADAPTER {
u16 cpu_msix_table_sz;
u32 ioc_reset_count;
MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+ u32 non_operational_loop;
/* internal commands, callback index */
u8 scsi_io_cb_idx;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index af4e6c451b1b..c6bdc9267229 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -7686,7 +7686,7 @@ _scsih_shutdown(struct pci_dev *pdev)
* Routine called when unloading the driver.
* Return nothing.
*/
-static void __devexit
+static void
_scsih_remove(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
@@ -8338,7 +8338,7 @@ static struct pci_driver scsih_driver = {
.name = MPT2SAS_DRIVER_NAME,
.id_table = scsih_pci_table,
.probe = _scsih_probe,
- .remove = __devexit_p(_scsih_remove),
+ .remove = _scsih_remove,
.shutdown = _scsih_shutdown,
.err_handler = &_scsih_err_handler,
#ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
new file mode 100644
index 000000000000..81471bf415d8
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -0,0 +1,67 @@
+#
+# Kernel configuration file for the MPT3SAS
+#
+# This code is based on drivers/scsi/mpt3sas/Kconfig
+# Copyright (C) 2012 LSI Corporation
+# (mailto:DL-MPTFusionLinux@lsi.com)
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+
+# NO WARRANTY
+# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+# solely responsible for determining the appropriateness of using and
+# distributing the Program and assumes all risks associated with its
+# exercise of rights under this Agreement, including but not limited to
+# the risks and costs of program errors, damage to or loss of data,
+# programs or equipment, and unavailability or interruption of operations.
+
+# DISCLAIMER OF LIABILITY
+# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+# USA.
+
+config SCSI_MPT3SAS
+ tristate "LSI MPT Fusion SAS 3.0 Device Driver"
+ depends on PCI && SCSI
+ select SCSI_SAS_ATTRS
+ select RAID_ATTRS
+ ---help---
+ This driver supports PCI-Express SAS 12Gb/s Host Adapters.
+
+config SCSI_MPT3SAS_MAX_SGE
+ int "LSI MPT Fusion Max number of SG Entries (16 - 256)"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ default "128"
+ range 16 256
+ ---help---
+ This option allows you to specify the maximum number of scatter-
+ gather entries per I/O. The driver default is 128, which matches
+ MAX_PHYS_SEGMENTS in most kernels, although in SuSE kernels it
+ can be 256. It may be decreased down to 16; decreasing this
+ parameter reduces the memory requirements per controller instance.
+
+config SCSI_MPT3SAS_LOGGING
+ bool "LSI MPT Fusion logging facility"
+ depends on PCI && SCSI && SCSI_MPT3SAS
+ ---help---
+ This turns on a logging facility.
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile
new file mode 100644
index 000000000000..4c1d2e7a1176
--- /dev/null
+++ b/drivers/scsi/mpt3sas/Makefile
@@ -0,0 +1,8 @@
+# mpt3sas makefile
+obj-m += mpt3sas.o
+mpt3sas-y += mpt3sas_base.o \
+ mpt3sas_config.o \
+ mpt3sas_scsih.o \
+ mpt3sas_transport.o \
+ mpt3sas_ctl.o \
+ mpt3sas_trigger_diag.o
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
new file mode 100644
index 000000000000..03317ffea62c
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -0,0 +1,1164 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2.h
+ * Title: MPI Message independent structures and definitions
+ * including System Interface Register Set and
+ * scatter/gather formats.
+ * Creation Date: June 21, 2006
+ *
+ * mpi2.h Version: 02.00.26
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 06-26-07 02.00.02 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-31-07 02.00.03 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved ReplyPostHostIndex register to offset 0x6C of the
+ * MPI2_SYSTEM_INTERFACE_REGS and modified the define for
+ * MPI2_REPLY_POST_HOST_INDEX_OFFSET.
+ * Added union of request descriptors.
+ * Added union of reply descriptors.
+ * 10-31-07 02.00.04 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_VERSION_02_00.
+ * Fixed the size of the FunctionDependent5 field in the
+ * MPI2_DEFAULT_REPLY structure.
+ * 12-18-07 02.00.05 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Removed the MPI-defined Fault Codes and extended the
+ * product specific codes up to 0xEFFF.
+ * Added a sixth key value for the WriteSequence register
+ * and changed the flush value to 0x0.
+ * Added message function codes for Diagnostic Buffer Post
+ * and Diagnostic Release.
+ * New IOCStatus define: MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED
+ * Moved MPI2_VERSION_UNION from mpi2_ioc.h.
+ * 02-29-08 02.00.06 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-03-08 02.00.07 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-21-08 02.00.08 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added #defines for marking a reply descriptor as unused.
+ * 06-27-08 02.00.09 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-02-08 02.00.10 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Moved LUN field defines from mpi2_init.h.
+ * 01-19-09 02.00.11 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-06-09 02.00.12 Bumped MPI2_HEADER_VERSION_UNIT.
+ * In all request and reply descriptors, replaced VF_ID
+ * field with MSIxIndex field.
+ * Removed DevHandle field from
+ * MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR and made those
+ * bytes reserved.
+ * Added RAID Accelerator functionality.
+ * 07-30-09 02.00.13 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 10-28-09 02.00.14 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MSI-x index mask and shift for Reply Post Host
+ * Index register.
+ * Added function code for Host Based Discovery Action.
+ * 02-10-10 02.00.15 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added define for MPI2_FUNCTION_PWR_MGMT_CONTROL.
+ * Added defines for product-specific range of message
+ * function codes, 0xF0 to 0xFF.
+ * 05-12-10 02.00.16 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added alternative defines for the SGE Direction bit.
+ * 08-11-10 02.00.17 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-10-10 02.00.18 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR define.
+ * 02-23-11 02.00.19 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added MPI2_FUNCTION_SEND_HOST_MESSAGE.
+ * 03-09-11 02.00.20 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 05-25-11 02.00.21 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 08-24-11 02.00.22 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 11-18-11 02.00.23 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.24 Bumped MPI2_HEADER_VERSION_UNIT.
+ * 03-29-12 02.00.25 Bumped MPI2_HEADER_VERSION_UNIT.
+ * Added Hard Reset delay timings.
+ * 07-10-12 02.00.26 Bumped MPI2_HEADER_VERSION_UNIT.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_H
+#define MPI2_H
+
+/*****************************************************************************
+*
+* MPI Version Definitions
+*
+*****************************************************************************/
+
+#define MPI2_VERSION_MAJOR_MASK (0xFF00)
+#define MPI2_VERSION_MAJOR_SHIFT (8)
+#define MPI2_VERSION_MINOR_MASK (0x00FF)
+#define MPI2_VERSION_MINOR_SHIFT (0)
+
+/*major version for all MPI v2.x */
+#define MPI2_VERSION_MAJOR (0x02)
+
+/*minor version for MPI v2.0 compatible products */
+#define MPI2_VERSION_MINOR (0x00)
+#define MPI2_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI2_VERSION_MINOR)
+#define MPI2_VERSION_02_00 (0x0200)
+
+/*minor version for MPI v2.5 compatible products */
+#define MPI25_VERSION_MINOR (0x05)
+#define MPI25_VERSION ((MPI2_VERSION_MAJOR << MPI2_VERSION_MAJOR_SHIFT) | \
+ MPI25_VERSION_MINOR)
+#define MPI2_VERSION_02_05 (0x0205)
+
+/*Unit and Dev versioning for this MPI header set */
+#define MPI2_HEADER_VERSION_UNIT (0x1A)
+#define MPI2_HEADER_VERSION_DEV (0x00)
+#define MPI2_HEADER_VERSION_UNIT_MASK (0xFF00)
+#define MPI2_HEADER_VERSION_UNIT_SHIFT (8)
+#define MPI2_HEADER_VERSION_DEV_MASK (0x00FF)
+#define MPI2_HEADER_VERSION_DEV_SHIFT (0)
+#define MPI2_HEADER_VERSION ((MPI2_HEADER_VERSION_UNIT << 8) | \
+ MPI2_HEADER_VERSION_DEV)
+
+/*****************************************************************************
+*
+* IOC State Definitions
+*
+*****************************************************************************/
+
+#define MPI2_IOC_STATE_RESET (0x00000000)
+#define MPI2_IOC_STATE_READY (0x10000000)
+#define MPI2_IOC_STATE_OPERATIONAL (0x20000000)
+#define MPI2_IOC_STATE_FAULT (0x40000000)
+
+#define MPI2_IOC_STATE_MASK (0xF0000000)
+#define MPI2_IOC_STATE_SHIFT (28)
+
+/*Fault state range for product specific codes */
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MIN (0x0000)
+#define MPI2_FAULT_PRODUCT_SPECIFIC_MAX (0xEFFF)
+
+/*****************************************************************************
+*
+* System Interface Register Definitions
+*
+*****************************************************************************/
+
+typedef volatile struct _MPI2_SYSTEM_INTERFACE_REGS {
+ U32 Doorbell; /*0x00 */
+ U32 WriteSequence; /*0x04 */
+ U32 HostDiagnostic; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DiagRWData; /*0x10 */
+ U32 DiagRWAddressLow; /*0x14 */
+ U32 DiagRWAddressHigh; /*0x18 */
+ U32 Reserved2[5]; /*0x1C */
+ U32 HostInterruptStatus; /*0x30 */
+ U32 HostInterruptMask; /*0x34 */
+ U32 DCRData; /*0x38 */
+ U32 DCRAddress; /*0x3C */
+ U32 Reserved3[2]; /*0x40 */
+ U32 ReplyFreeHostIndex; /*0x48 */
+ U32 Reserved4[8]; /*0x4C */
+ U32 ReplyPostHostIndex; /*0x6C */
+ U32 Reserved5; /*0x70 */
+ U32 HCBSize; /*0x74 */
+ U32 HCBAddressLow; /*0x78 */
+ U32 HCBAddressHigh; /*0x7C */
+ U32 Reserved6[16]; /*0x80 */
+ U32 RequestDescriptorPostLow; /*0xC0 */
+ U32 RequestDescriptorPostHigh; /*0xC4 */
+ U32 Reserved7[14]; /*0xC8 */
+} MPI2_SYSTEM_INTERFACE_REGS,
+ *PTR_MPI2_SYSTEM_INTERFACE_REGS,
+ Mpi2SystemInterfaceRegs_t,
+ *pMpi2SystemInterfaceRegs_t;
+
+/*
+ *Defines for working with the Doorbell register.
+ */
+#define MPI2_DOORBELL_OFFSET (0x00000000)
+
+/*IOC --> System values */
+#define MPI2_DOORBELL_USED (0x08000000)
+#define MPI2_DOORBELL_WHO_INIT_MASK (0x07000000)
+#define MPI2_DOORBELL_WHO_INIT_SHIFT (24)
+#define MPI2_DOORBELL_FAULT_CODE_MASK (0x0000FFFF)
+#define MPI2_DOORBELL_DATA_MASK (0x0000FFFF)
+
+/*System --> IOC values */
+#define MPI2_DOORBELL_FUNCTION_MASK (0xFF000000)
+#define MPI2_DOORBELL_FUNCTION_SHIFT (24)
+#define MPI2_DOORBELL_ADD_DWORDS_MASK (0x00FF0000)
+#define MPI2_DOORBELL_ADD_DWORDS_SHIFT (16)
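
A read of the Doorbell therefore serves double duty: the top nibble reports the IOC state defined earlier, and while the IOC is faulted the low 16 bits carry the fault code. A minimal polling sketch, assuming readl() and a hypothetical ioremap()ed regs pointer of type PTR_MPI2_SYSTEM_INTERFACE_REGS:

	u32 doorbell = readl(&regs->Doorbell);

	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		/* in the fault state the data bits hold the fault code */
		u16 fault = doorbell & MPI2_DOORBELL_FAULT_CODE_MASK;
	}
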
+
+/*
+ *Defines for the WriteSequence register
+ */
+#define MPI2_WRITE_SEQUENCE_OFFSET (0x00000004)
+#define MPI2_WRSEQ_KEY_VALUE_MASK (0x0000000F)
+#define MPI2_WRSEQ_FLUSH_KEY_VALUE (0x0)
+#define MPI2_WRSEQ_1ST_KEY_VALUE (0xF)
+#define MPI2_WRSEQ_2ND_KEY_VALUE (0x4)
+#define MPI2_WRSEQ_3RD_KEY_VALUE (0xB)
+#define MPI2_WRSEQ_4TH_KEY_VALUE (0x2)
+#define MPI2_WRSEQ_5TH_KEY_VALUE (0x7)
+#define MPI2_WRSEQ_6TH_KEY_VALUE (0xD)
+
+/*
+ *Defines for the HostDiagnostic register
+ */
+#define MPI2_HOST_DIAGNOSTIC_OFFSET (0x00000008)
+
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_MASK (0x00001800)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_DEFAULT (0x00000000)
+#define MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW (0x00000800)
+
+#define MPI2_DIAG_CLEAR_FLASH_BAD_SIG (0x00000400)
+#define MPI2_DIAG_FORCE_HCB_ON_RESET (0x00000200)
+#define MPI2_DIAG_HCB_MODE (0x00000100)
+#define MPI2_DIAG_DIAG_WRITE_ENABLE (0x00000080)
+#define MPI2_DIAG_FLASH_BAD_SIG (0x00000040)
+#define MPI2_DIAG_RESET_HISTORY (0x00000020)
+#define MPI2_DIAG_DIAG_RW_ENABLE (0x00000010)
+#define MPI2_DIAG_RESET_ADAPTER (0x00000004)
+#define MPI2_DIAG_HOLD_IOC_RESET (0x00000002)
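
Together with the WriteSequence keys above, these bits form the host diagnostic reset handshake: the host writes the flush key followed by the six keys in order, confirms it has gained write access, then sets the reset bit. A sketch under the same regs assumption as before (production code adds timeouts and honors the hard-reset delays defined later in this file):

	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_1ST_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_2ND_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_3RD_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_4TH_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_5TH_KEY_VALUE, &regs->WriteSequence);
	writel(MPI2_WRSEQ_6TH_KEY_VALUE, &regs->WriteSequence);

	if (readl(&regs->HostDiagnostic) & MPI2_DIAG_DIAG_WRITE_ENABLE)
		writel(readl(&regs->HostDiagnostic) | MPI2_DIAG_RESET_ADAPTER,
		       &regs->HostDiagnostic);
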
+
+/*
+ *Offsets for DiagRWData and address
+ */
+#define MPI2_DIAG_RW_DATA_OFFSET (0x00000010)
+#define MPI2_DIAG_RW_ADDRESS_LOW_OFFSET (0x00000014)
+#define MPI2_DIAG_RW_ADDRESS_HIGH_OFFSET (0x00000018)
+
+/*
+ *Defines for the HostInterruptStatus register
+ */
+#define MPI2_HOST_INTERRUPT_STATUS_OFFSET (0x00000030)
+#define MPI2_HIS_SYS2IOC_DB_STATUS (0x80000000)
+#define MPI2_HIS_IOP_DOORBELL_STATUS MPI2_HIS_SYS2IOC_DB_STATUS
+#define MPI2_HIS_RESET_IRQ_STATUS (0x40000000)
+#define MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT (0x00000008)
+#define MPI2_HIS_IOC2SYS_DB_STATUS (0x00000001)
+#define MPI2_HIS_DOORBELL_INTERRUPT MPI2_HIS_IOC2SYS_DB_STATUS
+
+/*
+ *Defines for the HostInterruptMask register
+ */
+#define MPI2_HOST_INTERRUPT_MASK_OFFSET (0x00000034)
+#define MPI2_HIM_RESET_IRQ_MASK (0x40000000)
+#define MPI2_HIM_REPLY_INT_MASK (0x00000008)
+#define MPI2_HIM_RIM MPI2_HIM_REPLY_INT_MASK
+#define MPI2_HIM_IOC2SYS_DB_MASK (0x00000001)
+#define MPI2_HIM_DIM MPI2_HIM_IOC2SYS_DB_MASK
+
+/*
+ *Offsets for DCRData and address
+ */
+#define MPI2_DCR_DATA_OFFSET (0x00000038)
+#define MPI2_DCR_ADDRESS_OFFSET (0x0000003C)
+
+/*
+ *Offset for the Reply Free Queue
+ */
+#define MPI2_REPLY_FREE_HOST_INDEX_OFFSET (0x00000048)
+
+/*
+ *Defines for the Reply Descriptor Post Queue
+ */
+#define MPI2_REPLY_POST_HOST_INDEX_OFFSET (0x0000006C)
+#define MPI2_REPLY_POST_HOST_INDEX_MASK (0x00FFFFFF)
+#define MPI2_RPHI_MSIX_INDEX_MASK (0xFF000000)
+#define MPI2_RPHI_MSIX_INDEX_SHIFT (24)
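
After consuming reply descriptors, the host echoes its updated queue index through this register, with the MSI-x index of the queue being acknowledged carried in the top byte. A one-statement sketch with hypothetical host_index and msix_index values:

	writel((host_index & MPI2_REPLY_POST_HOST_INDEX_MASK) |
	       ((u32)msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT),
	       &regs->ReplyPostHostIndex);
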
+
+/*
+ *Defines for the HCBSize and address
+ */
+#define MPI2_HCB_SIZE_OFFSET (0x00000074)
+#define MPI2_HCB_SIZE_SIZE_MASK (0xFFFFF000)
+#define MPI2_HCB_SIZE_HCB_ENABLE (0x00000001)
+
+#define MPI2_HCB_ADDRESS_LOW_OFFSET (0x00000078)
+#define MPI2_HCB_ADDRESS_HIGH_OFFSET (0x0000007C)
+
+/*
+ *Offsets for the Request Queue
+ */
+#define MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET (0x000000C0)
+#define MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET (0x000000C4)
+
+/*Hard Reset delay timings */
+#define MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC (50000)
+#define MPI2_HARD_RESET_PCIE_RESET_READ_WINDOW_MICRO_SEC (255000)
+#define MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC (256000)
+
+/*****************************************************************************
+*
+* Message Descriptors
+*
+*****************************************************************************/
+
+/*Request Descriptors */
+
+/*Default Request Descriptor */
+typedef struct _MPI2_DEFAULT_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DescriptorTypeDependent; /*0x06 */
+} MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REQUEST_DESCRIPTOR,
+ Mpi2DefaultRequestDescriptor_t,
+ *pMpi2DefaultRequestDescriptor_t;
+
+/*defines for the RequestFlags field */
+#define MPI2_REQ_DESCRIPT_FLAGS_TYPE_MASK (0x0E)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO (0x00)
+#define MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET (0x02)
+#define MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY (0x06)
+#define MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE (0x08)
+#define MPI2_REQ_DESCRIPT_FLAGS_RAID_ACCELERATOR (0x0A)
+#define MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO (0x0C)
+
+#define MPI2_REQ_DESCRIPT_FLAGS_IOC_FIFO_MARKER (0x01)
+
+/*High Priority Request Descriptor */
+typedef struct _MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR,
+ Mpi2HighPriorityRequestDescriptor_t,
+ *pMpi2HighPriorityRequestDescriptor_t;
+
+/*SCSI IO Request Descriptor */
+typedef struct _MPI2_SCSI_IO_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 DevHandle; /*0x06 */
+} MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi2SCSIIORequestDescriptor_t,
+ *pMpi2SCSIIORequestDescriptor_t;
+
+/*SCSI Target Request Descriptor */
+typedef struct _MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR,
+ Mpi2SCSITargetRequestDescriptor_t,
+ *pMpi2SCSITargetRequestDescriptor_t;
+
+/*RAID Accelerator Request Descriptor */
+typedef struct _MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR {
+ U8 RequestFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 LMID; /*0x04 */
+ U16 Reserved; /*0x06 */
+} MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR,
+ Mpi2RAIDAcceleratorRequestDescriptor_t,
+ *pMpi2RAIDAcceleratorRequestDescriptor_t;
+
+/*Fast Path SCSI IO Request Descriptor */
+typedef MPI2_SCSI_IO_REQUEST_DESCRIPTOR
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR,
+ Mpi25FastPathSCSIIORequestDescriptor_t,
+ *pMpi25FastPathSCSIIORequestDescriptor_t;
+
+/*union of Request Descriptors */
+typedef union _MPI2_REQUEST_DESCRIPTOR_UNION {
+ MPI2_DEFAULT_REQUEST_DESCRIPTOR Default;
+ MPI2_HIGH_PRIORITY_REQUEST_DESCRIPTOR HighPriority;
+ MPI2_SCSI_IO_REQUEST_DESCRIPTOR SCSIIO;
+ MPI2_SCSI_TARGET_REQUEST_DESCRIPTOR SCSITarget;
+ MPI2_RAID_ACCEL_REQUEST_DESCRIPTOR RAIDAccelerator;
+ MPI25_FP_SCSI_IO_REQUEST_DESCRIPTOR FastPathSCSIIO;
+ U64 Words;
+} MPI2_REQUEST_DESCRIPTOR_UNION,
+ *PTR_MPI2_REQUEST_DESCRIPTOR_UNION,
+ Mpi2RequestDescriptorUnion_t,
+ *pMpi2RequestDescriptorUnion_t;
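
Because every request descriptor is exactly 64 bits, the union lets the host fill in the type-specific view and then post Words through the two 32-bit post registers declared earlier. A posting sketch with hypothetical smid, dev_handle and msix_index values (the pair of writes must be serialized against other posters):

	Mpi2RequestDescriptorUnion_t descriptor;

	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
	descriptor.SCSIIO.MSIxIndex = msix_index;
	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
	descriptor.SCSIIO.LMID = 0;
	descriptor.SCSIIO.DevHandle = cpu_to_le16(dev_handle);

	writel(lower_32_bits(descriptor.Words), &regs->RequestDescriptorPostLow);
	writel(upper_32_bits(descriptor.Words), &regs->RequestDescriptorPostHigh);
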
+
+/*Reply Descriptors */
+
+/*Default Reply Descriptor */
+typedef struct _MPI2_DEFAULT_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 DescriptorTypeDependent1; /*0x02 */
+ U32 DescriptorTypeDependent2; /*0x04 */
+} MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ *PTR_MPI2_DEFAULT_REPLY_DESCRIPTOR,
+ Mpi2DefaultReplyDescriptor_t,
+ *pMpi2DefaultReplyDescriptor_t;
+
+/*defines for the ReplyFlags field */
+#define MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK (0x0F)
+#define MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS (0x00)
+#define MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY (0x01)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS (0x02)
+#define MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER (0x03)
+#define MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS (0x05)
+#define MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS (0x06)
+#define MPI2_RPY_DESCRIPT_FLAGS_UNUSED (0x0F)
+
+/*values for marking a reply descriptor as unused */
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD0_MARK (0xFFFFFFFF)
+#define MPI2_RPY_DESCRIPT_UNUSED_WORD1_MARK (0xFFFFFFFF)
+
+/*Address Reply Descriptor */
+typedef struct _MPI2_ADDRESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 ReplyFrameAddress; /*0x04 */
+} MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_ADDRESS_REPLY_DESCRIPTOR,
+ Mpi2AddressReplyDescriptor_t,
+ *pMpi2AddressReplyDescriptor_t;
+
+#define MPI2_ADDRESS_REPLY_SMID_INVALID (0x00)
+
+/*SCSI IO Success Reply Descriptor */
+typedef struct _MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U16 TaskTag; /*0x04 */
+ U16 Reserved1; /*0x06 */
+} MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2SCSIIOSuccessReplyDescriptor_t,
+ *pMpi2SCSIIOSuccessReplyDescriptor_t;
+
+/*TargetAssist Success Reply Descriptor */
+typedef struct _MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U8 SequenceNumber; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2TargetAssistSuccessReplyDescriptor_t,
+ *pMpi2TargetAssistSuccessReplyDescriptor_t;
+
+/*Target Command Buffer Reply Descriptor */
+typedef struct _MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U8 VP_ID; /*0x02 */
+ U8 Flags; /*0x03 */
+ U16 InitiatorDevHandle; /*0x04 */
+ U16 IoIndex; /*0x06 */
+} MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ *PTR_MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR,
+ Mpi2TargetCommandBufferReplyDescriptor_t,
+ *pMpi2TargetCommandBufferReplyDescriptor_t;
+
+/*defines for Flags field */
+#define MPI2_RPY_DESCRIPT_TCB_FLAGS_PHYNUM_MASK (0x3F)
+
+/*RAID Accelerator Success Reply Descriptor */
+typedef struct _MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR {
+ U8 ReplyFlags; /*0x00 */
+ U8 MSIxIndex; /*0x01 */
+ U16 SMID; /*0x02 */
+ U32 Reserved; /*0x04 */
+} MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi2RAIDAcceleratorSuccessReplyDescriptor_t,
+ *pMpi2RAIDAcceleratorSuccessReplyDescriptor_t;
+
+/*Fast Path SCSI IO Success Reply Descriptor */
+typedef MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ *PTR_MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR,
+ Mpi25FastPathSCSIIOSuccessReplyDescriptor_t,
+ *pMpi25FastPathSCSIIOSuccessReplyDescriptor_t;
+
+/*union of Reply Descriptors */
+typedef union _MPI2_REPLY_DESCRIPTORS_UNION {
+ MPI2_DEFAULT_REPLY_DESCRIPTOR Default;
+ MPI2_ADDRESS_REPLY_DESCRIPTOR AddressReply;
+ MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR SCSIIOSuccess;
+ MPI2_TARGETASSIST_SUCCESS_REPLY_DESCRIPTOR TargetAssistSuccess;
+ MPI2_TARGET_COMMAND_BUFFER_REPLY_DESCRIPTOR TargetCommandBuffer;
+ MPI2_RAID_ACCELERATOR_SUCCESS_REPLY_DESCRIPTOR RAIDAcceleratorSuccess;
+ MPI25_FP_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR FastPathSCSIIOSuccess;
+ U64 Words;
+} MPI2_REPLY_DESCRIPTORS_UNION,
+ *PTR_MPI2_REPLY_DESCRIPTORS_UNION,
+ Mpi2ReplyDescriptorsUnion_t,
+ *pMpi2ReplyDescriptorsUnion_t;
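
On the reply side the host owns the descriptor post queue and scans it until it reaches a slot still carrying the unused mark. A consumption sketch, assuming a hypothetical host-allocated reply_post array and a running host_index:

	Mpi2ReplyDescriptorsUnion_t *rpf = &reply_post[host_index];
	u8 type = rpf->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;

	if (type != MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
		if (type == MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
			/* the full reply frame sits at this address */
			u32 addr = le32_to_cpu(rpf->AddressReply.ReplyFrameAddress);
		}
		/* return the slot by restoring the unused mark */
		rpf->Words = cpu_to_le64(ULLONG_MAX);
	}
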
+
+/*****************************************************************************
+*
+* Message Functions
+*
+*****************************************************************************/
+
+#define MPI2_FUNCTION_SCSI_IO_REQUEST (0x00)
+#define MPI2_FUNCTION_SCSI_TASK_MGMT (0x01)
+#define MPI2_FUNCTION_IOC_INIT (0x02)
+#define MPI2_FUNCTION_IOC_FACTS (0x03)
+#define MPI2_FUNCTION_CONFIG (0x04)
+#define MPI2_FUNCTION_PORT_FACTS (0x05)
+#define MPI2_FUNCTION_PORT_ENABLE (0x06)
+#define MPI2_FUNCTION_EVENT_NOTIFICATION (0x07)
+#define MPI2_FUNCTION_EVENT_ACK (0x08)
+#define MPI2_FUNCTION_FW_DOWNLOAD (0x09)
+#define MPI2_FUNCTION_TARGET_ASSIST (0x0B)
+#define MPI2_FUNCTION_TARGET_STATUS_SEND (0x0C)
+#define MPI2_FUNCTION_TARGET_MODE_ABORT (0x0D)
+#define MPI2_FUNCTION_FW_UPLOAD (0x12)
+#define MPI2_FUNCTION_RAID_ACTION (0x15)
+#define MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH (0x16)
+#define MPI2_FUNCTION_TOOLBOX (0x17)
+#define MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR (0x18)
+#define MPI2_FUNCTION_SMP_PASSTHROUGH (0x1A)
+#define MPI2_FUNCTION_SAS_IO_UNIT_CONTROL (0x1B)
+#define MPI2_FUNCTION_SATA_PASSTHROUGH (0x1C)
+#define MPI2_FUNCTION_DIAG_BUFFER_POST (0x1D)
+#define MPI2_FUNCTION_DIAG_RELEASE (0x1E)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_BASE_POST (0x24)
+#define MPI2_FUNCTION_TARGET_CMD_BUF_LIST_POST (0x25)
+#define MPI2_FUNCTION_RAID_ACCELERATOR (0x2C)
+#define MPI2_FUNCTION_HOST_BASED_DISCOVERY_ACTION (0x2F)
+#define MPI2_FUNCTION_PWR_MGMT_CONTROL (0x30)
+#define MPI2_FUNCTION_SEND_HOST_MESSAGE (0x31)
+#define MPI2_FUNCTION_MIN_PRODUCT_SPECIFIC (0xF0)
+#define MPI2_FUNCTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*Doorbell functions */
+#define MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET (0x40)
+#define MPI2_FUNCTION_HANDSHAKE (0x42)
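
Doorbell functions bypass the message queues entirely: the host folds the function code and the request length (in dwords) into a single Doorbell write using the shifts defined earlier, then clocks the message through the register a dword at a time. The opening write, sketched with a hypothetical request_dwords count:

	writel((MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	       (request_dwords << MPI2_DOORBELL_ADD_DWORDS_SHIFT),
	       &regs->Doorbell);
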
+
+/*****************************************************************************
+*
+* IOC Status Values
+*
+*****************************************************************************/
+
+/*mask for IOCStatus status value */
+#define MPI2_IOCSTATUS_MASK (0x7FFF)
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SUCCESS (0x0000)
+#define MPI2_IOCSTATUS_INVALID_FUNCTION (0x0001)
+#define MPI2_IOCSTATUS_BUSY (0x0002)
+#define MPI2_IOCSTATUS_INVALID_SGL (0x0003)
+#define MPI2_IOCSTATUS_INTERNAL_ERROR (0x0004)
+#define MPI2_IOCSTATUS_INVALID_VPID (0x0005)
+#define MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES (0x0006)
+#define MPI2_IOCSTATUS_INVALID_FIELD (0x0007)
+#define MPI2_IOCSTATUS_INVALID_STATE (0x0008)
+#define MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED (0x0009)
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_CONFIG_INVALID_ACTION (0x0020)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_TYPE (0x0021)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_PAGE (0x0022)
+#define MPI2_IOCSTATUS_CONFIG_INVALID_DATA (0x0023)
+#define MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS (0x0024)
+#define MPI2_IOCSTATUS_CONFIG_CANT_COMMIT (0x0025)
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR (0x0040)
+#define MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE (0x0042)
+#define MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE (0x0043)
+#define MPI2_IOCSTATUS_SCSI_DATA_OVERRUN (0x0044)
+#define MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN (0x0045)
+#define MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR (0x0046)
+#define MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR (0x0047)
+#define MPI2_IOCSTATUS_SCSI_TASK_TERMINATED (0x0048)
+#define MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH (0x0049)
+#define MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED (0x004A)
+#define MPI2_IOCSTATUS_SCSI_IOC_TERMINATED (0x004B)
+#define MPI2_IOCSTATUS_SCSI_EXT_TERMINATED (0x004C)
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_EEDP_GUARD_ERROR (0x004D)
+#define MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR (0x004E)
+#define MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR (0x004F)
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX (0x0062)
+#define MPI2_IOCSTATUS_TARGET_ABORTED (0x0063)
+#define MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE (0x0064)
+#define MPI2_IOCSTATUS_TARGET_NO_CONNECTION (0x0065)
+#define MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR (0x006D)
+#define MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA (0x006E)
+#define MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT (0x006F)
+#define MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT (0x0070)
+#define MPI2_IOCSTATUS_TARGET_NAK_RECEIVED (0x0071)
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED (0x0090)
+#define MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN (0x0091)
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED (0x00A0)
+
+/****************************************************************************
+* RAID Accelerator values
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_RAID_ACCEL_ERROR (0x00B0)
+
+/****************************************************************************
+* IOCStatus flag to indicate that log info is available
+****************************************************************************/
+
+#define MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE (0x8000)
+
+/****************************************************************************
+* IOCLogInfo Types
+****************************************************************************/
+
+#define MPI2_IOCLOGINFO_TYPE_MASK (0xF0000000)
+#define MPI2_IOCLOGINFO_TYPE_SHIFT (28)
+#define MPI2_IOCLOGINFO_TYPE_NONE (0x0)
+#define MPI2_IOCLOGINFO_TYPE_SCSI (0x1)
+#define MPI2_IOCLOGINFO_TYPE_FC (0x2)
+#define MPI2_IOCLOGINFO_TYPE_SAS (0x3)
+#define MPI2_IOCLOGINFO_TYPE_ISCSI (0x4)
+#define MPI2_IOCLOGINFO_LOG_DATA_MASK (0x0FFFFFFF)
+
+/*****************************************************************************
+*
+* Standard Message Structures
+*
+*****************************************************************************/
+
+/****************************************************************************
+*Request Message Header for all request messages
+****************************************************************************/
+
+typedef struct _MPI2_REQUEST_HEADER {
+ U16 FunctionDependent1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+} MPI2_REQUEST_HEADER, *PTR_MPI2_REQUEST_HEADER,
+ MPI2RequestHeader_t, *pMPI2RequestHeader_t;
+
+/****************************************************************************
+* Default Reply
+****************************************************************************/
+
+typedef struct _MPI2_DEFAULT_REPLY {
+ U16 FunctionDependent1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 FunctionDependent2; /*0x04 */
+ U8 FunctionDependent3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 FunctionDependent5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DEFAULT_REPLY, *PTR_MPI2_DEFAULT_REPLY,
+ MPI2DefaultReply_t, *pMPI2DefaultReply_t;
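
Since IOCStatus and IOCLogInfo sit at fixed offsets in every reply, the default reply layout suffices to decode them: mask off the 15-bit status value, then consult IOCLogInfo only when the flag bit defined above is set. A decoding sketch for a hypothetical reply pointer of type pMPI2DefaultReply_t:

	u16 ioc_status = le16_to_cpu(reply->IOCStatus);
	u16 status = ioc_status & MPI2_IOCSTATUS_MASK;

	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
		u32 loginfo = le32_to_cpu(reply->IOCLogInfo);
		u32 type = (loginfo & MPI2_IOCLOGINFO_TYPE_MASK) >>
			   MPI2_IOCLOGINFO_TYPE_SHIFT;	/* e.g. _TYPE_SAS */
		u32 data = loginfo & MPI2_IOCLOGINFO_LOG_DATA_MASK;
	}
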
+
+/*common version structure/union used in messages and configuration pages */
+
+typedef struct _MPI2_VERSION_STRUCT {
+ U8 Dev; /*0x00 */
+ U8 Unit; /*0x01 */
+ U8 Minor; /*0x02 */
+ U8 Major; /*0x03 */
+} MPI2_VERSION_STRUCT;
+
+typedef union _MPI2_VERSION_UNION {
+ MPI2_VERSION_STRUCT Struct;
+ U32 Word;
+} MPI2_VERSION_UNION;
+
+/*LUN field defines, common to many structures */
+#define MPI2_LUN_FIRST_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_SECOND_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_THIRD_LEVEL_ADDRESSING (0x0000FFFF)
+#define MPI2_LUN_FOURTH_LEVEL_ADDRESSING (0xFFFF0000)
+#define MPI2_LUN_LEVEL_1_WORD (0xFF00)
+#define MPI2_LUN_LEVEL_1_DWORD (0x0000FF00)
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* MPI Simple Element structures
+****************************************************************************/
+
+typedef struct _MPI2_SGE_SIMPLE32 {
+ U32 FlagsLength;
+ U32 Address;
+} MPI2_SGE_SIMPLE32, *PTR_MPI2_SGE_SIMPLE32,
+ Mpi2SGESimple32_t, *pMpi2SGESimple32_t;
+
+typedef struct _MPI2_SGE_SIMPLE64 {
+ U32 FlagsLength;
+ U64 Address;
+} MPI2_SGE_SIMPLE64, *PTR_MPI2_SGE_SIMPLE64,
+ Mpi2SGESimple64_t, *pMpi2SGESimple64_t;
+
+typedef struct _MPI2_SGE_SIMPLE_UNION {
+ U32 FlagsLength;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_SIMPLE_UNION,
+ *PTR_MPI2_SGE_SIMPLE_UNION,
+ Mpi2SGESimpleUnion_t,
+ *pMpi2SGESimpleUnion_t;
+
+/****************************************************************************
+* MPI Chain Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_CHAIN32 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U32 Address;
+} MPI2_SGE_CHAIN32, *PTR_MPI2_SGE_CHAIN32,
+ Mpi2SGEChain32_t, *pMpi2SGEChain32_t;
+
+typedef struct _MPI2_SGE_CHAIN64 {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ U64 Address;
+} MPI2_SGE_CHAIN64, *PTR_MPI2_SGE_CHAIN64,
+ Mpi2SGEChain64_t, *pMpi2SGEChain64_t;
+
+typedef struct _MPI2_SGE_CHAIN_UNION {
+ U16 Length;
+ U8 NextChainOffset;
+ U8 Flags;
+ union {
+ U32 Address32;
+ U64 Address64;
+ } u;
+} MPI2_SGE_CHAIN_UNION,
+ *PTR_MPI2_SGE_CHAIN_UNION,
+ Mpi2SGEChainUnion_t,
+ *pMpi2SGEChainUnion_t;
+
+/****************************************************************************
+* MPI Transaction Context Element structures - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANSACTION32 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[1];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION32,
+ *PTR_MPI2_SGE_TRANSACTION32,
+ Mpi2SGETransaction32_t,
+ *pMpi2SGETransaction32_t;
+
+typedef struct _MPI2_SGE_TRANSACTION64 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[2];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION64,
+ *PTR_MPI2_SGE_TRANSACTION64,
+ Mpi2SGETransaction64_t,
+ *pMpi2SGETransaction64_t;
+
+typedef struct _MPI2_SGE_TRANSACTION96 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[3];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION96, *PTR_MPI2_SGE_TRANSACTION96,
+ Mpi2SGETransaction96_t, *pMpi2SGETransaction96_t;
+
+typedef struct _MPI2_SGE_TRANSACTION128 {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ U32 TransactionContext[4];
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION128, *PTR_MPI2_SGE_TRANSACTION128,
+ Mpi2SGETransaction_t128, *pMpi2SGETransaction_t128;
+
+typedef struct _MPI2_SGE_TRANSACTION_UNION {
+ U8 Reserved;
+ U8 ContextSize;
+ U8 DetailsLength;
+ U8 Flags;
+ union {
+ U32 TransactionContext32[1];
+ U32 TransactionContext64[2];
+ U32 TransactionContext96[3];
+ U32 TransactionContext128[4];
+ } u;
+ U32 TransactionDetails[1];
+} MPI2_SGE_TRANSACTION_UNION,
+ *PTR_MPI2_SGE_TRANSACTION_UNION,
+ Mpi2SGETransactionUnion_t,
+ *pMpi2SGETransactionUnion_t;
+
+/****************************************************************************
+* MPI SGE union for IO SGL's - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_IO_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_MPI_SGE_IO_UNION, *PTR_MPI2_MPI_SGE_IO_UNION,
+ Mpi2MpiSGEIOUnion_t, *pMpi2MpiSGEIOUnion_t;
+
+/****************************************************************************
+* MPI SGE union for SGL's with Simple and Transaction elements - for MPI v2.0 products only
+****************************************************************************/
+
+typedef struct _MPI2_SGE_TRANS_SIMPLE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_SGE_TRANS_SIMPLE_UNION,
+ *PTR_MPI2_SGE_TRANS_SIMPLE_UNION,
+ Mpi2SGETransSimpleUnion_t,
+ *pMpi2SGETransSimpleUnion_t;
+
+/****************************************************************************
+* All MPI SGE types union
+****************************************************************************/
+
+typedef struct _MPI2_MPI_SGE_UNION {
+ union {
+ MPI2_SGE_SIMPLE_UNION Simple;
+ MPI2_SGE_CHAIN_UNION Chain;
+ MPI2_SGE_TRANSACTION_UNION Transaction;
+ } u;
+} MPI2_MPI_SGE_UNION, *PTR_MPI2_MPI_SGE_UNION,
+ Mpi2MpiSgeUnion_t, *pMpi2MpiSgeUnion_t;
+
+/****************************************************************************
+* MPI SGE field definition and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_SGE_FLAGS_LAST_ELEMENT (0x80)
+#define MPI2_SGE_FLAGS_END_OF_BUFFER (0x40)
+#define MPI2_SGE_FLAGS_ELEMENT_TYPE_MASK (0x30)
+#define MPI2_SGE_FLAGS_LOCAL_ADDRESS (0x08)
+#define MPI2_SGE_FLAGS_DIRECTION (0x04)
+#define MPI2_SGE_FLAGS_ADDRESS_SIZE (0x02)
+#define MPI2_SGE_FLAGS_END_OF_LIST (0x01)
+
+#define MPI2_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_SGE_LENGTH_MASK (0x00FFFFFF)
+#define MPI2_SGE_CHAIN_LENGTH_MASK (0x0000FFFF)
+
+/*Element Type */
+
+#define MPI2_SGE_FLAGS_TRANSACTION_ELEMENT (0x00)
+#define MPI2_SGE_FLAGS_SIMPLE_ELEMENT (0x10)
+#define MPI2_SGE_FLAGS_CHAIN_ELEMENT (0x30)
+#define MPI2_SGE_FLAGS_ELEMENT_MASK (0x30)
+
+/*Address location */
+
+#define MPI2_SGE_FLAGS_SYSTEM_ADDRESS (0x00)
+
+/*Direction */
+
+#define MPI2_SGE_FLAGS_IOC_TO_HOST (0x00)
+#define MPI2_SGE_FLAGS_HOST_TO_IOC (0x04)
+
+#define MPI2_SGE_FLAGS_DEST (MPI2_SGE_FLAGS_IOC_TO_HOST)
+#define MPI2_SGE_FLAGS_SOURCE (MPI2_SGE_FLAGS_HOST_TO_IOC)
+
+/*Address Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_ADDRESSING (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_ADDRESSING (0x02)
+
+/*Context Size */
+
+#define MPI2_SGE_FLAGS_32_BIT_CONTEXT (0x00)
+#define MPI2_SGE_FLAGS_64_BIT_CONTEXT (0x02)
+#define MPI2_SGE_FLAGS_96_BIT_CONTEXT (0x04)
+#define MPI2_SGE_FLAGS_128_BIT_CONTEXT (0x06)
+
+#define MPI2_SGE_CHAIN_OFFSET_MASK (0x00FF0000)
+#define MPI2_SGE_CHAIN_OFFSET_SHIFT (16)
+
+/****************************************************************************
+* MPI SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_GET_FLAGS(f) (((f) & ~MPI2_SGE_LENGTH_MASK) >> \
+ MPI2_SGE_FLAGS_SHIFT)
+#define MPI2_SGE_LENGTH(f) ((f) & MPI2_SGE_LENGTH_MASK)
+#define MPI2_SGE_CHAIN_LENGTH(f) ((f) & MPI2_SGE_CHAIN_LENGTH_MASK)
+
+#define MPI2_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_SGE_SET_FLAGS(f) | \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_pSGE_GET_FLAGS(psg) MPI2_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_pSGE_GET_LENGTH(psg) MPI2_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_SGE_SET_FLAGS(f))
+#define MPI2_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_SGE_LENGTH(l))
+
+#define MPI2_GET_CHAIN_OFFSET(x) ((x & MPI2_SGE_CHAIN_OFFSET_MASK) >> \
+ MPI2_SGE_CHAIN_OFFSET_SHIFT)
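
A typical use of these macros is composing the FlagsLength word of a terminal 64-bit simple element in a single expression. A sketch describing a host-to-IOC buffer, with hypothetical dma_addr and len values:

	u32 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
			MPI2_SGE_FLAGS_LAST_ELEMENT |
			MPI2_SGE_FLAGS_END_OF_BUFFER |
			MPI2_SGE_FLAGS_END_OF_LIST |
			MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
			MPI2_SGE_FLAGS_HOST_TO_IOC;
	Mpi2SGESimple64_t sge;

	sge.FlagsLength = cpu_to_le32(MPI2_SGE_SET_FLAGS_LENGTH(sgl_flags, len));
	sge.Address = cpu_to_le64(dma_addr);

Note that MPI2_SGE_SET_FLAGS_LENGTH shifts the flags into the top byte, so the values OR-ed above are the unshifted 8-bit flag defines.
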
+
+/*****************************************************************************
+*
+* Fusion-MPT IEEE Scatter Gather Elements
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IEEE Simple Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_SIMPLE32 is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_SIMPLE32 {
+ U32 Address;
+ U32 FlagsLength;
+} MPI2_IEEE_SGE_SIMPLE32, *PTR_MPI2_IEEE_SGE_SIMPLE32,
+ Mpi2IeeeSgeSimple32_t, *pMpi2IeeeSgeSimple32_t;
+
+typedef struct _MPI2_IEEE_SGE_SIMPLE64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 Reserved2;
+ U8 Flags;
+} MPI2_IEEE_SGE_SIMPLE64, *PTR_MPI2_IEEE_SGE_SIMPLE64,
+ Mpi2IeeeSgeSimple64_t, *pMpi2IeeeSgeSimple64_t;
+
+typedef union _MPI2_IEEE_SGE_SIMPLE_UNION {
+ MPI2_IEEE_SGE_SIMPLE32 Simple32;
+ MPI2_IEEE_SGE_SIMPLE64 Simple64;
+} MPI2_IEEE_SGE_SIMPLE_UNION,
+ *PTR_MPI2_IEEE_SGE_SIMPLE_UNION,
+ Mpi2IeeeSgeSimpleUnion_t,
+ *pMpi2IeeeSgeSimpleUnion_t;
+
+/****************************************************************************
+* IEEE Chain Element structures
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_CHAIN32 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE32 MPI2_IEEE_SGE_CHAIN32;
+
+/*MPI2_IEEE_SGE_CHAIN64 is for MPI v2.0 products only */
+typedef MPI2_IEEE_SGE_SIMPLE64 MPI2_IEEE_SGE_CHAIN64;
+
+typedef union _MPI2_IEEE_SGE_CHAIN_UNION {
+ MPI2_IEEE_SGE_CHAIN32 Chain32;
+ MPI2_IEEE_SGE_CHAIN64 Chain64;
+} MPI2_IEEE_SGE_CHAIN_UNION,
+ *PTR_MPI2_IEEE_SGE_CHAIN_UNION,
+ Mpi2IeeeSgeChainUnion_t,
+ *pMpi2IeeeSgeChainUnion_t;
+
+/*MPI25_IEEE_SGE_CHAIN64 is for MPI v2.5 products only */
+typedef struct _MPI25_IEEE_SGE_CHAIN64 {
+ U64 Address;
+ U32 Length;
+ U16 Reserved1;
+ U8 NextChainOffset;
+ U8 Flags;
+} MPI25_IEEE_SGE_CHAIN64,
+ *PTR_MPI25_IEEE_SGE_CHAIN64,
+ Mpi25IeeeSgeChain64_t,
+ *pMpi25IeeeSgeChain64_t;
+
+/****************************************************************************
+* All IEEE SGE types union
+****************************************************************************/
+
+/*MPI2_IEEE_SGE_UNION is for MPI v2.0 products only */
+typedef struct _MPI2_IEEE_SGE_UNION {
+ union {
+ MPI2_IEEE_SGE_SIMPLE_UNION Simple;
+ MPI2_IEEE_SGE_CHAIN_UNION Chain;
+ } u;
+} MPI2_IEEE_SGE_UNION, *PTR_MPI2_IEEE_SGE_UNION,
+ Mpi2IeeeSgeUnion_t, *pMpi2IeeeSgeUnion_t;
+
+/****************************************************************************
+* IEEE SGE union for IO SGL's
+****************************************************************************/
+
+typedef union _MPI25_SGE_IO_UNION {
+ MPI2_IEEE_SGE_SIMPLE64 IeeeSimple;
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain;
+} MPI25_SGE_IO_UNION, *PTR_MPI25_SGE_IO_UNION,
+ Mpi25SGEIOUnion_t, *pMpi25SGEIOUnion_t;
+
+/****************************************************************************
+* IEEE SGE field definitions and masks
+****************************************************************************/
+
+/*Flags field bit definitions */
+
+#define MPI2_IEEE_SGE_FLAGS_ELEMENT_TYPE_MASK (0x80)
+#define MPI25_IEEE_SGE_FLAGS_END_OF_LIST (0x40)
+
+#define MPI2_IEEE32_SGE_FLAGS_SHIFT (24)
+
+#define MPI2_IEEE32_SGE_LENGTH_MASK (0x00FFFFFF)
+
+/*Element Type */
+
+#define MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT (0x00)
+#define MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT (0x80)
+
+/*Data Location Address Space */
+
+#define MPI2_IEEE_SGE_FLAGS_ADDR_MASK (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_IEEE_SGE_FLAGS_IOCDDR_ADDR (0x01)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLB_ADDR (0x02)
+#define MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR (0x03)
+#define MPI2_IEEE_SGE_FLAGS_SYSTEMPLBCPI_ADDR \
+ (MPI2_IEEE_SGE_FLAGS_SYSTEMPLBPCI_ADDR)
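
Unlike the MPI format above, the IEEE 64-bit element keeps Flags and Length in separate fields, so no packing macro is involved. A fill sketch for a terminal element in system memory on an MPI v2.5 product, again with hypothetical dma_addr and len:

	Mpi2IeeeSgeSimple64_t sge;

	sge.Address = cpu_to_le64(dma_addr);
	sge.Length = cpu_to_le32(len);
	sge.Reserved1 = 0;
	sge.Reserved2 = 0;
	sge.Flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
		    MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
		    MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
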
+
+/****************************************************************************
+* IEEE SGE operation Macros
+****************************************************************************/
+
+/*SIMPLE FlagsLength manipulations... */
+#define MPI2_IEEE32_SGE_SET_FLAGS(f) ((U32)(f) << MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_GET_FLAGS(f) (((f) & ~MPI2_IEEE32_SGE_LENGTH_MASK) \
+ >> MPI2_IEEE32_SGE_FLAGS_SHIFT)
+#define MPI2_IEEE32_SGE_LENGTH(f) ((f) & MPI2_IEEE32_SGE_LENGTH_MASK)
+
+#define MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l) (MPI2_IEEE32_SGE_SET_FLAGS(f) |\
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+#define MPI2_IEEE32_pSGE_GET_FLAGS(psg) \
+ MPI2_IEEE32_SGE_GET_FLAGS((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_GET_LENGTH(psg) \
+ MPI2_IEEE32_SGE_LENGTH((psg)->FlagsLength)
+#define MPI2_IEEE32_pSGE_SET_FLAGS_LENGTH(psg, f, l) ((psg)->FlagsLength = \
+ MPI2_IEEE32_SGE_SET_FLAGS_LENGTH(f, l))
+
+/*CAUTION - The following are READ-MODIFY-WRITE! */
+#define MPI2_IEEE32_pSGE_SET_FLAGS(psg, f) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_SET_FLAGS(f))
+#define MPI2_IEEE32_pSGE_SET_LENGTH(psg, l) ((psg)->FlagsLength |= \
+ MPI2_IEEE32_SGE_LENGTH(l))
+
+/*****************************************************************************
+*
+* Fusion-MPT MPI/IEEE Scatter Gather Unions
+*
+*****************************************************************************/
+
+typedef union _MPI2_SIMPLE_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+} MPI2_SIMPLE_SGE_UNION, *PTR_MPI2_SIMPLE_SGE_UNION,
+ Mpi2SimpleSgeUntion_t, *pMpi2SimpleSgeUntion_t;
+
+typedef union _MPI2_SGE_IO_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple;
+ MPI2_SGE_CHAIN_UNION MpiChain;
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain;
+} MPI2_SGE_IO_UNION, *PTR_MPI2_SGE_IO_UNION,
+ Mpi2SGEIOUnion_t, *pMpi2SGEIOUnion_t;
+
+/****************************************************************************
+*
+* Values for SGLFlags field, used in many request messages with an SGL
+*
+****************************************************************************/
+
+/*values for MPI SGL Data Location Address Space subfield */
+#define MPI2_SGLFLAGS_ADDRESS_SPACE_MASK (0x0C)
+#define MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE (0x00)
+#define MPI2_SGLFLAGS_IOCDDR_ADDRESS_SPACE (0x04)
+#define MPI2_SGLFLAGS_IOCPLB_ADDRESS_SPACE (0x08)
+#define MPI2_SGLFLAGS_IOCPLBNTA_ADDRESS_SPACE (0x0C)
+/*values for SGL Type subfield */
+#define MPI2_SGLFLAGS_SGL_TYPE_MASK (0x03)
+#define MPI2_SGLFLAGS_SGL_TYPE_MPI (0x00)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE32 (0x01)
+#define MPI2_SGLFLAGS_SGL_TYPE_IEEE64 (0x02)
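
The two subfields are simply OR-ed together; for instance, a plain MPI-format SGL placed in host system memory would be described as:

	u8 sgl_flags = MPI2_SGLFLAGS_SYSTEM_ADDRESS_SPACE |
		       MPI2_SGLFLAGS_SGL_TYPE_MPI;
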
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
new file mode 100644
index 000000000000..d8b2c3eedb57
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -0,0 +1,3323 @@
+/*
+ * Copyright (c) 2000-2011 LSI Corporation.
+ *
+ *
+ * Name: mpi2_cnfg.h
+ * Title: MPI Configuration messages and pages
+ * Creation Date: November 10, 2006
+ *
+ * mpi2_cnfg.h Version: 02.00.22
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 Added defines for SAS IO Unit Page 2 PhyFlags.
+ * Added Manufacturing Page 11.
+ * Added MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE
+ * define.
+ * 06-26-07 02.00.02 Adding generic structure for product-specific
+ * Manufacturing pages: MPI2_CONFIG_PAGE_MANUFACTURING_PS.
+ * Rework of BIOS Page 2 configuration page.
+ * Fixed MPI2_BIOSPAGE2_BOOT_DEVICE to be a union of the
+ * forms.
+ * Added configuration pages IOC Page 8 and Driver
+ * Persistent Mapping Page 0.
+ * 08-31-07 02.00.03 Modified configuration pages dealing with Integrated
+ * RAID (Manufacturing Page 4, RAID Volume Pages 0 and 1,
+ * RAID Physical Disk Pages 0 and 1, RAID Configuration
+ * Page 0).
+ * Added new value for AccessStatus field of SAS Device
+ * Page 0 (_SATA_NEEDS_INITIALIZATION).
+ * 10-31-07 02.00.04 Added missing SEPDevHandle field to
+ * MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0.
+ * 12-18-07 02.00.05 Modified IO Unit Page 0 to use 32-bit version fields for
+ * NVDATA.
+ * Modified IOC Page 7 to use masks and added field for
+ * SASBroadcastPrimitiveMasks.
+ * Added MPI2_CONFIG_PAGE_BIOS_4.
+ * Added MPI2_CONFIG_PAGE_LOG_0.
+ * 02-29-08 02.00.06 Modified various names to make them 32-character unique.
+ * Added SAS Device IDs.
+ * Updated Integrated RAID configuration pages including
+ * Manufacturing Page 4, IOC Page 6, and RAID Configuration
+ * Page 0.
+ * 05-21-08 02.00.07 Added define MPI2_MANPAGE4_MIX_SSD_SAS_SATA.
+ * Added define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION.
+ * Fixed define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING.
+ * Added missing MaxNumRoutedSasAddresses field to
+ * MPI2_CONFIG_PAGE_EXPANDER_0.
+ * Added SAS Port Page 0.
+ * Modified structure layout for
+ * MPI2_CONFIG_PAGE_DRIVER_MAPPING_0.
+ * 06-27-08 02.00.08 Changed MPI2_CONFIG_PAGE_RD_PDISK_1 to use
+ * MPI2_RAID_PHYS_DISK1_PATH_MAX to size the array.
+ * 10-02-08 02.00.09 Changed MPI2_RAID_PGAD_CONFIGNUM_MASK from 0x0000FFFF
+ * to 0x000000FF.
+ * Added two new values for the Physical Disk Coercion Size
+ * bits in the Flags field of Manufacturing Page 4.
+ * Added product-specific Manufacturing pages 16 to 31.
+ * Modified Flags bits for controlling write cache on SATA
+ * drives in IO Unit Page 1.
+ * Added new bit to AdditionalControlFlags of SAS IO Unit
+ * Page 1 to control Invalid Topology Correction.
+ * Added additional defines for RAID Volume Page 0
+ * VolumeStatusFlags field.
+ * Modified meaning of RAID Volume Page 0 VolumeSettings
+ * define for auto-configure of hot-swap drives.
+ * Added SupportedPhysDisks field to RAID Volume Page 1 and
+ * added related defines.
+ * Added PhysDiskAttributes field (and related defines) to
+ * RAID Physical Disk Page 0.
+ * Added MPI2_SAS_PHYINFO_PHY_VACANT define.
+ * Added three new DiscoveryStatus bits for SAS IO Unit
+ * Page 0 and SAS Expander Page 0.
+ * Removed multiplexing information from SAS IO Unit pages.
+ * Added BootDeviceWaitTime field to SAS IO Unit Page 4.
+ * Removed Zone Address Resolved bit from PhyInfo and from
+ * Expander Page 0 Flags field.
+ * Added two new AccessStatus values to SAS Device Page 0
+ * for indicating routing problems. Added 3 reserved words
+ * to this page.
+ * 01-19-09 02.00.10 Fixed defines for GPIOVal field of IO Unit Page 3.
+ * Inserted missing reserved field into structure for IOC
+ * Page 6.
+ * Added more pending task bits to RAID Volume Page 0
+ * VolumeStatusFlags defines.
+ * Added MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED define.
+ * Added a new DiscoveryStatus bit for SAS IO Unit Page 0
+ * and SAS Expander Page 0 to flag a downstream initiator
+ * when in simplified routing mode.
+ * Removed SATA Init Failure defines for DiscoveryStatus
+ * fields of SAS IO Unit Page 0 and SAS Expander Page 0.
+ * Added MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED define.
+ * Added PortGroups, DmaGroup, and ControlGroup fields to
+ * SAS Device Page 0.
+ * 05-06-09 02.00.11 Added structures and defines for IO Unit Page 5 and IO
+ * Unit Page 6.
+ * Added expander reduced functionality data to SAS
+ * Expander Page 0.
+ * Added SAS PHY Page 2 and SAS PHY Page 3.
+ * 07-30-09 02.00.12 Added IO Unit Page 7.
+ * Added new device ids.
+ * Added SAS IO Unit Page 5.
+ * Added partial and slumber power management capable flags
+ * to SAS Device Page 0 Flags field.
+ * Added PhyInfo defines for power condition.
+ * Added Ethernet configuration pages.
+ * 10-28-09 02.00.13 Added MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY.
+ * Added SAS PHY Page 4 structure and defines.
+ * 02-10-10 02.00.14 Modified the comments for the configuration page
+ * structures that contain an array of data. The host
+ * should use the "count" field in the page data (e.g. the
+ * NumPhys field) to determine the number of valid elements
+ * in the array.
+ * Added/modified some MPI2_MFGPAGE_DEVID_SAS defines.
+ * Added PowerManagementCapabilities to IO Unit Page 7.
+ * Added PortWidthModGroup field to
+ * MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_6 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_7 and related defines.
+ * Added MPI2_CONFIG_PAGE_SASIOUNIT_8 and related defines.
+ * 05-12-10 02.00.15 Added MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT
+ * define.
+ * Added MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE define.
+ * Added MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY define.
+ * 08-11-10 02.00.16 Removed IO Unit Page 1 device path (multi-pathing)
+ * defines.
+ * 11-10-10 02.00.17 Added ReceptacleID field (replacing Reserved1) to
+ * MPI2_MANPAGE7_CONNECTOR_INFO and reworked defines for
+ * the Pinout field.
+ * Added BoardTemperature and BoardTemperatureUnits fields
+ * to MPI2_CONFIG_PAGE_IO_UNIT_7.
+ * Added MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING define
+ * and MPI2_CONFIG_PAGE_EXT_MAN_PS structure.
+ * 02-23-11 02.00.18 Added ProxyVF_ID field to MPI2_CONFIG_REQUEST.
+ * Added IO Unit Page 8, IO Unit Page 9,
+ * and IO Unit Page 10.
+ * Added SASNotifyPrimitiveMasks field to
+ * MPI2_CONFIG_PAGE_IOC_7.
+ * 03-09-11 02.00.19 Fixed IO Unit Page 10 (to match the spec).
+ * 05-25-11 02.00.20 Cleaned up a few comments.
+ * 08-24-11 02.00.21 Marked the IO Unit Page 7 PowerManagementCapabilities
+ * for PCIe link as obsolete.
+ * Added SpinupFlags field containing a Disable Spin-up bit
+ * to the MPI2_SAS_IOUNIT4_SPINUP_GROUP fields of SAS IO
+ * Unit Page 4.
+ * 11-18-11 02.00.22 Added define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT.
+ * Added UEFIVersion field to BIOS Page 1 and defined new
+ * BiosOptions bits.
+ * Incorporating additions for MPI v2.5.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_CNFG_H
+#define MPI2_CNFG_H
+
+/*****************************************************************************
+* Configuration Page Header and defines
+*****************************************************************************/
+
+/*Config Page Header */
+typedef struct _MPI2_CONFIG_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 PageLength; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+} MPI2_CONFIG_PAGE_HEADER, *PTR_MPI2_CONFIG_PAGE_HEADER,
+ Mpi2ConfigPageHeader_t, *pMpi2ConfigPageHeader_t;
+
+typedef union _MPI2_CONFIG_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ U8 Bytes[4];
+ U16 Word16[2];
+ U32 Word32;
+} MPI2_CONFIG_PAGE_HEADER_UNION, *PTR_MPI2_CONFIG_PAGE_HEADER_UNION,
+ Mpi2ConfigPageHeaderUnion, *pMpi2ConfigPageHeaderUnion;
+
+/*Extended Config Page Header */
+typedef struct _MPI2_CONFIG_EXTENDED_PAGE_HEADER {
+ U8 PageVersion; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 PageNumber; /*0x02 */
+ U8 PageType; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 Reserved2; /*0x07 */
+} MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ *PTR_MPI2_CONFIG_EXTENDED_PAGE_HEADER,
+ Mpi2ConfigExtendedPageHeader_t,
+ *pMpi2ConfigExtendedPageHeader_t;
+
+typedef union _MPI2_CONFIG_EXT_PAGE_HEADER_UNION {
+ MPI2_CONFIG_PAGE_HEADER Struct;
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Ext;
+ U8 Bytes[8];
+ U16 Word16[4];
+ U32 Word32[2];
+} MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ *PTR_MPI2_CONFIG_EXT_PAGE_HEADER_UNION,
+ Mpi2ConfigPageExtendedHeaderUnion,
+ *pMpi2ConfigPageExtendedHeaderUnion;
+
+
+/*PageType field values */
+#define MPI2_CONFIG_PAGEATTR_READ_ONLY (0x00)
+#define MPI2_CONFIG_PAGEATTR_CHANGEABLE (0x10)
+#define MPI2_CONFIG_PAGEATTR_PERSISTENT (0x20)
+#define MPI2_CONFIG_PAGEATTR_MASK (0xF0)
+
+#define MPI2_CONFIG_PAGETYPE_IO_UNIT (0x00)
+#define MPI2_CONFIG_PAGETYPE_IOC (0x01)
+#define MPI2_CONFIG_PAGETYPE_BIOS (0x02)
+#define MPI2_CONFIG_PAGETYPE_RAID_VOLUME (0x08)
+#define MPI2_CONFIG_PAGETYPE_MANUFACTURING (0x09)
+#define MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK (0x0A)
+#define MPI2_CONFIG_PAGETYPE_EXTENDED (0x0F)
+#define MPI2_CONFIG_PAGETYPE_MASK (0x0F)
+
+#define MPI2_CONFIG_TYPENUM_MASK (0x0FFF)
+
+
+/*ExtPageType field values */
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT (0x10)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER (0x11)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE (0x12)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PHY (0x13)
+#define MPI2_CONFIG_EXTPAGETYPE_LOG (0x14)
+#define MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE (0x15)
+#define MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG (0x16)
+#define MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING (0x17)
+#define MPI2_CONFIG_EXTPAGETYPE_SAS_PORT (0x18)
+#define MPI2_CONFIG_EXTPAGETYPE_ETHERNET (0x19)
+#define MPI2_CONFIG_EXTPAGETYPE_EXT_MANUFACTURING (0x1A)
+
+
+/*****************************************************************************
+* PageAddress defines
+*****************************************************************************/
+
+/*RAID Volume PageAddress format */
+#define MPI2_RAID_VOLUME_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_RAID_VOLUME_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_RAID_VOLUME_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Physical Disk PageAddress format */
+#define MPI2_PHYSDISK_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM (0x00000000)
+#define MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM (0x10000000)
+#define MPI2_PHYSDISK_PGAD_FORM_DEVHANDLE (0x20000000)
+
+#define MPI2_PHYSDISK_PGAD_PHYSDISKNUM_MASK (0x000000FF)
+#define MPI2_PHYSDISK_PGAD_DEVHANDLE_MASK (0x0000FFFF)
+
+
+/*SAS Expander PageAddress format */
+#define MPI2_SAS_EXPAND_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL (0x00000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM (0x10000000)
+#define MPI2_SAS_EXPAND_PGAD_FORM_HNDL (0x20000000)
+
+#define MPI2_SAS_EXPAND_PGAD_HANDLE_MASK (0x0000FFFF)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_MASK (0x00FF0000)
+#define MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT (16)
+
+
+/*SAS Device PageAddress format */
+#define MPI2_SAS_DEVICE_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_DEVICE_PGAD_FORM_HANDLE (0x20000000)
+
+#define MPI2_SAS_DEVICE_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*SAS PHY PageAddress format */
+#define MPI2_SAS_PHY_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER (0x00000000)
+#define MPI2_SAS_PHY_PGAD_FORM_PHY_TBL_INDEX (0x10000000)
+
+#define MPI2_SAS_PHY_PGAD_PHY_NUMBER_MASK (0x000000FF)
+#define MPI2_SAS_PHY_PGAD_PHY_TBL_INDEX_MASK (0x0000FFFF)
+
+
+/*SAS Port PageAddress format */
+#define MPI2_SASPORT_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SASPORT_PGAD_FORM_GET_NEXT_PORT (0x00000000)
+#define MPI2_SASPORT_PGAD_FORM_PORT_NUM (0x10000000)
+
+#define MPI2_SASPORT_PGAD_PORTNUMBER_MASK (0x00000FFF)
+
+
+/*SAS Enclosure PageAddress format */
+#define MPI2_SAS_ENCLOS_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_GET_NEXT_HANDLE (0x00000000)
+#define MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE (0x10000000)
+
+#define MPI2_SAS_ENCLOS_PGAD_HANDLE_MASK (0x0000FFFF)
+
+
+/*RAID Configuration PageAddress format */
+#define MPI2_RAID_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM (0x00000000)
+#define MPI2_RAID_PGAD_FORM_CONFIGNUM (0x10000000)
+#define MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG (0x20000000)
+
+#define MPI2_RAID_PGAD_CONFIGNUM_MASK (0x000000FF)
+
+
+/*Driver Persistent Mapping PageAddress format */
+#define MPI2_DPM_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_DPM_PGAD_FORM_ENTRY_RANGE (0x00000000)
+
+#define MPI2_DPM_PGAD_ENTRY_COUNT_MASK (0x0FFF0000)
+#define MPI2_DPM_PGAD_ENTRY_COUNT_SHIFT (16)
+#define MPI2_DPM_PGAD_START_ENTRY_MASK (0x0000FFFF)
+
+
+/*Ethernet PageAddress format */
+#define MPI2_ETHERNET_PGAD_FORM_MASK (0xF0000000)
+#define MPI2_ETHERNET_PGAD_FORM_IF_NUM (0x00000000)
+
+#define MPI2_ETHERNET_PGAD_IF_NUMBER_MASK (0x000000FF)
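
Each form above is OR-ed with its handle or number to build the 32-bit PageAddress of a configuration request. For instance, walking SAS devices by handle, with a hypothetical handle carried over from the previous iteration:

	u32 page_address = MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE |
			   (handle & MPI2_SAS_DEVICE_PGAD_HANDLE_MASK);
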
+
+
+
+/****************************************************************************
+* Configuration messages
+****************************************************************************/
+
+/*Configuration Request Message */
+typedef struct _MPI2_CONFIG_REQUEST {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 ProxyVF_ID; /*0x0D */
+ U16 Reserved4; /*0x0E */
+ U32 Reserved3; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+ U32 PageAddress; /*0x18 */
+ MPI2_SGE_IO_UNION PageBufferSGE; /*0x1C */
+} MPI2_CONFIG_REQUEST, *PTR_MPI2_CONFIG_REQUEST,
+ Mpi2ConfigRequest_t, *pMpi2ConfigRequest_t;
+
+/*values for the Action field */
+#define MPI2_CONFIG_ACTION_PAGE_HEADER (0x00)
+#define MPI2_CONFIG_ACTION_PAGE_READ_CURRENT (0x01)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT (0x02)
+#define MPI2_CONFIG_ACTION_PAGE_DEFAULT (0x03)
+#define MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM (0x04)
+#define MPI2_CONFIG_ACTION_PAGE_READ_DEFAULT (0x05)
+#define MPI2_CONFIG_ACTION_PAGE_READ_NVRAM (0x06)
+#define MPI2_CONFIG_ACTION_PAGE_GET_CHANGEABLE (0x07)
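
Configuration access is normally a two-step exchange: a PAGE_HEADER action first, to learn a page's version and length, then a read or write action with the returned header copied back in. A stage-one setup sketch, assuming a hypothetical request frame of type pMpi2ConfigRequest_t (how the message is transported is driver specific):

	memset(request, 0, sizeof(Mpi2ConfigRequest_t));
	request->Function = MPI2_FUNCTION_CONFIG;
	request->Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	request->Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
	request->Header.PageNumber = 1;
	/* stage two resends this with Action = PAGE_READ_CURRENT plus the
	 * header and page length echoed from the stage-one reply
	 */
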
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+
+/*Config Reply Message */
+typedef struct _MPI2_CONFIG_REPLY {
+ U8 Action; /*0x00 */
+ U8 SGLFlags; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ExtPageLength; /*0x04 */
+ U8 ExtPageType; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 Reserved2; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x14 */
+} MPI2_CONFIG_REPLY, *PTR_MPI2_CONFIG_REPLY,
+ Mpi2ConfigReply_t, *pMpi2ConfigReply_t;
+
+
+
+/*****************************************************************************
+*
+* C o n f i g u r a t i o n P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Manufacturing Config pages
+****************************************************************************/
+
+#define MPI2_MFGPAGE_VENDORID_LSI (0x1000)
+
+/*MPI v2.0 SAS products */
+#define MPI2_MFGPAGE_DEVID_SAS2004 (0x0070)
+#define MPI2_MFGPAGE_DEVID_SAS2008 (0x0072)
+#define MPI2_MFGPAGE_DEVID_SAS2108_1 (0x0074)
+#define MPI2_MFGPAGE_DEVID_SAS2108_2 (0x0076)
+#define MPI2_MFGPAGE_DEVID_SAS2108_3 (0x0077)
+#define MPI2_MFGPAGE_DEVID_SAS2116_1 (0x0064)
+#define MPI2_MFGPAGE_DEVID_SAS2116_2 (0x0065)
+
+#define MPI2_MFGPAGE_DEVID_SSS6200 (0x007E)
+
+#define MPI2_MFGPAGE_DEVID_SAS2208_1 (0x0080)
+#define MPI2_MFGPAGE_DEVID_SAS2208_2 (0x0081)
+#define MPI2_MFGPAGE_DEVID_SAS2208_3 (0x0082)
+#define MPI2_MFGPAGE_DEVID_SAS2208_4 (0x0083)
+#define MPI2_MFGPAGE_DEVID_SAS2208_5 (0x0084)
+#define MPI2_MFGPAGE_DEVID_SAS2208_6 (0x0085)
+#define MPI2_MFGPAGE_DEVID_SAS2308_1 (0x0086)
+#define MPI2_MFGPAGE_DEVID_SAS2308_2 (0x0087)
+#define MPI2_MFGPAGE_DEVID_SAS2308_3 (0x006E)
+
+/*MPI v2.5 SAS products */
+#define MPI25_MFGPAGE_DEVID_SAS3004 (0x0096)
+#define MPI25_MFGPAGE_DEVID_SAS3008 (0x0097)
+#define MPI25_MFGPAGE_DEVID_SAS3108_1 (0x0090)
+#define MPI25_MFGPAGE_DEVID_SAS3108_2 (0x0091)
+#define MPI25_MFGPAGE_DEVID_SAS3108_5 (0x0094)
+#define MPI25_MFGPAGE_DEVID_SAS3108_6 (0x0095)
+
+
+
+
+/*Manufacturing Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 ChipName[16]; /*0x04 */
+ U8 ChipRevision[8]; /*0x14 */
+ U8 BoardName[16]; /*0x1C */
+ U8 BoardAssembly[16]; /*0x2C */
+ U8 BoardTracerNumber[16]; /*0x3C */
+} MPI2_CONFIG_PAGE_MAN_0,
+ *PTR_MPI2_CONFIG_PAGE_MAN_0,
+ Mpi2ManufacturingPage0_t,
+ *pMpi2ManufacturingPage0_t;
+
+#define MPI2_MANUFACTURING0_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 VPD[256]; /*0x04 */
+} MPI2_CONFIG_PAGE_MAN_1,
+ *PTR_MPI2_CONFIG_PAGE_MAN_1,
+ Mpi2ManufacturingPage1_t,
+ *pMpi2ManufacturingPage1_t;
+
+#define MPI2_MANUFACTURING1_PAGEVERSION (0x00)
+
+
+typedef struct _MPI2_CHIP_REVISION_ID {
+ U16 DeviceID; /*0x00 */
+ U8 PCIRevisionID; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_CHIP_REVISION_ID, *PTR_MPI2_CHIP_REVISION_ID,
+ Mpi2ChipRevisionId_t, *pMpi2ChipRevisionId_t;
+
+
+/*Manufacturing Page 2 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ HwSettings[MPI2_MAN_PAGE_2_HW_SETTINGS_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_2,
+ *PTR_MPI2_CONFIG_PAGE_MAN_2,
+ Mpi2ManufacturingPage2_t,
+ *pMpi2ManufacturingPage2_t;
+
+#define MPI2_MANUFACTURING2_PAGEVERSION (0x00)
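+
+/*
+ *Illustrative sketch (hypothetical host code): because HwSettings is
+ *declared with a single element, the real size of Manufacturing Page 2
+ *must be taken from the PageLength returned in the header, which counts
+ *32-bit words and includes the header itself.
+ */
+static inline U32
+mpi2_man_page_2_size_bytes(const MPI2_CONFIG_PAGE_HEADER *header)
+{
+	return (U32)header->PageLength * 4;
+}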
+
+
+/*Manufacturing Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check Header.PageLength at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_3_INFO_WORDS
+#define MPI2_MAN_PAGE_3_INFO_WORDS (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ MPI2_CHIP_REVISION_ID ChipId; /*0x04 */
+ U32
+ Info[MPI2_MAN_PAGE_3_INFO_WORDS];/*0x08 */
+} MPI2_CONFIG_PAGE_MAN_3,
+ *PTR_MPI2_CONFIG_PAGE_MAN_3,
+ Mpi2ManufacturingPage3_t,
+ *pMpi2ManufacturingPage3_t;
+
+#define MPI2_MANUFACTURING3_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 4 */
+
+typedef struct _MPI2_MANPAGE4_PWR_SAVE_SETTINGS {
+ U8 PowerSaveFlags; /*0x00 */
+ U8 InternalOperationsSleepTime; /*0x01 */
+ U8 InternalOperationsRunTime; /*0x02 */
+ U8 HostIdleTime; /*0x03 */
+} MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ *PTR_MPI2_MANPAGE4_PWR_SAVE_SETTINGS,
+ Mpi2ManPage4PwrSaveSettings_t,
+ *pMpi2ManPage4PwrSaveSettings_t;
+
+/*defines for the PowerSaveFlags field */
+#define MPI2_MANPAGE4_MASK_POWERSAVE_MODE (0x03)
+#define MPI2_MANPAGE4_POWERSAVE_MODE_DISABLED (0x00)
+#define MPI2_MANPAGE4_CUSTOM_POWERSAVE_MODE (0x01)
+#define MPI2_MANPAGE4_FULL_POWERSAVE_MODE (0x02)
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Flags; /*0x08 */
+ U8 InquirySize; /*0x0C */
+ U8 Reserved2; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ U8 InquiryData[56]; /*0x10 */
+ U32 RAID0VolumeSettings; /*0x48 */
+ U32 RAID1EVolumeSettings; /*0x4C */
+ U32 RAID1VolumeSettings; /*0x50 */
+ U32 RAID10VolumeSettings; /*0x54 */
+ U32 Reserved4; /*0x58 */
+ U32 Reserved5; /*0x5C */
+ MPI2_MANPAGE4_PWR_SAVE_SETTINGS PowerSaveSettings; /*0x60 */
+ U8 MaxOCEDisks; /*0x64 */
+ U8 ResyncRate; /*0x65 */
+ U16 DataScrubDuration; /*0x66 */
+ U8 MaxHotSpares; /*0x68 */
+ U8 MaxPhysDisksPerVol; /*0x69 */
+ U8 MaxPhysDisks; /*0x6A */
+ U8 MaxVolumes; /*0x6B */
+} MPI2_CONFIG_PAGE_MAN_4,
+ *PTR_MPI2_CONFIG_PAGE_MAN_4,
+ Mpi2ManufacturingPage4_t,
+ *pMpi2ManufacturingPage4_t;
+
+#define MPI2_MANUFACTURING4_PAGEVERSION (0x0A)
+
+/*Manufacturing Page 4 Flags field */
+#define MPI2_MANPAGE4_METADATA_SIZE_MASK (0x00030000)
+#define MPI2_MANPAGE4_METADATA_512MB (0x00000000)
+
+#define MPI2_MANPAGE4_MIX_SSD_SAS_SATA (0x00008000)
+#define MPI2_MANPAGE4_MIX_SSD_AND_NON_SSD (0x00004000)
+#define MPI2_MANPAGE4_HIDE_PHYSDISK_NON_IR (0x00002000)
+
+#define MPI2_MANPAGE4_MASK_PHYSDISK_COERCION (0x00001C00)
+#define MPI2_MANPAGE4_PHYSDISK_COERCION_1GB (0x00000000)
+#define MPI2_MANPAGE4_PHYSDISK_128MB_COERCION (0x00000400)
+#define MPI2_MANPAGE4_PHYSDISK_ADAPTIVE_COERCION (0x00000800)
+#define MPI2_MANPAGE4_PHYSDISK_ZERO_COERCION (0x00000C00)
+
+#define MPI2_MANPAGE4_MASK_BAD_BLOCK_MARKING (0x00000300)
+#define MPI2_MANPAGE4_DEFAULT_BAD_BLOCK_MARKING (0x00000000)
+#define MPI2_MANPAGE4_TABLE_BAD_BLOCK_MARKING (0x00000100)
+#define MPI2_MANPAGE4_WRITE_LONG_BAD_BLOCK_MARKING (0x00000200)
+
+#define MPI2_MANPAGE4_FORCE_OFFLINE_FAILOVER (0x00000080)
+#define MPI2_MANPAGE4_RAID10_DISABLE (0x00000040)
+#define MPI2_MANPAGE4_RAID1E_DISABLE (0x00000020)
+#define MPI2_MANPAGE4_RAID1_DISABLE (0x00000010)
+#define MPI2_MANPAGE4_RAID0_DISABLE (0x00000008)
+#define MPI2_MANPAGE4_IR_MODEPAGE8_DISABLE (0x00000004)
+#define MPI2_MANPAGE4_IM_RESYNC_CACHE_ENABLE (0x00000002)
+#define MPI2_MANPAGE4_IR_NO_MIX_SAS_SATA (0x00000001)
+
+
+/*Manufacturing Page 5 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MAN_PAGE_5_PHY_ENTRIES
+#define MPI2_MAN_PAGE_5_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_MANUFACTURING5_ENTRY {
+ U64 WWID; /*0x00 */
+ U64 DeviceName; /*0x08 */
+} MPI2_MANUFACTURING5_ENTRY,
+ *PTR_MPI2_MANUFACTURING5_ENTRY,
+ Mpi2Manufacturing5Entry_t,
+ *pMpi2Manufacturing5Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_MANUFACTURING5_ENTRY
+ Phy[MPI2_MAN_PAGE_5_PHY_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_MAN_5,
+ *PTR_MPI2_CONFIG_PAGE_MAN_5,
+ Mpi2ManufacturingPage5_t,
+ *pMpi2ManufacturingPage5_t;
+
+#define MPI2_MANUFACTURING5_PAGEVERSION (0x03)
+
+
+/*Manufacturing Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_6,
+ *PTR_MPI2_CONFIG_PAGE_MAN_6,
+ Mpi2ManufacturingPage6_t,
+ *pMpi2ManufacturingPage6_t;
+
+#define MPI2_MANUFACTURING6_PAGEVERSION (0x00)
+
+
+/*Manufacturing Page 7 */
+
+typedef struct _MPI2_MANPAGE7_CONNECTOR_INFO {
+ U32 Pinout; /*0x00 */
+ U8 Connector[16]; /*0x04 */
+ U8 Location; /*0x14 */
+ U8 ReceptacleID; /*0x15 */
+ U16 Slot; /*0x16 */
+ U32 Reserved2; /*0x18 */
+} MPI2_MANPAGE7_CONNECTOR_INFO,
+ *PTR_MPI2_MANPAGE7_CONNECTOR_INFO,
+ Mpi2ManPage7ConnectorInfo_t,
+ *pMpi2ManPage7ConnectorInfo_t;
+
+/*defines for the Pinout field */
+#define MPI2_MANPAGE7_PINOUT_LANE_MASK (0x0000FF00)
+#define MPI2_MANPAGE7_PINOUT_LANE_SHIFT (8)
+
+#define MPI2_MANPAGE7_PINOUT_TYPE_MASK (0x000000FF)
+#define MPI2_MANPAGE7_PINOUT_TYPE_UNKNOWN (0x00)
+#define MPI2_MANPAGE7_PINOUT_SATA_SINGLE (0x01)
+#define MPI2_MANPAGE7_PINOUT_SFF_8482 (0x02)
+#define MPI2_MANPAGE7_PINOUT_SFF_8486 (0x03)
+#define MPI2_MANPAGE7_PINOUT_SFF_8484 (0x04)
+#define MPI2_MANPAGE7_PINOUT_SFF_8087 (0x05)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_4I (0x06)
+#define MPI2_MANPAGE7_PINOUT_SFF_8643_8I (0x07)
+#define MPI2_MANPAGE7_PINOUT_SFF_8470 (0x08)
+#define MPI2_MANPAGE7_PINOUT_SFF_8088 (0x09)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_4X (0x0A)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_8X (0x0B)
+#define MPI2_MANPAGE7_PINOUT_SFF_8644_16X (0x0C)
+#define MPI2_MANPAGE7_PINOUT_SFF_8436 (0x0D)
+
+/*defines for the Location field */
+#define MPI2_MANPAGE7_LOCATION_UNKNOWN (0x01)
+#define MPI2_MANPAGE7_LOCATION_INTERNAL (0x02)
+#define MPI2_MANPAGE7_LOCATION_EXTERNAL (0x04)
+#define MPI2_MANPAGE7_LOCATION_SWITCHABLE (0x08)
+#define MPI2_MANPAGE7_LOCATION_AUTO (0x10)
+#define MPI2_MANPAGE7_LOCATION_NOT_PRESENT (0x20)
+#define MPI2_MANPAGE7_LOCATION_NOT_CONNECTED (0x80)
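+
+/*
+ *Illustrative sketch (hypothetical host code): the connector lane count
+ *and connector type share the Pinout word and are recovered with the mask
+ *and shift defines above.
+ */
+static inline U8
+mpi2_manpage7_pinout_lane(U32 pinout)
+{
+	return (U8)((pinout & MPI2_MANPAGE7_PINOUT_LANE_MASK) >>
+	    MPI2_MANPAGE7_PINOUT_LANE_SHIFT);
+}
+
+static inline U8
+mpi2_manpage7_pinout_type(U32 pinout)
+{
+	return (U8)(pinout & MPI2_MANPAGE7_PINOUT_TYPE_MASK);
+}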
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_MANPAGE7_CONNECTOR_INFO_MAX
+#define MPI2_MANPAGE7_CONNECTOR_INFO_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Flags; /*0x0C */
+ U8 EnclosureName[16]; /*0x10 */
+ U8 NumPhys; /*0x20 */
+ U8 Reserved3; /*0x21 */
+ U16 Reserved4; /*0x22 */
+ MPI2_MANPAGE7_CONNECTOR_INFO
+ ConnectorInfo[MPI2_MANPAGE7_CONNECTOR_INFO_MAX]; /*0x24 */
+} MPI2_CONFIG_PAGE_MAN_7,
+ *PTR_MPI2_CONFIG_PAGE_MAN_7,
+ Mpi2ManufacturingPage7_t,
+ *pMpi2ManufacturingPage7_t;
+
+#define MPI2_MANUFACTURING7_PAGEVERSION (0x01)
+
+/*defines for the Flags field */
+#define MPI2_MANPAGE7_FLAG_USE_SLOT_INFO (0x00000001)
+
+
+/*
+ *Generic structure to use for product-specific manufacturing pages
+ *(currently Manufacturing Page 8 through Manufacturing Page 31).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_MAN_PS {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 ProductSpecificInfo;/*0x04 */
+} MPI2_CONFIG_PAGE_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_MAN_PS,
+ Mpi2ManufacturingPagePS_t,
+ *pMpi2ManufacturingPagePS_t;
+
+#define MPI2_MANUFACTURING8_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING9_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING10_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING11_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING12_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING13_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING14_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING15_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING16_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING17_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING18_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING19_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING20_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING21_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING22_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING23_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING24_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING25_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING26_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING27_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING28_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING29_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING30_PAGEVERSION (0x00)
+#define MPI2_MANUFACTURING31_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* IO Unit Config Pages
+****************************************************************************/
+
+/*IO Unit Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64 UniqueValue; /*0x04 */
+ MPI2_VERSION_UNION NvdataVersionDefault; /*0x0C */
+ MPI2_VERSION_UNION NvdataVersionPersistent; /*0x0E */
+} MPI2_CONFIG_PAGE_IO_UNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_0,
+ Mpi2IOUnitPage0_t, *pMpi2IOUnitPage0_t;
+
+#define MPI2_IOUNITPAGE0_PAGEVERSION (0x02)
+
+
+/*IO Unit Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+} MPI2_CONFIG_PAGE_IO_UNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_1,
+ Mpi2IOUnitPage1_t, *pMpi2IOUnitPage1_t;
+
+#define MPI2_IOUNITPAGE1_PAGEVERSION (0x04)
+
+/*IO Unit Page 1 Flags defines */
+#define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE (0x00002000)
+#define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH (0x00001000)
+#define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY (0x00000800)
+#define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE (0x00000600)
+#define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT (9)
+#define MPI2_IOUNITPAGE1_ENABLE_SATA_WRITE_CACHE (0x00000000)
+#define MPI2_IOUNITPAGE1_DISABLE_SATA_WRITE_CACHE (0x00000200)
+#define MPI2_IOUNITPAGE1_UNCHANGED_SATA_WRITE_CACHE (0x00000400)
+#define MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE (0x00000100)
+#define MPI2_IOUNITPAGE1_DISABLE_IR (0x00000040)
+#define MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING (0x00000020)
+#define MPI2_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID (0x00000004)
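+
+/*
+ *Illustrative sketch (hypothetical host code): the SATA write cache policy
+ *is a two-bit field within Flags, so changing it is a read-modify-write of
+ *the masked bits; pass one of the three SATA_WRITE_CACHE settings above.
+ */
+static inline U32
+mpi2_iounit1_set_sata_write_cache(U32 flags, U32 setting)
+{
+	return (flags & ~MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE) |
+	    (setting & MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE);
+}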
+
+
+/*IO Unit Page 3 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for GPIOCount at runtime.
+ */
+#ifndef MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 GPIOCount; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16
+ GPIOVal[MPI2_IO_UNIT_PAGE_3_GPIO_VAL_MAX];/*0x08 */
+} MPI2_CONFIG_PAGE_IO_UNIT_3,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_3,
+ Mpi2IOUnitPage3_t, *pMpi2IOUnitPage3_t;
+
+#define MPI2_IOUNITPAGE3_PAGEVERSION (0x01)
+
+/*defines for IO Unit Page 3 GPIOVal field */
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_MASK (0xFFFC)
+#define MPI2_IOUNITPAGE3_GPIO_FUNCTION_SHIFT (2)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_OFF (0x0000)
+#define MPI2_IOUNITPAGE3_GPIO_SETTING_ON (0x0001)
+
+
+/*IO Unit Page 5 */
+
+/*
+ *Upper layer code (drivers, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumDmaEngines at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES
+#define MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_5 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U64
+ RaidAcceleratorBufferBaseAddress; /*0x04 */
+ U64
+ RaidAcceleratorBufferSize; /*0x0C */
+ U64
+ RaidAcceleratorControlBaseAddress; /*0x14 */
+ U8 RAControlSize; /*0x1C */
+ U8 NumDmaEngines; /*0x1D */
+ U8 RAMinControlSize; /*0x1E */
+ U8 RAMaxControlSize; /*0x1F */
+ U32 Reserved1; /*0x20 */
+ U32 Reserved2; /*0x24 */
+ U32 Reserved3; /*0x28 */
+ U32
+ DmaEngineCapabilities[MPI2_IOUNITPAGE5_DMAENGINE_ENTRIES]; /*0x2C */
+} MPI2_CONFIG_PAGE_IO_UNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_5,
+ Mpi2IOUnitPage5_t, *pMpi2IOUnitPage5_t;
+
+#define MPI2_IOUNITPAGE5_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 5 DmaEngineCapabilities field */
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS (8)
+
+#define MPI2_IOUNITPAGE5_DMA_CAP_EEDP (0x0008)
+#define MPI2_IOUNITPAGE5_DMA_CAP_PARITY_GENERATION (0x0004)
+#define MPI2_IOUNITPAGE5_DMA_CAP_HASHING (0x0002)
+#define MPI2_IOUNITPAGE5_DMA_CAP_ENCRYPTION (0x0001)
+
+
+/*IO Unit Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 Flags; /*0x04 */
+ U8 RAHostControlSize; /*0x06 */
+ U8 Reserved0; /*0x07 */
+ U64
+ RaidAcceleratorHostControlBaseAddress; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+ U32 Reserved3; /*0x18 */
+} MPI2_CONFIG_PAGE_IO_UNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_6,
+ Mpi2IOUnitPage6_t, *pMpi2IOUnitPage6_t;
+
+#define MPI2_IOUNITPAGE6_PAGEVERSION (0x00)
+
+/*defines for IO Unit Page 6 Flags field */
+#define MPI2_IOUNITPAGE6_FLAGS_ENABLE_RAID_ACCELERATOR (0x0001)
+
+
+/*IO Unit Page 7 */
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 CurrentPowerMode; /*0x04 */
+ U8 PreviousPowerMode; /*0x05 */
+ U8 PCIeWidth; /*0x06 */
+ U8 PCIeSpeed; /*0x07 */
+ U32 ProcessorState; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U16 IOCTemperature; /*0x10 */
+ U8
+ IOCTemperatureUnits; /*0x12 */
+ U8 IOCSpeed; /*0x13 */
+ U16 BoardTemperature; /*0x14 */
+ U8
+ BoardTemperatureUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+} MPI2_CONFIG_PAGE_IO_UNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
+ Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
+
+#define MPI2_IOUNITPAGE7_PAGEVERSION (0x02)
+
+/*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
+#define MPI25_IOUNITPAGE7_PM_INIT_MASK (0xC0)
+#define MPI25_IOUNITPAGE7_PM_INIT_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_INIT_HOST (0x40)
+#define MPI25_IOUNITPAGE7_PM_INIT_IO_UNIT (0x80)
+#define MPI25_IOUNITPAGE7_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI25_IOUNITPAGE7_PM_MODE_MASK (0x07)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNAVAILABLE (0x00)
+#define MPI25_IOUNITPAGE7_PM_MODE_UNKNOWN (0x01)
+#define MPI25_IOUNITPAGE7_PM_MODE_FULL_POWER (0x04)
+#define MPI25_IOUNITPAGE7_PM_MODE_REDUCED_POWER (0x05)
+#define MPI25_IOUNITPAGE7_PM_MODE_STANDBY (0x06)
+
+
+/*defines for IO Unit Page 7 PCIeWidth field */
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1 (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X2 (0x02)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X4 (0x04)
+#define MPI2_IOUNITPAGE7_PCIE_WIDTH_X8 (0x08)
+
+/*defines for IO Unit Page 7 PCIeSpeed field */
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_2_5_GBPS (0x00)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_5_0_GBPS (0x01)
+#define MPI2_IOUNITPAGE7_PCIE_SPEED_8_0_GBPS (0x02)
+
+/*defines for IO Unit Page 7 ProcessorState field */
+#define MPI2_IOUNITPAGE7_PSTATE_MASK_SECOND (0x0000000F)
+#define MPI2_IOUNITPAGE7_PSTATE_SHIFT_SECOND (0)
+
+#define MPI2_IOUNITPAGE7_PSTATE_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_PSTATE_DISABLED (0x01)
+#define MPI2_IOUNITPAGE7_PSTATE_ENABLED (0x02)
+
+/*defines for IO Unit Page 7 PowerManagementCapabilities field */
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_FULL_PWR_MODE (0x00400000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_REDUCED_PWR_MODE (0x00200000)
+#define MPI25_IOUNITPAGE7_PMCAP_DPA_STANDBY_MODE (0x00100000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_FULL_PWR_MODE (0x00040000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_REDUCED_PWR_MODE (0x00020000)
+#define MPI25_IOUNITPAGE7_PMCAP_HOST_STANDBY_MODE (0x00010000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_FULL_PWR_MODE (0x00004000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_REDUCED_PWR_MODE (0x00002000)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_STANDBY_MODE (0x00001000)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_12_5_PCT_IOCSPEED (0x00000040)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_25_0_PCT_IOCSPEED (0x00000020)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_50_0_PCT_IOCSPEED (0x00000010)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_WIDTH_CHANGE_PCIE (0x00000008)
+#define MPI2_IOUNITPAGE7_PMCAP_HOST_SPEED_CHANGE_PCIE (0x00000004)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_WIDTH_CHANGE_PCIE (0x00000002)
+#define MPI25_IOUNITPAGE7_PMCAP_IO_SPEED_CHANGE_PCIE (0x00000001)
+
+/*obsolete names for the PowerManagementCapabilities bits (above) */
+#define MPI2_IOUNITPAGE7_PMCAP_12_5_PCT_IOCSPEED (0x00000400)
+#define MPI2_IOUNITPAGE7_PMCAP_25_0_PCT_IOCSPEED (0x00000200)
+#define MPI2_IOUNITPAGE7_PMCAP_50_0_PCT_IOCSPEED (0x00000100)
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_WIDTH_CHANGE (0x00000008) /*obsolete */
+#define MPI2_IOUNITPAGE7_PMCAP_PCIE_SPEED_CHANGE (0x00000004) /*obsolete */
+
+
+/*defines for IO Unit Page 7 IOCTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_IOC_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS (0x02)
+
+/*defines for IO Unit Page 7 IOCSpeed field */
+#define MPI2_IOUNITPAGE7_IOC_SPEED_FULL (0x01)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_HALF (0x02)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_QUARTER (0x04)
+#define MPI2_IOUNITPAGE7_IOC_SPEED_EIGHTH (0x08)
+
+/*defines for IO Unit Page 7 BoardTemperatureUnits field */
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_NOT_PRESENT (0x00)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_FAHRENHEIT (0x01)
+#define MPI2_IOUNITPAGE7_BOARD_TEMP_CELSIUS (0x02)
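+
+/*
+ *Illustrative sketch (hypothetical host code): IOCTemperature is only
+ *meaningful when IOCTemperatureUnits is not NOT_PRESENT; this helper
+ *normalizes the reading to degrees Celsius and returns a negative value
+ *when no sensor is present.
+ */
+static inline int
+mpi2_iounit7_ioc_temp_celsius(const MPI2_CONFIG_PAGE_IO_UNIT_7 *page)
+{
+	switch (page->IOCTemperatureUnits) {
+	case MPI2_IOUNITPAGE7_IOC_TEMP_CELSIUS:
+		return page->IOCTemperature;
+	case MPI2_IOUNITPAGE7_IOC_TEMP_FAHRENHEIT:
+		return ((int)page->IOCTemperature - 32) * 5 / 9;
+	default:
+		return -1; /*no temperature sensor present */
+	}
+}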
+
+
+/*IO Unit Page 8 */
+
+#define MPI2_IOUNIT8_NUM_THRESHOLDS (4)
+
+typedef struct _MPI2_IOUNIT8_SENSOR {
+ U16 Flags; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U16
+ Threshold[MPI2_IOUNIT8_NUM_THRESHOLDS]; /*0x04 */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+} MPI2_IOUNIT8_SENSOR, *PTR_MPI2_IOUNIT8_SENSOR,
+ Mpi2IOUnit8Sensor_t, *pMpi2IOUnit8Sensor_t;
+
+/*defines for IO Unit Page 8 Sensor Flags field */
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T3_ENABLE (0x0008)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T2_ENABLE (0x0004)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T1_ENABLE (0x0002)
+#define MPI2_IOUNIT8_SENSOR_FLAGS_T0_ENABLE (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE8_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE8_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 PollingInterval; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT8_SENSOR
+ Sensor[MPI2_IOUNITPAGE8_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_8,
+ Mpi2IOUnitPage8_t, *pMpi2IOUnitPage8_t;
+
+#define MPI2_IOUNITPAGE8_PAGEVERSION (0x00)
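+
+/*
+ *Illustrative sketch (hypothetical host code): Sensor[] is declared with
+ *one element, so a page read into a sufficiently large buffer is indexed
+ *up to NumSensors, which may exceed MPI2_IOUNITPAGE8_SENSOR_ENTRIES.
+ */
+static inline const MPI2_IOUNIT8_SENSOR *
+mpi2_iounit8_sensor(const MPI2_CONFIG_PAGE_IO_UNIT_8 *page, U8 index)
+{
+	if (index >= page->NumSensors)
+		return 0;
+	return &page->Sensor[0] + index;
+}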
+
+
+/*IO Unit Page 9 */
+
+typedef struct _MPI2_IOUNIT9_SENSOR {
+ U16 CurrentTemperature; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 Flags; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U16 Reserved3; /*0x06 */
+ U32 Reserved4; /*0x08 */
+ U32 Reserved5; /*0x0C */
+} MPI2_IOUNIT9_SENSOR, *PTR_MPI2_IOUNIT9_SENSOR,
+ Mpi2IOUnit9Sensor_t, *pMpi2IOUnit9Sensor_t;
+
+/*defines for IO Unit Page 9 Sensor Flags field */
+#define MPI2_IOUNIT9_SENSOR_FLAGS_TEMP_VALID (0x01)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumSensors at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE9_SENSOR_ENTRIES
+#define MPI2_IOUNITPAGE9_SENSOR_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_9 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U8 NumSensors; /*0x0C */
+ U8 Reserved4; /*0x0D */
+ U16 Reserved3; /*0x0E */
+ MPI2_IOUNIT9_SENSOR
+ Sensor[MPI2_IOUNITPAGE9_SENSOR_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_9,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_9,
+ Mpi2IOUnitPage9_t, *pMpi2IOUnitPage9_t;
+
+#define MPI2_IOUNITPAGE9_PAGEVERSION (0x00)
+
+
+/*IO Unit Page 10 */
+
+typedef struct _MPI2_IOUNIT10_FUNCTION {
+ U8 CreditPercent; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_IOUNIT10_FUNCTION,
+ *PTR_MPI2_IOUNIT10_FUNCTION,
+ Mpi2IOUnit10Function_t,
+ *pMpi2IOUnit10Function_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumFunctions at runtime.
+ */
+#ifndef MPI2_IOUNITPAGE10_FUNCTION_ENTRIES
+#define MPI2_IOUNITPAGE10_FUNCTION_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_IO_UNIT_10 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumFunctions; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ MPI2_IOUNIT10_FUNCTION
+ Function[MPI2_IOUNITPAGE10_FUNCTION_ENTRIES];/*0x10 */
+} MPI2_CONFIG_PAGE_IO_UNIT_10,
+ *PTR_MPI2_CONFIG_PAGE_IO_UNIT_10,
+ Mpi2IOUnitPage10_t, *pMpi2IOUnitPage10_t;
+
+#define MPI2_IOUNITPAGE10_PAGEVERSION (0x01)
+
+
+
+/****************************************************************************
+* IOC Config Pages
+****************************************************************************/
+
+/*IOC Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U16 VendorID; /*0x0C */
+ U16 DeviceID; /*0x0E */
+ U8 RevisionID; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ U32 ClassCode; /*0x14 */
+ U16 SubsystemVendorID; /*0x18 */
+ U16 SubsystemID; /*0x1A */
+} MPI2_CONFIG_PAGE_IOC_0,
+ *PTR_MPI2_CONFIG_PAGE_IOC_0,
+ Mpi2IOCPage0_t, *pMpi2IOCPage0_t;
+
+#define MPI2_IOCPAGE0_PAGEVERSION (0x02)
+
+
+/*IOC Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Flags; /*0x04 */
+ U32 CoalescingTimeout; /*0x08 */
+ U8 CoalescingDepth; /*0x0C */
+ U8 PCISlotNum; /*0x0D */
+ U8 PCIBusNum; /*0x0E */
+ U8 PCIDomainSegment; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_1,
+ *PTR_MPI2_CONFIG_PAGE_IOC_1,
+ Mpi2IOCPage1_t, *pMpi2IOCPage1_t;
+
+#define MPI2_IOCPAGE1_PAGEVERSION (0x05)
+
+/*defines for IOC Page 1 Flags field */
+#define MPI2_IOCPAGE1_REPLY_COALESCING (0x00000001)
+
+#define MPI2_IOCPAGE1_PCISLOTNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIBUSNUM_UNKNOWN (0xFF)
+#define MPI2_IOCPAGE1_PCIDOMAIN_UNKNOWN (0xFF)
+
+/*IOC Page 6 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_6 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32
+ CapabilitiesFlags; /*0x04 */
+ U8 MaxDrivesRAID0; /*0x08 */
+ U8 MaxDrivesRAID1; /*0x09 */
+ U8
+ MaxDrivesRAID1E; /*0x0A */
+ U8
+ MaxDrivesRAID10; /*0x0B */
+ U8 MinDrivesRAID0; /*0x0C */
+ U8 MinDrivesRAID1; /*0x0D */
+ U8
+ MinDrivesRAID1E; /*0x0E */
+ U8
+ MinDrivesRAID10; /*0x0F */
+ U32 Reserved1; /*0x10 */
+ U8
+ MaxGlobalHotSpares; /*0x14 */
+ U8 MaxPhysDisks; /*0x15 */
+ U8 MaxVolumes; /*0x16 */
+ U8 MaxConfigs; /*0x17 */
+ U8 MaxOCEDisks; /*0x18 */
+ U8 Reserved2; /*0x19 */
+ U16 Reserved3; /*0x1A */
+ U32
+ SupportedStripeSizeMapRAID0; /*0x1C */
+ U32
+ SupportedStripeSizeMapRAID1E; /*0x20 */
+ U32
+ SupportedStripeSizeMapRAID10; /*0x24 */
+ U32 Reserved4; /*0x28 */
+ U32 Reserved5; /*0x2C */
+ U16
+ DefaultMetadataSize; /*0x30 */
+ U16 Reserved6; /*0x32 */
+ U16
+ MaxBadBlockTableEntries; /*0x34 */
+ U16 Reserved7; /*0x36 */
+ U32
+ IRNvsramVersion; /*0x38 */
+} MPI2_CONFIG_PAGE_IOC_6,
+ *PTR_MPI2_CONFIG_PAGE_IOC_6,
+ Mpi2IOCPage6_t, *pMpi2IOCPage6_t;
+
+#define MPI2_IOCPAGE6_PAGEVERSION (0x05)
+
+/*defines for IOC Page 6 CapabilitiesFlags */
+#define MPI2_IOCPAGE6_CAP_FLAGS_4K_SECTORS_SUPPORT (0x00000020)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID10_SUPPORT (0x00000010)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1_SUPPORT (0x00000008)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID1E_SUPPORT (0x00000004)
+#define MPI2_IOCPAGE6_CAP_FLAGS_RAID0_SUPPORT (0x00000002)
+#define MPI2_IOCPAGE6_CAP_FLAGS_GLOBAL_HOT_SPARE (0x00000001)
+
+
+/*IOC Page 7 */
+
+#define MPI2_IOCPAGE7_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_7 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32
+ EventMasks[MPI2_IOCPAGE7_EVENTMASK_WORDS];/*0x08 */
+ U16 SASBroadcastPrimitiveMasks; /*0x18 */
+ U16 SASNotifyPrimitiveMasks; /*0x1A */
+ U32 Reserved3; /*0x1C */
+} MPI2_CONFIG_PAGE_IOC_7,
+ *PTR_MPI2_CONFIG_PAGE_IOC_7,
+ Mpi2IOCPage7_t, *pMpi2IOCPage7_t;
+
+#define MPI2_IOCPAGE7_PAGEVERSION (0x02)
+
+
+/*IOC Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_IOC_8 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumDevsPerEnclosure; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U16 MaxPersistentEntries; /*0x08 */
+ U16 MaxNumPhysicalMappedIDs; /*0x0A */
+ U16 Flags; /*0x0C */
+ U16 Reserved3; /*0x0E */
+ U16 IRVolumeMappingFlags; /*0x10 */
+ U16 Reserved4; /*0x12 */
+ U32 Reserved5; /*0x14 */
+} MPI2_CONFIG_PAGE_IOC_8,
+ *PTR_MPI2_CONFIG_PAGE_IOC_8,
+ Mpi2IOCPage8_t, *pMpi2IOCPage8_t;
+
+#define MPI2_IOCPAGE8_PAGEVERSION (0x00)
+
+/*defines for IOC Page 8 Flags field */
+#define MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1 (0x00000020)
+#define MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0 (0x00000010)
+
+#define MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE (0x0000000E)
+#define MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING (0x00000002)
+
+#define MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING (0x00000001)
+#define MPI2_IOCPAGE8_FLAGS_ENABLE_PERSISTENT_MAPPING (0x00000000)
+
+/*defines for IOC Page 8 IRVolumeMappingFlags */
+#define MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE (0x00000003)
+#define MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING (0x00000000)
+#define MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING (0x00000001)
+
+
+/****************************************************************************
+* BIOS Config Pages
+****************************************************************************/
+
+/*BIOS Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 BiosOptions; /*0x04 */
+ U32 IOCSettings; /*0x08 */
+ U32 Reserved1; /*0x0C */
+ U32 DeviceSettings; /*0x10 */
+ U16 NumberOfDevices; /*0x14 */
+ U16 UEFIVersion; /*0x16 */
+ U16 IOTimeoutBlockDevicesNonRM; /*0x18 */
+ U16 IOTimeoutSequential; /*0x1A */
+ U16 IOTimeoutOther; /*0x1C */
+ U16 IOTimeoutBlockDevicesRM; /*0x1E */
+} MPI2_CONFIG_PAGE_BIOS_1,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_1,
+ Mpi2BiosPage1_t, *pMpi2BiosPage1_t;
+
+#define MPI2_BIOSPAGE1_PAGEVERSION (0x05)
+
+/*values for BIOS Page 1 BiosOptions field */
+#define MPI2_BIOSPAGE1_OPTIONS_MASK_UEFI_HII_REGISTRATION (0x00000006)
+#define MPI2_BIOSPAGE1_OPTIONS_ENABLE_UEFI_HII (0x00000000)
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_UEFI_HII (0x00000002)
+#define MPI2_BIOSPAGE1_OPTIONS_VERSION_CHECK_UEFI_HII (0x00000004)
+
+#define MPI2_BIOSPAGE1_OPTIONS_DISABLE_BIOS (0x00000001)
+
+/*values for BIOS Page 1 IOCSettings field */
+#define MPI2_BIOSPAGE1_IOCSET_MASK_BOOT_PREFERENCE (0x00030000)
+#define MPI2_BIOSPAGE1_IOCSET_ENCLOSURE_SLOT_BOOT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_SAS_ADDRESS_BOOT (0x00010000)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_RM_SETTING (0x000000C0)
+#define MPI2_BIOSPAGE1_IOCSET_NONE_RM_SETTING (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BOOT_RM_SETTING (0x00000040)
+#define MPI2_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING (0x00000080)
+
+#define MPI2_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT (0x00000030)
+#define MPI2_BIOSPAGE1_IOCSET_NO_SUPPORT (0x00000000)
+#define MPI2_BIOSPAGE1_IOCSET_BIOS_SUPPORT (0x00000010)
+#define MPI2_BIOSPAGE1_IOCSET_OS_SUPPORT (0x00000020)
+#define MPI2_BIOSPAGE1_IOCSET_ALL_SUPPORT (0x00000030)
+
+#define MPI2_BIOSPAGE1_IOCSET_ALTERNATE_CHS (0x00000008)
+
+/*values for BIOS Page 1 DeviceSettings field */
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SMART_POLLING (0x00000010)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN (0x00000008)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_RM_LUN (0x00000004)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN (0x00000002)
+#define MPI2_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN (0x00000001)
+
+/*defines for BIOS Page 1 UEFIVersion field */
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK (0xFF00)
+#define MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT (8)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK (0x00FF)
+#define MPI2_BIOSPAGE1_UEFI_VER_MINOR_SHIFT (0)
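+
+/*
+ *Illustrative sketch (hypothetical host code): UEFIVersion packs a major
+ *and a minor number, recovered with the mask and shift defines above.
+ */
+static inline U8
+mpi2_biospage1_uefi_major(U16 uefi_version)
+{
+	return (U8)((uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MAJOR_MASK) >>
+	    MPI2_BIOSPAGE1_UEFI_VER_MAJOR_SHIFT);
+}
+
+static inline U8
+mpi2_biospage1_uefi_minor(U16 uefi_version)
+{
+	return (U8)(uefi_version & MPI2_BIOSPAGE1_UEFI_VER_MINOR_MASK);
+}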
+
+
+
+/*BIOS Page 2 */
+
+typedef struct _MPI2_BOOT_DEVICE_ADAPTER_ORDER {
+ U32 Reserved1; /*0x00 */
+ U32 Reserved2; /*0x04 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ *PTR_MPI2_BOOT_DEVICE_ADAPTER_ORDER,
+ Mpi2BootDeviceAdapterOrder_t,
+ *pMpi2BootDeviceAdapterOrder_t;
+
+typedef struct _MPI2_BOOT_DEVICE_SAS_WWID {
+ U64 SASAddress; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_SAS_WWID,
+ *PTR_MPI2_BOOT_DEVICE_SAS_WWID,
+ Mpi2BootDeviceSasWwid_t,
+ *pMpi2BootDeviceSasWwid_t;
+
+typedef struct _MPI2_BOOT_DEVICE_ENCLOSURE_SLOT {
+ U64 EnclosureLogicalID; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 SlotNumber; /*0x10 */
+ U16 Reserved3; /*0x12 */
+ U32 Reserved4; /*0x14 */
+} MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ *PTR_MPI2_BOOT_DEVICE_ENCLOSURE_SLOT,
+ Mpi2BootDeviceEnclosureSlot_t,
+ *pMpi2BootDeviceEnclosureSlot_t;
+
+typedef struct _MPI2_BOOT_DEVICE_DEVICE_NAME {
+ U64 DeviceName; /*0x00 */
+ U8 LUN[8]; /*0x08 */
+ U32 Reserved1; /*0x10 */
+ U32 Reserved2; /*0x14 */
+} MPI2_BOOT_DEVICE_DEVICE_NAME,
+ *PTR_MPI2_BOOT_DEVICE_DEVICE_NAME,
+ Mpi2BootDeviceDeviceName_t,
+ *pMpi2BootDeviceDeviceName_t;
+
+typedef union _MPI2_BIOSPAGE2_BOOT_DEVICE {
+ MPI2_BOOT_DEVICE_ADAPTER_ORDER AdapterOrder;
+ MPI2_BOOT_DEVICE_SAS_WWID SasWwid;
+ MPI2_BOOT_DEVICE_ENCLOSURE_SLOT EnclosureSlot;
+ MPI2_BOOT_DEVICE_DEVICE_NAME DeviceName;
+} MPI2_BIOSPAGE2_BOOT_DEVICE,
+ *PTR_MPI2_BIOSPAGE2_BOOT_DEVICE,
+ Mpi2BiosPage2BootDevice_t,
+ *pMpi2BiosPage2BootDevice_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_2 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+ U32 Reserved5; /*0x14 */
+ U32 Reserved6; /*0x18 */
+ U8 ReqBootDeviceForm; /*0x1C */
+ U8 Reserved7; /*0x1D */
+ U16 Reserved8; /*0x1E */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedBootDevice; /*0x20 */
+ U8 ReqAltBootDeviceForm; /*0x38 */
+ U8 Reserved9; /*0x39 */
+ U16 Reserved10; /*0x3A */
+ MPI2_BIOSPAGE2_BOOT_DEVICE RequestedAltBootDevice; /*0x3C */
+ U8 CurrentBootDeviceForm; /*0x54 */
+ U8 Reserved11; /*0x55 */
+ U16 Reserved12; /*0x56 */
+ MPI2_BIOSPAGE2_BOOT_DEVICE CurrentBootDevice; /*0x58 */
+} MPI2_CONFIG_PAGE_BIOS_2, *PTR_MPI2_CONFIG_PAGE_BIOS_2,
+ Mpi2BiosPage2_t, *pMpi2BiosPage2_t;
+
+#define MPI2_BIOSPAGE2_PAGEVERSION (0x04)
+
+/*values for BIOS Page 2 BootDeviceForm fields */
+#define MPI2_BIOSPAGE2_FORM_MASK (0x0F)
+#define MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED (0x00)
+#define MPI2_BIOSPAGE2_FORM_SAS_WWID (0x05)
+#define MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT (0x06)
+#define MPI2_BIOSPAGE2_FORM_DEVICE_NAME (0x07)
+
+
+/*BIOS Page 3 */
+
+typedef struct _MPI2_ADAPTER_INFO {
+ U8 PciBusNumber; /*0x00 */
+ U8 PciDeviceAndFunctionNumber; /*0x01 */
+ U16 AdapterFlags; /*0x02 */
+} MPI2_ADAPTER_INFO, *PTR_MPI2_ADAPTER_INFO,
+ Mpi2AdapterInfo_t, *pMpi2AdapterInfo_t;
+
+#define MPI2_ADAPTER_INFO_FLAGS_EMBEDDED (0x0001)
+#define MPI2_ADAPTER_INFO_FLAGS_INIT_STATUS (0x0002)
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_3 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U32 GlobalFlags; /*0x04 */
+ U32 BiosVersion; /*0x08 */
+ MPI2_ADAPTER_INFO AdapterOrder[4]; /*0x0C */
+ U32 Reserved1; /*0x1C */
+} MPI2_CONFIG_PAGE_BIOS_3,
+ *PTR_MPI2_CONFIG_PAGE_BIOS_3,
+ Mpi2BiosPage3_t, *pMpi2BiosPage3_t;
+
+#define MPI2_BIOSPAGE3_PAGEVERSION (0x00)
+
+/*values for BIOS Page 3 GlobalFlags */
+#define MPI2_BIOSPAGE3_FLAGS_PAUSE_ON_ERROR (0x00000002)
+#define MPI2_BIOSPAGE3_FLAGS_VERBOSE_ENABLE (0x00000004)
+#define MPI2_BIOSPAGE3_FLAGS_HOOK_INT_40_DISABLE (0x00000010)
+
+#define MPI2_BIOSPAGE3_FLAGS_DEV_LIST_DISPLAY_MASK (0x000000E0)
+#define MPI2_BIOSPAGE3_FLAGS_INSTALLED_DEV_DISPLAY (0x00000000)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DISPLAY (0x00000020)
+#define MPI2_BIOSPAGE3_FLAGS_ADAPTER_DEV_DISPLAY (0x00000040)
+
+
+/*BIOS Page 4 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_BIOS_PAGE_4_PHY_ENTRIES
+#define MPI2_BIOS_PAGE_4_PHY_ENTRIES (1)
+#endif
+
+typedef struct _MPI2_BIOS4_ENTRY {
+ U64 ReassignmentWWID; /*0x00 */
+ U64 ReassignmentDeviceName; /*0x08 */
+} MPI2_BIOS4_ENTRY, *PTR_MPI2_BIOS4_ENTRY,
+ Mpi2Bios4Entry_t, *pMpi2Bios4Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_BIOS_4 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ MPI2_BIOS4_ENTRY
+ Phy[MPI2_BIOS_PAGE_4_PHY_ENTRIES]; /*0x08 */
+} MPI2_CONFIG_PAGE_BIOS_4, *PTR_MPI2_CONFIG_PAGE_BIOS_4,
+ Mpi2BiosPage4_t, *pMpi2BiosPage4_t;
+
+#define MPI2_BIOSPAGE4_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* RAID Volume Config Pages
+****************************************************************************/
+
+/*RAID Volume Page 0 */
+
+typedef struct _MPI2_RAIDVOL0_PHYS_DISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U8 PhysDiskNum; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_PHYS_DISK, *PTR_MPI2_RAIDVOL0_PHYS_DISK,
+ Mpi2RaidVol0PhysDisk_t, *pMpi2RaidVol0PhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDVOL0_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDVOL0_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAIDVOL0_SETTINGS {
+ U16 Settings; /*0x00 */
+ U8 HotSparePool; /*0x02 */
+ U8 Reserved; /*0x03 */
+} MPI2_RAIDVOL0_SETTINGS, *PTR_MPI2_RAIDVOL0_SETTINGS,
+ Mpi2RaidVol0Settings_t,
+ *pMpi2RaidVol0Settings_t;
+
+/*RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI2_RAID_HOT_SPARE_POOL_0 (0x01)
+#define MPI2_RAID_HOT_SPARE_POOL_1 (0x02)
+#define MPI2_RAID_HOT_SPARE_POOL_2 (0x04)
+#define MPI2_RAID_HOT_SPARE_POOL_3 (0x08)
+#define MPI2_RAID_HOT_SPARE_POOL_4 (0x10)
+#define MPI2_RAID_HOT_SPARE_POOL_5 (0x20)
+#define MPI2_RAID_HOT_SPARE_POOL_6 (0x40)
+#define MPI2_RAID_HOT_SPARE_POOL_7 (0x80)
+
+/*RAID Volume Page 0 VolumeSettings defines */
+#define MPI2_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX (0x0008)
+#define MPI2_RAIDVOL0_SETTING_AUTO_CONFIG_HSWAP_DISABLE (0x0004)
+
+#define MPI2_RAIDVOL0_SETTING_MASK_WRITE_CACHING (0x0003)
+#define MPI2_RAIDVOL0_SETTING_UNCHANGED (0x0000)
+#define MPI2_RAIDVOL0_SETTING_DISABLE_WRITE_CACHING (0x0001)
+#define MPI2_RAIDVOL0_SETTING_ENABLE_WRITE_CACHING (0x0002)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDisks at runtime.
+ */
+#ifndef MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 VolumeState; /*0x06 */
+ U8 VolumeType; /*0x07 */
+ U32 VolumeStatusFlags; /*0x08 */
+ MPI2_RAIDVOL0_SETTINGS VolumeSettings; /*0x0C */
+ U64 MaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U16 BlockSize; /*0x1C */
+ U16 Reserved1; /*0x1E */
+ U8 SupportedPhysDisks;/*0x20 */
+ U8 ResyncRate; /*0x21 */
+ U16 DataScrubDuration; /*0x22 */
+ U8 NumPhysDisks; /*0x24 */
+ U8 Reserved2; /*0x25 */
+ U8 Reserved3; /*0x26 */
+ U8 InactiveStatus; /*0x27 */
+ MPI2_RAIDVOL0_PHYS_DISK
+ PhysDisk[MPI2_RAID_VOL_PAGE_0_PHYSDISK_MAX]; /*0x28 */
+} MPI2_CONFIG_PAGE_RAID_VOL_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_0,
+ Mpi2RaidVolPage0_t, *pMpi2RaidVolPage0_t;
+
+#define MPI2_RAIDVOLPAGE0_PAGEVERSION (0x0A)
+
+/*values for RAID VolumeState */
+#define MPI2_RAID_VOL_STATE_MISSING (0x00)
+#define MPI2_RAID_VOL_STATE_FAILED (0x01)
+#define MPI2_RAID_VOL_STATE_INITIALIZING (0x02)
+#define MPI2_RAID_VOL_STATE_ONLINE (0x03)
+#define MPI2_RAID_VOL_STATE_DEGRADED (0x04)
+#define MPI2_RAID_VOL_STATE_OPTIMAL (0x05)
+
+/*values for RAID VolumeType */
+#define MPI2_RAID_VOL_TYPE_RAID0 (0x00)
+#define MPI2_RAID_VOL_TYPE_RAID1E (0x01)
+#define MPI2_RAID_VOL_TYPE_RAID1 (0x02)
+#define MPI2_RAID_VOL_TYPE_RAID10 (0x05)
+#define MPI2_RAID_VOL_TYPE_UNKNOWN (0xFF)
+
+/*values for RAID Volume Page 0 VolumeStatusFlags field */
+#define MPI2_RAIDVOL0_STATUS_FLAG_PENDING_RESYNC (0x02000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKG_INIT_PENDING (0x01000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MDC_PENDING (0x00800000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_USER_CONSIST_PENDING (0x00400000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_MAKE_DATA_CONSISTENT (0x00200000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_DATA_SCRUB (0x00100000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CONSISTENCY_CHECK (0x00080000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_CAPACITY_EXPANSION (0x00040000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BACKGROUND_INIT (0x00020000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS (0x00010000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOL_NOT_CONSISTENT (0x00000080)
+#define MPI2_RAIDVOL0_STATUS_FLAG_OCE_ALLOWED (0x00000040)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BGI_COMPLETE (0x00000020)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_OFFSET_MIRROR (0x00000000)
+#define MPI2_RAIDVOL0_STATUS_FLAG_1E_ADJACENT_MIRROR (0x00000010)
+#define MPI2_RAIDVOL0_STATUS_FLAG_BAD_BLOCK_TABLE_FULL (0x00000008)
+#define MPI2_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE (0x00000004)
+#define MPI2_RAIDVOL0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_RAIDVOL0_STATUS_FLAG_ENABLED (0x00000001)
+
+/*values for RAID Volume Page 0 SupportedPhysDisks field */
+#define MPI2_RAIDVOL0_SUPPORT_SOLID_STATE_DISKS (0x08)
+#define MPI2_RAIDVOL0_SUPPORT_HARD_DISKS (0x04)
+#define MPI2_RAIDVOL0_SUPPORT_SAS_PROTOCOL (0x02)
+#define MPI2_RAIDVOL0_SUPPORT_SATA_PROTOCOL (0x01)
+
+/*values for RAID Volume Page 0 InactiveStatus field */
+#define MPI2_RAIDVOLPAGE0_UNKNOWN_INACTIVE (0x00)
+#define MPI2_RAIDVOLPAGE0_STALE_METADATA_INACTIVE (0x01)
+#define MPI2_RAIDVOLPAGE0_FOREIGN_VOLUME_INACTIVE (0x02)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_RESOURCE_INACTIVE (0x03)
+#define MPI2_RAIDVOLPAGE0_CLONE_VOLUME_INACTIVE (0x04)
+#define MPI2_RAIDVOLPAGE0_INSUFFICIENT_METADATA_INACTIVE (0x05)
+#define MPI2_RAIDVOLPAGE0_PREVIOUSLY_DELETED (0x06)
+
+
+/*RAID Volume Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_VOL_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U16 Reserved0; /*0x06 */
+ U8 GUID[24]; /*0x08 */
+ U8 Name[16]; /*0x20 */
+ U64 WWID; /*0x30 */
+ U32 Reserved1; /*0x38 */
+ U32 Reserved2; /*0x3C */
+} MPI2_CONFIG_PAGE_RAID_VOL_1,
+ *PTR_MPI2_CONFIG_PAGE_RAID_VOL_1,
+ Mpi2RaidVolPage1_t, *pMpi2RaidVolPage1_t;
+
+#define MPI2_RAIDVOLPAGE1_PAGEVERSION (0x03)
+
+
+/****************************************************************************
+* RAID Physical Disk Config Pages
+****************************************************************************/
+
+/*RAID Physical Disk Page 0 */
+
+typedef struct _MPI2_RAIDPHYSDISK0_SETTINGS {
+ U16 Reserved1; /*0x00 */
+ U8 HotSparePool; /*0x02 */
+ U8 Reserved2; /*0x03 */
+} MPI2_RAIDPHYSDISK0_SETTINGS,
+ *PTR_MPI2_RAIDPHYSDISK0_SETTINGS,
+ Mpi2RaidPhysDisk0Settings_t,
+ *pMpi2RaidPhysDisk0Settings_t;
+
+/*use MPI2_RAID_HOT_SPARE_POOL_ defines for the HotSparePool field */
+
+typedef struct _MPI2_RAIDPHYSDISK0_INQUIRY_DATA {
+ U8 VendorID[8]; /*0x00 */
+ U8 ProductID[16]; /*0x08 */
+ U8 ProductRevLevel[4]; /*0x18 */
+ U8 SerialNum[32]; /*0x1C */
+} MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ *PTR_MPI2_RAIDPHYSDISK0_INQUIRY_DATA,
+ Mpi2RaidPhysDisk0InquiryData_t,
+ *pMpi2RaidPhysDisk0InquiryData_t;
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_0 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U16 DevHandle; /*0x04 */
+ U8 Reserved1; /*0x06 */
+ U8 PhysDiskNum; /*0x07 */
+ MPI2_RAIDPHYSDISK0_SETTINGS PhysDiskSettings; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ MPI2_RAIDPHYSDISK0_INQUIRY_DATA InquiryData; /*0x10 */
+ U32 Reserved3; /*0x4C */
+ U8 PhysDiskState; /*0x50 */
+ U8 OfflineReason; /*0x51 */
+ U8 IncompatibleReason; /*0x52 */
+ U8 PhysDiskAttributes; /*0x53 */
+ U32 PhysDiskStatusFlags;/*0x54 */
+ U64 DeviceMaxLBA; /*0x58 */
+ U64 HostMaxLBA; /*0x60 */
+ U64 CoercedMaxLBA; /*0x68 */
+ U16 BlockSize; /*0x70 */
+ U16 Reserved5; /*0x72 */
+ U32 Reserved6; /*0x74 */
+} MPI2_CONFIG_PAGE_RD_PDISK_0,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_0,
+ Mpi2RaidPhysDiskPage0_t,
+ *pMpi2RaidPhysDiskPage0_t;
+
+#define MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION (0x05)
+
+/*PhysDiskState defines */
+#define MPI2_RAID_PD_STATE_NOT_CONFIGURED (0x00)
+#define MPI2_RAID_PD_STATE_NOT_COMPATIBLE (0x01)
+#define MPI2_RAID_PD_STATE_OFFLINE (0x02)
+#define MPI2_RAID_PD_STATE_ONLINE (0x03)
+#define MPI2_RAID_PD_STATE_HOT_SPARE (0x04)
+#define MPI2_RAID_PD_STATE_DEGRADED (0x05)
+#define MPI2_RAID_PD_STATE_REBUILDING (0x06)
+#define MPI2_RAID_PD_STATE_OPTIMAL (0x07)
+
+/*OfflineReason defines */
+#define MPI2_PHYSDISK0_ONLINE (0x00)
+#define MPI2_PHYSDISK0_OFFLINE_MISSING (0x01)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED (0x03)
+#define MPI2_PHYSDISK0_OFFLINE_INITIALIZING (0x04)
+#define MPI2_PHYSDISK0_OFFLINE_REQUESTED (0x05)
+#define MPI2_PHYSDISK0_OFFLINE_FAILED_REQUESTED (0x06)
+#define MPI2_PHYSDISK0_OFFLINE_OTHER (0xFF)
+
+/*IncompatibleReason defines */
+#define MPI2_PHYSDISK0_COMPATIBLE (0x00)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_PROTOCOL (0x01)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_BLOCKSIZE (0x02)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MAX_LBA (0x03)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_SATA_EXTENDED_CMD (0x04)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_REMOVEABLE_MEDIA (0x05)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_MEDIA_TYPE (0x06)
+#define MPI2_PHYSDISK0_INCOMPATIBLE_UNKNOWN (0xFF)
+
+/*PhysDiskAttributes defines */
+#define MPI2_PHYSDISK0_ATTRIB_MEDIA_MASK (0x0C)
+#define MPI2_PHYSDISK0_ATTRIB_SOLID_STATE_DRIVE (0x08)
+#define MPI2_PHYSDISK0_ATTRIB_HARD_DISK_DRIVE (0x04)
+
+#define MPI2_PHYSDISK0_ATTRIB_PROTOCOL_MASK (0x03)
+#define MPI2_PHYSDISK0_ATTRIB_SAS_PROTOCOL (0x02)
+#define MPI2_PHYSDISK0_ATTRIB_SATA_PROTOCOL (0x01)
+
+/*PhysDiskStatusFlags defines */
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_CERTIFIED (0x00000040)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OCE_TARGET (0x00000020)
+#define MPI2_PHYSDISK0_STATUS_FLAG_WRITE_CACHE_ENABLED (0x00000010)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OPTIMAL_PREVIOUS (0x00000000)
+#define MPI2_PHYSDISK0_STATUS_FLAG_NOT_OPTIMAL_PREVIOUS (0x00000008)
+#define MPI2_PHYSDISK0_STATUS_FLAG_INACTIVE_VOLUME (0x00000004)
+#define MPI2_PHYSDISK0_STATUS_FLAG_QUIESCED (0x00000002)
+#define MPI2_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC (0x00000001)
+
+
+/*RAID Physical Disk Page 1 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhysDiskPaths at runtime.
+ */
+#ifndef MPI2_RAID_PHYS_DISK1_PATH_MAX
+#define MPI2_RAID_PHYS_DISK1_PATH_MAX (1)
+#endif
+
+typedef struct _MPI2_RAIDPHYSDISK1_PATH {
+ U16 DevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U64 WWID; /*0x04 */
+ U64 OwnerWWID; /*0x0C */
+ U8 OwnerIdentifier; /*0x14 */
+ U8 Reserved2; /*0x15 */
+ U16 Flags; /*0x16 */
+} MPI2_RAIDPHYSDISK1_PATH, *PTR_MPI2_RAIDPHYSDISK1_PATH,
+ Mpi2RaidPhysDisk1Path_t,
+ *pMpi2RaidPhysDisk1Path_t;
+
+/*RAID Physical Disk Page 1 Physical Disk Path Flags field defines */
+#define MPI2_RAID_PHYSDISK1_FLAG_PRIMARY (0x0004)
+#define MPI2_RAID_PHYSDISK1_FLAG_BROKEN (0x0002)
+#define MPI2_RAID_PHYSDISK1_FLAG_INVALID (0x0001)
+
+typedef struct _MPI2_CONFIG_PAGE_RD_PDISK_1 {
+ MPI2_CONFIG_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhysDiskPaths; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 Reserved1; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ MPI2_RAIDPHYSDISK1_PATH
+ PhysicalDiskPath[MPI2_RAID_PHYS_DISK1_PATH_MAX];/*0x0C */
+} MPI2_CONFIG_PAGE_RD_PDISK_1,
+ *PTR_MPI2_CONFIG_PAGE_RD_PDISK_1,
+ Mpi2RaidPhysDiskPage1_t,
+ *pMpi2RaidPhysDiskPage1_t;
+
+#define MPI2_RAIDPHYSDISKPAGE1_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* values for fields used by several types of SAS Config Pages
+****************************************************************************/
+
+/*values for NegotiatedLinkRates fields */
+#define MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL (0xF0)
+#define MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL (4)
+#define MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL (0x0F)
+/*link rates used for Negotiated Physical and Logical Link Rate */
+#define MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED (0x01)
+#define MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED (0x02)
+#define MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE (0x03)
+#define MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR (0x04)
+#define MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_SAS_NEG_LINK_RATE_UNSUPPORTED_PHY (0x06)
+#define MPI2_SAS_NEG_LINK_RATE_1_5 (0x08)
+#define MPI2_SAS_NEG_LINK_RATE_3_0 (0x09)
+#define MPI2_SAS_NEG_LINK_RATE_6_0 (0x0A)
+#define MPI25_SAS_NEG_LINK_RATE_12_0 (0x0B)
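+
+/*
+ *Illustrative sketch (hypothetical host code): a NegotiatedLinkRates byte
+ *carries the logical rate in the upper nibble and the physical rate in the
+ *lower nibble; both nibbles use the MPI2_SAS_NEG_LINK_RATE_ values above.
+ */
+static inline U8
+mpi2_sas_neg_link_rate_logical(U8 rates)
+{
+	return (rates & MPI2_SAS_NEG_LINK_RATE_MASK_LOGICAL) >>
+	    MPI2_SAS_NEG_LINK_RATE_SHIFT_LOGICAL;
+}
+
+static inline U8
+mpi2_sas_neg_link_rate_physical(U8 rates)
+{
+	return rates & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL;
+}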
+
+
+/*values for AttachedPhyInfo fields */
+#define MPI2_SAS_APHYINFO_INSIDE_ZPSDS_PERSISTENT (0x00000040)
+#define MPI2_SAS_APHYINFO_REQUESTED_INSIDE_ZPSDS (0x00000020)
+#define MPI2_SAS_APHYINFO_BREAK_REPLY_CAPABLE (0x00000010)
+
+#define MPI2_SAS_APHYINFO_REASON_MASK (0x0000000F)
+#define MPI2_SAS_APHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_APHYINFO_REASON_POWER_ON (0x00000001)
+#define MPI2_SAS_APHYINFO_REASON_HARD_RESET (0x00000002)
+#define MPI2_SAS_APHYINFO_REASON_SMP_PHY_CONTROL (0x00000003)
+#define MPI2_SAS_APHYINFO_REASON_LOSS_OF_SYNC (0x00000004)
+#define MPI2_SAS_APHYINFO_REASON_MULTIPLEXING_SEQ (0x00000005)
+#define MPI2_SAS_APHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00000006)
+#define MPI2_SAS_APHYINFO_REASON_BREAK_TIMEOUT (0x00000007)
+#define MPI2_SAS_APHYINFO_REASON_PHY_TEST_STOPPED (0x00000008)
+
+
+/*values for PhyInfo fields */
+#define MPI2_SAS_PHYINFO_PHY_VACANT (0x80000000)
+
+#define MPI2_SAS_PHYINFO_PHY_POWER_CONDITION_MASK (0x18000000)
+#define MPI2_SAS_PHYINFO_SHIFT_PHY_POWER_CONDITION (27)
+#define MPI2_SAS_PHYINFO_PHY_POWER_ACTIVE (0x00000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_PARTIAL (0x08000000)
+#define MPI2_SAS_PHYINFO_PHY_POWER_SLUMBER (0x10000000)
+
+#define MPI2_SAS_PHYINFO_CHANGED_REQ_INSIDE_ZPSDS (0x04000000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS_PERSISTENT (0x02000000)
+#define MPI2_SAS_PHYINFO_REQ_INSIDE_ZPSDS (0x01000000)
+#define MPI2_SAS_PHYINFO_ZONE_GROUP_PERSISTENT (0x00400000)
+#define MPI2_SAS_PHYINFO_INSIDE_ZPSDS (0x00200000)
+#define MPI2_SAS_PHYINFO_ZONING_ENABLED (0x00100000)
+
+#define MPI2_SAS_PHYINFO_REASON_MASK (0x000F0000)
+#define MPI2_SAS_PHYINFO_REASON_UNKNOWN (0x00000000)
+#define MPI2_SAS_PHYINFO_REASON_POWER_ON (0x00010000)
+#define MPI2_SAS_PHYINFO_REASON_HARD_RESET (0x00020000)
+#define MPI2_SAS_PHYINFO_REASON_SMP_PHY_CONTROL (0x00030000)
+#define MPI2_SAS_PHYINFO_REASON_LOSS_OF_SYNC (0x00040000)
+#define MPI2_SAS_PHYINFO_REASON_MULTIPLEXING_SEQ (0x00050000)
+#define MPI2_SAS_PHYINFO_REASON_IT_NEXUS_LOSS_TIMER (0x00060000)
+#define MPI2_SAS_PHYINFO_REASON_BREAK_TIMEOUT (0x00070000)
+#define MPI2_SAS_PHYINFO_REASON_PHY_TEST_STOPPED (0x00080000)
+
+#define MPI2_SAS_PHYINFO_MULTIPLEXING_SUPPORTED (0x00008000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_ACTIVE (0x00004000)
+#define MPI2_SAS_PHYINFO_SATA_PORT_SELECTOR_PRESENT (0x00002000)
+#define MPI2_SAS_PHYINFO_VIRTUAL_PHY (0x00001000)
+
+#define MPI2_SAS_PHYINFO_MASK_PARTIAL_PATHWAY_TIME (0x00000F00)
+#define MPI2_SAS_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME (8)
+
+#define MPI2_SAS_PHYINFO_MASK_ROUTING_ATTRIBUTE (0x000000F0)
+#define MPI2_SAS_PHYINFO_DIRECT_ROUTING (0x00000000)
+#define MPI2_SAS_PHYINFO_SUBTRACTIVE_ROUTING (0x00000010)
+#define MPI2_SAS_PHYINFO_TABLE_ROUTING (0x00000020)
+
+
+/*values for SAS ProgrammedLinkRate fields */
+#define MPI2_SAS_PRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_PRATE_MAX_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_PRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_PRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_PRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE (0x00)
+#define MPI2_SAS_PRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_PRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_PRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_PRATE_MIN_RATE_12_0 (0x0B)
+
+
+/*values for SAS HwLinkRate fields */
+#define MPI2_SAS_HWRATE_MAX_RATE_MASK (0xF0)
+#define MPI2_SAS_HWRATE_MAX_RATE_1_5 (0x80)
+#define MPI2_SAS_HWRATE_MAX_RATE_3_0 (0x90)
+#define MPI2_SAS_HWRATE_MAX_RATE_6_0 (0xA0)
+#define MPI2_SAS_HWRATE_MIN_RATE_MASK (0x0F)
+#define MPI2_SAS_HWRATE_MIN_RATE_1_5 (0x08)
+#define MPI2_SAS_HWRATE_MIN_RATE_3_0 (0x09)
+#define MPI2_SAS_HWRATE_MIN_RATE_6_0 (0x0A)
+#define MPI25_SAS_HWRATE_MIN_RATE_12_0 (0x0B)
+
+
+
+/****************************************************************************
+* SAS IO Unit Config Pages
+****************************************************************************/
+
+/*SAS IO Unit Page 0 */
+
+typedef struct _MPI2_SAS_IO_UNIT0_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 NegotiatedLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo;/*0x04 */
+ U16 AttachedDevHandle; /*0x08 */
+ U16 ControllerDevHandle; /*0x0A */
+ U32 DiscoveryStatus; /*0x0C */
+ U32 Reserved; /*0x10 */
+} MPI2_SAS_IO_UNIT0_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT0_PHY_DATA,
+ Mpi2SasIOUnit0PhyData_t,
+ *pMpi2SasIOUnit0PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT0_PHY_MAX
+#define MPI2_SAS_IOUNIT0_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1;/*0x08 */
+ U8 NumPhys; /*0x0C */
+ U8 Reserved2;/*0x0D */
+ U16 Reserved3;/*0x0E */
+ MPI2_SAS_IO_UNIT0_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT0_PHY_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_0,
+ Mpi2SasIOUnitPage0_t, *pMpi2SasIOUnitPage0_t;
+
+#define MPI2_SASIOUNITPAGE0_PAGEVERSION (0x05)
+
+/*values for SAS IO Unit Page 0 PortFlags */
+#define MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS (0x08)
+#define MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 0 PhyFlags */
+#define MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED (0x10)
+#define MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED (0x08)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*see mpi2_sas.h for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+/*values for SAS IO Unit Page 0 DiscoveryStatus */
+#define MPI2_SASIOUNIT0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SASIOUNIT0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SASIOUNIT0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SASIOUNIT0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SASIOUNIT0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SASIOUNIT0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SASIOUNIT0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SASIOUNIT0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SASIOUNIT0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SASIOUNIT0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SASIOUNIT0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SASIOUNIT0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SASIOUNIT0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SASIOUNIT0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SASIOUNIT0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SASIOUNIT0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SASIOUNIT0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SASIOUNIT0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SASIOUNIT0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SASIOUNIT0_DS_LOOP_DETECTED (0x00000001)
+
+
+/*SAS IO Unit Page 1 */
+
+typedef struct _MPI2_SAS_IO_UNIT1_PHY_DATA {
+ U8 Port; /*0x00 */
+ U8 PortFlags; /*0x01 */
+ U8 PhyFlags; /*0x02 */
+ U8 MaxMinLinkRate; /*0x03 */
+ U32 ControllerPhyDeviceInfo; /*0x04 */
+ U16 MaxTargetPortConnectTime; /*0x08 */
+ U16 Reserved1; /*0x0A */
+} MPI2_SAS_IO_UNIT1_PHY_DATA,
+ *PTR_MPI2_SAS_IO_UNIT1_PHY_DATA,
+ Mpi2SasIOUnit1PhyData_t,
+ *pMpi2SasIOUnit1PhyData_t;
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT1_PHY_MAX
+#define MPI2_SAS_IOUNIT1_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U16
+ ControlFlags; /*0x08 */
+ U16
+ SASNarrowMaxQueueDepth; /*0x0A */
+ U16
+ AdditionalControlFlags; /*0x0C */
+ U16
+ SASWideMaxQueueDepth; /*0x0E */
+ U8
+ NumPhys; /*0x10 */
+ U8
+ SATAMaxQDepth; /*0x11 */
+ U8
+ ReportDeviceMissingDelay; /*0x12 */
+ U8
+ IODeviceMissingDelay; /*0x13 */
+ MPI2_SAS_IO_UNIT1_PHY_DATA
+ PhyData[MPI2_SAS_IOUNIT1_PHY_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_1,
+ Mpi2SasIOUnitPage1_t, *pMpi2SasIOUnitPage1_t;
+
+#define MPI2_SASIOUNITPAGE1_PAGEVERSION (0x09)
+
+/*values for SAS IO Unit Page 1 ControlFlags */
+#define MPI2_SASIOUNIT1_CONTROL_DEVICE_SELF_TEST (0x8000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_3_0_MAX (0x4000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_1_5_MAX (0x2000)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SW_PRESERVE (0x1000)
+
+#define MPI2_SASIOUNIT1_CONTROL_MASK_DEV_SUPPORT (0x0600)
+#define MPI2_SASIOUNIT1_CONTROL_SHIFT_DEV_SUPPORT (9)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SUPPORT_BOTH (0x0)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SAS_SUPPORT (0x1)
+#define MPI2_SASIOUNIT1_CONTROL_DEV_SATA_SUPPORT (0x2)
+
+#define MPI2_SASIOUNIT1_CONTROL_SATA_48BIT_LBA_REQUIRED (0x0080)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_SMART_REQUIRED (0x0040)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_NCQ_REQUIRED (0x0020)
+#define MPI2_SASIOUNIT1_CONTROL_SATA_FUA_REQUIRED (0x0010)
+#define MPI2_SASIOUNIT1_CONTROL_TABLE_SUBTRACTIVE_ILLEGAL (0x0008)
+#define MPI2_SASIOUNIT1_CONTROL_SUBTRACTIVE_ILLEGAL (0x0004)
+#define MPI2_SASIOUNIT1_CONTROL_FIRST_LVL_DISC_ONLY (0x0002)
+#define MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION (0x0001)
+
+/*values for SAS IO Unit Page 1 AdditionalControlFlags */
+#define MPI2_SASIOUNIT1_ACONTROL_MULTI_PORT_DOMAIN_ILLEGAL (0x0080)
+#define MPI2_SASIOUNIT1_ACONTROL_SATA_ASYNCHROUNOUS_NOTIFICATION (0x0040)
+#define MPI2_SASIOUNIT1_ACONTROL_INVALID_TOPOLOGY_CORRECTION (0x0020)
+#define MPI2_SASIOUNIT1_ACONTROL_PORT_ENABLE_ONLY_SATA_LINK_RESET (0x0010)
+#define MPI2_SASIOUNIT1_ACONTROL_OTHER_AFFILIATION_SATA_LINK_RESET (0x0008)
+#define MPI2_SASIOUNIT1_ACONTROL_SELF_AFFILIATION_SATA_LINK_RESET (0x0004)
+#define MPI2_SASIOUNIT1_ACONTROL_NO_AFFILIATION_SATA_LINK_RESET (0x0002)
+#define MPI2_SASIOUNIT1_ACONTROL_ALLOW_TABLE_TO_TABLE (0x0001)
+
+/*defines for SAS IO Unit Page 1 ReportDeviceMissingDelay */
+#define MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK (0x7F)
+#define MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16 (0x80)
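
Going by the define names, the low seven bits hold the delay count and the UNIT_16 bit scales it to units of 16 seconds; a small decoding sketch under that assumption:

static unsigned int report_missing_delay_seconds(U8 rdmd)
{
	unsigned int delay = rdmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;

	/* When UNIT_16 is set, the count is in units of 16 seconds. */
	if (rdmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
		delay *= 16;
	return delay;
}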
+
+/*values for SAS IO Unit Page 1 PortFlags */
+#define MPI2_SASIOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG (0x01)
+
+/*values for SAS IO Unit Page 1 PhyFlags */
+#define MPI2_SASIOUNIT1_PHYFLAGS_ZONING_ENABLE (0x10)
+#define MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE (0x08)
+
+/*values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI2_SASIOUNIT1_MAX_RATE_MASK (0xF0)
+#define MPI2_SASIOUNIT1_MAX_RATE_1_5 (0x80)
+#define MPI2_SASIOUNIT1_MAX_RATE_3_0 (0x90)
+#define MPI2_SASIOUNIT1_MAX_RATE_6_0 (0xA0)
+#define MPI25_SASIOUNIT1_MAX_RATE_12_0 (0xB0)
+#define MPI2_SASIOUNIT1_MIN_RATE_MASK (0x0F)
+#define MPI2_SASIOUNIT1_MIN_RATE_1_5 (0x08)
+#define MPI2_SASIOUNIT1_MIN_RATE_3_0 (0x09)
+#define MPI2_SASIOUNIT1_MIN_RATE_6_0 (0x0A)
+#define MPI25_SASIOUNIT1_MIN_RATE_12_0 (0x0B)
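
MaxMinLinkRate packs the maximum rate into the high nibble and the minimum rate into the low nibble; a minimal sketch composing and decoding the byte with the defines above:

/* Restrict a phy to the 1.5-6.0 Gb/s range (yields 0xA8). */
static U8 make_max_min_link_rate(void)
{
	return MPI2_SASIOUNIT1_MAX_RATE_6_0 | MPI2_SASIOUNIT1_MIN_RATE_1_5;
}

static U8 max_link_rate(U8 max_min)
{
	return max_min & MPI2_SASIOUNIT1_MAX_RATE_MASK;	/* e.g. 0xA0 = 6.0 */
}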
+
+/*see mpi2_sas.h for values for
+ *SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+/*SAS IO Unit Page 4 */
+
+typedef struct _MPI2_SAS_IOUNIT4_SPINUP_GROUP {
+ U8 MaxTargetSpinup; /*0x00 */
+ U8 SpinupDelay; /*0x01 */
+ U8 SpinupFlags; /*0x02 */
+ U8 Reserved1; /*0x03 */
+} MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ *PTR_MPI2_SAS_IOUNIT4_SPINUP_GROUP,
+ Mpi2SasIOUnit4SpinupGroup_t,
+ *pMpi2SasIOUnit4SpinupGroup_t;
+/*defines for SAS IO Unit Page 4 SpinupFlags */
+#define MPI2_SASIOUNIT4_SPINUP_DISABLE_FLAG (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *four and check the value returned for NumPhys at runtime.

+ */
+#ifndef MPI2_SAS_IOUNIT4_PHY_MAX
+#define MPI2_SAS_IOUNIT4_PHY_MAX (4)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header;/*0x00 */
+ MPI2_SAS_IOUNIT4_SPINUP_GROUP
+ SpinupGroupParameters[4]; /*0x08 */
+ U32
+ Reserved1; /*0x18 */
+ U32
+ Reserved2; /*0x1C */
+ U32
+ Reserved3; /*0x20 */
+ U8
+ BootDeviceWaitTime; /*0x24 */
+ U8
+ Reserved4; /*0x25 */
+ U16
+ Reserved5; /*0x26 */
+ U8
+ NumPhys; /*0x28 */
+ U8
+ PEInitialSpinupDelay; /*0x29 */
+ U8
+ PEReplyDelay; /*0x2A */
+ U8
+ Flags; /*0x2B */
+ U8
+ PHY[MPI2_SAS_IOUNIT4_PHY_MAX]; /*0x2C */
+} MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_4,
+ Mpi2SasIOUnitPage4_t, *pMpi2SasIOUnitPage4_t;
+
+#define MPI2_SASIOUNITPAGE4_PAGEVERSION (0x02)
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT4_FLAGS_AUTO_PORTENABLE (0x01)
+
+/*defines for PHY field */
+#define MPI2_SASIOUNIT4_PHY_SPINUP_GROUP_MASK (0x03)
+
+
+/*SAS IO Unit Page 5 */
+
+typedef struct _MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS {
+ U8 ControlFlags; /*0x00 */
+ U8 PortWidthModGroup; /*0x01 */
+ U16 InactivityTimerExponent; /*0x02 */
+ U8 SATAPartialTimeout; /*0x04 */
+ U8 Reserved2; /*0x05 */
+ U8 SATASlumberTimeout; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U8 SASPartialTimeout; /*0x08 */
+ U8 Reserved4; /*0x09 */
+ U8 SASSlumberTimeout; /*0x0A */
+ U8 Reserved5; /*0x0B */
+} MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS,
+ Mpi2SasIOUnit5PhyPmSettings_t,
+ *pMpi2SasIOUnit5PhyPmSettings_t;
+
+/*defines for ControlFlags field */
+#define MPI2_SASIOUNIT5_CONTROL_SAS_SLUMBER_ENABLE (0x08)
+#define MPI2_SASIOUNIT5_CONTROL_SAS_PARTIAL_ENABLE (0x04)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_SLUMBER_ENABLE (0x02)
+#define MPI2_SASIOUNIT5_CONTROL_SATA_PARTIAL_ENABLE (0x01)
+
+/*defines for PortWidthModGroup field */
+#define MPI2_SASIOUNIT5_PWMG_DISABLE (0xFF)
+
+/*defines for InactivityTimerExponent field */
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_SLUMBER (0x7000)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER (12)
+#define MPI2_SASIOUNIT5_ITE_MASK_SAS_PARTIAL (0x0700)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SAS_PARTIAL (8)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_SLUMBER (0x0070)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_SLUMBER (4)
+#define MPI2_SASIOUNIT5_ITE_MASK_SATA_PARTIAL (0x0007)
+#define MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL (0)
+
+#define MPI2_SASIOUNIT5_ITE_TEN_SECONDS (7)
+#define MPI2_SASIOUNIT5_ITE_ONE_SECOND (6)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MILLISECONDS (5)
+#define MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS (4)
+#define MPI2_SASIOUNIT5_ITE_ONE_MILLISECOND (3)
+#define MPI2_SASIOUNIT5_ITE_HUNDRED_MICROSECONDS (2)
+#define MPI2_SASIOUNIT5_ITE_TEN_MICROSECONDS (1)
+#define MPI2_SASIOUNIT5_ITE_ONE_MICROSECOND (0)
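
InactivityTimerExponent carries four 3-bit exponents, one per power-management mode, each selecting one of the MPI2_SASIOUNIT5_ITE_* time values; a composition sketch using the masks and shifts above:

/* SAS slumber after 1 second of inactivity, SATA partial after 10 ms. */
static U16 make_inactivity_timer_exponent(void)
{
	return (U16)((MPI2_SASIOUNIT5_ITE_ONE_SECOND <<
			MPI2_SASIOUNIT5_ITE_SHIFT_SAS_SLUMBER) |
		     (MPI2_SASIOUNIT5_ITE_TEN_MILLISECONDS <<
			MPI2_SASIOUNIT5_ITE_SHIFT_SATA_PARTIAL));
}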
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhys at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT5_PHY_MAX
+#define MPI2_SAS_IOUNIT5_PHY_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_5 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumPhys; /*0x08 */
+ U8 Reserved1;/*0x09 */
+ U16 Reserved2;/*0x0A */
+ U32 Reserved3;/*0x0C */
+ MPI2_SAS_IO_UNIT5_PHY_PM_SETTINGS
+ SASPhyPowerManagementSettings[MPI2_SAS_IOUNIT5_PHY_MAX];/*0x10 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_5,
+ Mpi2SasIOUnitPage5_t, *pMpi2SasIOUnitPage5_t;
+
+#define MPI2_SASIOUNITPAGE5_PAGEVERSION (0x01)
+
+
+/*SAS IO Unit Page 6 */
+
+typedef struct _MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS {
+ U8 CurrentStatus; /*0x00 */
+ U8 CurrentModulation; /*0x01 */
+ U8 CurrentUtilization; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 Reserved2; /*0x04 */
+} MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ *PTR_MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS,
+ Mpi2SasIOUnit6PortWidthModGroupStatus_t,
+ *pMpi2SasIOUnit6PortWidthModGroupStatus_t;
+
+/*defines for CurrentStatus field */
+#define MPI2_SASIOUNIT6_STATUS_UNAVAILABLE (0x00)
+#define MPI2_SASIOUNIT6_STATUS_UNCONFIGURED (0x01)
+#define MPI2_SASIOUNIT6_STATUS_INVALID_CONFIG (0x02)
+#define MPI2_SASIOUNIT6_STATUS_LINK_DOWN (0x03)
+#define MPI2_SASIOUNIT6_STATUS_OBSERVATION_ONLY (0x04)
+#define MPI2_SASIOUNIT6_STATUS_INACTIVE (0x05)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_IOUNIT (0x06)
+#define MPI2_SASIOUNIT6_STATUS_ACTIVE_HOST (0x07)
+
+/*defines for CurrentModulation field */
+#define MPI2_SASIOUNIT6_MODULATION_25_PERCENT (0x00)
+#define MPI2_SASIOUNIT6_MODULATION_50_PERCENT (0x01)
+#define MPI2_SASIOUNIT6_MODULATION_75_PERCENT (0x02)
+#define MPI2_SASIOUNIT6_MODULATION_100_PERCENT (0x03)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT6_GROUP_MAX
+#define MPI2_SAS_IOUNIT6_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_6 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U8 NumGroups; /*0x10 */
+ U8 Reserved3; /*0x11 */
+ U16 Reserved4; /*0x12 */
+ MPI2_SAS_IO_UNIT6_PORT_WIDTH_MOD_GROUP_STATUS
+ PortWidthModulationGroupStatus[MPI2_SAS_IOUNIT6_GROUP_MAX]; /*0x14 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_6,
+ Mpi2SasIOUnitPage6_t, *pMpi2SasIOUnitPage6_t;
+
+#define MPI2_SASIOUNITPAGE6_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 7 */
+
+typedef struct _MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS {
+ U8 Flags; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 Threshold75Pct; /*0x04 */
+ U8 Threshold50Pct; /*0x05 */
+ U8 Threshold25Pct; /*0x06 */
+ U8 Reserved3; /*0x07 */
+} MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ *PTR_MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS,
+ Mpi2SasIOUnit7PortWidthModGroupSettings_t,
+ *pMpi2SasIOUnit7PortWidthModGroupSettings_t;
+
+/*defines for Flags field */
+#define MPI2_SASIOUNIT7_FLAGS_ENABLE_PORT_WIDTH_MODULATION (0x01)
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumGroups at runtime.
+ */
+#ifndef MPI2_SAS_IOUNIT7_GROUP_MAX
+#define MPI2_SAS_IOUNIT7_GROUP_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_7 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 SamplingInterval; /*0x08 */
+ U8 WindowLength; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U8 NumGroups; /*0x14 */
+ U8 Reserved4; /*0x15 */
+ U16 Reserved5; /*0x16 */
+ MPI2_SAS_IO_UNIT7_PORT_WIDTH_MOD_GROUP_SETTINGS
+ PortWidthModulationGroupSettings[MPI2_SAS_IOUNIT7_GROUP_MAX];/*0x18 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_7,
+ Mpi2SasIOUnitPage7_t, *pMpi2SasIOUnitPage7_t;
+
+#define MPI2_SASIOUNITPAGE7_PAGEVERSION (0x00)
+
+
+/*SAS IO Unit Page 8 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT_8 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ PowerManagementCapabilities; /*0x0C */
+ U8
+ TxRxSleepStatus; /*0x10 */
+ U8
+ Reserved2; /*0x11 */
+ U16
+ Reserved3; /*0x12 */
+} MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT_8,
+ Mpi2SasIOUnitPage8_t, *pMpi2SasIOUnitPage8_t;
+
+#define MPI2_SASIOUNITPAGE8_PAGEVERSION (0x00)
+
+/*defines for PowerManagementCapabilities field */
+#define MPI2_SASIOUNIT8_PM_HOST_PORT_WIDTH_MOD (0x00001000)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_SLUMBER_MODE (0x00000800)
+#define MPI2_SASIOUNIT8_PM_HOST_SAS_PARTIAL_MODE (0x00000400)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_SLUMBER_MODE (0x00000200)
+#define MPI2_SASIOUNIT8_PM_HOST_SATA_PARTIAL_MODE (0x00000100)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_PORT_WIDTH_MOD (0x00000010)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_SLUMBER_MODE (0x00000008)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SAS_PARTIAL_MODE (0x00000004)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_SLUMBER_MODE (0x00000002)
+#define MPI2_SASIOUNIT8_PM_IOUNIT_SATA_PARTIAL_MODE (0x00000001)
+
+/*defines for TxRxSleepStatus field */
+#define MPI25_SASIOUNIT8_TXRXSLEEP_UNSUPPORTED (0x00)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_DISENGAGED (0x01)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_ACTIVE (0x02)
+#define MPI25_SASIOUNIT8_TXRXSLEEP_SHUTDOWN (0x03)
+
+
+
+/*SAS IO Unit Page 16 */
+
+typedef struct _MPI2_CONFIG_PAGE_SASIOUNIT16 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U64
+ TimeStamp; /*0x08 */
+ U32
+ Reserved1; /*0x10 */
+ U32
+ Reserved2; /*0x14 */
+ U32
+ FastPathPendedRequests; /*0x18 */
+ U32
+ FastPathUnPendedRequests; /*0x1C */
+ U32
+ FastPathHostRequestStarts; /*0x20 */
+ U32
+ FastPathFirmwareRequestStarts; /*0x24 */
+ U32
+ FastPathHostCompletions; /*0x28 */
+ U32
+ FastPathFirmwareCompletions; /*0x2C */
+ U32
+ NonFastPathRequestStarts; /*0x30 */
+ U32
+ NonFastPathHostCompletions; /*0x34 */
+} MPI2_CONFIG_PAGE_SASIOUNIT16,
+ *PTR_MPI2_CONFIG_PAGE_SASIOUNIT16,
+ Mpi2SasIOUnitPage16_t, *pMpi2SasIOUnitPage16_t;
+
+#define MPI2_SASIOUNITPAGE16_PAGEVERSION (0x00)
+
+
+/****************************************************************************
+* SAS Expander Config Pages
+****************************************************************************/
+
+/*SAS Expander Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ ReportGenLength; /*0x09 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ DiscoveryStatus; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ ParentDevHandle; /*0x1A */
+ U16
+ ExpanderChangeCount; /*0x1C */
+ U16
+ ExpanderRouteIndexes; /*0x1E */
+ U8
+ NumPhys; /*0x20 */
+ U8
+ SASLevel; /*0x21 */
+ U16
+ Flags; /*0x22 */
+ U16
+ STPBusInactivityTimeLimit; /*0x24 */
+ U16
+ STPMaxConnectTimeLimit; /*0x26 */
+ U16
+ STP_SMP_NexusLossTime; /*0x28 */
+ U16
+ MaxNumRoutedSasAddresses; /*0x2A */
+ U64
+ ActiveZoneManagerSASAddress;/*0x2C */
+ U16
+ ZoneLockInactivityLimit; /*0x34 */
+ U16
+ Reserved1; /*0x36 */
+ U8
+ TimeToReducedFunc; /*0x38 */
+ U8
+ InitialTimeToReducedFunc; /*0x39 */
+ U8
+ MaxReducedFuncTime; /*0x3A */
+ U8
+ Reserved2; /*0x3B */
+} MPI2_CONFIG_PAGE_EXPANDER_0,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_0,
+ Mpi2ExpanderPage0_t, *pMpi2ExpanderPage0_t;
+
+#define MPI2_SASEXPANDER0_PAGEVERSION (0x06)
+
+/*values for SAS Expander Page 0 DiscoveryStatus field */
+#define MPI2_SAS_EXPANDER0_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_SAS_EXPANDER0_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_SAS_EXPANDER0_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_SAS_EXPANDER0_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_SAS_EXPANDER0_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_SAS_EXPANDER0_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_SAS_EXPANDER0_DS_TABLE_LINK (0x00000400)
+#define MPI2_SAS_EXPANDER0_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_SAS_EXPANDER0_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_SAS_EXPANDER0_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_SAS_EXPANDER0_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_SAS_EXPANDER0_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_SAS_EXPANDER0_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_SAS_EXPANDER0_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_SAS_EXPANDER0_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_SAS_EXPANDER0_DS_LOOP_DETECTED (0x00000001)
+
+/*values for SAS Expander Page 0 Flags field */
+#define MPI2_SAS_EXPANDER0_FLAGS_REDUCED_FUNCTIONALITY (0x2000)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONE_LOCKED (0x1000)
+#define MPI2_SAS_EXPANDER0_FLAGS_SUPPORTED_PHYSICAL_PRES (0x0800)
+#define MPI2_SAS_EXPANDER0_FLAGS_ASSERTED_PHYSICAL_PRES (0x0400)
+#define MPI2_SAS_EXPANDER0_FLAGS_ZONING_SUPPORT (0x0200)
+#define MPI2_SAS_EXPANDER0_FLAGS_ENABLED_ZONING (0x0100)
+#define MPI2_SAS_EXPANDER0_FLAGS_TABLE_TO_TABLE_SUPPORT (0x0080)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONNECTOR_END_DEVICE (0x0010)
+#define MPI2_SAS_EXPANDER0_FLAGS_OTHERS_CONFIG (0x0004)
+#define MPI2_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS (0x0002)
+#define MPI2_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG (0x0001)
+
+
+/*SAS Expander Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_EXPANDER_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PhysicalPort; /*0x08 */
+ U8
+ Reserved1; /*0x09 */
+ U16
+ Reserved2; /*0x0A */
+ U8
+ NumPhys; /*0x0C */
+ U8
+ Phy; /*0x0D */
+ U16
+ NumTableEntriesProgrammed; /*0x0E */
+ U8
+ ProgrammedLinkRate; /*0x10 */
+ U8
+ HwLinkRate; /*0x11 */
+ U16
+ AttachedDevHandle; /*0x12 */
+ U32
+ PhyInfo; /*0x14 */
+ U32
+ AttachedDeviceInfo; /*0x18 */
+ U16
+ ExpanderDevHandle; /*0x1C */
+ U8
+ ChangeCount; /*0x1E */
+ U8
+ NegotiatedLinkRate; /*0x1F */
+ U8
+ PhyIdentifier; /*0x20 */
+ U8
+ AttachedPhyIdentifier; /*0x21 */
+ U8
+ Reserved3; /*0x22 */
+ U8
+ DiscoveryInfo; /*0x23 */
+ U32
+ AttachedPhyInfo; /*0x24 */
+ U8
+ ZoneGroup; /*0x28 */
+ U8
+ SelfConfigStatus; /*0x29 */
+ U16
+ Reserved4; /*0x2A */
+} MPI2_CONFIG_PAGE_EXPANDER_1,
+ *PTR_MPI2_CONFIG_PAGE_EXPANDER_1,
+ Mpi2ExpanderPage1_t, *pMpi2ExpanderPage1_t;
+
+#define MPI2_SASEXPANDER1_PAGEVERSION (0x02)
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*see mpi2_sas.h for the MPI2_SAS_DEVICE_INFO_ defines
+ *used for the AttachedDeviceInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+/*values for SAS Expander Page 1 DiscoveryInfo field */
+#define MPI2_SAS_EXPANDER1_DISCINFO_BAD_PHY_DISABLED (0x04)
+#define MPI2_SAS_EXPANDER1_DISCINFO_LINK_STATUS_CHANGE (0x02)
+#define MPI2_SAS_EXPANDER1_DISCINFO_NO_ROUTING_ENTRIES (0x01)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+
+/****************************************************************************
+* SAS Device Config Pages
+****************************************************************************/
+
+/*SAS Device Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Slot; /*0x08 */
+ U16
+ EnclosureHandle; /*0x0A */
+ U64
+ SASAddress; /*0x0C */
+ U16
+ ParentDevHandle; /*0x14 */
+ U8
+ PhyNum; /*0x16 */
+ U8
+ AccessStatus; /*0x17 */
+ U16
+ DevHandle; /*0x18 */
+ U8
+ AttachedPhyIdentifier; /*0x1A */
+ U8
+ ZoneGroup; /*0x1B */
+ U32
+ DeviceInfo; /*0x1C */
+ U16
+ Flags; /*0x20 */
+ U8
+ PhysicalPort; /*0x22 */
+ U8
+ MaxPortConnections; /*0x23 */
+ U64
+ DeviceName; /*0x24 */
+ U8
+ PortGroups; /*0x2C */
+ U8
+ DmaGroup; /*0x2D */
+ U8
+ ControlGroup; /*0x2E */
+ U8
+ Reserved1; /*0x2F */
+ U32
+ Reserved2; /*0x30 */
+ U32
+ Reserved3; /*0x34 */
+} MPI2_CONFIG_PAGE_SAS_DEV_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_0,
+ Mpi2SasDevicePage0_t,
+ *pMpi2SasDevicePage0_t;
+
+#define MPI2_SASDEVICE0_PAGEVERSION (0x08)
+
+/*values for SAS Device Page 0 AccessStatus field */
+#define MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS (0x00)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED (0x01)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED (0x02)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT (0x03)
+#define MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION (0x04)
+#define MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE (0x05)
+#define MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE (0x06)
+#define MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED (0x07)
+/*specific values for SATA Init failures */
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN (0x10)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT (0x11)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG (0x12)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION (0x13)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER (0x14)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN (0x15)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN (0x16)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN (0x17)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION (0x18)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE (0x19)
+#define MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX (0x1F)
+
+/*see mpi2_sas.h for values for SAS Device Page 0 DeviceInfo values */
+
+/*values for SAS Device Page 0 Flags field */
+#define MPI2_SAS_DEVICE0_FLAGS_UNAUTHORIZED_DEVICE (0x8000)
+#define MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH (0x4000)
+#define MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE (0x2000)
+#define MPI2_SAS_DEVICE0_FLAGS_SLUMBER_PM_CAPABLE (0x1000)
+#define MPI2_SAS_DEVICE0_FLAGS_PARTIAL_PM_CAPABLE (0x0800)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY (0x0400)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE (0x0200)
+#define MPI2_SAS_DEVICE0_FLAGS_UNSUPPORTED_DEVICE (0x0100)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_48BIT_LBA_SUPPORTED (0x0080)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED (0x0040)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED (0x0020)
+#define MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED (0x0010)
+#define MPI2_SAS_DEVICE0_FLAGS_PORT_SELECTOR_ATTACH (0x0008)
+#define MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT (0x0001)
+
+
+/*SAS Device Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_DEV_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ SASAddress; /*0x0C */
+ U32
+ Reserved2; /*0x14 */
+ U16
+ DevHandle; /*0x18 */
+ U16
+ Reserved3; /*0x1A */
+ U8
+ InitialRegDeviceFIS[20];/*0x1C */
+} MPI2_CONFIG_PAGE_SAS_DEV_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_DEV_1,
+ Mpi2SasDevicePage1_t,
+ *pMpi2SasDevicePage1_t;
+
+#define MPI2_SASDEVICE1_PAGEVERSION (0x01)
+
+
+/****************************************************************************
+* SAS PHY Config Pages
+****************************************************************************/
+
+/*SAS PHY Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ OwnerDevHandle; /*0x08 */
+ U16
+ Reserved1; /*0x0A */
+ U16
+ AttachedDevHandle; /*0x0C */
+ U8
+ AttachedPhyIdentifier; /*0x0E */
+ U8
+ Reserved2; /*0x0F */
+ U32
+ AttachedPhyInfo; /*0x10 */
+ U8
+ ProgrammedLinkRate; /*0x14 */
+ U8
+ HwLinkRate; /*0x15 */
+ U8
+ ChangeCount; /*0x16 */
+ U8
+ Flags; /*0x17 */
+ U32
+ PhyInfo; /*0x18 */
+ U8
+ NegotiatedLinkRate; /*0x1C */
+ U8
+ Reserved3; /*0x1D */
+ U16
+ Reserved4; /*0x1E */
+} MPI2_CONFIG_PAGE_SAS_PHY_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_0,
+ Mpi2SasPhyPage0_t, *pMpi2SasPhyPage0_t;
+
+#define MPI2_SASPHY0_PAGEVERSION (0x03)
+
+/*use MPI2_SAS_APHYINFO_ defines for AttachedPhyInfo field */
+
+/*use MPI2_SAS_PRATE_ defines for the ProgrammedLinkRate field */
+
+/*use MPI2_SAS_HWRATE_ defines for the HwLinkRate field */
+
+/*values for SAS PHY Page 0 Flags field */
+#define MPI2_SAS_PHY0_FLAGS_SGPIO_DIRECT_ATTACH_ENC (0x01)
+
+/*use MPI2_SAS_PHYINFO_ for the PhyInfo field */
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines for the NegotiatedLinkRate field */
+
+
+/*SAS PHY Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U32
+ InvalidDwordCount; /*0x0C */
+ U32
+ RunningDisparityErrorCount; /*0x10 */
+ U32
+ LossDwordSynchCount; /*0x14 */
+ U32
+ PhyResetProblemCount; /*0x18 */
+} MPI2_CONFIG_PAGE_SAS_PHY_1,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_1,
+ Mpi2SasPhyPage1_t, *pMpi2SasPhyPage1_t;
+
+#define MPI2_SASPHY1_PAGEVERSION (0x01)
+
+
+/*SAS PHY Page 2 */
+
+typedef struct _MPI2_SASPHY2_PHY_EVENT {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 PhyEventInfo; /*0x04 */
+} MPI2_SASPHY2_PHY_EVENT, *PTR_MPI2_SASPHY2_PHY_EVENT,
+ Mpi2SasPhy2PhyEvent_t, *pMpi2SasPhy2PhyEvent_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ for the PhyEventCode field */
+
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY2_PHY_EVENT_MAX
+#define MPI2_SASPHY2_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_2 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY2_PHY_EVENT
+ PhyEvent[MPI2_SASPHY2_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_2,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_2,
+ Mpi2SasPhyPage2_t,
+ *pMpi2SasPhyPage2_t;
+
+#define MPI2_SASPHY2_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 3 */
+
+typedef struct _MPI2_SASPHY3_PHY_EVENT_CONFIG {
+ U8 PhyEventCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U8 CounterType; /*0x04 */
+ U8 ThresholdWindow; /*0x05 */
+ U8 TimeUnits; /*0x06 */
+ U8 Reserved3; /*0x07 */
+ U32 EventThreshold; /*0x08 */
+ U16 ThresholdFlags; /*0x0C */
+ U16 Reserved4; /*0x0E */
+} MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ *PTR_MPI2_SASPHY3_PHY_EVENT_CONFIG,
+ Mpi2SasPhy3PhyEventConfig_t,
+ *pMpi2SasPhy3PhyEventConfig_t;
+
+/*values for PhyEventCode field */
+#define MPI2_SASPHY3_EVENT_CODE_NO_EVENT (0x00)
+#define MPI2_SASPHY3_EVENT_CODE_INVALID_DWORD (0x01)
+#define MPI2_SASPHY3_EVENT_CODE_RUNNING_DISPARITY_ERROR (0x02)
+#define MPI2_SASPHY3_EVENT_CODE_LOSS_DWORD_SYNC (0x03)
+#define MPI2_SASPHY3_EVENT_CODE_PHY_RESET_PROBLEM (0x04)
+#define MPI2_SASPHY3_EVENT_CODE_ELASTICITY_BUF_OVERFLOW (0x05)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ERROR (0x06)
+#define MPI2_SASPHY3_EVENT_CODE_RX_ADDR_FRAME_ERROR (0x20)
+#define MPI2_SASPHY3_EVENT_CODE_TX_AC_OPEN_REJECT (0x21)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AC_OPEN_REJECT (0x22)
+#define MPI2_SASPHY3_EVENT_CODE_TX_RC_OPEN_REJECT (0x23)
+#define MPI2_SASPHY3_EVENT_CODE_RX_RC_OPEN_REJECT (0x24)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_PARTIAL_WAITING_ON (0x25)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP_CONNECT_WAITING_ON (0x26)
+#define MPI2_SASPHY3_EVENT_CODE_TX_BREAK (0x27)
+#define MPI2_SASPHY3_EVENT_CODE_RX_BREAK (0x28)
+#define MPI2_SASPHY3_EVENT_CODE_BREAK_TIMEOUT (0x29)
+#define MPI2_SASPHY3_EVENT_CODE_CONNECTION (0x2A)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_PATHWAY_BLOCKED (0x2B)
+#define MPI2_SASPHY3_EVENT_CODE_PEAKTX_ARB_WAIT_TIME (0x2C)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_ARB_WAIT_TIME (0x2D)
+#define MPI2_SASPHY3_EVENT_CODE_PEAK_CONNECT_TIME (0x2E)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_FRAMES (0x40)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_FRAMES (0x41)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SSP_ERROR_FRAMES (0x42)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SSP_ERROR_FRAMES (0x43)
+#define MPI2_SASPHY3_EVENT_CODE_TX_CREDIT_BLOCKED (0x44)
+#define MPI2_SASPHY3_EVENT_CODE_RX_CREDIT_BLOCKED (0x45)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SATA_FRAMES (0x50)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SATA_FRAMES (0x51)
+#define MPI2_SASPHY3_EVENT_CODE_SATA_OVERFLOW (0x52)
+#define MPI2_SASPHY3_EVENT_CODE_TX_SMP_FRAMES (0x60)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_FRAMES (0x61)
+#define MPI2_SASPHY3_EVENT_CODE_RX_SMP_ERROR_FRAMES (0x63)
+#define MPI2_SASPHY3_EVENT_CODE_HOTPLUG_TIMEOUT (0xD0)
+#define MPI2_SASPHY3_EVENT_CODE_MISALIGNED_MUX_PRIMITIVE (0xD1)
+#define MPI2_SASPHY3_EVENT_CODE_RX_AIP (0xD2)
+
+/*values for the CounterType field */
+#define MPI2_SASPHY3_COUNTER_TYPE_WRAPPING (0x00)
+#define MPI2_SASPHY3_COUNTER_TYPE_SATURATING (0x01)
+#define MPI2_SASPHY3_COUNTER_TYPE_PEAK_VALUE (0x02)
+
+/*values for the TimeUnits field */
+#define MPI2_SASPHY3_TIME_UNITS_10_MICROSECONDS (0x00)
+#define MPI2_SASPHY3_TIME_UNITS_100_MICROSECONDS (0x01)
+#define MPI2_SASPHY3_TIME_UNITS_1_MILLISECOND (0x02)
+#define MPI2_SASPHY3_TIME_UNITS_10_MILLISECONDS (0x03)
+
+/*values for the ThresholdFlags field */
+#define MPI2_SASPHY3_TFLAGS_PHY_RESET (0x0002)
+#define MPI2_SASPHY3_TFLAGS_EVENT_NOTIFY (0x0001)
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumPhyEvents at runtime.
+ */
+#ifndef MPI2_SASPHY3_PHY_EVENT_MAX
+#define MPI2_SASPHY3_PHY_EVENT_MAX (1)
+#endif
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_3 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U8
+ NumPhyEvents; /*0x0C */
+ U8
+ Reserved2; /*0x0D */
+ U16
+ Reserved3; /*0x0E */
+ MPI2_SASPHY3_PHY_EVENT_CONFIG
+ PhyEventConfig[MPI2_SASPHY3_PHY_EVENT_MAX]; /*0x10 */
+} MPI2_CONFIG_PAGE_SAS_PHY_3,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_3,
+ Mpi2SasPhyPage3_t, *pMpi2SasPhyPage3_t;
+
+#define MPI2_SASPHY3_PAGEVERSION (0x00)
+
+
+/*SAS PHY Page 4 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PHY_4 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U16
+ Reserved1; /*0x08 */
+ U8
+ Reserved2; /*0x0A */
+ U8
+ Flags; /*0x0B */
+ U8
+ InitialFrame[28]; /*0x0C */
+} MPI2_CONFIG_PAGE_SAS_PHY_4,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PHY_4,
+ Mpi2SasPhyPage4_t, *pMpi2SasPhyPage4_t;
+
+#define MPI2_SASPHY4_PAGEVERSION (0x00)
+
+/*values for the Flags field */
+#define MPI2_SASPHY4_FLAGS_FRAME_VALID (0x02)
+#define MPI2_SASPHY4_FLAGS_SATA_FRAME (0x01)
+
+
+
+
+/****************************************************************************
+* SAS Port Config Pages
+****************************************************************************/
+
+/*SAS Port Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_PORT_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U8
+ PortNumber; /*0x08 */
+ U8
+ PhysicalPort; /*0x09 */
+ U8
+ PortWidth; /*0x0A */
+ U8
+ PhysicalPortWidth; /*0x0B */
+ U8
+ ZoneGroup; /*0x0C */
+ U8
+ Reserved1; /*0x0D */
+ U16
+ Reserved2; /*0x0E */
+ U64
+ SASAddress; /*0x10 */
+ U32
+ DeviceInfo; /*0x18 */
+ U32
+ Reserved3; /*0x1C */
+ U32
+ Reserved4; /*0x20 */
+} MPI2_CONFIG_PAGE_SAS_PORT_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_PORT_0,
+ Mpi2SasPortPage0_t, *pMpi2SasPortPage0_t;
+
+#define MPI2_SASPORT0_PAGEVERSION (0x00)
+
+/*see mpi2_sas.h for values for SAS Port Page 0 DeviceInfo values */
+
+
+/****************************************************************************
+* SAS Enclosure Config Pages
+****************************************************************************/
+
+/*SAS Enclosure Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved1; /*0x08 */
+ U64
+ EnclosureLogicalID; /*0x0C */
+ U16
+ Flags; /*0x14 */
+ U16
+ EnclosureHandle; /*0x16 */
+ U16
+ NumSlots; /*0x18 */
+ U16
+ StartSlot; /*0x1A */
+ U16
+ Reserved2; /*0x1C */
+ U16
+ SEPDevHandle; /*0x1E */
+ U32
+ Reserved3; /*0x20 */
+ U32
+ Reserved4; /*0x24 */
+} MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ *PTR_MPI2_CONFIG_PAGE_SAS_ENCLOSURE_0,
+ Mpi2SasEnclosurePage0_t, *pMpi2SasEnclosurePage0_t;
+
+#define MPI2_SASENCLOSURE0_PAGEVERSION (0x03)
+
+/*values for SAS Enclosure Page 0 Flags field */
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_MASK (0x000F)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_UNKNOWN (0x0000)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SES (0x0001)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_SGPIO (0x0002)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_EXP_SGPIO (0x0003)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_SES_ENCLOSURE (0x0004)
+#define MPI2_SAS_ENCLS0_FLAGS_MNG_IOC_GPIO (0x0005)
+
+
+/****************************************************************************
+* Log Config Page
+****************************************************************************/
+
+/*Log Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumLogEntries at runtime.
+ */
+#ifndef MPI2_LOG_0_NUM_LOG_ENTRIES
+#define MPI2_LOG_0_NUM_LOG_ENTRIES (1)
+#endif
+
+#define MPI2_LOG_0_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_LOG_0_ENTRY {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8
+ LogData[MPI2_LOG_0_LOG_DATA_LENGTH];/*0x14 */
+} MPI2_LOG_0_ENTRY, *PTR_MPI2_LOG_0_ENTRY,
+ Mpi2Log0Entry_t, *pMpi2Log0Entry_t;
+
+/*values for Log Page 0 LogEntry LogEntryQualifier field */
+#define MPI2_LOG_0_ENTRY_QUAL_ENTRY_UNUSED (0x0000)
+#define MPI2_LOG_0_ENTRY_QUAL_POWER_ON_RESET (0x0001)
+#define MPI2_LOG_0_ENTRY_QUAL_TIMESTAMP_UPDATE (0x0002)
+#define MPI2_LOG_0_ENTRY_QUAL_MIN_IMPLEMENT_SPEC (0x8000)
+#define MPI2_LOG_0_ENTRY_QUAL_MAX_IMPLEMENT_SPEC (0xFFFF)
+
+typedef struct _MPI2_CONFIG_PAGE_LOG_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+ U16 NumLogEntries;/*0x10 */
+ U16 Reserved3; /*0x12 */
+ MPI2_LOG_0_ENTRY
+ LogEntry[MPI2_LOG_0_NUM_LOG_ENTRIES]; /*0x14 */
+} MPI2_CONFIG_PAGE_LOG_0, *PTR_MPI2_CONFIG_PAGE_LOG_0,
+ Mpi2LogPage0_t, *pMpi2LogPage0_t;
+
+#define MPI2_LOG_0_PAGEVERSION (0x02)
+
+
+/****************************************************************************
+* RAID Config Page
+****************************************************************************/
+
+/*RAID Page 0 */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check the value returned for NumElements at runtime.
+ */
+#ifndef MPI2_RAIDCONFIG0_MAX_ELEMENTS
+#define MPI2_RAIDCONFIG0_MAX_ELEMENTS (1)
+#endif
+
+typedef struct _MPI2_RAIDCONFIG0_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 HotSparePool; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ *PTR_MPI2_RAIDCONFIG0_CONFIG_ELEMENT,
+ Mpi2RaidConfig0ConfigElement_t,
+ *pMpi2RaidConfig0ConfigElement_t;
+
+/*values for the ElementFlags field */
+#define MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE (0x000F)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT (0x0001)
+#define MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT (0x0002)
+#define MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT (0x0003)
+
+
+typedef struct _MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumHotSpares; /*0x08 */
+ U8 NumPhysDisks; /*0x09 */
+ U8 NumVolumes; /*0x0A */
+ U8 ConfigNum; /*0x0B */
+ U32 Flags; /*0x0C */
+ U8 ConfigGUID[24]; /*0x10 */
+ U32 Reserved1; /*0x28 */
+ U8 NumElements; /*0x2C */
+ U8 Reserved2; /*0x2D */
+ U16 Reserved3; /*0x2E */
+ MPI2_RAIDCONFIG0_CONFIG_ELEMENT
+ ConfigElement[MPI2_RAIDCONFIG0_MAX_ELEMENTS]; /*0x30 */
+} MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ *PTR_MPI2_CONFIG_PAGE_RAID_CONFIGURATION_0,
+ Mpi2RaidConfigurationPage0_t,
+ *pMpi2RaidConfigurationPage0_t;
+
+#define MPI2_RAIDCONFIG0_PAGEVERSION (0x00)
+
+/*values for RAID Configuration Page 0 Flags field */
+#define MPI2_RAIDCONFIG0_FLAG_FOREIGN_CONFIG (0x00000001)
+
+
+/****************************************************************************
+* Driver Persistent Mapping Config Pages
+****************************************************************************/
+
+/*Driver Persistent Mapping Page 0 */
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY {
+ U64 PhysicalIdentifier; /*0x00 */
+ U16 MappingInformation; /*0x08 */
+ U16 DeviceIndex; /*0x0A */
+ U32 PhysicalBitsMapping; /*0x0C */
+ U32 Reserved1; /*0x10 */
+} MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY,
+ Mpi2DriverMap0Entry_t, *pMpi2DriverMap0Entry_t;
+
+typedef struct _MPI2_CONFIG_PAGE_DRIVER_MAPPING_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY Entry; /*0x08 */
+} MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ *PTR_MPI2_CONFIG_PAGE_DRIVER_MAPPING_0,
+ Mpi2DriverMappingPage0_t, *pMpi2DriverMappingPage0_t;
+
+#define MPI2_DRIVERMAPPING0_PAGEVERSION (0x00)
+
+/*values for Driver Persistent Mapping Page 0 MappingInformation field */
+#define MPI2_DRVMAP0_MAPINFO_SLOT_MASK (0x07F0)
+#define MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT (4)
+#define MPI2_DRVMAP0_MAPINFO_MISSING_MASK (0x000F)
+
+
+/****************************************************************************
+* Ethernet Config Pages
+****************************************************************************/
+
+/*Ethernet Page 0 */
+
+/*IP address (union of IPv4 and IPv6) */
+typedef union _MPI2_ETHERNET_IP_ADDR {
+ U32 IPv4Addr;
+ U32 IPv6Addr[4];
+} MPI2_ETHERNET_IP_ADDR, *PTR_MPI2_ETHERNET_IP_ADDR,
+ Mpi2EthernetIpAddr_t, *pMpi2EthernetIpAddr_t;
+
+#define MPI2_ETHERNET_HOST_NAME_LENGTH (32)
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_0 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER Header; /*0x00 */
+ U8 NumInterfaces; /*0x08 */
+ U8 Reserved0; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Status; /*0x0C */
+ U8 MediaState; /*0x10 */
+ U8 Reserved2; /*0x11 */
+ U16 Reserved3; /*0x12 */
+ U8 MacAddress[6]; /*0x14 */
+ U8 Reserved4; /*0x1A */
+ U8 Reserved5; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR IpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR SubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR GatewayIpAddress;/*0x3C */
+ MPI2_ETHERNET_IP_ADDR DNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR DNS2IpAddress; /*0x5C */
+ MPI2_ETHERNET_IP_ADDR DhcpIpAddress; /*0x6C */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_0,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_0,
+ Mpi2EthernetPage0_t, *pMpi2EthernetPage0_t;
+
+#define MPI2_ETHERNETPAGE0_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 0 Status field */
+#define MPI2_ETHPG0_STATUS_IPV6_CAPABLE (0x80000000)
+#define MPI2_ETHPG0_STATUS_IPV4_CAPABLE (0x40000000)
+#define MPI2_ETHPG0_STATUS_CONSOLE_CONNECTED (0x20000000)
+#define MPI2_ETHPG0_STATUS_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG0_STATUS_FW_DWNLD_ENABLED (0x00000080)
+#define MPI2_ETHPG0_STATUS_TELNET_ENABLED (0x00000040)
+#define MPI2_ETHPG0_STATUS_SSH2_ENABLED (0x00000020)
+#define MPI2_ETHPG0_STATUS_DHCP_CLIENT_ENABLED (0x00000010)
+#define MPI2_ETHPG0_STATUS_IPV6_ENABLED (0x00000008)
+#define MPI2_ETHPG0_STATUS_IPV4_ENABLED (0x00000004)
+#define MPI2_ETHPG0_STATUS_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG0_STATUS_ETH_IF_ENABLED (0x00000001)
+
+/*values for Ethernet Page 0 MediaState field */
+#define MPI2_ETHPG0_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG0_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG0_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG0_MS_CONNECT_SPEED_MASK (0x07)
+#define MPI2_ETHPG0_MS_NOT_CONNECTED (0x00)
+#define MPI2_ETHPG0_MS_10MBIT (0x01)
+#define MPI2_ETHPG0_MS_100MBIT (0x02)
+#define MPI2_ETHPG0_MS_1GBIT (0x03)
+
+
+/*Ethernet Page 1 */
+
+typedef struct _MPI2_CONFIG_PAGE_ETHERNET_1 {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ Reserved0; /*0x08 */
+ U32
+ Flags; /*0x0C */
+ U8
+ MediaState; /*0x10 */
+ U8
+ Reserved1; /*0x11 */
+ U16
+ Reserved2; /*0x12 */
+ U8
+ MacAddress[6]; /*0x14 */
+ U8
+ Reserved3; /*0x1A */
+ U8
+ Reserved4; /*0x1B */
+ MPI2_ETHERNET_IP_ADDR
+ StaticIpAddress; /*0x1C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticSubnetMask; /*0x2C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticGatewayIpAddress; /*0x3C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS1IpAddress; /*0x4C */
+ MPI2_ETHERNET_IP_ADDR
+ StaticDNS2IpAddress; /*0x5C */
+ U32
+ Reserved5; /*0x6C */
+ U32
+ Reserved6; /*0x70 */
+ U32
+ Reserved7; /*0x74 */
+ U32
+ Reserved8; /*0x78 */
+ U8
+ HostName[MPI2_ETHERNET_HOST_NAME_LENGTH];/*0x7C */
+} MPI2_CONFIG_PAGE_ETHERNET_1,
+ *PTR_MPI2_CONFIG_PAGE_ETHERNET_1,
+ Mpi2EthernetPage1_t, *pMpi2EthernetPage1_t;
+
+#define MPI2_ETHERNETPAGE1_PAGEVERSION (0x00)
+
+/*values for Ethernet Page 1 Flags field */
+#define MPI2_ETHPG1_FLAG_SET_DEFAULT_IF (0x00000100)
+#define MPI2_ETHPG1_FLAG_ENABLE_FW_DOWNLOAD (0x00000080)
+#define MPI2_ETHPG1_FLAG_ENABLE_TELNET (0x00000040)
+#define MPI2_ETHPG1_FLAG_ENABLE_SSH2 (0x00000020)
+#define MPI2_ETHPG1_FLAG_ENABLE_DHCP_CLIENT (0x00000010)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV6 (0x00000008)
+#define MPI2_ETHPG1_FLAG_ENABLE_IPV4 (0x00000004)
+#define MPI2_ETHPG1_FLAG_USE_IPV6_ADDRESSES (0x00000002)
+#define MPI2_ETHPG1_FLAG_ENABLE_ETH_IF (0x00000001)
+
+/*values for Ethernet Page 1 MediaState field */
+#define MPI2_ETHPG1_MS_DUPLEX_MASK (0x80)
+#define MPI2_ETHPG1_MS_HALF_DUPLEX (0x00)
+#define MPI2_ETHPG1_MS_FULL_DUPLEX (0x80)
+
+#define MPI2_ETHPG1_MS_DATA_RATE_MASK (0x07)
+#define MPI2_ETHPG1_MS_DATA_RATE_AUTO (0x00)
+#define MPI2_ETHPG1_MS_DATA_RATE_10MBIT (0x01)
+#define MPI2_ETHPG1_MS_DATA_RATE_100MBIT (0x02)
+#define MPI2_ETHPG1_MS_DATA_RATE_1GBIT (0x03)
+
+
+/****************************************************************************
+* Extended Manufacturing Config Pages
+****************************************************************************/
+
+/*
+ *Generic structure to use for product-specific extended manufacturing pages
+ *(currently Extended Manufacturing Page 40 through Extended Manufacturing
+ *Page 60).
+ */
+
+typedef struct _MPI2_CONFIG_PAGE_EXT_MAN_PS {
+ MPI2_CONFIG_EXTENDED_PAGE_HEADER
+ Header; /*0x00 */
+ U32
+ ProductSpecificInfo; /*0x08 */
+} MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ *PTR_MPI2_CONFIG_PAGE_EXT_MAN_PS,
+ Mpi2ExtManufacturingPagePS_t,
+ *pMpi2ExtManufacturingPagePS_t;
+
+/*PageVersion should be provided by product-specific code */
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
new file mode 100644
index 000000000000..a079e5242474
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -0,0 +1,560 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_init.h
+ * Title: MPI SCSI initiator mode messages and structures
+ * Creation Date: June 23, 2006
+ *
+ * mpi2_init.h Version: 02.00.14
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 10-31-07 02.00.01 Fixed name for pMpi2SCSITaskManagementRequest_t.
+ * 12-18-07 02.00.02 Modified Task Management Target Reset Method defines.
+ * 02-29-08 02.00.03 Added Query Task Set and Query Unit Attention.
+ * 03-03-08 02.00.04 Fixed name of struct _MPI2_SCSI_TASK_MANAGE_REPLY.
+ * 05-21-08 02.00.05 Fixed typo in name of Mpi2SepRequest_t.
+ * 10-02-08 02.00.06 Removed Untagged and No Disconnect values from SCSI IO
+ * Control field Task Attribute flags.
+ * Moved LUN field defines to mpi2.h because they are
+ * common to many structures.
+ * 05-06-09 02.00.07 Changed task management type of Query Unit Attention to
+ * Query Asynchronous Event.
+ * Defined two new bits in the SlotStatus field of the SCSI
+ * Enclosure Processor Request and Reply.
+ * 10-28-09 02.00.08 Added defines for decoding the ResponseInfo bytes for
+ * both SCSI IO Error Reply and SCSI Task Management Reply.
+ * Added ResponseInfo field to MPI2_SCSI_TASK_MANAGE_REPLY.
+ * Added MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG define.
+ * 02-10-10 02.00.09 Removed unused structure that had "#if 0" around it.
+ * 05-12-10 02.00.10 Added optional vendor-unique region to SCSI IO Request.
+ * 11-10-10 02.00.11 Added MPI2_SCSIIO_NUM_SGLOFFSETS define.
+ * 11-18-11 02.00.12 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.13 Added alternate defines for Task Priority / Command
+ * Priority to match SAM-4.
+ * Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
+ * 07-10-12 02.00.14 Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_INIT_H
+#define MPI2_INIT_H
+
+/*****************************************************************************
+*
+* SCSI Initiator Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SCSI IO messages and associated structures
+****************************************************************************/
+
+typedef struct _MPI2_SCSI_IO_CDB_EEDP32 {
+ U8 CDB[20]; /*0x00 */
+ U32 PrimaryReferenceTag; /*0x14 */
+ U16 PrimaryApplicationTag; /*0x18 */
+ U16 PrimaryApplicationTagMask; /*0x1A */
+ U32 TransferLength; /*0x1C */
+} MPI2_SCSI_IO_CDB_EEDP32, *PTR_MPI2_SCSI_IO_CDB_EEDP32,
+ Mpi2ScsiIoCdbEedp32_t, *pMpi2ScsiIoCdbEedp32_t;
+
+/*MPI v2.0 CDB field */
+typedef union _MPI2_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_SGE_SIMPLE_UNION SGE;
+} MPI2_SCSI_IO_CDB_UNION, *PTR_MPI2_SCSI_IO_CDB_UNION,
+ Mpi2ScsiIoCdb_t, *pMpi2ScsiIoCdb_t;
+
+/*MPI v2.0 SCSI IO Request Message */
+typedef struct _MPI2_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U16 SGLFlags; /*0x10 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U32 EEDPBlockSize; /*0x28 */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI2_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI2_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI2_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI2_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI2_SCSI_IO_REQUEST, *PTR_MPI2_SCSI_IO_REQUEST,
+ Mpi2SCSIIORequest_t, *pMpi2SCSIIORequest_t;
+
+/*SCSI IO MsgFlags bits */
+
+/*MsgFlags for SenseBufferAddressSpace */
+#define MPI2_SCSIIO_MSGFLAGS_MASK_SENSE_ADDR (0x0C)
+#define MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR (0x00)
+#define MPI2_SCSIIO_MSGFLAGS_IOCDDR_SENSE_ADDR (0x04)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLB_SENSE_ADDR (0x08)
+#define MPI2_SCSIIO_MSGFLAGS_IOCPLBNTA_SENSE_ADDR (0x0C)
+
+/*SCSI IO SGLFlags bits */
+
+/*base values for Data Location Address Space */
+#define MPI2_SCSIIO_SGLFLAGS_ADDR_MASK (0x0C)
+#define MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_IOCDDR_ADDR (0x04)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLB_ADDR (0x08)
+#define MPI2_SCSIIO_SGLFLAGS_IOCPLBNTA_ADDR (0x0C)
+
+/*base values for Type */
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MASK (0x03)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_MPI (0x00)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE32 (0x01)
+#define MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64 (0x02)
+
+/*shift values for each sub-field */
+#define MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT (12)
+#define MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT (8)
+#define MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT (4)
+#define MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT (0)
+
+/*number of SGLOffset fields */
+#define MPI2_SCSIIO_NUM_SGLOFFSETS (4)
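
Each 4-bit sub-field of SGLFlags combines one address-space value with one type value for the corresponding SGLOffset slot; a sketch replicating a system-address IEEE-64 setting across all four slots:

static U16 make_sgl_flags(void)
{
	U8 f = MPI2_SCSIIO_SGLFLAGS_SYSTEM_ADDR |
	       MPI2_SCSIIO_SGLFLAGS_TYPE_IEEE64;

	/* Same setting for SGL0 through SGL3 -> 0x2222. */
	return (U16)((f << MPI2_SCSIIO_SGLFLAGS_SGL0_SHIFT) |
		     (f << MPI2_SCSIIO_SGLFLAGS_SGL1_SHIFT) |
		     (f << MPI2_SCSIIO_SGLFLAGS_SGL2_SHIFT) |
		     (f << MPI2_SCSIIO_SGLFLAGS_SGL3_SHIFT));
}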
+
+/*SCSI IO IoFlags bits */
+
+/*Large CDB Address Space */
+#define MPI2_SCSIIO_CDB_ADDR_MASK (0x6000)
+#define MPI2_SCSIIO_CDB_ADDR_SYSTEM (0x0000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCDDR (0x2000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLB (0x4000)
+#define MPI2_SCSIIO_CDB_ADDR_IOCPLBNTA (0x6000)
+
+#define MPI2_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI2_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI2_SCSIIO_IOFLAGS_MULTICAST (0x0400)
+#define MPI2_SCSIIO_IOFLAGS_CMD_DETERMINES_DATA_DIR (0x0200)
+#define MPI2_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*SCSI IO EEDPFlags bits */
+
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG (0x8000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_REFTAG (0x4000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_PRI_APPTAG (0x2000)
+#define MPI2_SCSIIO_EEDPFLAGS_INC_SEC_APPTAG (0x1000)
+
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG (0x0400)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG (0x0200)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD (0x0100)
+
+#define MPI2_SCSIIO_EEDPFLAGS_PASSTHRU_REFTAG (0x0008)
+
+#define MPI2_SCSIIO_EEDPFLAGS_MASK_OP (0x0007)
+#define MPI2_SCSIIO_EEDPFLAGS_NOOP_OP (0x0000)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_OP (0x0001)
+#define MPI2_SCSIIO_EEDPFLAGS_STRIP_OP (0x0002)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP (0x0003)
+#define MPI2_SCSIIO_EEDPFLAGS_INSERT_OP (0x0004)
+#define MPI2_SCSIIO_EEDPFLAGS_REPLACE_OP (0x0006)
+#define MPI2_SCSIIO_EEDPFLAGS_CHECK_REGEN_OP (0x0007)
+
+/*SCSI IO LUN fields: use MPI2_LUN_ from mpi2.h */
+
+/*SCSI IO Control bits */
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_MASK (0xFC000000)
+#define MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT (26)
+
+#define MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK (0x03000000)
+#define MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION (24)
+#define MPI2_SCSIIO_CONTROL_NODATATRANSFER (0x00000000)
+#define MPI2_SCSIIO_CONTROL_WRITE (0x01000000)
+#define MPI2_SCSIIO_CONTROL_READ (0x02000000)
+#define MPI2_SCSIIO_CONTROL_BIDIRECTIONAL (0x03000000)
+
+#define MPI2_SCSIIO_CONTROL_TASKPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_TASKPRI_SHIFT (11)
+/*alternate name for the previous field; called Command Priority in SAM-4 */
+#define MPI2_SCSIIO_CONTROL_CMDPRI_MASK (0x00007800)
+#define MPI2_SCSIIO_CONTROL_CMDPRI_SHIFT (11)
+
+#define MPI2_SCSIIO_CONTROL_TASKATTRIBUTE_MASK (0x00000700)
+#define MPI2_SCSIIO_CONTROL_SIMPLEQ (0x00000000)
+#define MPI2_SCSIIO_CONTROL_HEADOFQ (0x00000100)
+#define MPI2_SCSIIO_CONTROL_ORDEREDQ (0x00000200)
+#define MPI2_SCSIIO_CONTROL_ACAQ (0x00000400)
+
+#define MPI2_SCSIIO_CONTROL_TLR_MASK (0x000000C0)
+#define MPI2_SCSIIO_CONTROL_NO_TLR (0x00000000)
+#define MPI2_SCSIIO_CONTROL_TLR_ON (0x00000040)
+#define MPI2_SCSIIO_CONTROL_TLR_OFF (0x00000080)
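
The Control dword combines the data-direction, task-attribute, and TLR sub-fields by simple OR; a minimal sketch composing a simple-queued read and testing the direction afterwards:

static U32 make_read_control(void)
{
	/* SIMPLEQ and NO_TLR are zero; named here for readability. */
	return MPI2_SCSIIO_CONTROL_READ |
	       MPI2_SCSIIO_CONTROL_SIMPLEQ |
	       MPI2_SCSIIO_CONTROL_NO_TLR;
}

static int control_is_write(U32 control)
{
	return (control & MPI2_SCSIIO_CONTROL_DATADIRECTION_MASK) ==
	       MPI2_SCSIIO_CONTROL_WRITE;
}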
+
+/*MPI v2.5 CDB field */
+typedef union _MPI25_SCSI_IO_CDB_UNION {
+ U8 CDB32[32];
+ MPI2_SCSI_IO_CDB_EEDP32 EEDP32;
+ MPI2_IEEE_SGE_SIMPLE64 SGE;
+} MPI25_SCSI_IO_CDB_UNION, *PTR_MPI25_SCSI_IO_CDB_UNION,
+ Mpi25ScsiIoCdb_t, *pMpi25ScsiIoCdb_t;
+
+/*MPI v2.5 SCSI IO Request Message */
+typedef struct _MPI25_SCSI_IO_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U32 SenseBufferLowAddress; /*0x0C */
+ U8 DMAFlags; /*0x10 */
+ U8 Reserved5; /*0x11 */
+ U8 SenseBufferLength; /*0x12 */
+ U8 Reserved4; /*0x13 */
+ U8 SGLOffset0; /*0x14 */
+ U8 SGLOffset1; /*0x15 */
+ U8 SGLOffset2; /*0x16 */
+ U8 SGLOffset3; /*0x17 */
+ U32 SkipCount; /*0x18 */
+ U32 DataLength; /*0x1C */
+ U32 BidirectionalDataLength; /*0x20 */
+ U16 IoFlags; /*0x24 */
+ U16 EEDPFlags; /*0x26 */
+ U16 EEDPBlockSize; /*0x28 */
+ U16 Reserved6; /*0x2A */
+ U32 SecondaryReferenceTag; /*0x2C */
+ U16 SecondaryApplicationTag; /*0x30 */
+ U16 ApplicationTagTranslationMask; /*0x32 */
+ U8 LUN[8]; /*0x34 */
+ U32 Control; /*0x3C */
+ MPI25_SCSI_IO_CDB_UNION CDB; /*0x40 */
+
+#ifdef MPI25_SCSI_IO_VENDOR_UNIQUE_REGION /*typically this is left undefined */
+ MPI25_SCSI_IO_VENDOR_UNIQUE VendorRegion;
+#endif
+
+ MPI25_SGE_IO_UNION SGL; /*0x60 */
+
+} MPI25_SCSI_IO_REQUEST, *PTR_MPI25_SCSI_IO_REQUEST,
+ Mpi25SCSIIORequest_t, *pMpi25SCSIIORequest_t;
+
+/*use MPI2_SCSIIO_MSGFLAGS_ defines for the MsgFlags field */
+
+/*Defines for the DMAFlags field
+ * Each setting affects 4 SGLs, from SGL0 to SGL3.
+ * D = Data
+ * C = Cache DIF
+ * I = Interleaved
+ * H = Host DIF
+ */
+#define MPI25_SCSIIO_DMAFLAGS_OP_MASK (0x0F)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_D (0x00)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_C (0x01)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_D_I (0x02)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_C (0x03)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_C_I (0x04)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_D_I_I (0x05)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_C (0x06)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_C_I (0x07)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_C_I_I (0x08)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_I_I_I (0x09)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_D (0x0A)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_C (0x0B)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_D_I (0x0C)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_C (0x0D)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_C_I (0x0E)
+#define MPI25_SCSIIO_DMAFLAGS_OP_D_H_I_I (0x0F)
+
+/*number of SGLOffset fields */
+#define MPI25_SCSIIO_NUM_SGLOFFSETS (4)
+
+/*defines for the IoFlags field */
+#define MPI25_SCSIIO_IOFLAGS_IO_PATH_MASK (0xC000)
+#define MPI25_SCSIIO_IOFLAGS_NORMAL_PATH (0x0000)
+#define MPI25_SCSIIO_IOFLAGS_FAST_PATH (0x4000)
+
+#define MPI25_SCSIIO_IOFLAGS_LARGE_CDB (0x1000)
+#define MPI25_SCSIIO_IOFLAGS_BIDIRECTIONAL (0x0800)
+#define MPI25_SCSIIO_IOFLAGS_CDBLENGTH_MASK (0x01FF)
+
+/*MPI v2.5 defines for the EEDPFlags bits */
+/*use MPI2_SCSIIO_EEDPFLAGS_ defines for the other EEDPFlags bits */
+#define MPI25_SCSIIO_EEDPFLAGS_ESCAPE_MODE_MASK (0x00C0)
+#define MPI25_SCSIIO_EEDPFLAGS_COMPATIBLE_MODE (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_DO_NOT_DISABLE_MODE (0x0040)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_DISABLE_MODE (0x0080)
+#define MPI25_SCSIIO_EEDPFLAGS_APPTAG_REFTAG_DISABLE_MODE (0x00C0)
+
+#define MPI25_SCSIIO_EEDPFLAGS_HOST_GUARD_METHOD_MASK (0x0030)
+#define MPI25_SCSIIO_EEDPFLAGS_T10_CRC_HOST_GUARD (0x0000)
+#define MPI25_SCSIIO_EEDPFLAGS_IP_CHKSUM_HOST_GUARD (0x0010)
+
+/*use MPI2_LUN_ defines from mpi2.h for the LUN field */
+
+/*use MPI2_SCSIIO_CONTROL_ defines for the Control field */
+
+/*NOTE: The SCSI IO Reply is nearly the same for MPI 2.0 and MPI 2.5, so
+ * MPI2_SCSI_IO_REPLY is used for both.
+ */
+
+/*SCSI IO Error Reply Message */
+typedef struct _MPI2_SCSI_IO_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 SCSIStatus; /*0x0C */
+ U8 SCSIState; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferCount; /*0x14 */
+ U32 SenseCount; /*0x18 */
+ U32 ResponseInfo; /*0x1C */
+ U16 TaskTag; /*0x20 */
+ U16 Reserved4; /*0x22 */
+ U32 BidirectionalTransferCount; /*0x24 */
+ U32 EEDPErrorOffset; /*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
+ U32 Reserved6; /*0x2C */
+} MPI2_SCSI_IO_REPLY, *PTR_MPI2_SCSI_IO_REPLY,
+ Mpi2SCSIIOReply_t, *pMpi2SCSIIOReply_t;
+
+/*SCSI IO Reply SCSIStatus values (SAM-4 status codes) */
+
+#define MPI2_SCSI_STATUS_GOOD (0x00)
+#define MPI2_SCSI_STATUS_CHECK_CONDITION (0x02)
+#define MPI2_SCSI_STATUS_CONDITION_MET (0x04)
+#define MPI2_SCSI_STATUS_BUSY (0x08)
+#define MPI2_SCSI_STATUS_INTERMEDIATE (0x10)
+#define MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET (0x14)
+#define MPI2_SCSI_STATUS_RESERVATION_CONFLICT (0x18)
+#define MPI2_SCSI_STATUS_COMMAND_TERMINATED (0x22) /*obsolete */
+#define MPI2_SCSI_STATUS_TASK_SET_FULL (0x28)
+#define MPI2_SCSI_STATUS_ACA_ACTIVE (0x30)
+#define MPI2_SCSI_STATUS_TASK_ABORTED (0x40)
+
+/*SCSI IO Reply SCSIState flags */
+
+#define MPI2_SCSI_STATE_RESPONSE_INFO_VALID (0x10)
+#define MPI2_SCSI_STATE_TERMINATED (0x08)
+#define MPI2_SCSI_STATE_NO_SCSI_STATUS (0x04)
+#define MPI2_SCSI_STATE_AUTOSENSE_FAILED (0x02)
+#define MPI2_SCSI_STATE_AUTOSENSE_VALID (0x01)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSI_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSI_RI_SHIFT_REASONCODE (0)
+
+#define MPI2_SCSI_TASKTAG_UNKNOWN (0xFFFF)
+
+/****************************************************************************
+* SCSI Task Management messages
+****************************************************************************/
+
+/*SCSI Task Management Request Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved1; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 LUN[8]; /*0x0C */
+ U32 Reserved4[7]; /*0x14 */
+ U16 TaskMID; /*0x30 */
+ U16 Reserved5; /*0x32 */
+} MPI2_SCSI_TASK_MANAGE_REQUEST,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REQUEST,
+ Mpi2SCSITaskManagementRequest_t,
+ *pMpi2SCSITaskManagementRequest_t;
+
+/*TaskType values */
+
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK (0x01)
+#define MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET (0x02)
+#define MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET (0x03)
+#define MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET (0x05)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET (0x06)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK (0x07)
+#define MPI2_SCSITASKMGMT_TASKTYPE_CLR_ACA (0x08)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_TASK_SET (0x09)
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT (0x0A)
+
+/*obsolete TaskType name */
+#define MPI2_SCSITASKMGMT_TASKTYPE_QRY_UNIT_ATTENTION \
+ (MPI2_SCSITASKMGMT_TASKTYPE_QRY_ASYNC_EVENT)
+
+/*MsgFlags bits */
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_MASK_TARGET_RESET (0x18)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET (0x00)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_NEXUS_RESET_SRST (0x08)
+#define MPI2_SCSITASKMGMT_MSGFLAGS_SAS_HARD_LINK_RESET (0x10)
+
+#define MPI2_SCSITASKMGMT_MSGFLAGS_DO_NOT_SEND_TASK_IU (0x01)
+
+/*SCSI Task Management Reply Message */
+typedef struct _MPI2_SCSI_TASK_MANAGE_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 ResponseCode; /*0x04 */
+ U8 TaskType; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TerminationCount; /*0x14 */
+ U32 ResponseInfo; /*0x18 */
+} MPI2_SCSI_TASK_MANAGE_REPLY,
+ *PTR_MPI2_SCSI_TASK_MANAGE_REPLY,
+ Mpi2SCSITaskManagementReply_t, *pMpi2SCSIManagementReply_t;
+
+/*ResponseCode values */
+
+#define MPI2_SCSITASKMGMT_RSP_TM_COMPLETE (0x00)
+#define MPI2_SCSITASKMGMT_RSP_INVALID_FRAME (0x02)
+#define MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED (0x04)
+#define MPI2_SCSITASKMGMT_RSP_TM_FAILED (0x05)
+#define MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED (0x08)
+#define MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN (0x09)
+#define MPI2_SCSITASKMGMT_RSP_TM_OVERLAPPED_TAG (0x0A)
+#define MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC (0x80)
+
+/*masks and shifts for the ResponseInfo field */
+
+#define MPI2_SCSITASKMGMT_RI_MASK_REASONCODE (0x000000FF)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE (0)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI2 (0x0000FF00)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI2 (8)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI1 (0x00FF0000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI1 (16)
+#define MPI2_SCSITASKMGMT_RI_MASK_ARI0 (0xFF000000)
+#define MPI2_SCSITASKMGMT_RI_SHIFT_ARI0 (24)
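
The four ARI masks slice the task-management ResponseInfo dword into its additional-response-information bytes; a small extraction sketch:

static U8 tm_reason_code(U32 ri)
{
	return (U8)((ri & MPI2_SCSITASKMGMT_RI_MASK_REASONCODE) >>
		    MPI2_SCSITASKMGMT_RI_SHIFT_REASONCODE);
}

static U8 tm_ari0(U32 ri)
{
	return (U8)((ri & MPI2_SCSITASKMGMT_RI_MASK_ARI0) >>
		    MPI2_SCSITASKMGMT_RI_SHIFT_ARI0);
}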
+
+/****************************************************************************
+* SCSI Enclosure Processor messages
+****************************************************************************/
+
+/*SCSI Enclosure Processor Request Message */
+typedef struct _MPI2_SEP_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 SlotStatus; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REQUEST, *PTR_MPI2_SEP_REQUEST,
+ Mpi2SepRequest_t, *pMpi2SepRequest_t;
+
+/*Action defines */
+#define MPI2_SEP_REQ_ACTION_WRITE_STATUS (0x00)
+#define MPI2_SEP_REQ_ACTION_READ_STATUS (0x01)
+
+/*Flags defines */
+#define MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS (0x00)
+#define MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS (0x01)
+
+/*SlotStatus defines */
+#define MPI2_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE (0x00040000)
+#define MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REQ_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REQ_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REQ_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REQ_SLOTSTATUS_NO_ERROR (0x00000001)
+
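+/*
+ *Illustrative sketch (not part of the MPI specification): filling out a
+ *SEP request that asks the enclosure to light the identify pattern for a
+ *device addressed by device handle. The Function code, endian
+ *conversion, and message submission are driver-specific and omitted.
+ */
+static inline void
+mpi2_sep_identify_request(PTR_MPI2_SEP_REQUEST req, U16 dev_handle)
+{
+	req->Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+	req->Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+	req->DevHandle = dev_handle;
+	req->SlotStatus = MPI2_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
+}
+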
+/*SCSI Enclosure Processor Reply Message */
+typedef struct _MPI2_SEP_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Action; /*0x04 */
+ U8 Flags; /*0x05 */
+ U8 Reserved1; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 SlotStatus; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U16 Slot; /*0x1C */
+ U16 EnclosureHandle; /*0x1E */
+} MPI2_SEP_REPLY, *PTR_MPI2_SEP_REPLY,
+ Mpi2SepReply_t, *pMpi2SepReply_t;
+
+/*SlotStatus defines */
+#define MPI2_SEP_REPLY_SLOTSTATUS_REMOVE_READY (0x00040000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST (0x00020000)
+#define MPI2_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED (0x00000200)
+#define MPI2_SEP_REPLY_SLOTSTATUS_HOT_SPARE (0x00000100)
+#define MPI2_SEP_REPLY_SLOTSTATUS_UNCONFIGURED (0x00000080)
+#define MPI2_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT (0x00000040)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY (0x00000010)
+#define MPI2_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY (0x00000008)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING (0x00000004)
+#define MPI2_SEP_REPLY_SLOTSTATUS_DEV_FAULTY (0x00000002)
+#define MPI2_SEP_REPLY_SLOTSTATUS_NO_ERROR (0x00000001)
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
new file mode 100644
index 000000000000..0de425d8fd70
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -0,0 +1,1665 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_ioc.h
+ * Title: MPI IOC, Port, Event, FW Download, and FW Upload messages
+ * Creation Date: October 11, 2006
+ *
+ * mpi2_ioc.h Version: 02.00.21
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-04-07 02.00.01 In IOCFacts Reply structure, renamed MaxDevices to
+ * MaxTargets.
+ * Added TotalImageSize field to FWDownload Request.
+ * Added reserved words to FWUpload Request.
+ * 06-26-07 02.00.02 Added IR Configuration Change List Event.
+ * 08-31-07 02.00.03 Removed SystemReplyQueueDepth field from the IOCInit
+ * request and replaced it with
+ * ReplyDescriptorPostQueueDepth and ReplyFreeQueueDepth.
+ * Replaced the MinReplyQueueDepth field of the IOCFacts
+ * reply with MaxReplyDescriptorPostQueueDepth.
+ * Added MPI2_RDPQ_DEPTH_MIN define to specify the minimum
+ * depth for the Reply Descriptor Post Queue.
+ * Added SASAddress field to Initiator Device Table
+ * Overflow Event data.
+ * 10-31-07 02.00.04 Added ReasonCode MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING
+ * for SAS Initiator Device Status Change Event data.
+ * Modified Reason Code defines for SAS Topology Change
+ * List Event data, including adding a bit for PHY Vacant
+ * status, and adding a mask for the Reason Code.
+ * Added define for
+ * MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING.
+ * Added define for MPI2_EXT_IMAGE_TYPE_MEGARAID.
+ * 12-18-07 02.00.05 Added Boot Status defines for the IOCExceptions field of
+ * the IOCFacts Reply.
+ * Removed MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Moved MPI2_VERSION_UNION to mpi2.h.
+ * Changed MPI2_EVENT_NOTIFICATION_REQUEST to use masks
+ * instead of enables, and added SASBroadcastPrimitiveMasks
+ * field.
+ * Added Log Entry Added Event and related structure.
+ * 02-29-08 02.00.06 Added define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID.
+ * Removed define MPI2_IOCFACTS_PROTOCOL_SMP_TARGET.
+ * Added MaxVolumes and MaxPersistentEntries fields to
+ * IOCFacts reply.
+ *                      Added ProtocolFlags and IOCCapabilities fields to
+ * MPI2_FW_IMAGE_HEADER.
+ * Removed MPI2_PORTENABLE_FLAGS_ENABLE_SINGLE_PORT.
+ * 03-03-08 02.00.07 Fixed MPI2_FW_IMAGE_HEADER by changing Reserved26 to
+ * a U16 (from a U32).
+ * Removed extra 's' from EventMasks name.
+ * 06-27-08 02.00.08 Fixed an offset in a comment.
+ * 10-02-08 02.00.09 Removed SystemReplyFrameSize from MPI2_IOC_INIT_REQUEST.
+ * Removed CurReplyFrameSize from MPI2_IOC_FACTS_REPLY and
+ * renamed MinReplyFrameSize to ReplyFrameSize.
+ * Added MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX.
+ * Added two new RAIDOperation values for Integrated RAID
+ * Operations Status Event data.
+ * Added four new IR Configuration Change List Event data
+ * ReasonCode values.
+ * Added two new ReasonCode defines for SAS Device Status
+ * Change Event data.
+ * Added three new DiscoveryStatus bits for the SAS
+ * Discovery event data.
+ * Added Multiplexing Status Change bit to the PhyStatus
+ * field of the SAS Topology Change List event data.
+ * Removed define for MPI2_INIT_IMAGE_BOOTFLAGS_XMEMCOPY.
+ * BootFlags are now product-specific.
+ *                      Added defines for the individual signature bytes
+ * for MPI2_INIT_IMAGE_FOOTER.
+ * 01-19-09 02.00.10 Added MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY define.
+ * Added MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR
+ * define.
+ * Added MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE
+ * define.
+ * Removed MPI2_EVENT_SAS_DISC_DS_SATA_INIT_FAILURE define.
+ * 05-06-09 02.00.11 Added MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR define.
+ * Added MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX define.
+ * Added two new reason codes for SAS Device Status Change
+ * Event.
+ * Added new event: SAS PHY Counter.
+ * 07-30-09 02.00.12 Added GPIO Interrupt event define and structure.
+ * Added MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER define.
+ * Added new product id family for 2208.
+ * 10-28-09 02.00.13 Added HostMSIxVectors field to MPI2_IOC_INIT_REQUEST.
+ * Added MaxMSIxVectors field to MPI2_IOC_FACTS_REPLY.
+ * Added MinDevHandle field to MPI2_IOC_FACTS_REPLY.
+ * Added MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY.
+ * Added MPI2_EVENT_HOST_BASED_DISCOVERY_PHY define.
+ * Added MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER define.
+ * Added Host Based Discovery Phy Event data.
+ * Added defines for ProductID Product field
+ * (MPI2_FW_HEADER_PID_).
+ * Modified values for SAS ProductID Family
+ * (MPI2_FW_HEADER_PID_FAMILY_).
+ * 02-10-10 02.00.14 Added SAS Quiesce Event structure and defines.
+ * Added PowerManagementControl Request structures and
+ * defines.
+ * 05-12-10 02.00.15 Marked Task Set Full Event as obsolete.
+ * Added MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY define.
+ * 11-10-10 02.00.16 Added MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC.
+ * 02-23-11 02.00.17 Added SAS NOTIFY Primitive event, and added
+ * SASNotifyPrimitiveMasks field to
+ * MPI2_EVENT_NOTIFICATION_REQUEST.
+ * Added Temperature Threshold Event.
+ * Added Host Message Event.
+ * Added Send Host Message request and reply.
+ * 05-25-11 02.00.18 For Extended Image Header, added
+ * MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC and
+ * MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC defines.
+ * Deprecated MPI2_EXT_IMAGE_TYPE_MAX define.
+ * 08-24-11 02.00.19 Added PhysicalPort field to
+ * MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE structure.
+ * Marked MPI2_PM_CONTROL_FEATURE_PCIE_LINK as obsolete.
+ * 11-18-11 02.00.20 Incorporating additions for MPI v2.5.
+ * 03-29-12 02.00.21 Added a product specific range to event values.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_IOC_H
+#define MPI2_IOC_H
+
+/*****************************************************************************
+*
+* IOC Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* IOCInit message
+****************************************************************************/
+
+/*IOCInit Request message */
+typedef struct _MPI2_IOC_INIT_REQUEST {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 MsgVersion; /*0x0C */
+ U16 HeaderVersion; /*0x0E */
+ U32 Reserved5; /*0x10 */
+ U16 Reserved6; /*0x14 */
+ U8 Reserved7; /*0x16 */
+ U8 HostMSIxVectors; /*0x17 */
+ U16 Reserved8; /*0x18 */
+ U16 SystemRequestFrameSize; /*0x1A */
+ U16 ReplyDescriptorPostQueueDepth; /*0x1C */
+ U16 ReplyFreeQueueDepth; /*0x1E */
+ U32 SenseBufferAddressHigh; /*0x20 */
+ U32 SystemReplyAddressHigh; /*0x24 */
+ U64 SystemRequestFrameBaseAddress; /*0x28 */
+ U64 ReplyDescriptorPostQueueAddress; /*0x30 */
+ U64 ReplyFreeQueueAddress; /*0x38 */
+ U64 TimeStamp; /*0x40 */
+} MPI2_IOC_INIT_REQUEST, *PTR_MPI2_IOC_INIT_REQUEST,
+ Mpi2IOCInitRequest_t, *pMpi2IOCInitRequest_t;
+
+/*WhoInit values */
+#define MPI2_WHOINIT_NOT_INITIALIZED (0x00)
+#define MPI2_WHOINIT_SYSTEM_BIOS (0x01)
+#define MPI2_WHOINIT_ROM_BIOS (0x02)
+#define MPI2_WHOINIT_PCI_PEER (0x03)
+#define MPI2_WHOINIT_HOST_DRIVER (0x04)
+#define MPI2_WHOINIT_MANUFACTURER (0x05)
+
+/*MsgVersion */
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCINIT_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCINIT_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCINIT_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCINIT_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT (0)
+
+/*minimum depth for the Reply Descriptor Post Queue */
+#define MPI2_RDPQ_DEPTH_MIN (16)
+
+/*IOCInit Reply message */
+typedef struct _MPI2_IOC_INIT_REPLY {
+ U8 WhoInit; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_IOC_INIT_REPLY, *PTR_MPI2_IOC_INIT_REPLY,
+ Mpi2IOCInitReply_t, *pMpi2IOCInitReply_t;
+
+/****************************************************************************
+* IOCFacts message
+****************************************************************************/
+
+/*IOCFacts Request message */
+typedef struct _MPI2_IOC_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_IOC_FACTS_REQUEST, *PTR_MPI2_IOC_FACTS_REQUEST,
+ Mpi2IOCFactsRequest_t, *pMpi2IOCFactsRequest_t;
+
+/*IOCFacts Reply message */
+typedef struct _MPI2_IOC_FACTS_REPLY {
+ U16 MsgVersion; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 HeaderVersion; /*0x04 */
+ U8 IOCNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U16 IOCExceptions; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 MaxChainDepth; /*0x14 */
+ U8 WhoInit; /*0x15 */
+ U8 NumberOfPorts; /*0x16 */
+ U8 MaxMSIxVectors; /*0x17 */
+ U16 RequestCredit; /*0x18 */
+ U16 ProductID; /*0x1A */
+ U32 IOCCapabilities; /*0x1C */
+ MPI2_VERSION_UNION FWVersion; /*0x20 */
+ U16 IOCRequestFrameSize; /*0x24 */
+ U16 IOCMaxChainSegmentSize; /*0x26 */
+ U16 MaxInitiators; /*0x28 */
+ U16 MaxTargets; /*0x2A */
+ U16 MaxSasExpanders; /*0x2C */
+ U16 MaxEnclosures; /*0x2E */
+ U16 ProtocolFlags; /*0x30 */
+ U16 HighPriorityCredit; /*0x32 */
+ U16 MaxReplyDescriptorPostQueueDepth; /*0x34 */
+ U8 ReplyFrameSize; /*0x36 */
+ U8 MaxVolumes; /*0x37 */
+ U16 MaxDevHandle; /*0x38 */
+ U16 MaxPersistentEntries; /*0x3A */
+ U16 MinDevHandle; /*0x3C */
+ U16 Reserved4; /*0x3E */
+} MPI2_IOC_FACTS_REPLY, *PTR_MPI2_IOC_FACTS_REPLY,
+ Mpi2IOCFactsReply_t, *pMpi2IOCFactsReply_t;
+
+/*MsgVersion */
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK (0xFF00)
+#define MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT (8)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_MASK (0x00FF)
+#define MPI2_IOCFACTS_MSGVERSION_MINOR_SHIFT (0)
+
+/*HeaderVersion */
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_MASK (0xFF00)
+#define MPI2_IOCFACTS_HDRVERSION_UNIT_SHIFT (8)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_MASK (0x00FF)
+#define MPI2_IOCFACTS_HDRVERSION_DEV_SHIFT (0)
+
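+/*
+ *Illustrative sketch (not part of the MPI specification): decoding the
+ *major and minor components of the IOCFacts MsgVersion field with the
+ *masks and shifts above (host-endian value assumed).
+ */
+static inline U8
+mpi2_iocfacts_msgversion_major(U16 msg_version)
+{
+	return (U8)((msg_version & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >>
+		MPI2_IOCFACTS_MSGVERSION_MAJOR_SHIFT);
+}
+
+static inline U8
+mpi2_iocfacts_msgversion_minor(U16 msg_version)
+{
+	return (U8)(msg_version & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK);
+}
+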
+/*IOCExceptions */
+#define MPI2_IOCFACTS_EXCEPT_IR_FOREIGN_CONFIG_MAX (0x0100)
+
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_MASK (0x00E0)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_GOOD (0x0000)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_BACKUP (0x0020)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_RESTORED (0x0040)
+#define MPI2_IOCFACTS_EXCEPT_BOOTSTAT_CORRUPT_BACKUP (0x0060)
+
+#define MPI2_IOCFACTS_EXCEPT_METADATA_UNSUPPORTED (0x0010)
+#define MPI2_IOCFACTS_EXCEPT_MANUFACT_CHECKSUM_FAIL (0x0008)
+#define MPI2_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL (0x0004)
+#define MPI2_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID (0x0002)
+#define MPI2_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL (0x0001)
+
+/*defines for WhoInit field are after the IOCInit Request */
+
+/*ProductID field uses MPI2_FW_HEADER_PID_ */
+
+/*IOCCapabilities */
+#define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE (0x00020000)
+#define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY (0x00010000)
+#define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX (0x00008000)
+#define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR (0x00004000)
+#define MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY (0x00002000)
+#define MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID (0x00001000)
+#define MPI2_IOCFACTS_CAPABILITY_TLR (0x00000800)
+#define MPI2_IOCFACTS_CAPABILITY_MULTICAST (0x00000100)
+#define MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET (0x00000080)
+#define MPI2_IOCFACTS_CAPABILITY_EEDP (0x00000040)
+#define MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER (0x00000020)
+#define MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER (0x00000010)
+#define MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER (0x00000008)
+#define MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING (0x00000004)
+
+/*ProtocolFlags */
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET (0x0001)
+#define MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR (0x0002)
+
+/****************************************************************************
+* PortFacts message
+****************************************************************************/
+
+/*PortFacts Request message */
+typedef struct _MPI2_PORT_FACTS_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+} MPI2_PORT_FACTS_REQUEST, *PTR_MPI2_PORT_FACTS_REQUEST,
+ Mpi2PortFactsRequest_t, *pMpi2PortFactsRequest_t;
+
+/*PortFacts Reply message */
+typedef struct _MPI2_PORT_FACTS_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 PortNumber; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 Reserved5; /*0x14 */
+ U8 PortType; /*0x15 */
+ U16 Reserved6; /*0x16 */
+ U16 MaxPostedCmdBuffers; /*0x18 */
+ U16 Reserved7; /*0x1A */
+} MPI2_PORT_FACTS_REPLY, *PTR_MPI2_PORT_FACTS_REPLY,
+ Mpi2PortFactsReply_t, *pMpi2PortFactsReply_t;
+
+/*PortType values */
+#define MPI2_PORTFACTS_PORTTYPE_INACTIVE (0x00)
+#define MPI2_PORTFACTS_PORTTYPE_FC (0x10)
+#define MPI2_PORTFACTS_PORTTYPE_ISCSI (0x20)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_PHYSICAL (0x30)
+#define MPI2_PORTFACTS_PORTTYPE_SAS_VIRTUAL (0x31)
+
+/****************************************************************************
+* PortEnable message
+****************************************************************************/
+
+/*PortEnable Request message */
+typedef struct _MPI2_PORT_ENABLE_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_PORT_ENABLE_REQUEST, *PTR_MPI2_PORT_ENABLE_REQUEST,
+ Mpi2PortEnableRequest_t, *pMpi2PortEnableRequest_t;
+
+/*PortEnable Reply message */
+typedef struct _MPI2_PORT_ENABLE_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U8 Reserved2; /*0x04 */
+ U8 PortFlags; /*0x05 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PORT_ENABLE_REPLY, *PTR_MPI2_PORT_ENABLE_REPLY,
+ Mpi2PortEnableReply_t, *pMpi2PortEnableReply_t;
+
+/****************************************************************************
+* EventNotification message
+****************************************************************************/
+
+/*EventNotification Request message */
+#define MPI2_EVENT_NOTIFY_EVENTMASK_WORDS (4)
+
+typedef struct _MPI2_EVENT_NOTIFICATION_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 EventMasks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; /*0x14 */
+ U16 SASBroadcastPrimitiveMasks; /*0x24 */
+ U16 SASNotifyPrimitiveMasks; /*0x26 */
+ U32 Reserved8; /*0x28 */
+} MPI2_EVENT_NOTIFICATION_REQUEST,
+ *PTR_MPI2_EVENT_NOTIFICATION_REQUEST,
+ Mpi2EventNotificationRequest_t,
+ *pMpi2EventNotificationRequest_t;
+
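+/*
+ *Illustrative sketch (not part of the MPI specification): EventMasks is
+ *a mask rather than an enable - a set bit suppresses the corresponding
+ *event. To receive event number 'event', clear bit (event % 32) in word
+ *(event / 32); the four mask words cover event values 0 through 127.
+ */
+static inline void
+mpi2_unmask_event(PTR_MPI2_EVENT_NOTIFICATION_REQUEST req, U16 event)
+{
+	req->EventMasks[event / 32] &= ~((U32)1 << (event % 32));
+}
+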
+/*EventNotification Reply message */
+typedef struct _MPI2_EVENT_NOTIFICATION_REPLY {
+ U16 EventDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 AckRequired; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U16 Event; /*0x14 */
+ U16 Reserved4; /*0x16 */
+ U32 EventContext; /*0x18 */
+ U32 EventData[1]; /*0x1C */
+} MPI2_EVENT_NOTIFICATION_REPLY, *PTR_MPI2_EVENT_NOTIFICATION_REPLY,
+ Mpi2EventNotificationReply_t,
+ *pMpi2EventNotificationReply_t;
+
+/*AckRequired */
+#define MPI2_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI2_EVENT_NOTIFICATION_ACK_REQUIRED (0x01)
+
+/*Event */
+#define MPI2_EVENT_LOG_DATA (0x0001)
+#define MPI2_EVENT_STATE_CHANGE (0x0002)
+#define MPI2_EVENT_HARD_RESET_RECEIVED (0x0005)
+#define MPI2_EVENT_EVENT_CHANGE (0x000A)
+#define MPI2_EVENT_TASK_SET_FULL (0x000E) /*obsolete */
+#define MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE (0x000F)
+#define MPI2_EVENT_IR_OPERATION_STATUS (0x0014)
+#define MPI2_EVENT_SAS_DISCOVERY (0x0016)
+#define MPI2_EVENT_SAS_BROADCAST_PRIMITIVE (0x0017)
+#define MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE (0x0018)
+#define MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW (0x0019)
+#define MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST (0x001C)
+#define MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE (0x001D)
+#define MPI2_EVENT_IR_VOLUME (0x001E)
+#define MPI2_EVENT_IR_PHYSICAL_DISK (0x001F)
+#define MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST (0x0020)
+#define MPI2_EVENT_LOG_ENTRY_ADDED (0x0021)
+#define MPI2_EVENT_SAS_PHY_COUNTER (0x0022)
+#define MPI2_EVENT_GPIO_INTERRUPT (0x0023)
+#define MPI2_EVENT_HOST_BASED_DISCOVERY_PHY (0x0024)
+#define MPI2_EVENT_SAS_QUIESCE (0x0025)
+#define MPI2_EVENT_SAS_NOTIFY_PRIMITIVE (0x0026)
+#define MPI2_EVENT_TEMP_THRESHOLD (0x0027)
+#define MPI2_EVENT_HOST_MESSAGE (0x0028)
+#define MPI2_EVENT_POWER_PERFORMANCE_CHANGE (0x0029)
+#define MPI2_EVENT_MIN_PRODUCT_SPECIFIC (0x006E)
+#define MPI2_EVENT_MAX_PRODUCT_SPECIFIC (0x007F)
+
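+/*
+ *Illustrative sketch (not part of the MPI specification): EventData is a
+ *variable-length trailer whose size is EventDataLength 32-bit words, and
+ *AckRequired tells the host whether it must send an EventAck carrying
+ *this reply's Event and EventContext values.
+ */
+static inline U32
+mpi2_event_data_bytes(PTR_MPI2_EVENT_NOTIFICATION_REPLY reply)
+{
+	return (U32)reply->EventDataLength * 4;
+}
+
+static inline int
+mpi2_event_needs_ack(PTR_MPI2_EVENT_NOTIFICATION_REPLY reply)
+{
+	return reply->AckRequired == MPI2_EVENT_NOTIFICATION_ACK_REQUIRED;
+}
+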
+/*Log Entry Added Event data */
+
+/*the following structure matches MPI2_LOG_0_ENTRY in mpi2_cnfg.h */
+#define MPI2_EVENT_DATA_LOG_DATA_LENGTH (0x1C)
+
+typedef struct _MPI2_EVENT_DATA_LOG_ENTRY_ADDED {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U16 LogSequence; /*0x0C */
+ U16 LogEntryQualifier; /*0x0E */
+ U8 VP_ID; /*0x10 */
+ U8 VF_ID; /*0x11 */
+ U16 Reserved2; /*0x12 */
+ U8 LogData[MPI2_EVENT_DATA_LOG_DATA_LENGTH]; /*0x14 */
+} MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ *PTR_MPI2_EVENT_DATA_LOG_ENTRY_ADDED,
+ Mpi2EventDataLogEntryAdded_t,
+ *pMpi2EventDataLogEntryAdded_t;
+
+/*GPIO Interrupt Event data */
+
+typedef struct _MPI2_EVENT_DATA_GPIO_INTERRUPT {
+ U8 GPIONum; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ *PTR_MPI2_EVENT_DATA_GPIO_INTERRUPT,
+ Mpi2EventDataGpioInterrupt_t,
+ *pMpi2EventDataGpioInterrupt_t;
+
+/*Temperature Threshold Event data */
+
+typedef struct _MPI2_EVENT_DATA_TEMPERATURE {
+ U16 Status; /*0x00 */
+ U8 SensorNum; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U16 CurrentTemperature; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U32 Reserved3; /*0x08 */
+ U32 Reserved4; /*0x0C */
+} MPI2_EVENT_DATA_TEMPERATURE,
+ *PTR_MPI2_EVENT_DATA_TEMPERATURE,
+ Mpi2EventDataTemperature_t, *pMpi2EventDataTemperature_t;
+
+/*Temperature Threshold Event data Status bits */
+#define MPI2_EVENT_TEMPERATURE3_EXCEEDED (0x0008)
+#define MPI2_EVENT_TEMPERATURE2_EXCEEDED (0x0004)
+#define MPI2_EVENT_TEMPERATURE1_EXCEEDED (0x0002)
+#define MPI2_EVENT_TEMPERATURE0_EXCEEDED (0x0001)
+
+/*Host Message Event data */
+
+typedef struct _MPI2_EVENT_DATA_HOST_MESSAGE {
+ U8 SourceVF_ID; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ U32 HostData[1]; /*0x08 */
+} MPI2_EVENT_DATA_HOST_MESSAGE, *PTR_MPI2_EVENT_DATA_HOST_MESSAGE,
+ Mpi2EventDataHostMessage_t, *pMpi2EventDataHostMessage_t;
+
+/*Power Performance Change Event */
+
+typedef struct _MPI2_EVENT_DATA_POWER_PERF_CHANGE {
+ U8 CurrentPowerMode; /*0x00 */
+ U8 PreviousPowerMode; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ *PTR_MPI2_EVENT_DATA_POWER_PERF_CHANGE,
+ Mpi2EventDataPowerPerfChange_t,
+ *pMpi2EventDataPowerPerfChange_t;
+
+/*defines for CurrentPowerMode and PreviousPowerMode fields */
+#define MPI2_EVENT_PM_INIT_MASK (0xC0)
+#define MPI2_EVENT_PM_INIT_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_INIT_HOST (0x40)
+#define MPI2_EVENT_PM_INIT_IO_UNIT (0x80)
+#define MPI2_EVENT_PM_INIT_PCIE_DPA (0xC0)
+
+#define MPI2_EVENT_PM_MODE_MASK (0x07)
+#define MPI2_EVENT_PM_MODE_UNAVAILABLE (0x00)
+#define MPI2_EVENT_PM_MODE_UNKNOWN (0x01)
+#define MPI2_EVENT_PM_MODE_FULL_POWER (0x04)
+#define MPI2_EVENT_PM_MODE_REDUCED_POWER (0x05)
+#define MPI2_EVENT_PM_MODE_STANDBY (0x06)
+
+/*Hard Reset Received Event data */
+
+typedef struct _MPI2_EVENT_DATA_HARD_RESET_RECEIVED {
+ U8 Reserved1; /*0x00 */
+ U8 Port; /*0x01 */
+ U16 Reserved2; /*0x02 */
+} MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ *PTR_MPI2_EVENT_DATA_HARD_RESET_RECEIVED,
+ Mpi2EventDataHardResetReceived_t,
+ *pMpi2EventDataHardResetReceived_t;
+
+/*Task Set Full Event data */
+/* this event is obsolete */
+
+typedef struct _MPI2_EVENT_DATA_TASK_SET_FULL {
+ U16 DevHandle; /*0x00 */
+ U16 CurrentDepth; /*0x02 */
+} MPI2_EVENT_DATA_TASK_SET_FULL, *PTR_MPI2_EVENT_DATA_TASK_SET_FULL,
+ Mpi2EventDataTaskSetFull_t, *pMpi2EventDataTaskSetFull_t;
+
+/*SAS Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE {
+ U16 TaskTag; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U8 ASC; /*0x04 */
+ U8 ASCQ; /*0x05 */
+ U16 DevHandle; /*0x06 */
+ U32 Reserved2; /*0x08 */
+ U64 SASAddress; /*0x0C */
+ U8 LUN[8]; /*0x14 */
+} MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+ Mpi2EventDataSasDeviceStatusChange_t,
+ *pMpi2EventDataSasDeviceStatusChange_t;
+
+/*SAS Device Status Change Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA (0x05)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED (0x07)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET (0x08)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL (0x09)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL (0x0A)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL (0x0B)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL (0x0C)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION (0x0D)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET (0x0E)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL (0x0F)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE (0x10)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY (0x11)
+#define MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY (0x12)
+
+/*Integrated RAID Operation Status Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_OPERATION_STATUS {
+ U16 VolDevHandle; /*0x00 */
+ U16 Reserved1; /*0x02 */
+ U8 RAIDOperation; /*0x04 */
+ U8 PercentComplete; /*0x05 */
+ U16 Reserved2; /*0x06 */
+	U32 Reserved3;          /*0x08 */
+} MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ *PTR_MPI2_EVENT_DATA_IR_OPERATION_STATUS,
+ Mpi2EventDataIrOperationStatus_t,
+ *pMpi2EventDataIrOperationStatus_t;
+
+/*Integrated RAID Operation Status Event data RAIDOperation values */
+#define MPI2_EVENT_IR_RAIDOP_RESYNC (0x00)
+#define MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK (0x02)
+#define MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT (0x03)
+#define MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT (0x04)
+
+/*Integrated RAID Volume Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_VOLUME {
+ U16 VolDevHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 NewValue; /*0x04 */
+ U32 PreviousValue; /*0x08 */
+} MPI2_EVENT_DATA_IR_VOLUME, *PTR_MPI2_EVENT_DATA_IR_VOLUME,
+ Mpi2EventDataIrVolume_t, *pMpi2EventDataIrVolume_t;
+
+/*Integrated RAID Volume Event data ReasonCode values */
+#define MPI2_EVENT_IR_VOLUME_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_VOLUME_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Physical Disk Event data */
+
+typedef struct _MPI2_EVENT_DATA_IR_PHYSICAL_DISK {
+ U16 Reserved1; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysDiskNum; /*0x03 */
+ U16 PhysDiskDevHandle; /*0x04 */
+ U16 Reserved2; /*0x06 */
+ U16 Slot; /*0x08 */
+ U16 EnclosureHandle; /*0x0A */
+ U32 NewValue; /*0x0C */
+ U32 PreviousValue; /*0x10 */
+} MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ *PTR_MPI2_EVENT_DATA_IR_PHYSICAL_DISK,
+ Mpi2EventDataIrPhysicalDisk_t,
+ *pMpi2EventDataIrPhysicalDisk_t;
+
+/*Integrated RAID Physical Disk Event data ReasonCode values */
+#define MPI2_EVENT_IR_PHYSDISK_RC_SETTINGS_CHANGED (0x01)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATUS_FLAGS_CHANGED (0x02)
+#define MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED (0x03)
+
+/*Integrated RAID Configuration Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumElements at runtime.
+ */
+#ifndef MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT
+#define MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_IR_CONFIG_ELEMENT {
+ U16 ElementFlags; /*0x00 */
+ U16 VolDevHandle; /*0x02 */
+ U8 ReasonCode; /*0x04 */
+ U8 PhysDiskNum; /*0x05 */
+ U16 PhysDiskDevHandle; /*0x06 */
+} MPI2_EVENT_IR_CONFIG_ELEMENT, *PTR_MPI2_EVENT_IR_CONFIG_ELEMENT,
+ Mpi2EventIrConfigElement_t, *pMpi2EventIrConfigElement_t;
+
+/*IR Configuration Change List Event data ElementFlags values */
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK (0x000F)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT (0x0000)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT (0x0001)
+#define MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT (0x0002)
+
+/*IR Configuration Change List Event data ReasonCode values */
+#define MPI2_EVENT_IR_CHANGE_RC_ADDED (0x01)
+#define MPI2_EVENT_IR_CHANGE_RC_REMOVED (0x02)
+#define MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE (0x03)
+#define MPI2_EVENT_IR_CHANGE_RC_HIDE (0x04)
+#define MPI2_EVENT_IR_CHANGE_RC_UNHIDE (0x05)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED (0x06)
+#define MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED (0x07)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_CREATED (0x08)
+#define MPI2_EVENT_IR_CHANGE_RC_PD_DELETED (0x09)
+
+typedef struct _MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST {
+ U8 NumElements; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 Reserved2; /*0x02 */
+ U8 ConfigNum; /*0x03 */
+ U32 Flags; /*0x04 */
+ MPI2_EVENT_IR_CONFIG_ELEMENT
+ ConfigElement[MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT];/*0x08 */
+} MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST,
+ Mpi2EventDataIrConfigChangeList_t,
+ *pMpi2EventDataIrConfigChangeList_t;
+
+/*IR Configuration Change List Event data Flags values */
+#define MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG (0x00000001)
+
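+/*
+ *Illustrative sketch (not part of the MPI specification): because the
+ *header is compiled with MPI2_EVENT_IR_CONFIG_ELEMENT_COUNT left at one,
+ *hosts must walk NumElements entries at runtime instead of trusting the
+ *declared array bound.
+ */
+static inline void
+mpi2_walk_ir_config_changes(PTR_MPI2_EVENT_DATA_IR_CONFIG_CHANGE_LIST list,
+	void (*handle)(PTR_MPI2_EVENT_IR_CONFIG_ELEMENT element))
+{
+	U8 i;
+
+	for (i = 0; i < list->NumElements; i++)
+		handle(&list->ConfigElement[i]);
+}
+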
+/*SAS Discovery Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_DISCOVERY {
+ U8 Flags; /*0x00 */
+ U8 ReasonCode; /*0x01 */
+ U8 PhysicalPort; /*0x02 */
+ U8 Reserved1; /*0x03 */
+ U32 DiscoveryStatus; /*0x04 */
+} MPI2_EVENT_DATA_SAS_DISCOVERY,
+ *PTR_MPI2_EVENT_DATA_SAS_DISCOVERY,
+ Mpi2EventDataSasDiscovery_t, *pMpi2EventDataSasDiscovery_t;
+
+/*SAS Discovery Event data Flags values */
+#define MPI2_EVENT_SAS_DISC_DEVICE_CHANGE (0x02)
+#define MPI2_EVENT_SAS_DISC_IN_PROGRESS (0x01)
+
+/*SAS Discovery Event data ReasonCode values */
+#define MPI2_EVENT_SAS_DISC_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_DISC_RC_COMPLETED (0x02)
+
+/*SAS Discovery Event data DiscoveryStatus values */
+#define MPI2_EVENT_SAS_DISC_DS_MAX_ENCLOSURES_EXCEED (0x80000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_EXPANDERS_EXCEED (0x40000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_DEVICES_EXCEED (0x20000000)
+#define MPI2_EVENT_SAS_DISC_DS_MAX_TOPO_PHYS_EXCEED (0x10000000)
+#define MPI2_EVENT_SAS_DISC_DS_DOWNSTREAM_INITIATOR (0x08000000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_SUBTRACTIVE_SUBTRACTIVE (0x00008000)
+#define MPI2_EVENT_SAS_DISC_DS_EXP_MULTI_SUBTRACTIVE (0x00004000)
+#define MPI2_EVENT_SAS_DISC_DS_MULTI_PORT_DOMAIN (0x00002000)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_TO_SUBTRACTIVE_LINK (0x00001000)
+#define MPI2_EVENT_SAS_DISC_DS_UNSUPPORTED_DEVICE (0x00000800)
+#define MPI2_EVENT_SAS_DISC_DS_TABLE_LINK (0x00000400)
+#define MPI2_EVENT_SAS_DISC_DS_SUBTRACTIVE_LINK (0x00000200)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_CRC_ERROR (0x00000100)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_FUNCTION_FAILED (0x00000080)
+#define MPI2_EVENT_SAS_DISC_DS_INDEX_NOT_EXIST (0x00000040)
+#define MPI2_EVENT_SAS_DISC_DS_OUT_ROUTE_ENTRIES (0x00000020)
+#define MPI2_EVENT_SAS_DISC_DS_SMP_TIMEOUT (0x00000010)
+#define MPI2_EVENT_SAS_DISC_DS_MULTIPLE_PORTS (0x00000004)
+#define MPI2_EVENT_SAS_DISC_DS_UNADDRESSABLE_DEVICE (0x00000002)
+#define MPI2_EVENT_SAS_DISC_DS_LOOP_DETECTED (0x00000001)
+
+/*SAS Broadcast Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 PortWidth; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_BROADCAST_PRIMITIVE,
+ Mpi2EventDataSasBroadcastPrimitive_t,
+ *pMpi2EventDataSasBroadcastPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_PRIMITIVE_CHANGE (0x01)
+#define MPI2_EVENT_PRIMITIVE_SES (0x02)
+#define MPI2_EVENT_PRIMITIVE_EXPANDER (0x03)
+#define MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT (0x04)
+#define MPI2_EVENT_PRIMITIVE_RESERVED3 (0x05)
+#define MPI2_EVENT_PRIMITIVE_RESERVED4 (0x06)
+#define MPI2_EVENT_PRIMITIVE_CHANGE0_RESERVED (0x07)
+#define MPI2_EVENT_PRIMITIVE_CHANGE1_RESERVED (0x08)
+
+/*SAS Notify Primitive Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE {
+ U8 PhyNum; /*0x00 */
+ U8 Port; /*0x01 */
+ U8 Reserved1; /*0x02 */
+ U8 Primitive; /*0x03 */
+} MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ *PTR_MPI2_EVENT_DATA_SAS_NOTIFY_PRIMITIVE,
+ Mpi2EventDataSasNotifyPrimitive_t,
+ *pMpi2EventDataSasNotifyPrimitive_t;
+
+/*defines for the Primitive field */
+#define MPI2_EVENT_NOTIFY_ENABLE_SPINUP (0x01)
+#define MPI2_EVENT_NOTIFY_POWER_LOSS_EXPECTED (0x02)
+#define MPI2_EVENT_NOTIFY_RESERVED1 (0x03)
+#define MPI2_EVENT_NOTIFY_RESERVED2 (0x04)
+
+/*SAS Initiator Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE {
+ U8 ReasonCode; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U16 DevHandle; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasInitDevStatusChange_t,
+ *pMpi2EventDataSasInitDevStatusChange_t;
+
+/*SAS Initiator Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_INIT_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_INIT_RC_NOT_RESPONDING (0x02)
+
+/*SAS Initiator Device Table Overflow Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW {
+ U16 MaxInit; /*0x00 */
+ U16 CurrentInit; /*0x02 */
+ U64 SASAddress; /*0x04 */
+} MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ *PTR_MPI2_EVENT_DATA_SAS_INIT_TABLE_OVERFLOW,
+ Mpi2EventDataSasInitTableOverflow_t,
+ *pMpi2EventDataSasInitTableOverflow_t;
+
+/*SAS Topology Change List Event data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumEntries at runtime.
+ */
+#ifndef MPI2_EVENT_SAS_TOPO_PHY_COUNT
+#define MPI2_EVENT_SAS_TOPO_PHY_COUNT (1)
+#endif
+
+typedef struct _MPI2_EVENT_SAS_TOPO_PHY_ENTRY {
+ U16 AttachedDevHandle; /*0x00 */
+ U8 LinkRate; /*0x02 */
+ U8 PhyStatus; /*0x03 */
+} MPI2_EVENT_SAS_TOPO_PHY_ENTRY, *PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY,
+ Mpi2EventSasTopoPhyEntry_t, *pMpi2EventSasTopoPhyEntry_t;
+
+typedef struct _MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST {
+ U16 EnclosureHandle; /*0x00 */
+ U16 ExpanderDevHandle; /*0x02 */
+ U8 NumPhys; /*0x04 */
+ U8 Reserved1; /*0x05 */
+ U16 Reserved2; /*0x06 */
+ U8 NumEntries; /*0x08 */
+ U8 StartPhyNum; /*0x09 */
+ U8 ExpStatus; /*0x0A */
+ U8 PhysicalPort; /*0x0B */
+ MPI2_EVENT_SAS_TOPO_PHY_ENTRY
+ PHY[MPI2_EVENT_SAS_TOPO_PHY_COUNT]; /*0x0C */
+} MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ *PTR_MPI2_EVENT_DATA_SAS_TOPOLOGY_CHANGE_LIST,
+ Mpi2EventDataSasTopologyChangeList_t,
+ *pMpi2EventDataSasTopologyChangeList_t;
+
+/*values for the ExpStatus field */
+#define MPI2_EVENT_SAS_TOPO_ES_NO_EXPANDER (0x00)
+#define MPI2_EVENT_SAS_TOPO_ES_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_ES_RESPONDING (0x03)
+#define MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING (0x04)
+
+/*defines for the LinkRate field */
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK (0xF0)
+#define MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT (4)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_LR_PREV_SHIFT (0)
+
+#define MPI2_EVENT_SAS_TOPO_LR_UNKNOWN_LINK_RATE (0x00)
+#define MPI2_EVENT_SAS_TOPO_LR_PHY_DISABLED (0x01)
+#define MPI2_EVENT_SAS_TOPO_LR_NEGOTIATION_FAILED (0x02)
+#define MPI2_EVENT_SAS_TOPO_LR_SATA_OOB_COMPLETE (0x03)
+#define MPI2_EVENT_SAS_TOPO_LR_PORT_SELECTOR (0x04)
+#define MPI2_EVENT_SAS_TOPO_LR_SMP_RESET_IN_PROGRESS (0x05)
+#define MPI2_EVENT_SAS_TOPO_LR_UNSUPPORTED_PHY (0x06)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_1_5 (0x08)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_3_0 (0x09)
+#define MPI2_EVENT_SAS_TOPO_LR_RATE_6_0 (0x0A)
+#define MPI25_EVENT_SAS_TOPO_LR_RATE_12_0 (0x0B)
+
+/*values for the PhyStatus field */
+#define MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT (0x80)
+#define MPI2_EVENT_SAS_TOPO_PS_MULTIPLEX_CHANGE (0x10)
+/*values for the PhyStatus ReasonCode sub-field */
+#define MPI2_EVENT_SAS_TOPO_RC_MASK (0x0F)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED (0x01)
+#define MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING (0x02)
+#define MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED (0x03)
+#define MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE (0x04)
+#define MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING (0x05)
+
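+/*
+ *Illustrative sketch (not part of the MPI specification): each PHY entry
+ *packs the current and previous link rates into LinkRate and a reason
+ *code plus status flags into PhyStatus; only NumEntries entries starting
+ *at StartPhyNum are present in the event data.
+ */
+static inline U8
+mpi2_topo_current_link_rate(PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY entry)
+{
+	return (entry->LinkRate & MPI2_EVENT_SAS_TOPO_LR_CURRENT_MASK) >>
+		MPI2_EVENT_SAS_TOPO_LR_CURRENT_SHIFT;
+}
+
+static inline U8
+mpi2_topo_phy_reason_code(PTR_MPI2_EVENT_SAS_TOPO_PHY_ENTRY entry)
+{
+	return entry->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK;
+}
+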
+/*SAS Enclosure Device Status Change Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE {
+ U16 EnclosureHandle; /*0x00 */
+ U8 ReasonCode; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U64 EnclosureLogicalID; /*0x04 */
+ U16 NumSlots; /*0x0C */
+ U16 StartSlot; /*0x0E */
+ U32 PhyBits; /*0x10 */
+} MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ *PTR_MPI2_EVENT_DATA_SAS_ENCL_DEV_STATUS_CHANGE,
+ Mpi2EventDataSasEnclDevStatusChange_t,
+ *pMpi2EventDataSasEnclDevStatusChange_t;
+
+/*SAS Enclosure Device Status Change event ReasonCode values */
+#define MPI2_EVENT_SAS_ENCL_RC_ADDED (0x01)
+#define MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING (0x02)
+
+/*SAS PHY Counter Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_PHY_COUNTER {
+ U64 TimeStamp; /*0x00 */
+ U32 Reserved1; /*0x08 */
+ U8 PhyEventCode; /*0x0C */
+ U8 PhyNum; /*0x0D */
+ U16 Reserved2; /*0x0E */
+ U32 PhyEventInfo; /*0x10 */
+ U8 CounterType; /*0x14 */
+ U8 ThresholdWindow; /*0x15 */
+ U8 TimeUnits; /*0x16 */
+ U8 Reserved3; /*0x17 */
+ U32 EventThreshold; /*0x18 */
+ U16 ThresholdFlags; /*0x1C */
+ U16 Reserved4; /*0x1E */
+} MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ *PTR_MPI2_EVENT_DATA_SAS_PHY_COUNTER,
+ Mpi2EventDataSasPhyCounter_t,
+ *pMpi2EventDataSasPhyCounter_t;
+
+/*use MPI2_SASPHY3_EVENT_CODE_ values from mpi2_cnfg.h
+ *for the PhyEventCode field */
+
+/*use MPI2_SASPHY3_COUNTER_TYPE_ values from mpi2_cnfg.h
+ *for the CounterType field */
+
+/*use MPI2_SASPHY3_TIME_UNITS_ values from mpi2_cnfg.h
+ *for the TimeUnits field */
+
+/*use MPI2_SASPHY3_TFLAGS_ values from mpi2_cnfg.h
+ *for the ThresholdFlags field */
+
+/*SAS Quiesce Event data */
+
+typedef struct _MPI2_EVENT_DATA_SAS_QUIESCE {
+ U8 ReasonCode; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+} MPI2_EVENT_DATA_SAS_QUIESCE,
+ *PTR_MPI2_EVENT_DATA_SAS_QUIESCE,
+ Mpi2EventDataSasQuiesce_t, *pMpi2EventDataSasQuiesce_t;
+
+/*SAS Quiesce Event data ReasonCode values */
+#define MPI2_EVENT_SAS_QUIESCE_RC_STARTED (0x01)
+#define MPI2_EVENT_SAS_QUIESCE_RC_COMPLETED (0x02)
+
+/*Host Based Discovery Phy Event data */
+
+typedef struct _MPI2_EVENT_HBD_PHY_SAS {
+ U8 Flags; /*0x00 */
+ U8 NegotiatedLinkRate; /*0x01 */
+ U8 PhyNum; /*0x02 */
+ U8 PhysicalPort; /*0x03 */
+ U32 Reserved1; /*0x04 */
+ U8 InitialFrame[28]; /*0x08 */
+} MPI2_EVENT_HBD_PHY_SAS, *PTR_MPI2_EVENT_HBD_PHY_SAS,
+ Mpi2EventHbdPhySas_t, *pMpi2EventHbdPhySas_t;
+
+/*values for the Flags field */
+#define MPI2_EVENT_HBD_SAS_FLAGS_FRAME_VALID (0x02)
+#define MPI2_EVENT_HBD_SAS_FLAGS_SATA_FRAME (0x01)
+
+/*use MPI2_SAS_NEG_LINK_RATE_ defines from mpi2_cnfg.h
+ *for the NegotiatedLinkRate field */
+
+typedef union _MPI2_EVENT_HBD_DESCRIPTOR {
+ MPI2_EVENT_HBD_PHY_SAS Sas;
+} MPI2_EVENT_HBD_DESCRIPTOR, *PTR_MPI2_EVENT_HBD_DESCRIPTOR,
+ Mpi2EventHbdDescriptor_t, *pMpi2EventHbdDescriptor_t;
+
+typedef struct _MPI2_EVENT_DATA_HBD_PHY {
+ U8 DescriptorType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Reserved3; /*0x04 */
+ MPI2_EVENT_HBD_DESCRIPTOR Descriptor; /*0x08 */
+} MPI2_EVENT_DATA_HBD_PHY, *PTR_MPI2_EVENT_DATA_HBD_PHY,
+ Mpi2EventDataHbdPhy_t,
+	*pMpi2EventDataHbdPhy_t;
+
+/*values for the DescriptorType field */
+#define MPI2_EVENT_HBD_DT_SAS (0x01)
+
+/****************************************************************************
+* EventAck message
+****************************************************************************/
+
+/*EventAck Request message */
+typedef struct _MPI2_EVENT_ACK_REQUEST {
+ U16 Reserved1; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Event; /*0x0C */
+ U16 Reserved5; /*0x0E */
+ U32 EventContext; /*0x10 */
+} MPI2_EVENT_ACK_REQUEST, *PTR_MPI2_EVENT_ACK_REQUEST,
+ Mpi2EventAckRequest_t, *pMpi2EventAckRequest_t;
+
+/*EventAck Reply message */
+typedef struct _MPI2_EVENT_ACK_REPLY {
+ U16 Reserved1; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_EVENT_ACK_REPLY, *PTR_MPI2_EVENT_ACK_REPLY,
+ Mpi2EventAckReply_t, *pMpi2EventAckReply_t;
+
+/****************************************************************************
+* SendHostMessage message
+****************************************************************************/
+
+/*SendHostMessage Request message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REQUEST {
+ U16 HostDataLength; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U8 Reserved4; /*0x0C */
+ U8 DestVF_ID; /*0x0D */
+ U16 Reserved5; /*0x0E */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 Reserved8; /*0x18 */
+ U32 Reserved9; /*0x1C */
+ U32 Reserved10; /*0x20 */
+ U32 HostData[1]; /*0x24 */
+} MPI2_SEND_HOST_MESSAGE_REQUEST,
+ *PTR_MPI2_SEND_HOST_MESSAGE_REQUEST,
+ Mpi2SendHostMessageRequest_t,
+ *pMpi2SendHostMessageRequest_t;
+
+/*SendHostMessage Reply message */
+typedef struct _MPI2_SEND_HOST_MESSAGE_REPLY {
+ U16 HostDataLength; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved1; /*0x04 */
+ U8 Reserved2; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SEND_HOST_MESSAGE_REPLY, *PTR_MPI2_SEND_HOST_MESSAGE_REPLY,
+ Mpi2SendHostMessageReply_t, *pMpi2SendHostMessageReply_t;
+
+/****************************************************************************
+* FWDownload message
+****************************************************************************/
+
+/*MPI v2.0 FWDownload Request message */
+typedef struct _MPI2_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_DOWNLOAD_REQUEST, *PTR_MPI2_FW_DOWNLOAD_REQUEST,
+ Mpi2FWDownloadRequest, *pMpi2FWDownloadRequest;
+
+#define MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT (0x01)
+
+#define MPI2_FW_DOWNLOAD_ITYPE_FW (0x01)
+#define MPI2_FW_DOWNLOAD_ITYPE_BIOS (0x02)
+#define MPI2_FW_DOWNLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_DOWNLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
+
+/*MPI v2.0 FWDownload TransactionContext Element */
+typedef struct _MPI2_FW_DOWNLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_DOWNLOAD_TCSGE, *PTR_MPI2_FW_DOWNLOAD_TCSGE,
+ Mpi2FWDownloadTCSGE_t, *pMpi2FWDownloadTCSGE_t;
+
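+/*
+ *Illustrative sketch (not part of the MPI specification): for MPI v2.0,
+ *each FWDownload segment is described by a TransactionContext element
+ *giving the byte offset and length of the chunk, and the request for the
+ *final chunk sets MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT. The
+ *DetailsLength of 12 assumes the transaction details span Reserved2,
+ *ImageOffset, and ImageSize.
+ */
+static inline void
+mpi2_fw_download_set_segment(PTR_MPI2_FW_DOWNLOAD_REQUEST req,
+	PTR_MPI2_FW_DOWNLOAD_TCSGE tcsge, U32 offset, U32 size, int last)
+{
+	tcsge->DetailsLength = 12;
+	tcsge->ImageOffset = offset;
+	tcsge->ImageSize = size;
+	if (last)
+		req->MsgFlags |= MPI2_FW_DOWNLOAD_MSGFLGS_LAST_SEGMENT;
+}
+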
+/*MPI v2.5 FWDownload Request message */
+typedef struct _MPI25_FW_DOWNLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 TotalImageSize; /*0x0C */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_DOWNLOAD_REQUEST, *PTR_MPI25_FW_DOWNLOAD_REQUEST,
+ Mpi25FWDownloadRequest, *pMpi25FWDownloadRequest;
+
+/*FWDownload Reply message */
+typedef struct _MPI2_FW_DOWNLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_FW_DOWNLOAD_REPLY, *PTR_MPI2_FW_DOWNLOAD_REPLY,
+ Mpi2FWDownloadReply_t, *pMpi2FWDownloadReply_t;
+
+/****************************************************************************
+* FWUpload message
+****************************************************************************/
+
+/*MPI v2.0 FWUpload Request message */
+typedef struct _MPI2_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ MPI2_MPI_SGE_UNION SGL; /*0x14 */
+} MPI2_FW_UPLOAD_REQUEST, *PTR_MPI2_FW_UPLOAD_REQUEST,
+ Mpi2FWUploadRequest_t, *pMpi2FWUploadRequest_t;
+
+#define MPI2_FW_UPLOAD_ITYPE_FW_CURRENT (0x00)
+#define MPI2_FW_UPLOAD_ITYPE_FW_FLASH (0x01)
+#define MPI2_FW_UPLOAD_ITYPE_BIOS_FLASH (0x02)
+#define MPI2_FW_UPLOAD_ITYPE_FW_BACKUP (0x05)
+#define MPI2_FW_UPLOAD_ITYPE_MANUFACTURING (0x06)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_1 (0x07)
+#define MPI2_FW_UPLOAD_ITYPE_CONFIG_2 (0x08)
+#define MPI2_FW_UPLOAD_ITYPE_MEGARAID (0x09)
+#define MPI2_FW_UPLOAD_ITYPE_COMPLETE (0x0A)
+#define MPI2_FW_UPLOAD_ITYPE_COMMON_BOOT_BLOCK (0x0B)
+
+/*MPI v2.0 FWUpload TransactionContext Element */
+typedef struct _MPI2_FW_UPLOAD_TCSGE {
+ U8 Reserved1; /*0x00 */
+ U8 ContextSize; /*0x01 */
+ U8 DetailsLength; /*0x02 */
+ U8 Flags; /*0x03 */
+ U32 Reserved2; /*0x04 */
+ U32 ImageOffset; /*0x08 */
+ U32 ImageSize; /*0x0C */
+} MPI2_FW_UPLOAD_TCSGE, *PTR_MPI2_FW_UPLOAD_TCSGE,
+ Mpi2FWUploadTCSGE_t, *pMpi2FWUploadTCSGE_t;
+
+/*MPI v2.5 FWUpload Request message */
+typedef struct _MPI25_FW_UPLOAD_REQUEST {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U32 Reserved7; /*0x14 */
+ U32 ImageOffset; /*0x18 */
+ U32 ImageSize; /*0x1C */
+ MPI25_SGE_IO_UNION SGL; /*0x20 */
+} MPI25_FW_UPLOAD_REQUEST, *PTR_MPI25_FW_UPLOAD_REQUEST,
+ Mpi25FWUploadRequest_t, *pMpi25FWUploadRequest_t;
+
+/*FWUpload Reply message */
+typedef struct _MPI2_FW_UPLOAD_REPLY {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ActualImageSize; /*0x14 */
+} MPI2_FW_UPLOAD_REPLY, *PTR_MPI2_FW_UPLOAD_REPLY,
+	Mpi2FWUploadReply_t, *pMpi2FWUploadReply_t;
+
+/*FW Image Header */
+typedef struct _MPI2_FW_IMAGE_HEADER {
+ U32 Signature; /*0x00 */
+ U32 Signature0; /*0x04 */
+ U32 Signature1; /*0x08 */
+ U32 Signature2; /*0x0C */
+ MPI2_VERSION_UNION MPIVersion; /*0x10 */
+ MPI2_VERSION_UNION FWVersion; /*0x14 */
+ MPI2_VERSION_UNION NVDATAVersion; /*0x18 */
+ MPI2_VERSION_UNION PackageVersion; /*0x1C */
+ U16 VendorID; /*0x20 */
+ U16 ProductID; /*0x22 */
+ U16 ProtocolFlags; /*0x24 */
+ U16 Reserved26; /*0x26 */
+ U32 IOCCapabilities; /*0x28 */
+ U32 ImageSize; /*0x2C */
+ U32 NextImageHeaderOffset; /*0x30 */
+ U32 Checksum; /*0x34 */
+ U32 Reserved38; /*0x38 */
+ U32 Reserved3C; /*0x3C */
+ U32 Reserved40; /*0x40 */
+ U32 Reserved44; /*0x44 */
+ U32 Reserved48; /*0x48 */
+ U32 Reserved4C; /*0x4C */
+ U32 Reserved50; /*0x50 */
+ U32 Reserved54; /*0x54 */
+ U32 Reserved58; /*0x58 */
+ U32 Reserved5C; /*0x5C */
+ U32 Reserved60; /*0x60 */
+ U32 FirmwareVersionNameWhat; /*0x64 */
+ U8 FirmwareVersionName[32]; /*0x68 */
+ U32 VendorNameWhat; /*0x88 */
+ U8 VendorName[32]; /*0x8C */
+	U32 PackageNameWhat;    /*0xAC */
+	U8 PackageName[32];     /*0xB0 */
+ U32 ReservedD0; /*0xD0 */
+ U32 ReservedD4; /*0xD4 */
+ U32 ReservedD8; /*0xD8 */
+ U32 ReservedDC; /*0xDC */
+ U32 ReservedE0; /*0xE0 */
+ U32 ReservedE4; /*0xE4 */
+ U32 ReservedE8; /*0xE8 */
+ U32 ReservedEC; /*0xEC */
+ U32 ReservedF0; /*0xF0 */
+ U32 ReservedF4; /*0xF4 */
+ U32 ReservedF8; /*0xF8 */
+ U32 ReservedFC; /*0xFC */
+} MPI2_FW_IMAGE_HEADER, *PTR_MPI2_FW_IMAGE_HEADER,
+ Mpi2FWImageHeader_t, *pMpi2FWImageHeader_t;
+
+/*Signature field */
+#define MPI2_FW_HEADER_SIGNATURE_OFFSET (0x00)
+#define MPI2_FW_HEADER_SIGNATURE_MASK (0xFF000000)
+#define MPI2_FW_HEADER_SIGNATURE (0xEA000000)
+
+/*Signature0 field */
+#define MPI2_FW_HEADER_SIGNATURE0_OFFSET (0x04)
+#define MPI2_FW_HEADER_SIGNATURE0 (0x5AFAA55A)
+
+/*Signature1 field */
+#define MPI2_FW_HEADER_SIGNATURE1_OFFSET (0x08)
+#define MPI2_FW_HEADER_SIGNATURE1 (0xA55AFAA5)
+
+/*Signature2 field */
+#define MPI2_FW_HEADER_SIGNATURE2_OFFSET (0x0C)
+#define MPI2_FW_HEADER_SIGNATURE2 (0x5AA55AFA)
+
+/*defines for using the ProductID field */
+#define MPI2_FW_HEADER_PID_TYPE_MASK (0xF000)
+#define MPI2_FW_HEADER_PID_TYPE_SAS (0x2000)
+
+#define MPI2_FW_HEADER_PID_PROD_MASK (0x0F00)
+#define MPI2_FW_HEADER_PID_PROD_A (0x0000)
+#define MPI2_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI (0x0200)
+#define MPI2_FW_HEADER_PID_PROD_IR_SCSI (0x0700)
+
+#define MPI2_FW_HEADER_PID_FAMILY_MASK (0x00FF)
+/*SAS ProductID Family bits */
+#define MPI2_FW_HEADER_PID_FAMILY_2108_SAS (0x0013)
+#define MPI2_FW_HEADER_PID_FAMILY_2208_SAS (0x0014)
+#define MPI25_FW_HEADER_PID_FAMILY_3108_SAS (0x0021)
+
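+/*
+ *Illustrative sketch (not part of the MPI specification): ProductID is a
+ *packed bit field; type, product, and family are extracted with the
+ *masks above.
+ */
+static inline int
+mpi2_product_is_sas(U16 product_id)
+{
+	return (product_id & MPI2_FW_HEADER_PID_TYPE_MASK) ==
+		MPI2_FW_HEADER_PID_TYPE_SAS;
+}
+
+static inline U16
+mpi2_product_family(U16 product_id)
+{
+	return product_id & MPI2_FW_HEADER_PID_FAMILY_MASK;
+}
+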
+/*use MPI2_IOCFACTS_PROTOCOL_ defines for ProtocolFlags field */
+
+/*use MPI2_IOCFACTS_CAPABILITY_ defines for IOCCapabilities field */
+
+#define MPI2_FW_HEADER_IMAGESIZE_OFFSET (0x2C)
+#define MPI2_FW_HEADER_NEXTIMAGE_OFFSET (0x30)
+#define MPI2_FW_HEADER_VERNMHWAT_OFFSET (0x64)
+
+#define MPI2_FW_HEADER_WHAT_SIGNATURE (0x29232840)
+
+#define MPI2_FW_HEADER_SIZE (0x100)
+
+/*Extended Image Header */
+typedef struct _MPI2_EXT_IMAGE_HEADER {
+ U8 ImageType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 Checksum; /*0x04 */
+ U32 ImageSize; /*0x08 */
+ U32 NextImageHeaderOffset; /*0x0C */
+ U32 PackageVersion; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U32 Reserved4; /*0x18 */
+ U32 Reserved5; /*0x1C */
+ U8 IdentifyString[32]; /*0x20 */
+} MPI2_EXT_IMAGE_HEADER, *PTR_MPI2_EXT_IMAGE_HEADER,
+ Mpi2ExtImageHeader_t, *pMpi2ExtImageHeader_t;
+
+/*useful offsets */
+#define MPI2_EXT_IMAGE_IMAGETYPE_OFFSET (0x00)
+#define MPI2_EXT_IMAGE_IMAGESIZE_OFFSET (0x08)
+#define MPI2_EXT_IMAGE_NEXTIMAGE_OFFSET (0x0C)
+
+#define MPI2_EXT_IMAGE_HEADER_SIZE (0x40)
+
+/*defines for the ImageType field */
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID (0x08)
+#define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+#define MPI2_EXT_IMAGE_TYPE_MAX (MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC)
+
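+/*
+ *Illustrative sketch (not part of the MPI specification): extended
+ *images form a chain. The FW image header's NextImageHeaderOffset
+ *(assumed here to be a byte offset from the start of the flash image)
+ *locates the first extended header, and each extended header points to
+ *the next; an offset of zero ends the chain. Bounds checking against the
+ *overall image size is omitted for brevity.
+ */
+static inline PTR_MPI2_EXT_IMAGE_HEADER
+mpi2_next_ext_image(U8 *image, U32 next_offset)
+{
+	if (next_offset == 0)
+		return (PTR_MPI2_EXT_IMAGE_HEADER)0;
+	return (PTR_MPI2_EXT_IMAGE_HEADER)(image + next_offset);
+}
+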
+/*FLASH Layout Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check RegionsPerLayout at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_REGIONS
+#define MPI2_FLASH_NUMBER_OF_REGIONS (1)
+#endif
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfLayouts at runtime.
+ */
+#ifndef MPI2_FLASH_NUMBER_OF_LAYOUTS
+#define MPI2_FLASH_NUMBER_OF_LAYOUTS (1)
+#endif
+
+typedef struct _MPI2_FLASH_REGION {
+ U8 RegionType; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 RegionOffset; /*0x04 */
+ U32 RegionSize; /*0x08 */
+ U32 Reserved3; /*0x0C */
+} MPI2_FLASH_REGION, *PTR_MPI2_FLASH_REGION,
+ Mpi2FlashRegion_t, *pMpi2FlashRegion_t;
+
+typedef struct _MPI2_FLASH_LAYOUT {
+ U32 FlashSize; /*0x00 */
+ U32 Reserved1; /*0x04 */
+ U32 Reserved2; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ MPI2_FLASH_REGION Region[MPI2_FLASH_NUMBER_OF_REGIONS]; /*0x10 */
+} MPI2_FLASH_LAYOUT, *PTR_MPI2_FLASH_LAYOUT,
+ Mpi2FlashLayout_t, *pMpi2FlashLayout_t;
+
+typedef struct _MPI2_FLASH_LAYOUT_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 SizeOfRegion; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U16 NumberOfLayouts; /*0x04 */
+ U16 RegionsPerLayout; /*0x06 */
+ U16 MinimumSectorAlignment; /*0x08 */
+ U16 Reserved3; /*0x0A */
+ U32 Reserved4; /*0x0C */
+ MPI2_FLASH_LAYOUT Layout[MPI2_FLASH_NUMBER_OF_LAYOUTS]; /*0x10 */
+} MPI2_FLASH_LAYOUT_DATA, *PTR_MPI2_FLASH_LAYOUT_DATA,
+ Mpi2FlashLayoutData_t, *pMpi2FlashLayoutData_t;
+
+/*defines for the RegionType field */
+#define MPI2_FLASH_REGION_UNUSED (0x00)
+#define MPI2_FLASH_REGION_FIRMWARE (0x01)
+#define MPI2_FLASH_REGION_BIOS (0x02)
+#define MPI2_FLASH_REGION_NVDATA (0x03)
+#define MPI2_FLASH_REGION_FIRMWARE_BACKUP (0x05)
+#define MPI2_FLASH_REGION_MFG_INFORMATION (0x06)
+#define MPI2_FLASH_REGION_CONFIG_1 (0x07)
+#define MPI2_FLASH_REGION_CONFIG_2 (0x08)
+#define MPI2_FLASH_REGION_MEGARAID (0x09)
+#define MPI2_FLASH_REGION_INIT (0x0A)
+
+/*ImageRevision */
+#define MPI2_FLASH_LAYOUT_IMAGE_REVISION (0x00)
+
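+/*
+ *Illustrative sketch (not part of the MPI specification): as with the
+ *other variable-length structures here, the declared array bounds are
+ *one, so hosts must use NumberOfLayouts and RegionsPerLayout at runtime
+ *(a fully general walker would also step by SizeOfRegion rather than by
+ *sizeof(MPI2_FLASH_REGION)).
+ */
+static inline PTR_MPI2_FLASH_REGION
+mpi2_find_flash_region(PTR_MPI2_FLASH_LAYOUT layout,
+	U16 regions_per_layout, U8 region_type)
+{
+	U16 i;
+
+	for (i = 0; i < regions_per_layout; i++)
+		if (layout->Region[i].RegionType == region_type)
+			return &layout->Region[i];
+	return (PTR_MPI2_FLASH_REGION)0;
+}
+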
+/*Supported Devices Extended Image Data */
+
+/*
+ *Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ *one and check NumberOfDevices at runtime.
+ */
+#ifndef MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES
+#define MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES (1)
+#endif
+
+typedef struct _MPI2_SUPPORTED_DEVICE {
+ U16 DeviceID; /*0x00 */
+ U16 VendorID; /*0x02 */
+ U16 DeviceIDMask; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U8 LowPCIRev; /*0x08 */
+ U8 HighPCIRev; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+} MPI2_SUPPORTED_DEVICE, *PTR_MPI2_SUPPORTED_DEVICE,
+ Mpi2SupportedDevice_t, *pMpi2SupportedDevice_t;
+
+typedef struct _MPI2_SUPPORTED_DEVICES_DATA {
+ U8 ImageRevision; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 NumberOfDevices; /*0x02 */
+ U8 Reserved2; /*0x03 */
+ U32 Reserved3; /*0x04 */
+ MPI2_SUPPORTED_DEVICE
+ SupportedDevice[MPI2_SUPPORTED_DEVICES_IMAGE_NUM_DEVICES];/*0x08 */
+} MPI2_SUPPORTED_DEVICES_DATA, *PTR_MPI2_SUPPORTED_DEVICES_DATA,
+ Mpi2SupportedDevicesData_t, *pMpi2SupportedDevicesData_t;
+
+/*ImageRevision */
+#define MPI2_SUPPORTED_DEVICES_IMAGE_REVISION (0x00)
+
+/*Init Extended Image Data */
+
+typedef struct _MPI2_INIT_IMAGE_FOOTER {
+ U32 BootFlags; /*0x00 */
+ U32 ImageSize; /*0x04 */
+ U32 Signature0; /*0x08 */
+ U32 Signature1; /*0x0C */
+ U32 Signature2; /*0x10 */
+ U32 ResetVector; /*0x14 */
+} MPI2_INIT_IMAGE_FOOTER, *PTR_MPI2_INIT_IMAGE_FOOTER,
+ Mpi2InitImageFooter_t, *pMpi2InitImageFooter_t;
+
+/*defines for the BootFlags field */
+#define MPI2_INIT_IMAGE_BOOTFLAGS_OFFSET (0x00)
+
+/*defines for the ImageSize field */
+#define MPI2_INIT_IMAGE_IMAGESIZE_OFFSET (0x04)
+
+/*defines for the Signature0 field */
+#define MPI2_INIT_IMAGE_SIGNATURE0_OFFSET (0x08)
+#define MPI2_INIT_IMAGE_SIGNATURE0 (0x5AA55AEA)
+
+/*defines for the Signature1 field */
+#define MPI2_INIT_IMAGE_SIGNATURE1_OFFSET (0x0C)
+#define MPI2_INIT_IMAGE_SIGNATURE1 (0xA55AEAA5)
+
+/*defines for the Signature2 field */
+#define MPI2_INIT_IMAGE_SIGNATURE2_OFFSET (0x10)
+#define MPI2_INIT_IMAGE_SIGNATURE2 (0x5AEAA55A)
+
+/*Signature fields as individual bytes */
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_0 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_1 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_2 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_3 (0x5A)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_4 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_5 (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_6 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_7 (0xA5)
+
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_8 (0x5A)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_9 (0xA5)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_A (0xEA)
+#define MPI2_INIT_IMAGE_SIGNATURE_BYTE_B (0x5A)
+
+/*defines for the ResetVector field */
+#define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET (0x14)
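
The byte defines above are simply the little-endian byte view of the three signature words (0xEA, 0x5A, 0xA5, 0x5A is 0x5AA55AEA stored little-endian, and so on), so a footer can be validated either way. A sketch using the word defines (example_init_footer_valid is a hypothetical helper):

static bool example_init_footer_valid(MPI2_INIT_IMAGE_FOOTER *f)
{
	return le32_to_cpu(f->Signature0) == MPI2_INIT_IMAGE_SIGNATURE0 &&
	       le32_to_cpu(f->Signature1) == MPI2_INIT_IMAGE_SIGNATURE1 &&
	       le32_to_cpu(f->Signature2) == MPI2_INIT_IMAGE_SIGNATURE2;
}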
+
+/****************************************************************************
+* PowerManagementControl message
+****************************************************************************/
+
+/*PowerManagementControl Request message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REQUEST {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Parameter1; /*0x0C */
+ U8 Parameter2; /*0x0D */
+ U8 Parameter3; /*0x0E */
+ U8 Parameter4; /*0x0F */
+ U32 Reserved5; /*0x10 */
+ U32 Reserved6; /*0x14 */
+} MPI2_PWR_MGMT_CONTROL_REQUEST, *PTR_MPI2_PWR_MGMT_CONTROL_REQUEST,
+ Mpi2PwrMgmtControlRequest_t, *pMpi2PwrMgmtControlRequest_t;
+
+/*defines for the Feature field */
+#define MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND (0x01)
+#define MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION (0x02)
+#define MPI2_PM_CONTROL_FEATURE_PCIE_LINK (0x03) /*obsolete */
+#define MPI2_PM_CONTROL_FEATURE_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE (0x05)
+#define MPI2_PM_CONTROL_FEATURE_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_PM_CONTROL_FEATURE_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND Feature */
+/*Parameter1 contains a PHY number */
+/*Parameter2 indicates power condition action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_PARTIAL (0x01)
+#define MPI2_PM_CONTROL_PARAM2_SLUMBER (0x02)
+#define MPI2_PM_CONTROL_PARAM2_EXIT_PWR_MGMT (0x03)
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PORT_WIDTH_MODULATION
+ * Feature */
+/*Parameter1 contains SAS port width modulation group number */
+/*Parameter2 indicates IOC action using these defines */
+#define MPI2_PM_CONTROL_PARAM2_REQUEST_OWNERSHIP (0x01)
+#define MPI2_PM_CONTROL_PARAM2_CHANGE_MODULATION (0x02)
+#define MPI2_PM_CONTROL_PARAM2_RELINQUISH_OWNERSHIP (0x03)
+/*Parameter3 indicates desired modulation level using these defines */
+#define MPI2_PM_CONTROL_PARAM3_25_PERCENT (0x00)
+#define MPI2_PM_CONTROL_PARAM3_50_PERCENT (0x01)
+#define MPI2_PM_CONTROL_PARAM3_75_PERCENT (0x02)
+#define MPI2_PM_CONTROL_PARAM3_100_PERCENT (0x03)
+/*Parameter4 is reserved */
+
+/*this next set (_PCIE_LINK) is obsolete */
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_PCIE_LINK Feature */
+/*Parameter1 indicates desired PCIe link speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_2_5_GBPS (0x00) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_5_0_GBPS (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM1_PCIE_8_0_GBPS (0x02) /*obsolete */
+/*Parameter2 indicates desired PCIe link width using these defines */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X1 (0x01) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X2 (0x02) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X4 (0x04) /*obsolete */
+#define MPI2_PM_CONTROL_PARAM2_WIDTH_X8 (0x08) /*obsolete */
+/*Parameter3 and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_IOC_SPEED Feature */
+/*Parameter1 indicates desired IOC hardware clock speed using these defines */
+#define MPI2_PM_CONTROL_PARAM1_FULL_IOC_SPEED (0x01)
+#define MPI2_PM_CONTROL_PARAM1_HALF_IOC_SPEED (0x02)
+#define MPI2_PM_CONTROL_PARAM1_QUARTER_IOC_SPEED (0x04)
+#define MPI2_PM_CONTROL_PARAM1_EIGHTH_IOC_SPEED (0x08)
+/*Parameter2, Parameter3, and Parameter4 are reserved */
+
+/*parameter usage for the MPI2_PM_CONTROL_FEATURE_GLOBAL_PWR_MGMT_MODE Feature*/
+/*Parameter1 indicates host action regarding global power management mode */
+#define MPI2_PM_CONTROL_PARAM1_TAKE_CONTROL (0x01)
+#define MPI2_PM_CONTROL_PARAM1_CHANGE_GLOBAL_MODE (0x02)
+#define MPI2_PM_CONTROL_PARAM1_RELEASE_CONTROL (0x03)
+/*Parameter2 indicates the requested global power management mode */
+#define MPI2_PM_CONTROL_PARAM2_FULL_PWR_PERF (0x01)
+#define MPI2_PM_CONTROL_PARAM2_REDUCED_PWR_PERF (0x08)
+#define MPI2_PM_CONTROL_PARAM2_STANDBY (0x40)
+/*Parameter3 and Parameter4 are reserved */
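
A sketch of filling the request for the direct-attached PHY power condition feature, putting one PHY into the SATA partial state. MPI2_FUNCTION_PWR_MGMT_CONTROL comes from mpi2.h; how the frame is posted to the IOC is adapter-specific and omitted (example_build_pm_request is a hypothetical helper):

static void example_build_pm_request(MPI2_PWR_MGMT_CONTROL_REQUEST *req,
	u8 phy_num)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_PWR_MGMT_CONTROL;
	req->Feature = MPI2_PM_CONTROL_FEATURE_DA_PHY_POWER_COND;
	req->Parameter1 = phy_num;		/* PHY number */
	req->Parameter2 = MPI2_PM_CONTROL_PARAM2_PARTIAL;
}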
+
+/*PowerManagementControl Reply message */
+typedef struct _MPI2_PWR_MGMT_CONTROL_REPLY {
+ U8 Feature; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_PWR_MGMT_CONTROL_REPLY, *PTR_MPI2_PWR_MGMT_CONTROL_REPLY,
+ Mpi2PwrMgmtControlReply_t, *pMpi2PwrMgmtControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
new file mode 100644
index 000000000000..d1d9866cf300
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_raid.h
+ * Title: MPI Integrated RAID messages and structures
+ * Creation Date: April 26, 2007
+ *
+ * mpi2_raid.h Version: 02.00.08
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 08-31-07 02.00.01 Modifications to RAID Action request and reply,
+ * including the Actions and ActionData.
+ * 02-29-08 02.00.02 Added MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD.
+ * 05-21-08 02.00.03 Added MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS so that
+ * the PhysDisk array in MPI2_RAID_VOLUME_CREATION_STRUCT
+ * can be sized by the build environment.
+ * 07-30-09 02.00.04 Added proper define for the Use Default Settings bit of
+ * VolumeCreationFlags and marked the old one as obsolete.
+ * 05-12-10 02.00.05 Added MPI2_RAID_VOL_FLAGS_OP_MDC define.
+ * 08-24-10 02.00.06 Added MPI2_RAID_ACTION_COMPATIBILITY_CHECK along with
+ * related structures and defines.
+ * Added product-specific range to RAID Action values.
+ * 11-18-11 02.00.07 Incorporating additions for MPI v2.5.
+ * 02-06-12 02.00.08 Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_RAID_H
+#define MPI2_RAID_H
+
+/*****************************************************************************
+*
+* Integrated RAID Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* RAID Action messages
+****************************************************************************/
+
+/*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
+#define MPI2_RAID_ACTION_ADATA_KEEP_LBA0 (0x00000000)
+#define MPI2_RAID_ACTION_ADATA_ZERO_LBA0 (0x00000001)
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES action */
+#define MPI2_RAID_ACTION_ADATA_DISABL_FULL_REBUILD (0x00000001)
+
+/*ActionDataWord for MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE Action */
+typedef struct _MPI2_RAID_ACTION_RATE_DATA {
+ U8 RateToChange; /*0x00 */
+ U8 RateOrMode; /*0x01 */
+ U16 DataScrubDuration; /*0x02 */
+} MPI2_RAID_ACTION_RATE_DATA, *PTR_MPI2_RAID_ACTION_RATE_DATA,
+ Mpi2RaidActionRateData_t, *pMpi2RaidActionRateData_t;
+
+#define MPI2_RAID_ACTION_SET_RATE_RESYNC (0x00)
+#define MPI2_RAID_ACTION_SET_RATE_DATA_SCRUB (0x01)
+#define MPI2_RAID_ACTION_SET_RATE_POWERSAVE_MODE (0x02)
+
+/*ActionDataWord for MPI2_RAID_ACTION_START_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_START_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_START_RAID_FUNCTION,
+ Mpi2RaidActionStartRaidFunction_t,
+ *pMpi2RaidActionStartRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_START_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_START_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_START_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_START_NEW (0x00)
+#define MPI2_RAID_ACTION_START_RESUME (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_STOP_RAID_FUNCTION Action */
+typedef struct _MPI2_RAID_ACTION_STOP_RAID_FUNCTION {
+ U8 RAIDFunction; /*0x00 */
+ U8 Flags; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ *PTR_MPI2_RAID_ACTION_STOP_RAID_FUNCTION,
+ Mpi2RaidActionStopRaidFunction_t,
+ *pMpi2RaidActionStopRaidFunction_t;
+
+/*defines for the RAIDFunction field */
+#define MPI2_RAID_ACTION_STOP_BACKGROUND_INIT (0x00)
+#define MPI2_RAID_ACTION_STOP_ONLINE_CAP_EXPANSION (0x01)
+#define MPI2_RAID_ACTION_STOP_CONSISTENCY_CHECK (0x02)
+
+/*defines for the Flags field */
+#define MPI2_RAID_ACTION_STOP_ABORT (0x00)
+#define MPI2_RAID_ACTION_STOP_PAUSE (0x01)
+
+/*ActionDataWord for MPI2_RAID_ACTION_CREATE_HOT_SPARE Action */
+typedef struct _MPI2_RAID_ACTION_HOT_SPARE {
+ U8 HotSparePool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 DevHandle; /*0x02 */
+} MPI2_RAID_ACTION_HOT_SPARE, *PTR_MPI2_RAID_ACTION_HOT_SPARE,
+ Mpi2RaidActionHotSpare_t, *pMpi2RaidActionHotSpare_t;
+
+/*ActionDataWord for MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE Action */
+typedef struct _MPI2_RAID_ACTION_FW_UPDATE_MODE {
+ U8 Flags; /*0x00 */
+ U8 DeviceFirmwareUpdateModeTimeout; /*0x01 */
+ U16 Reserved1; /*0x02 */
+} MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ *PTR_MPI2_RAID_ACTION_FW_UPDATE_MODE,
+ Mpi2RaidActionFwUpdateMode_t,
+ *pMpi2RaidActionFwUpdateMode_t;
+
+/*ActionDataWord defines for use with
+ *MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE action */
+#define MPI2_RAID_ACTION_ADATA_DISABLE_FW_UPDATE (0x00)
+#define MPI2_RAID_ACTION_ADATA_ENABLE_FW_UPDATE (0x01)
+
+typedef union _MPI2_RAID_ACTION_DATA {
+ U32 Word;
+ MPI2_RAID_ACTION_RATE_DATA Rates;
+ MPI2_RAID_ACTION_START_RAID_FUNCTION StartRaidFunction;
+ MPI2_RAID_ACTION_STOP_RAID_FUNCTION StopRaidFunction;
+ MPI2_RAID_ACTION_HOT_SPARE HotSpare;
+ MPI2_RAID_ACTION_FW_UPDATE_MODE FwUpdateMode;
+} MPI2_RAID_ACTION_DATA, *PTR_MPI2_RAID_ACTION_DATA,
+ Mpi2RaidActionData_t, *pMpi2RaidActionData_t;
+
+/*RAID Action Request Message */
+typedef struct _MPI2_RAID_ACTION_REQUEST {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U32 Reserved3; /*0x0C */
+ MPI2_RAID_ACTION_DATA ActionDataWord; /*0x10 */
+ MPI2_SGE_SIMPLE_UNION ActionDataSGE; /*0x14 */
+} MPI2_RAID_ACTION_REQUEST, *PTR_MPI2_RAID_ACTION_REQUEST,
+ Mpi2RaidActionRequest_t, *pMpi2RaidActionRequest_t;
+
+/*RAID Action request Action values */
+
+#define MPI2_RAID_ACTION_INDICATOR_STRUCT (0x01)
+#define MPI2_RAID_ACTION_CREATE_VOLUME (0x02)
+#define MPI2_RAID_ACTION_DELETE_VOLUME (0x03)
+#define MPI2_RAID_ACTION_DISABLE_ALL_VOLUMES (0x04)
+#define MPI2_RAID_ACTION_ENABLE_ALL_VOLUMES (0x05)
+#define MPI2_RAID_ACTION_PHYSDISK_OFFLINE (0x0A)
+#define MPI2_RAID_ACTION_PHYSDISK_ONLINE (0x0B)
+#define MPI2_RAID_ACTION_FAIL_PHYSDISK (0x0F)
+#define MPI2_RAID_ACTION_ACTIVATE_VOLUME (0x11)
+#define MPI2_RAID_ACTION_DEVICE_FW_UPDATE_MODE (0x15)
+#define MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE (0x17)
+#define MPI2_RAID_ACTION_SET_VOLUME_NAME (0x18)
+#define MPI2_RAID_ACTION_SET_RAID_FUNCTION_RATE (0x19)
+#define MPI2_RAID_ACTION_ENABLE_FAILED_VOLUME (0x1C)
+#define MPI2_RAID_ACTION_CREATE_HOT_SPARE (0x1D)
+#define MPI2_RAID_ACTION_DELETE_HOT_SPARE (0x1E)
+#define MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED (0x20)
+#define MPI2_RAID_ACTION_START_RAID_FUNCTION (0x21)
+#define MPI2_RAID_ACTION_STOP_RAID_FUNCTION (0x22)
+#define MPI2_RAID_ACTION_COMPATIBILITY_CHECK (0x23)
+#define MPI2_RAID_ACTION_PHYSDISK_HIDDEN (0x24)
+#define MPI2_RAID_ACTION_MIN_PRODUCT_SPECIFIC (0x80)
+#define MPI2_RAID_ACTION_MAX_PRODUCT_SPECIFIC (0xFF)
+
+/*RAID Volume Creation Structure */
+
+/*
+ *The following define can be customized for the targeted product.
+ */
+#ifndef MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS
+#define MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS (1)
+#endif
+
+typedef struct _MPI2_RAID_VOLUME_PHYSDISK {
+ U8 RAIDSetNum; /*0x00 */
+ U8 PhysDiskMap; /*0x01 */
+ U16 PhysDiskDevHandle; /*0x02 */
+} MPI2_RAID_VOLUME_PHYSDISK, *PTR_MPI2_RAID_VOLUME_PHYSDISK,
+ Mpi2RaidVolumePhysDisk_t, *pMpi2RaidVolumePhysDisk_t;
+
+/*defines for the PhysDiskMap field */
+#define MPI2_RAIDACTION_PHYSDISK_PRIMARY (0x01)
+#define MPI2_RAIDACTION_PHYSDISK_SECONDARY (0x02)
+
+typedef struct _MPI2_RAID_VOLUME_CREATION_STRUCT {
+ U8 NumPhysDisks; /*0x00 */
+ U8 VolumeType; /*0x01 */
+ U16 Reserved1; /*0x02 */
+ U32 VolumeCreationFlags; /*0x04 */
+ U32 VolumeSettings; /*0x08 */
+ U8 Reserved2; /*0x0C */
+ U8 ResyncRate; /*0x0D */
+ U16 DataScrubDuration; /*0x0E */
+ U64 VolumeMaxLBA; /*0x10 */
+ U32 StripeSize; /*0x18 */
+ U8 Name[16]; /*0x1C */
+ MPI2_RAID_VOLUME_PHYSDISK
+ PhysDisk[MPI2_RAID_VOL_CREATION_NUM_PHYSDISKS]; /*0x2C */
+} MPI2_RAID_VOLUME_CREATION_STRUCT,
+ *PTR_MPI2_RAID_VOLUME_CREATION_STRUCT,
+ Mpi2RaidVolumeCreationStruct_t,
+ *pMpi2RaidVolumeCreationStruct_t;
+
+/*use MPI2_RAID_VOL_TYPE_ defines from mpi2_cnfg.h for VolumeType */
+
+/*defines for the VolumeCreationFlags field */
+#define MPI2_RAID_VOL_CREATION_DEFAULT_SETTINGS (0x80000000)
+#define MPI2_RAID_VOL_CREATION_BACKGROUND_INIT (0x00000004)
+#define MPI2_RAID_VOL_CREATION_LOW_LEVEL_INIT (0x00000002)
+#define MPI2_RAID_VOL_CREATION_MIGRATE_DATA (0x00000001)
+/*The following is an obsolete define.
+ *It must be shifted left 24 bits in order to set the proper bit.
+ */
+#define MPI2_RAID_VOL_CREATION_USE_DEFAULT_SETTINGS (0x80)
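
Because PhysDisk[] is sized by a build-time define of one, creating a volume over several disks means allocating the structure with the real count and sending it through the MPI2_RAID_ACTION_CREATE_VOLUME action's SGE. An allocation sketch (example_alloc_volume_creation is a hypothetical helper):

static MPI2_RAID_VOLUME_CREATION_STRUCT *
example_alloc_volume_creation(u8 num_disks)
{
	size_t sz = sizeof(MPI2_RAID_VOLUME_CREATION_STRUCT) +
		(num_disks - 1) * sizeof(MPI2_RAID_VOLUME_PHYSDISK);
	MPI2_RAID_VOLUME_CREATION_STRUCT *vcs = kzalloc(sz, GFP_KERNEL);

	if (vcs)
		vcs->NumPhysDisks = num_disks;
	return vcs;
}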
+
+/*RAID Online Capacity Expansion Structure */
+
+typedef struct _MPI2_RAID_ONLINE_CAPACITY_EXPANSION {
+ U32 Flags; /*0x00 */
+ U16 DevHandle0; /*0x04 */
+ U16 Reserved1; /*0x06 */
+ U16 DevHandle1; /*0x08 */
+ U16 Reserved2; /*0x0A */
+} MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ *PTR_MPI2_RAID_ONLINE_CAPACITY_EXPANSION,
+ Mpi2RaidOnlineCapacityExpansion_t,
+ *pMpi2RaidOnlineCapacityExpansion_t;
+
+/*RAID Compatibility Input Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_INPUT_STRUCT {
+ U16 SourceDevHandle; /*0x00 */
+ U16 CandidateDevHandle; /*0x02 */
+ U32 Flags; /*0x04 */
+ U32 Reserved1; /*0x08 */
+ U32 Reserved2; /*0x0C */
+} MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_INPUT_STRUCT,
+ Mpi2RaidCompatibilityInputStruct_t,
+ *pMpi2RaidCompatibilityInputStruct_t;
+
+/*defines for RAID Compatibility Structure Flags field */
+#define MPI2_RAID_COMPAT_SOURCE_IS_VOLUME_FLAG (0x00000002)
+#define MPI2_RAID_COMPAT_REPORT_SOURCE_INFO_FLAG (0x00000001)
+
+/*RAID Volume Indicator Structure */
+
+typedef struct _MPI2_RAID_VOL_INDICATOR {
+ U64 TotalBlocks; /*0x00 */
+ U64 BlocksRemaining; /*0x08 */
+ U32 Flags; /*0x10 */
+} MPI2_RAID_VOL_INDICATOR, *PTR_MPI2_RAID_VOL_INDICATOR,
+ Mpi2RaidVolIndicator_t, *pMpi2RaidVolIndicator_t;
+
+/*defines for RAID Volume Indicator Flags field */
+#define MPI2_RAID_VOL_FLAGS_OP_MASK (0x0000000F)
+#define MPI2_RAID_VOL_FLAGS_OP_BACKGROUND_INIT (0x00000000)
+#define MPI2_RAID_VOL_FLAGS_OP_ONLINE_CAP_EXPANSION (0x00000001)
+#define MPI2_RAID_VOL_FLAGS_OP_CONSISTENCY_CHECK (0x00000002)
+#define MPI2_RAID_VOL_FLAGS_OP_RESYNC (0x00000003)
+#define MPI2_RAID_VOL_FLAGS_OP_MDC (0x00000004)
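
A sketch of turning a volume indicator (as returned for MPI2_RAID_ACTION_INDICATOR_STRUCT) into a percent-complete figure (example_vol_progress is a hypothetical helper; div64_u64 is from linux/math64.h):

static u8 example_vol_progress(MPI2_RAID_VOL_INDICATOR *ind)
{
	u64 total = le64_to_cpu(ind->TotalBlocks);
	u64 left = le64_to_cpu(ind->BlocksRemaining);

	if (!total)
		return 0;
	return div64_u64((total - left) * 100, total);
}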
+
+/*RAID Compatibility Result Structure */
+
+typedef struct _MPI2_RAID_COMPATIBILITY_RESULT_STRUCT {
+ U8 State; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U16 Reserved2; /*0x02 */
+ U32 GenericAttributes; /*0x04 */
+ U32 OEMSpecificAttributes; /*0x08 */
+ U32 Reserved3; /*0x0C */
+ U32 Reserved4; /*0x10 */
+} MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ *PTR_MPI2_RAID_COMPATIBILITY_RESULT_STRUCT,
+ Mpi2RaidCompatibilityResultStruct_t,
+ *pMpi2RaidCompatibilityResultStruct_t;
+
+/*defines for RAID Compatibility Result Structure State field */
+#define MPI2_RAID_COMPAT_STATE_COMPATIBLE (0x00)
+#define MPI2_RAID_COMPAT_STATE_NOT_COMPATIBLE (0x01)
+
+/*defines for RAID Compatibility Result Structure GenericAttributes field */
+#define MPI2_RAID_COMPAT_GENATTRIB_4K_SECTOR (0x00000010)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_MEDIA_MASK (0x0000000C)
+#define MPI2_RAID_COMPAT_GENATTRIB_SOLID_STATE_DRIVE (0x00000008)
+#define MPI2_RAID_COMPAT_GENATTRIB_HARD_DISK_DRIVE (0x00000004)
+
+#define MPI2_RAID_COMPAT_GENATTRIB_PROTOCOL_MASK (0x00000003)
+#define MPI2_RAID_COMPAT_GENATTRIB_SAS_PROTOCOL (0x00000002)
+#define MPI2_RAID_COMPAT_GENATTRIB_SATA_PROTOCOL (0x00000001)
+
+/*RAID Action Reply ActionData union */
+typedef union _MPI2_RAID_ACTION_REPLY_DATA {
+ U32 Word[5];
+ MPI2_RAID_VOL_INDICATOR RaidVolumeIndicator;
+ U16 VolDevHandle;
+ U8 VolumeState;
+ U8 PhysDiskNum;
+ MPI2_RAID_COMPATIBILITY_RESULT_STRUCT RaidCompatibilityResult;
+} MPI2_RAID_ACTION_REPLY_DATA, *PTR_MPI2_RAID_ACTION_REPLY_DATA,
+ Mpi2RaidActionReplyData_t, *pMpi2RaidActionReplyData_t;
+
+/*use MPI2_RAIDVOL0_SETTING_ defines from mpi2_cnfg.h for
+ *MPI2_RAID_ACTION_CHANGE_VOL_WRITE_CACHE action */
+
+/*RAID Action Reply Message */
+typedef struct _MPI2_RAID_ACTION_REPLY {
+ U8 Action; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 VolDevHandle; /*0x04 */
+ U8 PhysDiskNum; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved2; /*0x0A */
+ U16 Reserved3; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ MPI2_RAID_ACTION_REPLY_DATA ActionData; /*0x14 */
+} MPI2_RAID_ACTION_REPLY, *PTR_MPI2_RAID_ACTION_REPLY,
+ Mpi2RaidActionReply_t, *pMpi2RaidActionReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
new file mode 100644
index 000000000000..b4e7084aba31
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -0,0 +1,295 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_sas.h
+ * Title: MPI Serial Attached SCSI structures and definitions
+ * Creation Date: February 9, 2007
+ *
+ * mpi2_sas.h Version: 02.00.07
+ *
+ * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
+ * prefix are for use only on MPI v2.5 products, and must not be used
+ * with MPI v2.0 products. Unless otherwise noted, names beginning with
+ * MPI2 or Mpi2 are for use with both MPI v2.0 and MPI v2.5 products.
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 06-26-07 02.00.01 Added Clear All Persistent Operation to SAS IO Unit
+ * Control Request.
+ * 10-02-08 02.00.02 Added Set IOC Parameter Operation to SAS IO Unit Control
+ * Request.
+ * 10-28-09 02.00.03 Changed the type of SGL in MPI2_SATA_PASSTHROUGH_REQUEST
+ * to MPI2_SGE_IO_UNION since it supports chained SGLs.
+ * 05-12-10 02.00.04 Modified some comments.
+ * 08-11-10 02.00.05 Added NCQ operations to SAS IO Unit Control.
+ * 11-18-11 02.00.06 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.07 Added MPI2_SATA_PT_SGE_UNION for use in the SATA
+ * Passthrough Request message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_SAS_H
+#define MPI2_SAS_H
+
+/*
+ *Values for SASStatus.
+ */
+#define MPI2_SASSTATUS_SUCCESS (0x00)
+#define MPI2_SASSTATUS_UNKNOWN_ERROR (0x01)
+#define MPI2_SASSTATUS_INVALID_FRAME (0x02)
+#define MPI2_SASSTATUS_UTC_BAD_DEST (0x03)
+#define MPI2_SASSTATUS_UTC_BREAK_RECEIVED (0x04)
+#define MPI2_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED (0x05)
+#define MPI2_SASSTATUS_UTC_PORT_LAYER_REQUEST (0x06)
+#define MPI2_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED (0x07)
+#define MPI2_SASSTATUS_UTC_STP_RESOURCES_BUSY (0x08)
+#define MPI2_SASSTATUS_UTC_WRONG_DESTINATION (0x09)
+#define MPI2_SASSTATUS_SHORT_INFORMATION_UNIT (0x0A)
+#define MPI2_SASSTATUS_LONG_INFORMATION_UNIT (0x0B)
+#define MPI2_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA (0x0C)
+#define MPI2_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR (0x0D)
+#define MPI2_SASSTATUS_XFER_RDY_NOT_EXPECTED (0x0E)
+#define MPI2_SASSTATUS_DATA_INCORRECT_DATA_LENGTH (0x0F)
+#define MPI2_SASSTATUS_DATA_TOO_MUCH_READ_DATA (0x10)
+#define MPI2_SASSTATUS_DATA_OFFSET_ERROR (0x11)
+#define MPI2_SASSTATUS_SDSF_NAK_RECEIVED (0x12)
+#define MPI2_SASSTATUS_SDSF_CONNECTION_FAILED (0x13)
+#define MPI2_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT (0x14)
+
+/*
+ *Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ *data and SAS Configuration pages.
+ */
+#define MPI2_SAS_DEVICE_INFO_SEP (0x00004000)
+#define MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE (0x00002000)
+#define MPI2_SAS_DEVICE_INFO_LSI_DEVICE (0x00001000)
+#define MPI2_SAS_DEVICE_INFO_DIRECT_ATTACH (0x00000800)
+#define MPI2_SAS_DEVICE_INFO_SSP_TARGET (0x00000400)
+#define MPI2_SAS_DEVICE_INFO_STP_TARGET (0x00000200)
+#define MPI2_SAS_DEVICE_INFO_SMP_TARGET (0x00000100)
+#define MPI2_SAS_DEVICE_INFO_SATA_DEVICE (0x00000080)
+#define MPI2_SAS_DEVICE_INFO_SSP_INITIATOR (0x00000040)
+#define MPI2_SAS_DEVICE_INFO_STP_INITIATOR (0x00000020)
+#define MPI2_SAS_DEVICE_INFO_SMP_INITIATOR (0x00000010)
+#define MPI2_SAS_DEVICE_INFO_SATA_HOST (0x00000008)
+
+#define MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE (0x00000007)
+#define MPI2_SAS_DEVICE_INFO_NO_DEVICE (0x00000000)
+#define MPI2_SAS_DEVICE_INFO_END_DEVICE (0x00000001)
+#define MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER (0x00000002)
+#define MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER (0x00000003)
+
+/*****************************************************************************
+*
+* SAS Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* SMP Passthrough messages
+****************************************************************************/
+
+/*SMP Passthrough Request Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REQUEST {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 RequestDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U64 SASAddress; /*0x10 */
+ U32 Reserved3; /*0x18 */
+ U32 Reserved4; /*0x1C */
+ MPI2_SIMPLE_SGE_UNION SGL;/*0x20 */
+} MPI2_SMP_PASSTHROUGH_REQUEST, *PTR_MPI2_SMP_PASSTHROUGH_REQUEST,
+ Mpi2SmpPassthroughRequest_t, *pMpi2SmpPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE (0x80)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*SMP Passthrough Reply Message */
+typedef struct _MPI2_SMP_PASSTHROUGH_REPLY {
+ U8 PassthroughFlags; /*0x00 */
+ U8 PhysicalPort; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 ResponseDataLength; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 Reserved3; /*0x14 */
+ U8 ResponseData[4]; /*0x18 */
+} MPI2_SMP_PASSTHROUGH_REPLY, *PTR_MPI2_SMP_PASSTHROUGH_REPLY,
+ Mpi2SmpPassthroughReply_t, *pMpi2SmpPassthroughReply_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE (0x80)
+
+/*values for SASStatus field are at the top of this file */
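
A sketch of preparing the request for an expander at a given SAS address. The SMP frame (e.g. REPORT GENERAL) and the SGL describing it are supplied by the caller; sizing the data as the frame length minus the 4-byte CRC, which the firmware generates, follows the convention used by the mpt transport code and should be treated as an assumption here (example_build_smp_request is a hypothetical helper):

static void example_build_smp_request(MPI2_SMP_PASSTHROUGH_REQUEST *req,
	u64 sas_address, u16 frame_bytes)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
	req->PhysicalPort = 0xFF;	/* let the IOC choose the port */
	req->SASAddress = cpu_to_le64(sas_address);
	req->RequestDataLength = cpu_to_le16(frame_bytes - 4);
}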
+
+/****************************************************************************
+* SATA Passthrough messages
+****************************************************************************/
+
+typedef union _MPI2_SATA_PT_SGE_UNION {
+ MPI2_SGE_SIMPLE_UNION MpiSimple; /*MPI v2.0 only */
+ MPI2_SGE_CHAIN_UNION MpiChain; /*MPI v2.0 only */
+ MPI2_IEEE_SGE_SIMPLE_UNION IeeeSimple;
+ MPI2_IEEE_SGE_CHAIN_UNION IeeeChain; /*MPI v2.0 only */
+ MPI25_IEEE_SGE_CHAIN64 IeeeChain64; /*MPI v2.5 only */
+} MPI2_SATA_PT_SGE_UNION, *PTR_MPI2_SATA_PT_SGE_UNION,
+ Mpi2SataPTSGEUnion_t, *pMpi2SataPTSGEUnion_t;
+
+/*SATA Passthrough Request Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REQUEST {
+ U16 DevHandle; /*0x00 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06*//*MPI v2.0 only. Reserved on MPI v2.5*/
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U32 Reserved2; /*0x0C */
+ U32 Reserved3; /*0x10 */
+ U32 Reserved4; /*0x14 */
+ U32 DataLength; /*0x18 */
+ U8 CommandFIS[20]; /*0x1C */
+ MPI2_SATA_PT_SGE_UNION SGL;/*0x30*//*MPI v2.5: IEEE 64 elements only*/
+} MPI2_SATA_PASSTHROUGH_REQUEST, *PTR_MPI2_SATA_PASSTHROUGH_REQUEST,
+ Mpi2SataPassthroughRequest_t,
+ *pMpi2SataPassthroughRequest_t;
+
+/*values for PassthroughFlags field */
+#define MPI2_SATA_PT_REQ_PT_FLAGS_EXECUTE_DIAG (0x0100)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_DMA (0x0020)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_PIO (0x0010)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_UNSPECIFIED_VU (0x0004)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_WRITE (0x0002)
+#define MPI2_SATA_PT_REQ_PT_FLAGS_READ (0x0001)
+
+/*MPI v2.0: use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
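
A sketch of an ATA IDENTIFY DEVICE issued through this message: CommandFIS carries a 20-byte host-to-device register FIS (FIS type 0x27, C bit set, command 0xEC) and one 512-byte sector is read via PIO. MPI2_FUNCTION_SATA_PASSTHROUGH comes from mpi2.h (example_build_identify is a hypothetical helper):

static void example_build_identify(MPI2_SATA_PASSTHROUGH_REQUEST *req,
	u16 dev_handle)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SATA_PASSTHROUGH;
	req->DevHandle = cpu_to_le16(dev_handle);
	req->PassthroughFlags = cpu_to_le16(MPI2_SATA_PT_REQ_PT_FLAGS_PIO |
		MPI2_SATA_PT_REQ_PT_FLAGS_READ);
	req->DataLength = cpu_to_le32(512);
	req->CommandFIS[0] = 0x27;	/* host-to-device register FIS */
	req->CommandFIS[1] = 0x80;	/* C bit: command register update */
	req->CommandFIS[2] = 0xEC;	/* ATA IDENTIFY DEVICE */
}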
+
+/*SATA Passthrough Reply Message */
+typedef struct _MPI2_SATA_PASSTHROUGH_REPLY {
+ U16 DevHandle; /*0x00 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 PassthroughFlags; /*0x04 */
+ U8 SGLFlags; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved1; /*0x0A */
+ U8 Reserved2; /*0x0C */
+ U8 SASStatus; /*0x0D */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 StatusFIS[20]; /*0x14 */
+ U32 StatusControlRegisters; /*0x28 */
+ U32 TransferCount; /*0x2C */
+} MPI2_SATA_PASSTHROUGH_REPLY, *PTR_MPI2_SATA_PASSTHROUGH_REPLY,
+ Mpi2SataPassthroughReply_t, *pMpi2SataPassthroughReply_t;
+
+/*values for SASStatus field are at the top of this file */
+
+/****************************************************************************
+* SAS IO Unit Control messages
+****************************************************************************/
+
+/*SAS IO Unit Control Request Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REQUEST {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U8 PhyNum; /*0x0E */
+ U8 PrimFlags; /*0x0F */
+ U32 Primitive; /*0x10 */
+ U8 LookupMethod; /*0x14 */
+ U8 Reserved5; /*0x15 */
+ U16 SlotNumber; /*0x16 */
+ U64 LookupAddress; /*0x18 */
+ U32 IOCParameterValue; /*0x20 */
+ U32 Reserved7; /*0x24 */
+ U32 Reserved8; /*0x28 */
+} MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REQUEST,
+ Mpi2SasIoUnitControlRequest_t,
+ *pMpi2SasIoUnitControlRequest_t;
+
+/*values for the Operation field */
+#define MPI2_SAS_OP_CLEAR_ALL_PERSISTENT (0x02)
+#define MPI2_SAS_OP_PHY_LINK_RESET (0x06)
+#define MPI2_SAS_OP_PHY_HARD_RESET (0x07)
+#define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG (0x08)
+#define MPI2_SAS_OP_SEND_PRIMITIVE (0x0A)
+#define MPI2_SAS_OP_FORCE_FULL_DISCOVERY (0x0B)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_REMOVE_DEVICE (0x0D)
+#define MPI2_SAS_OP_LOOKUP_MAPPING (0x0E)
+#define MPI2_SAS_OP_SET_IOC_PARAMETER (0x0F)
+#define MPI25_SAS_OP_ENABLE_FP_DEVICE (0x10)
+#define MPI25_SAS_OP_DISABLE_FP_DEVICE (0x11)
+#define MPI25_SAS_OP_ENABLE_FP_ALL (0x12)
+#define MPI25_SAS_OP_DISABLE_FP_ALL (0x13)
+#define MPI2_SAS_OP_DEV_ENABLE_NCQ (0x14)
+#define MPI2_SAS_OP_DEV_DISABLE_NCQ (0x15)
+#define MPI2_SAS_OP_PRODUCT_SPECIFIC_MIN (0x80)
+
+/*values for the PrimFlags field */
+#define MPI2_SAS_PRIMFLAGS_SINGLE (0x08)
+#define MPI2_SAS_PRIMFLAGS_TRIPLE (0x02)
+#define MPI2_SAS_PRIMFLAGS_REDUNDANT (0x01)
+
+/*values for the LookupMethod field */
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ADDRESS (0x01)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_ENCLOSURE_SLOT (0x02)
+#define MPI2_SAS_LOOKUP_METHOD_SAS_DEVICE_NAME (0x03)
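
A sketch of a single-PHY hard reset through this message; MPI2_FUNCTION_SAS_IO_UNIT_CONTROL comes from mpi2.h, and how the frame is queued is left to the driver's request paths (example_build_phy_reset is a hypothetical helper):

static void example_build_phy_reset(MPI2_SAS_IOUNIT_CONTROL_REQUEST *req,
	u8 phy_num)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
	req->Operation = MPI2_SAS_OP_PHY_HARD_RESET;
	req->PhyNum = phy_num;
}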
+
+/*SAS IO Unit Control Reply Message */
+typedef struct _MPI2_SAS_IOUNIT_CONTROL_REPLY {
+ U8 Operation; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 DevHandle; /*0x04 */
+ U8 IOCParameter; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved3; /*0x0A */
+ U16 Reserved4; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ *PTR_MPI2_SAS_IOUNIT_CONTROL_REPLY,
+ Mpi2SasIoUnitControlReply_t, *pMpi2SasIoUnitControlReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
new file mode 100644
index 000000000000..71453d11c1c1
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -0,0 +1,437 @@
+/*
+ * Copyright (c) 2000-2012 LSI Corporation.
+ *
+ *
+ * Name: mpi2_tool.h
+ * Title: MPI diagnostic tool structures and definitions
+ * Creation Date: March 26, 2007
+ *
+ * mpi2_tool.h Version: 02.00.09
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * 12-18-07 02.00.01 Added Diagnostic Buffer Post and Diagnostic Release
+ * structures and defines.
+ * 02-29-08 02.00.02 Modified various names to make them 32-character unique.
+ * 05-06-09 02.00.03 Added ISTWI Read Write Tool and Diagnostic CLI Tool.
+ * 07-30-09 02.00.04 Added ExtendedType field to DiagnosticBufferPost request
+ * and reply messages.
+ * Added MPI2_DIAG_BUF_TYPE_EXTENDED.
+ * Incremented MPI2_DIAG_BUF_TYPE_COUNT.
+ * 05-12-10 02.00.05 Added Diagnostic Data Upload tool.
+ * 08-11-10 02.00.06 Added defines that were missing for Diagnostic Buffer
+ * Post Request.
+ * 05-25-11 02.00.07 Added Flags field and related defines to
+ * MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
+ * 11-18-11 02.00.08 Incorporating additions for MPI v2.5.
+ * 07-10-12 02.00.09 Add MPI v2.5 Toolbox Diagnostic CLI Tool Request
+ * message.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TOOL_H
+#define MPI2_TOOL_H
+
+/*****************************************************************************
+*
+* Toolbox Messages
+*
+*****************************************************************************/
+
+/*defines for the Tools */
+#define MPI2_TOOLBOX_CLEAN_TOOL (0x00)
+#define MPI2_TOOLBOX_MEMORY_MOVE_TOOL (0x01)
+#define MPI2_TOOLBOX_DIAG_DATA_UPLOAD_TOOL (0x02)
+#define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL (0x03)
+#define MPI2_TOOLBOX_BEACON_TOOL (0x05)
+#define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL (0x06)
+
+/****************************************************************************
+* Toolbox reply
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_TOOLBOX_REPLY, *PTR_MPI2_TOOLBOX_REPLY,
+ Mpi2ToolboxReply_t, *pMpi2ToolboxReply_t;
+
+/****************************************************************************
+* Toolbox Clean Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_CLEAN_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Flags; /*0x0C */
+} MPI2_TOOLBOX_CLEAN_REQUEST, *PTR_MPI2_TOOLBOX_CLEAN_REQUEST,
+ Mpi2ToolboxCleanRequest_t, *pMpi2ToolboxCleanRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_CLEAN_BOOT_SERVICES (0x80000000)
+#define MPI2_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES (0x40000000)
+#define MPI2_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES (0x20000000)
+#define MPI2_TOOLBOX_CLEAN_FW_CURRENT (0x10000000)
+#define MPI2_TOOLBOX_CLEAN_FW_BACKUP (0x08000000)
+#define MPI2_TOOLBOX_CLEAN_MEGARAID (0x02000000)
+#define MPI2_TOOLBOX_CLEAN_INITIALIZATION (0x01000000)
+#define MPI2_TOOLBOX_CLEAN_FLASH (0x00000004)
+#define MPI2_TOOLBOX_CLEAN_SEEPROM (0x00000002)
+#define MPI2_TOOLBOX_CLEAN_NVSRAM (0x00000001)
+
+/****************************************************************************
+* Toolbox Memory Move request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_MEM_MOVE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x0C */
+} MPI2_TOOLBOX_MEM_MOVE_REQUEST, *PTR_MPI2_TOOLBOX_MEM_MOVE_REQUEST,
+ Mpi2ToolboxMemMoveRequest_t, *pMpi2ToolboxMemMoveRequest_t;
+
+/****************************************************************************
+* Toolbox Diagnostic Data Upload request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 Flags; /*0x10 */
+ U32 DataLength; /*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x18 */
+} MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+ Mpi2ToolboxDiagDataUploadRequest_t,
+ *pMpi2ToolboxDiagDataUploadRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+typedef struct _MPI2_DIAG_DATA_UPLOAD_HEADER {
+ U32 DiagDataLength; /*00h */
+ U8 FormatCode; /*04h */
+ U8 Reserved1; /*05h */
+ U16 Reserved2; /*06h */
+} MPI2_DIAG_DATA_UPLOAD_HEADER, *PTR_MPI2_DIAG_DATA_UPLOAD_HEADER,
+ Mpi2DiagDataUploadHeader_t, *pMpi2DiagDataUploadHeader_t;
+
+/****************************************************************************
+* Toolbox ISTWI Read Write Tool
+****************************************************************************/
+
+/*Toolbox ISTWI Read Write Tool request message */
+typedef struct _MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 Reserved6; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 SGLFlags; /*0x16 */
+ U8 Flags; /*0x17 */
+ U16 TxDataLength; /*0x18 */
+ U16 RxDataLength; /*0x1A */
+ U32 Reserved8; /*0x1C */
+ U32 Reserved9; /*0x20 */
+ U32 Reserved10; /*0x24 */
+ U32 Reserved11; /*0x28 */
+ U32 Reserved12; /*0x2C */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x30 */
+} MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ *PTR_MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+ Mpi2ToolboxIstwiReadWriteRequest_t,
+ *pMpi2ToolboxIstwiReadWriteRequest_t;
+
+/*values for the Action field */
+#define MPI2_TOOL_ISTWI_ACTION_READ_DATA (0x01)
+#define MPI2_TOOL_ISTWI_ACTION_WRITE_DATA (0x02)
+#define MPI2_TOOL_ISTWI_ACTION_SEQUENCE (0x03)
+#define MPI2_TOOL_ISTWI_ACTION_RESERVE_BUS (0x10)
+#define MPI2_TOOL_ISTWI_ACTION_RELEASE_BUS (0x11)
+#define MPI2_TOOL_ISTWI_ACTION_RESET (0x12)
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*values for the Flags field */
+#define MPI2_TOOL_ISTWI_FLAG_AUTO_RESERVE_RELEASE (0x80)
+#define MPI2_TOOL_ISTWI_FLAG_PAGE_ADDR_MASK (0x07)
+
+/*Toolbox ISTWI Read Write Tool reply message */
+typedef struct _MPI2_TOOLBOX_ISTWI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U8 DevIndex; /*0x14 */
+ U8 Action; /*0x15 */
+ U8 IstwiStatus; /*0x16 */
+ U8 Reserved6; /*0x17 */
+ U16 TxDataCount; /*0x18 */
+ U16 RxDataCount; /*0x1A */
+} MPI2_TOOLBOX_ISTWI_REPLY, *PTR_MPI2_TOOLBOX_ISTWI_REPLY,
+ Mpi2ToolboxIstwiReply_t, *pMpi2ToolboxIstwiReply_t;
+
+/****************************************************************************
+* Toolbox Beacon Tool request
+****************************************************************************/
+
+typedef struct _MPI2_TOOLBOX_BEACON_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 Reserved5; /*0x0C */
+ U8 PhysicalPort; /*0x0D */
+ U8 Reserved6; /*0x0E */
+ U8 Flags; /*0x0F */
+} MPI2_TOOLBOX_BEACON_REQUEST, *PTR_MPI2_TOOLBOX_BEACON_REQUEST,
+ Mpi2ToolboxBeaconRequest_t, *pMpi2ToolboxBeaconRequest_t;
+
+/*values for the Flags field */
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_OFF (0x00)
+#define MPI2_TOOLBOX_FLAGS_BEACONMODE_ON (0x01)
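
A sketch of switching the locate beacon on for one physical port (MPI2_FUNCTION_TOOLBOX comes from mpi2.h; example_beacon_on is a hypothetical helper):

static void example_beacon_on(MPI2_TOOLBOX_BEACON_REQUEST *req, u8 port)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_TOOLBOX;
	req->Tool = MPI2_TOOLBOX_BEACON_TOOL;
	req->PhysicalPort = port;
	req->Flags = MPI2_TOOLBOX_FLAGS_BEACONMODE_ON;
}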
+
+/****************************************************************************
+* Toolbox Diagnostic CLI Tool
+****************************************************************************/
+
+#define MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH (0x5C)
+
+/*MPI v2.0 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U8 SGLFlags; /*0x0C */
+ U8 Reserved5; /*0x0D */
+ U16 Reserved6; /*0x0E */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI2_SGE_SIMPLE_UNION SGL; /*0x70 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi2ToolboxDiagnosticCliRequest_t,
+ *pMpi2ToolboxDiagnosticCliRequest_t;
+
+/*use MPI2_SGLFLAGS_ defines from mpi2.h for the SGLFlags field */
+
+/*MPI v2.5 Toolbox Diagnostic CLI Tool request message */
+typedef struct _MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U32 Reserved5; /*0x0C */
+ U32 DataLength; /*0x10 */
+ U8 DiagnosticCliCommand[MPI2_TOOLBOX_DIAG_CLI_CMD_LENGTH];/*0x14 */
+ MPI25_SGE_IO_UNION SGL; /*0x70 */
+} MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ *PTR_MPI25_TOOLBOX_DIAGNOSTIC_CLI_REQUEST,
+ Mpi25ToolboxDiagnosticCliRequest_t,
+ *pMpi25ToolboxDiagnosticCliRequest_t;
+
+/*Toolbox Diagnostic CLI Tool reply message */
+typedef struct _MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY {
+ U8 Tool; /*0x00 */
+ U8 Reserved1; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 ReturnedDataLength; /*0x14 */
+} MPI2_TOOLBOX_DIAGNOSTIC_CLI_REPLY,
+ *PTR_MPI2_TOOLBOX_DIAG_CLI_REPLY,
+ Mpi2ToolboxDiagnosticCliReply_t,
+ *pMpi2ToolboxDiagnosticCliReply_t;
+
+/*****************************************************************************
+*
+* Diagnostic Buffer Messages
+*
+*****************************************************************************/
+
+/****************************************************************************
+* Diagnostic Buffer Post request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REQUEST {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U64 BufferAddress; /*0x0C */
+ U32 BufferLength; /*0x14 */
+ U32 Reserved5; /*0x18 */
+ U32 Reserved6; /*0x1C */
+ U32 Flags; /*0x20 */
+ U32 ProductSpecific[23]; /*0x24 */
+} MPI2_DIAG_BUFFER_POST_REQUEST, *PTR_MPI2_DIAG_BUFFER_POST_REQUEST,
+ Mpi2DiagBufferPostRequest_t, *pMpi2DiagBufferPostRequest_t;
+
+/*values for the ExtendedType field */
+#define MPI2_DIAG_EXTENDED_TYPE_UTILIZATION (0x02)
+
+/*values for the BufferType field */
+#define MPI2_DIAG_BUF_TYPE_TRACE (0x00)
+#define MPI2_DIAG_BUF_TYPE_SNAPSHOT (0x01)
+#define MPI2_DIAG_BUF_TYPE_EXTENDED (0x02)
+/*count of the number of buffer types */
+#define MPI2_DIAG_BUF_TYPE_COUNT (0x03)
+
+/*values for the Flags field */
+#define MPI2_DIAG_BUF_FLAG_RELEASE_ON_FULL (0x00000002)
+#define MPI2_DIAG_BUF_FLAG_IMMEDIATE_RELEASE (0x00000001)
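
A sketch of posting a host trace buffer: the memory must be DMA-able and stay allocated until released; MPI2_FUNCTION_DIAG_BUFFER_POST comes from mpi2.h (example_post_trace_buffer is a hypothetical helper):

static void example_post_trace_buffer(MPI2_DIAG_BUFFER_POST_REQUEST *req,
	dma_addr_t dma, u32 len)
{
	memset(req, 0, sizeof(*req));
	req->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
	req->BufferType = MPI2_DIAG_BUF_TYPE_TRACE;
	req->BufferAddress = cpu_to_le64(dma);
	req->BufferLength = cpu_to_le32(len);
}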
+
+/****************************************************************************
+* Diagnostic Buffer Post reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_BUFFER_POST_REPLY {
+ U8 ExtendedType; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+ U32 TransferLength; /*0x14 */
+} MPI2_DIAG_BUFFER_POST_REPLY, *PTR_MPI2_DIAG_BUFFER_POST_REPLY,
+ Mpi2DiagBufferPostReply_t, *pMpi2DiagBufferPostReply_t;
+
+/****************************************************************************
+* Diagnostic Release request
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REQUEST {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 ChainOffset; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+} MPI2_DIAG_RELEASE_REQUEST, *PTR_MPI2_DIAG_RELEASE_REQUEST,
+ Mpi2DiagReleaseRequest_t, *pMpi2DiagReleaseRequest_t;
+
+/****************************************************************************
+* Diagnostic Release reply
+****************************************************************************/
+
+typedef struct _MPI2_DIAG_RELEASE_REPLY {
+ U8 Reserved1; /*0x00 */
+ U8 BufferType; /*0x01 */
+ U8 MsgLength; /*0x02 */
+ U8 Function; /*0x03 */
+ U16 Reserved2; /*0x04 */
+ U8 Reserved3; /*0x06 */
+ U8 MsgFlags; /*0x07 */
+ U8 VP_ID; /*0x08 */
+ U8 VF_ID; /*0x09 */
+ U16 Reserved4; /*0x0A */
+ U16 Reserved5; /*0x0C */
+ U16 IOCStatus; /*0x0E */
+ U32 IOCLogInfo; /*0x10 */
+} MPI2_DIAG_RELEASE_REPLY, *PTR_MPI2_DIAG_RELEASE_REPLY,
+ Mpi2DiagReleaseReply_t, *pMpi2DiagReleaseReply_t;
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
new file mode 100644
index 000000000000..516f959573f5
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2007 LSI Corporation.
+ *
+ *
+ * Name: mpi2_type.h
+ * Title: MPI basic type definitions
+ * Creation Date: August 16, 2006
+ *
+ * mpi2_type.h Version: 02.00.00
+ *
+ * Version History
+ * ---------------
+ *
+ * Date Version Description
+ * -------- -------- ------------------------------------------------------
+ * 04-30-07 02.00.00 Corresponds to Fusion-MPT MPI Specification Rev A.
+ * --------------------------------------------------------------------------
+ */
+
+#ifndef MPI2_TYPE_H
+#define MPI2_TYPE_H
+
+/*******************************************************************************
+ * The original MPI headers let MPI2_POINTER be overridden so the same
+ * structures could use near or far pointers in segmented environments.
+ * The Linux port uses plain pointers throughout, so only the basic
+ * wire types are defined below.
+ */
+
+/* the basic types may have already been included by mpi_type.h */
+#ifndef MPI_TYPE_H
+/*****************************************************************************
+*
+* Basic Types
+*
+*****************************************************************************/
+
+typedef u8 U8;
+typedef __le16 U16;
+typedef __le32 U32;
+typedef __le64 U64 __attribute__ ((aligned(4)));
+
+/*****************************************************************************
+*
+* Pointer Types
+*
+*****************************************************************************/
+
+typedef U8 *PU8;
+typedef U16 *PU16;
+typedef U32 *PU32;
+typedef U64 *PU64;
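
Because the wire types are little-endian under Linux, every multi-byte field must pass through the le*_to_cpu/cpu_to_le* helpers on access. A one-line sketch (MPI2_IOCSTATUS_MASK comes from mpi2.h; example_read_ioc_status is a hypothetical helper):

static u16 example_read_ioc_status(U16 ioc_status_field)
{
	return le16_to_cpu(ioc_status_field) & MPI2_IOCSTATUS_MASK;
}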
+
+#endif
+
+#endif
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
new file mode 100644
index 000000000000..18360032a520
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -0,0 +1,4837 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/time.h>
+#include <linux/kthread.h>
+#include <linux/aer.h>
+
+
+#include "mpt3sas_base.h"
+
+static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
+
+
+#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
+
+ /* maximum controller queue depth */
+#define MAX_HBA_QUEUE_DEPTH 30000
+#define MAX_CHAIN_DEPTH 100000
+static int max_queue_depth = -1;
+module_param(max_queue_depth, int, 0);
+MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
+
+static int max_sgl_entries = -1;
+module_param(max_sgl_entries, int, 0);
+MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
+
+static int msix_disable = -1;
+module_param(msix_disable, int, 0);
+MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
+
+
+static int mpt3sas_fwfault_debug;
+MODULE_PARM_DESC(mpt3sas_fwfault_debug,
+ " enable detection of firmware fault and halt firmware - (default=0)");
+
+
+/**
+ * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
+ * @val: value being written to the module parameter
+ * @kp: kernel parameter descriptor
+ *
+ * Propagates the new setting to every registered adapter.
+ */
+static int
+_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+ return 0;
+}
+module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
+ param_get_int, &mpt3sas_fwfault_debug, 0644);
+
+/**
+ * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
+ * @arg: input argument, used to derive ioc
+ *
+ * Return 0 if the controller is removed from the pci subsystem.
+ * Return -1 otherwise.
+ */
+static int mpt3sas_remove_dead_ioc_func(void *arg)
+{
+ struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
+ struct pci_dev *pdev;
+
+	if (!ioc)
+ return -1;
+
+ pdev = ioc->pdev;
+	if (!pdev)
+ return -1;
+ pci_stop_and_remove_bus_device(pdev);
+ return 0;
+}
+
+/**
+ * _base_fault_reset_work - workq handling ioc fault conditions
+ * @work: input argument, used to derive ioc
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+static void
+_base_fault_reset_work(struct work_struct *work)
+{
+ struct MPT3SAS_ADAPTER *ioc =
+ container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
+ unsigned long flags;
+ u32 doorbell;
+ int rc;
+ struct task_struct *p;
+
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->shost_recovery)
+ goto rearm_timer;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
+ pr_err(MPT3SAS_FMT "SAS host is non-operational !!!!\n",
+ ioc->name);
+
+		/*
+		 * Call the _scsih_flush_pending_cmds callback to flush all
+		 * pending commands back to the OS.  This is required to avoid
+		 * a deadlock at the block layer.  A dead IOC will fail the
+		 * diag reset, and this call is safe since a dead ioc will
+		 * never return any command back from the HW.
+		 */
+ ioc->schedule_dead_ioc_flush_running_cmds(ioc);
+ /*
+ * Set remove_host flag early since kernel thread will
+ * take some time to execute.
+ */
+ ioc->remove_host = 1;
+ /*Remove the Dead Host */
+ p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
+ "mpt3sas_dead_ioc_%d", ioc->id);
+ if (IS_ERR(p))
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
+ ioc->name, __func__);
+ else
+ pr_err(MPT3SAS_FMT
+ "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
+ ioc->name, __func__);
+ return; /* don't rearm timer */
+ }
+
+ if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_warn(MPT3SAS_FMT "%s: hard reset: %s\n", ioc->name,
+ __func__, (rc == 0) ? "success" : "failed");
+ doorbell = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ mpt3sas_base_fault_info(ioc, doorbell &
+ MPI2_DOORBELL_DATA_MASK);
+ if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_OPERATIONAL)
+ return; /* don't rearm timer */
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ rearm_timer:
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_start_watchdog - start the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+
+ if (ioc->fault_reset_work_q)
+ return;
+
+ /* initialize fault polling */
+
+ INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
+ snprintf(ioc->fault_reset_work_q_name,
+ sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
+ ioc->fault_reset_work_q =
+ create_singlethread_workqueue(ioc->fault_reset_work_q_name);
+ if (!ioc->fault_reset_work_q) {
+ pr_err(MPT3SAS_FMT "%s: failed (line=%d)\n",
+ ioc->name, __func__, __LINE__);
+ return;
+ }
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ if (ioc->fault_reset_work_q)
+ queue_delayed_work(ioc->fault_reset_work_q,
+ &ioc->fault_reset_work,
+ msecs_to_jiffies(FAULT_POLLING_INTERVAL));
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+}
+
+/**
+ * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
+ * @ioc: per adapter object
+ * Context: sleep.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
+{
+ unsigned long flags;
+ struct workqueue_struct *wq;
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ wq = ioc->fault_reset_work_q;
+ ioc->fault_reset_work_q = NULL;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ if (wq) {
+ if (!cancel_delayed_work(&ioc->fault_reset_work))
+ flush_workqueue(wq);
+ destroy_workqueue(wq);
+ }
+}
+
+/**
+ * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
+ * @ioc: per adapter object
+ * @fault_code: fault code
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
+{
+ pr_err(MPT3SAS_FMT "fault_state(0x%04x)!\n",
+ ioc->name, fault_code);
+}
+
+/**
+ * mpt3sas_halt_firmware - halt the mpt controller firmware
+ * @ioc: per adapter object
+ *
+ * For debugging timeout-related issues.  Writing 0xC0FFEE00 to the
+ * doorbell register halts the controller firmware.  Stopping both the
+ * driver and the firmware lets the end user obtain a ring buffer from
+ * the controller UART.
+ */
+void
+mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 doorbell;
+
+ if (!ioc->fwfault_debug)
+ return;
+
+ dump_stack();
+
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+		mpt3sas_base_fault_info(ioc, doorbell);
+ else {
+ writel(0xC0FFEE00, &ioc->chip->Doorbell);
+ pr_err(MPT3SAS_FMT "Firmware is halted due to command timeout\n",
+ ioc->name);
+ }
+
+ if (ioc->fwfault_debug == 2)
+ for (;;)
+ ;
+ else
+ panic("panic in %s\n", __func__);
+}
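+
+/*
+ * Example: how the halt path is typically reached.  Assuming the
+ * fwfault_debug flag is exposed as a writable module parameter (the
+ * parameter path below is illustrative), the end user arms it with:
+ *
+ *   echo 1 > /sys/module/mpt3sas/parameters/mpt3sas_fwfault_debug
+ *
+ * The next command timeout then calls mpt3sas_halt_firmware(), and
+ * with fwfault_debug == 2 the driver spins instead of panicking so
+ * the controller UART ring buffer can be captured intact.
+ */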
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _base_sas_ioc_info - verbose translation of the ioc status
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @request_hdr: request mf
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
+ MPI2RequestHeader_t *request_hdr)
+{
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ char *desc = NULL;
+ u16 frame_sz;
+ char *func_str = NULL;
+
+ /* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
+ if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return;
+
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return;
+
+ switch (ioc_status) {
+
+/****************************************************************************
+* Common IOCStatus values for all replies
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_BUSY:
+ desc = "busy";
+ break;
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ desc = "invalid sgl";
+ break;
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ desc = "internal error";
+ break;
+ case MPI2_IOCSTATUS_INVALID_VPID:
+ desc = "invalid vpid";
+ break;
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ desc = "insufficient resources";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ desc = "invalid field";
+ break;
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ desc = "invalid state";
+ break;
+ case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
+ desc = "op state not supported";
+ break;
+
+/****************************************************************************
+* Config IOCStatus values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
+ desc = "config invalid action";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
+ desc = "config invalid type";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
+ desc = "config invalid page";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
+ desc = "config invalid data";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
+ desc = "config no defaults";
+ break;
+ case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
+ desc = "config cant commit";
+ break;
+
+/****************************************************************************
+* SCSI IO Reply
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ break;
+
+/****************************************************************************
+* For use by SCSI Initiator and SCSI Target end-to-end data protection
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc = "eedp app tag error";
+ break;
+
+/****************************************************************************
+* SCSI Target values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
+ desc = "target invalid io index";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ABORTED:
+ desc = "target aborted";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
+ desc = "target no conn retryable";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
+ desc = "target no connection";
+ break;
+ case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
+ desc = "target xfer count mismatch";
+ break;
+ case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
+ desc = "target data offset error";
+ break;
+ case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
+ desc = "target too much write data";
+ break;
+ case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
+ desc = "target iu too short";
+ break;
+ case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
+ desc = "target ack nak timeout";
+ break;
+ case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
+ desc = "target nak received";
+ break;
+
+/****************************************************************************
+* Serial Attached SCSI values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
+ desc = "smp request failed";
+ break;
+ case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
+ desc = "smp data overrun";
+ break;
+
+/****************************************************************************
+* Diagnostic Buffer Post / Diagnostic Release values
+****************************************************************************/
+
+ case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
+ desc = "diagnostic released";
+ break;
+ default:
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ switch (request_hdr->Function) {
+ case MPI2_FUNCTION_CONFIG:
+ frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
+ func_str = "config_page";
+ break;
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
+ func_str = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
+ func_str = "sas_iounit_ctl";
+ break;
+ case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+ frame_sz = sizeof(Mpi2SepRequest_t);
+ func_str = "enclosure";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ frame_sz = sizeof(Mpi2IOCInitRequest_t);
+ func_str = "ioc_init";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ frame_sz = sizeof(Mpi2PortEnableRequest_t);
+ func_str = "port_enable";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
+ func_str = "smp_passthru";
+ break;
+ default:
+ frame_sz = 32;
+ func_str = "unknown";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
+ ioc->name, desc, ioc_status, request_hdr, func_str);
+
+ _debug_dump_mf(request_hdr, frame_sz/4);
+}
+
+/**
+ * _base_display_event_data - verbose translation of firmware asynchronous events
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * Return nothing.
+ */
+static void
+_base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ char *desc = NULL;
+ u16 event;
+
+ if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ switch (event) {
+ case MPI2_EVENT_LOG_DATA:
+ desc = "Log Data";
+ break;
+ case MPI2_EVENT_STATE_CHANGE:
+ desc = "Status Change";
+ break;
+ case MPI2_EVENT_HARD_RESET_RECEIVED:
+ desc = "Hard Reset Received";
+ break;
+ case MPI2_EVENT_EVENT_CHANGE:
+ desc = "Event Change";
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ desc = "Device Status Change";
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ desc = "IR Operation Status";
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ {
+ Mpi2EventDataSasDiscovery_t *event_data =
+ (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
+ pr_info(MPT3SAS_FMT "Discovery: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ return;
+ }
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ desc = "SAS Broadcast Primitive";
+ break;
+ case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
+ desc = "SAS Init Device Status Change";
+ break;
+ case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
+ desc = "SAS Init Table Overflow";
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ desc = "SAS Topology Change List";
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ desc = "SAS Enclosure Device Status Change";
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ desc = "IR Volume";
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ desc = "IR Physical Disk";
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ desc = "IR Configuration Change List";
+ break;
+ case MPI2_EVENT_LOG_ENTRY_ADDED:
+ desc = "Log Entry Added";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s\n", ioc->name, desc);
+}
+#endif
+
+/**
+ * _base_sas_log_info - verbose translation of firmware log info
+ * @ioc: per adapter object
+ * @log_info: log info
+ *
+ * Return nothing.
+ */
+static void
+_base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
+{
+ union loginfo_type {
+ u32 loginfo;
+ struct {
+ u32 subcode:16;
+ u32 code:8;
+ u32 originator:4;
+ u32 bus_type:4;
+ } dw;
+ };
+ union loginfo_type sas_loginfo;
+ char *originator_str = NULL;
+
+ sas_loginfo.loginfo = log_info;
+ if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
+ return;
+
+ /* each nexus loss loginfo */
+ if (log_info == 0x31170000)
+ return;
+
+ /* eat the loginfos associated with task aborts */
+ if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
+ 0x31140000 || log_info == 0x31130000))
+ return;
+
+ switch (sas_loginfo.dw.originator) {
+ case 0:
+ originator_str = "IOP";
+ break;
+ case 1:
+ originator_str = "PL";
+ break;
+ case 2:
+ originator_str = "IR";
+ break;
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
+ ioc->name, log_info,
+ originator_str, sas_loginfo.dw.code,
+ sas_loginfo.dw.subcode);
+}
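+
+/*
+ * Example: decoding the filtered nexus loss value 0x31170000 with the
+ * union above (little-endian bitfield layout assumed): bus_type = 0x3
+ * (SAS), originator = 0x1 (PL), code = 0x17, subcode = 0x0000.  The
+ * early return keeps this high-frequency loginfo out of the log.
+ */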
+
+/**
+ * _base_display_reply_info - verbose translation of a reply's ioc_status and loginfo
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return nothing.
+ */
+static void
+_base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+ u32 loginfo = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
+ (ioc->logging_level & MPT_DEBUG_REPLY)) {
+ _base_sas_ioc_info(ioc, mpi_reply,
+ mpt3sas_base_get_msg_frame(ioc, smid));
+ }
+#endif
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+ loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
+ _base_sas_log_info(ioc, loginfo);
+ }
+
+ if (ioc_status || loginfo) {
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
+ }
+}
+
+/**
+ * mpt3sas_base_done - base internal command completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
+ return 1;
+
+ if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ }
+ ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
+
+ complete(&ioc->base_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_async_event - main callback handler for firmware asynchronous events
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ Mpi2EventAckRequest_t *ack_request;
+ u16 smid;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+ if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
+ return 1;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _base_display_event_data(ioc, mpi_reply);
+#endif
+ if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
+ goto out;
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
+ ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
+ ack_request->Event = mpi_reply->Event;
+ ack_request->EventContext = mpi_reply->EventContext;
+ ack_request->VF_ID = 0; /* TODO */
+ ack_request->VP_ID = 0;
+ mpt3sas_base_put_smid_default(ioc, smid);
+
+ out:
+
+ /* scsih callback handler */
+ mpt3sas_scsih_event_callback(ioc, msix_index, reply);
+
+ /* ctl callback handler */
+ mpt3sas_ctl_event_callback(ioc, msix_index, reply);
+
+ return 1;
+}
+
+/**
+ * _base_get_cb_idx - obtain the callback index
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return callback index.
+ */
+static u8
+_base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ int i;
+ u8 cb_idx;
+
+ if (smid < ioc->hi_priority_smid) {
+ i = smid - 1;
+ cb_idx = ioc->scsi_lookup[i].cb_idx;
+ } else if (smid < ioc->internal_smid) {
+ i = smid - ioc->hi_priority_smid;
+ cb_idx = ioc->hpr_lookup[i].cb_idx;
+ } else if (smid <= ioc->hba_queue_depth) {
+ i = smid - ioc->internal_smid;
+ cb_idx = ioc->internal_lookup[i].cb_idx;
+ } else
+ cb_idx = 0xFF;
+ return cb_idx;
+}
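+
+/*
+ * Example: with hypothetical boundaries hi_priority_smid = 1001 and
+ * internal_smid = 1009, smid 1..1000 indexes scsi_lookup[0..999],
+ * smid 1001..1008 indexes hpr_lookup[0..7], and smid 1009 up to
+ * hba_queue_depth indexes internal_lookup[].  Anything beyond
+ * hba_queue_depth yields the invalid callback index 0xFF.
+ */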
+
+/**
+ * _base_mask_interrupts - disable interrupts
+ * @ioc: per adapter object
+ *
+ * Disabling ResetIRQ, Reply and Doorbell Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ ioc->mask_interrupts = 1;
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register |= MPI2_HIM_DIM | MPI2_HIM_RIM | MPI2_HIM_RESET_IRQ_MASK;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ readl(&ioc->chip->HostInterruptMask);
+}
+
+/**
+ * _base_unmask_interrupts - enable interrupts
+ * @ioc: per adapter object
+ *
+ * Enabling only Reply Interrupts
+ *
+ * Return nothing.
+ */
+static void
+_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+{
+ u32 him_register;
+
+ him_register = readl(&ioc->chip->HostInterruptMask);
+ him_register &= ~MPI2_HIM_RIM;
+ writel(him_register, &ioc->chip->HostInterruptMask);
+ ioc->mask_interrupts = 0;
+}
+
+union reply_descriptor {
+ u64 word;
+ struct {
+ u32 low;
+ u32 high;
+ } u;
+};
+
+/**
+ * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
+ * @irq: irq number (not used)
+ * @bus_id: bus identifier cookie == pointer to adapter_reply_queue structure
+ *
+ * Return IRQ_HANDLED if processed, else IRQ_NONE.
+ */
+static irqreturn_t
+_base_interrupt(int irq, void *bus_id)
+{
+ struct adapter_reply_queue *reply_q = bus_id;
+ union reply_descriptor rd;
+ u32 completed_cmds;
+ u8 request_descript_type;
+ u16 smid;
+ u8 cb_idx;
+ u32 reply;
+ u8 msix_index = reply_q->msix_index;
+ struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
+ Mpi2ReplyDescriptorsUnion_t *rpf;
+ u8 rc;
+
+ if (ioc->mask_interrupts)
+ return IRQ_NONE;
+
+ if (!atomic_add_unless(&reply_q->busy, 1, 1))
+ return IRQ_NONE;
+
+ rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
+ request_descript_type = rpf->Default.ReplyFlags
+ & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ completed_cmds = 0;
+ cb_idx = 0xFF;
+ do {
+ rd.word = le64_to_cpu(rpf->Words);
+ if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
+ goto out;
+ reply = 0;
+ smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+ if (request_descript_type ==
+ MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
+ request_descript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, 0);
+ if (rc)
+ mpt3sas_base_free_smid(ioc, smid);
+ }
+ } else if (request_descript_type ==
+ MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
+ reply = le32_to_cpu(
+ rpf->AddressReply.ReplyFrameAddress);
+ if (reply > ioc->reply_dma_max_address ||
+ reply < ioc->reply_dma_min_address)
+ reply = 0;
+ if (smid) {
+ cb_idx = _base_get_cb_idx(ioc, smid);
+ if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
+ (likely(mpt_callbacks[cb_idx] != NULL))) {
+ rc = mpt_callbacks[cb_idx](ioc, smid,
+ msix_index, reply);
+ if (reply)
+ _base_display_reply_info(ioc,
+ smid, msix_index, reply);
+ if (rc)
+ mpt3sas_base_free_smid(ioc,
+ smid);
+ }
+ } else {
+ _base_async_event(ioc, msix_index, reply);
+ }
+
+ /* reply free queue handling */
+ if (reply) {
+ ioc->reply_free_host_index =
+ (ioc->reply_free_host_index ==
+ (ioc->reply_free_queue_depth - 1)) ?
+ 0 : ioc->reply_free_host_index + 1;
+ ioc->reply_free[ioc->reply_free_host_index] =
+ cpu_to_le32(reply);
+ wmb();
+ writel(ioc->reply_free_host_index,
+ &ioc->chip->ReplyFreeHostIndex);
+ }
+ }
+
+ rpf->Words = cpu_to_le64(ULLONG_MAX);
+ reply_q->reply_post_host_index =
+ (reply_q->reply_post_host_index ==
+ (ioc->reply_post_queue_depth - 1)) ? 0 :
+ reply_q->reply_post_host_index + 1;
+ request_descript_type =
+ reply_q->reply_post_free[reply_q->reply_post_host_index].
+ Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+ completed_cmds++;
+ if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
+ goto out;
+ if (!reply_q->reply_post_host_index)
+ rpf = reply_q->reply_post_free;
+ else
+ rpf++;
+ } while (1);
+
+ out:
+
+ if (!completed_cmds) {
+ atomic_dec(&reply_q->busy);
+ return IRQ_NONE;
+ }
+
+ wmb();
+ writel(reply_q->reply_post_host_index | (msix_index <<
+ MPI2_RPHI_MSIX_INDEX_SHIFT), &ioc->chip->ReplyPostHostIndex);
+ atomic_dec(&reply_q->busy);
+ return IRQ_HANDLED;
+}
+
+/**
+ * _base_is_controller_msix_enabled - does the controller support multi reply queues
+ * @ioc: per adapter object
+ *
+ */
+static inline int
+_base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
+{
+ return (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
+}
+
+/**
+ * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
+ * @ioc: per adapter object
+ * Context: ISR context
+ *
+ * Called when a Task Management request has completed.  We want
+ * to flush the other reply queues so that all outstanding IO has been
+ * completed back to the OS before we process the TM completion.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+
+ /* If MSIX capability is turned off
+ * then multi-queues are not enabled
+ */
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ if (ioc->shost_recovery)
+ return;
+ /* TMs are on msix_index == 0 */
+ if (reply_q->msix_index == 0)
+ continue;
+ _base_interrupt(reply_q->vector, (void *)reply_q);
+ }
+}
+
+/**
+ * mpt3sas_base_release_callback_handler - clear interrupt callback handler
+ * @cb_idx: callback index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_release_callback_handler(u8 cb_idx)
+{
+ mpt_callbacks[cb_idx] = NULL;
+}
+
+/**
+ * mpt3sas_base_register_callback_handler - obtain index for the interrupt callback handler
+ * @cb_func: callback function
+ *
+ * Returns cb_idx.
+ */
+u8
+mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
+{
+ u8 cb_idx;
+
+ for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
+ if (mpt_callbacks[cb_idx] == NULL)
+ break;
+
+ mpt_callbacks[cb_idx] = cb_func;
+ return cb_idx;
+}
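+
+/*
+ * Example usage at driver load time (sketch; the _scsih_io_done
+ * completion routine is assumed to be provided by the scsih module):
+ *
+ *   scsi_io_cb_idx = mpt3sas_base_register_callback_handler(
+ *       _scsih_io_done);
+ *
+ * The returned index is stored in each request tracker and later
+ * recovered by _base_get_cb_idx() so _base_interrupt() can route a
+ * completed smid to its owner.
+ */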
+
+/**
+ * mpt3sas_base_initialize_callback_handler - initialize the interrupt callback handler
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_initialize_callback_handler(void)
+{
+ u8 cb_idx;
+
+ for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
+ mpt3sas_base_release_callback_handler(cb_idx);
+}
+
+/**
+ * _base_build_zero_len_sge - build zero length sg entry
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
+ MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
+ MPI2_SGE_FLAGS_SHIFT);
+ ioc->base_add_sg_single(paddr, flags_length, -1);
+}
+
+/**
+ * _base_add_sg_single_32 - Place a simple 32 bit SGE at address paddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple32_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le32(dma_addr);
+}
+
+/**
+ * _base_add_sg_single_64 - Place a simple 64 bit SGE at address paddr.
+ * @paddr: virtual address for SGE
+ * @flags_length: SGE flags and data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
+{
+ Mpi2SGESimple64_t *sgel = paddr;
+
+ flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
+ MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
+ sgel->FlagsLength = cpu_to_le32(flags_length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_get_chain_buffer_tracker - obtain chain tracker
+ * @ioc: per adapter object
+ * @smid: smid associated to an IO request
+ *
+ * Returns chain tracker (from ioc->free_chain_list)
+ */
+static struct chain_tracker *
+_base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct chain_tracker *chain_req;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_chain_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "chain buffers not available\n", ioc->name));
+ return NULL;
+ }
+ chain_req = list_entry(ioc->free_chain_list.next,
+ struct chain_tracker, tracker_list);
+ list_del_init(&chain_req->tracker_list);
+ list_add_tail(&chain_req->tracker_list,
+ &ioc->scsi_lookup[smid - 1].chain_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return chain_req;
+}
+
+/**
+ * _base_build_sg - build generic sg
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u32 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_out_sz, data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ data_in_sz, data_in_dma);
+ }
+}
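+
+/*
+ * Example: a bidirectional management passthrough builds a WRITE SGE
+ * followed by a READ SGE with a single call (buffer names are
+ * illustrative only):
+ *
+ *   _base_build_sg(ioc, psge, data_out_dma, data_out_sz,
+ *       data_in_dma, data_in_sz);
+ *
+ * Passing zero for both sizes degenerates to the zero length SGE, so
+ * the firmware always has a terminated list to walk.
+ */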
+
+/* IEEE format sgls */
+
+/**
+ * _base_add_sg_single_ieee - add sg element for IEEE format
+ * @paddr: virtual address for SGE
+ * @flags: SGE flags
+ * @chain_offset: number of 128 byte elements from start of segment
+ * @length: data transfer length
+ * @dma_addr: Physical address
+ *
+ * Return nothing.
+ */
+static void
+_base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
+ dma_addr_t dma_addr)
+{
+ Mpi25IeeeSgeChain64_t *sgel = paddr;
+
+ sgel->Flags = flags;
+ sgel->NextChainOffset = chain_offset;
+ sgel->Length = cpu_to_le32(length);
+ sgel->Address = cpu_to_le64(dma_addr);
+}
+
+/**
+ * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE format
+ * @ioc: per adapter object
+ * @paddr: virtual address for SGE
+ *
+ * Create a zero length scatter gather entry to ensure the IOC's hardware has
+ * something to use if the target device goes brain dead and tries
+ * to send data even when none is asked for.
+ *
+ * Return nothing.
+ */
+static void
+_base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
+{
+ u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
+ _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
+}
+
+/**
+ * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
+ * @ioc: per adapter object
+ * @scmd: scsi command
+ * @smid: system request message index
+ * Context: none.
+ *
+ * The main routine that builds the scatter/gather table from a given
+ * scsi request sent via the .queuecommand main handler.
+ *
+ * Returns 0 on success, anything else on error
+ */
+static int
+_base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ dma_addr_t chain_dma;
+ struct scatterlist *sg_scmd;
+ void *sg_local, *chain;
+ u32 chain_offset;
+ u32 chain_length;
+ int sges_left;
+ u32 sges_in_segment;
+ u8 simple_sgl_flags;
+ u8 simple_sgl_flags_last;
+ u8 chain_sgl_flags;
+ struct chain_tracker *chain_req;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ /* init scatter gather flags */
+ simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ simple_sgl_flags_last = simple_sgl_flags |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+
+ sg_scmd = scsi_sglist(scmd);
+ sges_left = scsi_dma_map(scmd);
+ if (!sges_left) {
+ sdev_printk(KERN_ERR, scmd->device,
+ "pci_map_sg failed: request for %d bytes!\n",
+ scsi_bufflen(scmd));
+ return -ENOMEM;
+ }
+
+ sg_local = &mpi_request->SGL;
+ sges_in_segment = (ioc->request_sz -
+ offsetof(Mpi2SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
+ if (sges_left <= sges_in_segment)
+ goto fill_in_last_segment;
+
+ mpi_request->ChainOffset = (sges_in_segment - 1 /* chain element */) +
+ (offsetof(Mpi2SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
+
+ /* fill in main message segment when there is a chain following */
+ while (sges_in_segment > 1) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ /* initializing the pointers */
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ do {
+ sges_in_segment = (sges_left <=
+ ioc->max_sges_in_chain_message) ? sges_left :
+ ioc->max_sges_in_chain_message;
+ chain_offset = (sges_left == sges_in_segment) ?
+ 0 : sges_in_segment;
+ chain_length = sges_in_segment * ioc->sge_size_ieee;
+ if (chain_offset)
+ chain_length += ioc->sge_size_ieee;
+ _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
+ chain_offset, chain_length, chain_dma);
+
+ sg_local = chain;
+ if (!chain_offset)
+ goto fill_in_last_segment;
+
+ /* fill in chain segments */
+ while (sges_in_segment) {
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ sges_in_segment--;
+ }
+
+ chain_req = _base_get_chain_buffer_tracker(ioc, smid);
+ if (!chain_req)
+ return -1;
+ chain = chain_req->chain_buffer;
+ chain_dma = chain_req->chain_buffer_dma;
+ } while (1);
+
+ fill_in_last_segment:
+
+ /* fill the last segment */
+ while (sges_left) {
+ if (sges_left == 1)
+ _base_add_sg_single_ieee(sg_local,
+ simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
+ sg_dma_address(sg_scmd));
+ else
+ _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
+ sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
+ sg_scmd = sg_next(sg_scmd);
+ sg_local += ioc->sge_size_ieee;
+ sges_left--;
+ }
+
+ return 0;
+}
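+
+/*
+ * Worked example of the segment math above, assuming a request_sz of
+ * 256 bytes, a 96 byte header before the SGL and a 16 byte
+ * sge_size_ieee (sizes are illustrative): the main frame holds
+ * (256 - 96) / 16 = 10 SGEs.  An 11th scatter entry forces 9 simple
+ * SGEs plus one chain SGE in the main frame, with the remainder
+ * continuing in chain buffers obtained from
+ * _base_get_chain_buffer_tracker().
+ */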
+
+/**
+ * _base_build_sg_ieee - build generic sg for IEEE format
+ * @ioc: per adapter object
+ * @psge: virtual address for SGE
+ * @data_out_dma: physical address for WRITES
+ * @data_out_sz: data xfer size for WRITES
+ * @data_in_dma: physical address for READS
+ * @data_in_sz: data xfer size for READS
+ *
+ * Return nothing.
+ */
+static void
+_base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
+ size_t data_in_sz)
+{
+ u8 sgl_flags;
+
+ if (!data_out_sz && !data_in_sz) {
+ _base_build_zero_len_sge_ieee(ioc, psge);
+ return;
+ }
+
+ if (data_out_sz && data_in_sz) {
+ /* WRITE sgel first */
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size_ieee;
+
+ /* READ sgel last */
+ sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ } else if (data_out_sz) /* WRITE */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
+ data_out_dma);
+ } else if (data_in_sz) /* READ */ {
+ sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
+ MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
+ _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
+ data_in_dma);
+ }
+}
+
+#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
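+
+/*
+ * Example: with 4 KiB pages (PAGE_SHIFT == 12), convert_to_kb(x)
+ * expands to x << 2, i.e. pages * 4 gives kilobytes.
+ */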
+
+/**
+ * _base_config_dma_addressing - set dma addressing
+ * @ioc: per adapter object
+ * @pdev: PCI device struct
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
+{
+ struct sysinfo s;
+ char *desc = NULL;
+
+ if (sizeof(dma_addr_t) > 4) {
+ const uint64_t required_mask =
+ dma_get_required_mask(&pdev->dev);
+ if ((required_mask > DMA_BIT_MASK(32)) &&
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+ !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_64;
+ ioc->sge_size = sizeof(Mpi2SGESimple64_t);
+ desc = "64";
+ goto out;
+ }
+ }
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
+ ioc->base_add_sg_single = &_base_add_sg_single_32;
+ ioc->sge_size = sizeof(Mpi2SGESimple32_t);
+ desc = "32";
+ } else
+ return -ENODEV;
+
+ out:
+ si_meminfo(&s);
+ pr_info(MPT3SAS_FMT
+ "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+ ioc->name, desc, convert_to_kb(s.totalram));
+
+ return 0;
+}
+
+/**
+ * _base_check_enable_msix - checks whether the controller is MSIX capable.
+ * @ioc: per adapter object
+ *
+ * Check to see if card is capable of MSIX, and set number
+ * of available msix vectors
+ */
+static int
+_base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ int base;
+ u16 message_control;
+
+ base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
+ if (!base) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT "msix not supported\n",
+ ioc->name));
+ return -EINVAL;
+ }
+
+ /* get msix vector count */
+
+ pci_read_config_word(ioc->pdev, base + 2, &message_control);
+ ioc->msix_vector_count = (message_control & 0x3FF) + 1;
+ if (ioc->msix_vector_count > 8)
+ ioc->msix_vector_count = 8;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "msix is supported, vector_count(%d)\n",
+ ioc->name, ioc->msix_vector_count));
+ return 0;
+}
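+
+/*
+ * Example: a Message Control word of 0x000F decodes to
+ * (0x000F & 0x3FF) + 1 = 16 table entries, which the code above then
+ * caps at 8 reply vectors.
+ */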
+
+/**
+ * _base_free_irq - free irq
+ * @ioc: per adapter object
+ *
+ * Frees each reply_queue after removing it from the list.
+ */
+static void
+_base_free_irq(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q, *next;
+
+ if (list_empty(&ioc->reply_queue_list))
+ return;
+
+ list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
+ list_del(&reply_q->list);
+ synchronize_irq(reply_q->vector);
+ free_irq(reply_q->vector, reply_q);
+ kfree(reply_q);
+ }
+}
+
+/**
+ * _base_request_irq - request irq
+ * @ioc: per adapter object
+ * @index: msix index into vector table
+ * @vector: irq vector
+ *
+ * Inserts the newly allocated reply_queue into the list.
+ */
+static int
+_base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index, u32 vector)
+{
+ struct adapter_reply_queue *reply_q;
+ int r;
+
+ reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
+ if (!reply_q) {
+ pr_err(MPT3SAS_FMT "unable to allocate memory %d!\n",
+ ioc->name, (int)sizeof(struct adapter_reply_queue));
+ return -ENOMEM;
+ }
+ reply_q->ioc = ioc;
+ reply_q->msix_index = index;
+ reply_q->vector = vector;
+ atomic_set(&reply_q->busy, 0);
+ if (ioc->msix_enable)
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
+ MPT3SAS_DRIVER_NAME, ioc->id, index);
+ else
+ snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
+ MPT3SAS_DRIVER_NAME, ioc->id);
+ r = request_irq(vector, _base_interrupt, IRQF_SHARED, reply_q->name,
+ reply_q);
+ if (r) {
+ pr_err(MPT3SAS_FMT "unable to allocate interrupt %d!\n",
+ reply_q->name, vector);
+ kfree(reply_q);
+ return -EBUSY;
+ }
+
+ INIT_LIST_HEAD(&reply_q->list);
+ list_add_tail(&reply_q->list, &ioc->reply_queue_list);
+ return 0;
+}
+
+/**
+ * _base_assign_reply_queues - assigning msix index for each cpu
+ * @ioc: per adapter object
+ *
+ * The end user would need to set the affinity via /proc/irq/#/smp_affinity
+ *
+ * It would be nice if we could call irq_set_affinity, however it is not
+ * an exported symbol
+ */
+static void
+_base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct adapter_reply_queue *reply_q;
+ int cpu_id;
+ int cpu_grouping, loop, grouping, grouping_mod;
+ int reply_queue;
+
+ if (!_base_is_controller_msix_enabled(ioc))
+ return;
+
+ memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
+
+ /* NUMA Hardware bug workaround - drop to fewer reply queues */
+ if (ioc->reply_queue_count > ioc->facts.MaxMSIxVectors) {
+ ioc->reply_queue_count = ioc->facts.MaxMSIxVectors;
+ reply_queue = 0;
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->msix_index = reply_queue;
+ if (++reply_queue == ioc->reply_queue_count)
+ reply_queue = 0;
+ }
+ }
+
+ /* when there are more cpus than available msix vectors,
+ * then group cpus together on the same irq
+ */
+ if (ioc->cpu_count > ioc->msix_vector_count) {
+ grouping = ioc->cpu_count / ioc->msix_vector_count;
+ grouping_mod = ioc->cpu_count % ioc->msix_vector_count;
+ if (grouping < 2 || (grouping == 2 && !grouping_mod))
+ cpu_grouping = 2;
+ else if (grouping < 4 || (grouping == 4 && !grouping_mod))
+ cpu_grouping = 4;
+ else if (grouping < 8 || (grouping == 8 && !grouping_mod))
+ cpu_grouping = 8;
+ else
+ cpu_grouping = 16;
+ } else
+ cpu_grouping = 0;
+
+ loop = 0;
+ reply_q = list_entry(ioc->reply_queue_list.next,
+ struct adapter_reply_queue, list);
+ for_each_online_cpu(cpu_id) {
+ if (!cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] = reply_q->msix_index;
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ } else {
+ if (loop < cpu_grouping) {
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop++;
+ } else {
+ reply_q = list_entry(reply_q->list.next,
+ struct adapter_reply_queue, list);
+ ioc->cpu_msix_table[cpu_id] =
+ reply_q->msix_index;
+ loop = 1;
+ }
+ }
+ }
+}
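+
+/*
+ * Example: on a 16 CPU host with 8 reply vectors, grouping is
+ * 16 / 8 = 2 with no remainder, so cpu_grouping becomes 2 and the
+ * loop pairs CPUs onto vectors: CPUs 0-1 share msix_index 0,
+ * CPUs 2-3 share msix_index 1, and so on.
+ */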
+
+/**
+ * _base_disable_msix - disables msix
+ * @ioc: per adapter object
+ *
+ */
+static void
+_base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ if (!ioc->msix_enable)
+ return;
+ pci_disable_msix(ioc->pdev);
+ ioc->msix_enable = 0;
+}
+
+/**
+ * _base_enable_msix - enables msix, with fallback to io_apic
+ * @ioc: per adapter object
+ *
+ */
+static int
+_base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct msix_entry *entries, *a;
+ int r;
+ int i;
+ u8 try_msix = 0;
+
+ INIT_LIST_HEAD(&ioc->reply_queue_list);
+
+ if (msix_disable == -1 || msix_disable == 0)
+ try_msix = 1;
+
+ if (!try_msix)
+ goto try_ioapic;
+
+ if (_base_check_enable_msix(ioc) != 0)
+ goto try_ioapic;
+
+ ioc->reply_queue_count = min_t(int, ioc->cpu_count,
+ ioc->msix_vector_count);
+
+ entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
+ GFP_KERNEL);
+ if (!entries) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "kcalloc failed @ at %s:%d/%s() !!!\n",
+ ioc->name, __FILE__, __LINE__, __func__));
+ goto try_ioapic;
+ }
+
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
+ a->entry = i;
+
+ r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+ if (r) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "pci_enable_msix failed (r=%d) !!!\n",
+ ioc->name, r));
+ kfree(entries);
+ goto try_ioapic;
+ }
+
+ ioc->msix_enable = 1;
+ for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++) {
+ r = _base_request_irq(ioc, i, a->vector);
+ if (r) {
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ kfree(entries);
+ goto try_ioapic;
+ }
+ }
+
+ kfree(entries);
+ return 0;
+
+/* fall back to io_apic interrupt routing */
+ try_ioapic:
+
+ r = _base_request_irq(ioc, 0, ioc->pdev->irq);
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+ u32 memap_sz;
+ u32 pio_sz;
+ int i, r = 0;
+ u64 pio_chip = 0;
+ u64 chip_phys = 0;
+ struct adapter_reply_queue *reply_q;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n",
+ ioc->name, __func__));
+
+ ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ if (pci_enable_device_mem(pdev)) {
+ pr_warn(MPT3SAS_FMT "pci_enable_device_mem: failed\n",
+ ioc->name);
+ return -ENODEV;
+ }
+
+ if (pci_request_selected_regions(pdev, ioc->bars,
+ MPT3SAS_DRIVER_NAME)) {
+ pr_warn(MPT3SAS_FMT "pci_request_selected_regions: failed\n",
+ ioc->name);
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+/* AER (Advanced Error Reporting) hooks */
+ pci_enable_pcie_error_reporting(pdev);
+
+ pci_set_master(pdev);
+
+ if (_base_config_dma_addressing(ioc, pdev) != 0) {
+ pr_warn(MPT3SAS_FMT "no suitable DMA mask for %s\n",
+ ioc->name, pci_name(pdev));
+ r = -ENODEV;
+ goto out_fail;
+ }
+
+ for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+ if (pio_sz)
+ continue;
+ pio_chip = (u64)pci_resource_start(pdev, i);
+ pio_sz = pci_resource_len(pdev, i);
+ } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
+ if (memap_sz)
+ continue;
+ ioc->chip_phys = pci_resource_start(pdev, i);
+ chip_phys = (u64)ioc->chip_phys;
+ memap_sz = pci_resource_len(pdev, i);
+ ioc->chip = ioremap(ioc->chip_phys, memap_sz);
+ if (ioc->chip == NULL) {
+ pr_err(MPT3SAS_FMT "unable to map adapter memory!\n",
+ ioc->name);
+ r = -EINVAL;
+ goto out_fail;
+ }
+ }
+ }
+
+ _base_mask_interrupts(ioc);
+ r = _base_enable_msix(ioc);
+ if (r)
+ goto out_fail;
+
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
+ pr_info(MPT3SAS_FMT "%s: IRQ %d\n",
+ reply_q->name, ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
+ "IO-APIC enabled"), reply_q->vector);
+
+ pr_info(MPT3SAS_FMT "iomem(0x%016llx), mapped(0x%p), size(%d)\n",
+ ioc->name, (unsigned long long)chip_phys, ioc->chip, memap_sz);
+ pr_info(MPT3SAS_FMT "ioport(0x%016llx), size(%d)\n",
+ ioc->name, (unsigned long long)pio_chip, pio_sz);
+
+ /* Save PCI configuration state for recovery from PCI AER/EEH errors */
+ pci_save_state(pdev);
+ return 0;
+
+ out_fail:
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return r;
+}
+
+/**
+ * mpt3sas_base_get_msg_frame - obtain request mf pointer
+ * @ioc: per adapter object
+ * @smid: system request message index (smid zero is invalid)
+ *
+ * Returns virt pointer to message frame.
+ */
+void *
+mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->request + (smid * ioc->request_sz));
+}
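+
+/*
+ * Example: with a hypothetical request_sz of 128 bytes, smid 3 maps
+ * to ioc->request + 384.  Since smid zero is reserved as invalid, the
+ * frame at offset zero is never handed out.
+ */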
+
+/**
+ * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns virt pointer to sense buffer.
+ */
+void *
+mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns phys pointer to the low 32bit address of the sense buffer.
+ */
+__le32
+mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
+ SCSI_SENSE_BUFFERSIZE));
+}
+
+/**
+ * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
+ * @ioc: per adapter object
+ * @phys_addr: lower 32 physical addr of the reply
+ *
+ * Converts 32bit lower physical addr into a virt address.
+ */
+void *
+mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
+{
+ if (!phys_addr)
+ return NULL;
+ return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
+}
+
+/**
+ * mpt3sas_base_get_smid - obtain a free smid from internal queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->internal_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->internal_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ * @scmd: pointer to scsi command object
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd)
+{
+ unsigned long flags;
+ struct scsiio_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ pr_err(MPT3SAS_FMT "%s: smid not available\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request = list_entry(ioc->free_list.next,
+ struct scsiio_tracker, tracker_list);
+ request->scmd = scmd;
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
+ * @ioc: per adapter object
+ * @cb_idx: callback index
+ *
+ * Returns smid (zero is invalid)
+ */
+u16
+mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
+{
+ unsigned long flags;
+ struct request_tracker *request;
+ u16 smid;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (list_empty(&ioc->hpr_free_list)) {
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return 0;
+ }
+
+ request = list_entry(ioc->hpr_free_list.next,
+ struct request_tracker, tracker_list);
+ request->cb_idx = cb_idx;
+ smid = request->smid;
+ list_del(&request->tracker_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * mpt3sas_base_free_smid - put smid back on free_list
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ int i;
+ struct chain_tracker *chain_req, *next;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ if (smid < ioc->hi_priority_smid) {
+ /* scsiio queue */
+ i = smid - 1;
+ if (!list_empty(&ioc->scsi_lookup[i].chain_list)) {
+ list_for_each_entry_safe(chain_req, next,
+ &ioc->scsi_lookup[i].chain_list, tracker_list) {
+ list_del_init(&chain_req->tracker_list);
+ list_add(&chain_req->tracker_list,
+ &ioc->free_chain_list);
+ }
+ }
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add(&ioc->scsi_lookup[i].tracker_list, &ioc->free_list);
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /*
+ * See _wait_for_commands_to_complete() call with regards
+ * to this code.
+ */
+ if (ioc->shost_recovery && ioc->pending_io_count) {
+ if (ioc->pending_io_count == 1)
+ wake_up(&ioc->reset_wq);
+ ioc->pending_io_count--;
+ }
+ return;
+ } else if (smid < ioc->internal_smid) {
+ /* hi-priority */
+ i = smid - ioc->hi_priority_smid;
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
+ } else if (smid <= ioc->hba_queue_depth) {
+ /* internal queue */
+ i = smid - ioc->internal_smid;
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ list_add(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+}
+
+/**
+ * _base_writeq - 64 bit write to MMIO
+ * @ioc: per adapter object
+ * @b: data payload
+ * @addr: address in MMIO space
+ * @writeq_lock: spin lock
+ *
+ * Glue for handling an atomic 64 bit word write to MMIO.  This special
+ * handling takes care of 32 bit environments where it is not guaranteed
+ * that the entire word is sent in one transfer.
+ */
+#if defined(writeq) && defined(CONFIG_64BIT)
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ writeq(cpu_to_le64(b), addr);
+}
+#else
+static inline void
+_base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
+{
+ unsigned long flags;
+ __u64 data_out = cpu_to_le64(b);
+
+ spin_lock_irqsave(writeq_lock, flags);
+ writel((u32)(data_out), addr);
+ writel((u32)(data_out >> 32), (addr + 4));
+ spin_unlock_irqrestore(writeq_lock, flags);
+}
+#endif
+
+static inline u8
+_base_get_msix_index(struct MPT3SAS_ADAPTER *ioc)
+{
+ return ioc->cpu_msix_table[raw_smp_processor_id()];
+}
+
+/**
+ * mpt3sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_fast_path - send fast path request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.SCSIIO.RequestFlags =
+ MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
+ descriptor.SCSIIO.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.SCSIIO.SMID = cpu_to_le16(smid);
+ descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
+ descriptor.SCSIIO.LMID = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_hi_priority - send Task Management request to firmware
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.HighPriority.RequestFlags =
+ MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
+ descriptor.HighPriority.MSIxIndex = 0;
+ descriptor.HighPriority.SMID = cpu_to_le16(smid);
+ descriptor.HighPriority.LMID = 0;
+ descriptor.HighPriority.Reserved1 = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * mpt3sas_base_put_smid_default - Default, primarily used for config pages
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ Mpi2RequestDescriptorUnion_t descriptor;
+ u64 *request = (u64 *)&descriptor;
+
+ descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
+ descriptor.Default.MSIxIndex = _base_get_msix_index(ioc);
+ descriptor.Default.SMID = cpu_to_le16(smid);
+ descriptor.Default.LMID = 0;
+ descriptor.Default.DescriptorTypeDependent = 0;
+ _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
+ &ioc->scsi_lookup_lock);
+}
+
+/**
+ * _base_display_ioc_capabilities - Display IOC's capabilities.
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i = 0;
+ char desc[16];
+ u32 iounit_pg1_flags;
+ u32 bios_version;
+
+ bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+ strncpy(desc, ioc->manu_pg0.ChipName, 16);
+ pr_info(MPT3SAS_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "\
+ "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
+ ioc->name, desc,
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF,
+ ioc->pdev->revision,
+ (bios_version & 0xFF000000) >> 24,
+ (bios_version & 0x00FF0000) >> 16,
+ (bios_version & 0x0000FF00) >> 8,
+ bios_version & 0x000000FF);
+
+ pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
+ pr_info("Initiator");
+ i++;
+ }
+
+ if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
+ pr_info("%sTarget", i ? "," : "");
+ i++;
+ }
+
+ i = 0;
+ pr_info("), ");
+ pr_info("Capabilities=(");
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
+ pr_info("Raid");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
+ pr_info("%sTLR", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
+ pr_info("%sMulticast", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
+ pr_info("%sBIDI Target", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
+ pr_info("%sEEDP", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
+ pr_info("%sSnapshot Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
+ pr_info("%sDiag Trace Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
+ pr_info("%sDiag Extended Buffer", i ? "," : "");
+ i++;
+ }
+
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
+ pr_info("%sTask Set Full", i ? "," : "");
+ i++;
+ }
+
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
+ pr_info("%sNCQ", i ? "," : "");
+ i++;
+ }
+
+ pr_info(")\n");
+}
+
+/**
+ * mpt3sas_base_update_missing_delay - change the missing delay timers
+ * @ioc: per adapter object
+ * @device_missing_delay: amount of time till device is reported missing
+ * @io_missing_delay: time IO is held before being returned when a device is missing
+ *
+ * Return nothing.
+ *
+ * The delays are passed on the command line; this function modifies the
+ * device missing delay, as well as the io missing delay.  This should be
+ * called at driver load time.
+ */
+void
+mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay)
+{
+ u16 dmd, dmd_new, dmd_original;
+ u8 io_missing_delay_original;
+ u16 sz;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 num_phys = 0;
+ u16 ioc_status;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ if (!num_phys)
+ return;
+
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* device missing delay */
+ dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ dmd_original = dmd;
+ if (device_missing_delay > 0x7F) {
+ dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
+ device_missing_delay;
+ dmd = dmd / 16;
+ dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
+ } else
+ dmd = device_missing_delay;
+ sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
+
+ /* io missing delay */
+ io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
+ sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
+
+ if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ dmd_new = (dmd &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ dmd_new =
+ dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+ pr_info(MPT3SAS_FMT "device_missing_delay: old(%d), new(%d)\n",
+ ioc->name, dmd_original, dmd_new);
+ pr_info(MPT3SAS_FMT "ioc_missing_delay: old(%d), new(%d)\n",
+ ioc->name, io_missing_delay_original,
+ io_missing_delay);
+ ioc->device_missing_delay = dmd_new;
+ ioc->io_missing_delay = io_missing_delay;
+ }
+
+out:
+ kfree(sas_iounit_pg1);
+}
+/**
+ * _base_static_config_pages - static start of day config pages
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ u32 iounit_pg1_flags;
+
+ mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+ if (ioc->ir_firmware)
+ mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+ &ioc->manu_pg10);
+
+ /*
+ * Ensure correct T10 PI operation if vendor left EEDPTagMode
+ * flag unset in NVDATA.
+ */
+ mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
+ if (ioc->manu_pg11.EEDPTagMode == 0) {
+ pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
+ ioc->name);
+ ioc->manu_pg11.EEDPTagMode &= ~0x3;
+ ioc->manu_pg11.EEDPTagMode |= 0x1;
+ mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
+ &ioc->manu_pg11);
+ }
+
+ mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
+ mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
+ mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
+ mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
+ mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+ _base_display_ioc_capabilities(ioc);
+
+ /*
+ * Enable task_set_full handling in iounit_pg1 when the
+	 * facts capabilities indicate that it is supported.
+ */
+ iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
+ iounit_pg1_flags &=
+ ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ else
+ iounit_pg1_flags |=
+ MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
+ ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
+ mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
+}
+
+/**
+ * _base_release_memory_pools - release memory
+ * @ioc: per adapter object
+ *
+ * Free memory allocated from _base_allocate_memory_pools.
+ *
+ * Return nothing.
+ */
+static void
+_base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->request) {
+ pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
+ ioc->request, ioc->request_dma);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request_pool(0x%p): free\n",
+ ioc->name, ioc->request));
+ ioc->request = NULL;
+ }
+
+ if (ioc->sense) {
+ pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
+ if (ioc->sense_dma_pool)
+ pci_pool_destroy(ioc->sense_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense_pool(0x%p): free\n",
+ ioc->name, ioc->sense));
+ ioc->sense = NULL;
+ }
+
+ if (ioc->reply) {
+ pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
+ if (ioc->reply_dma_pool)
+ pci_pool_destroy(ioc->reply_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_pool(0x%p): free\n",
+ ioc->name, ioc->reply));
+ ioc->reply = NULL;
+ }
+
+ if (ioc->reply_free) {
+ pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
+ ioc->reply_free_dma);
+ if (ioc->reply_free_dma_pool)
+ pci_pool_destroy(ioc->reply_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_pool(0x%p): free\n",
+ ioc->name, ioc->reply_free));
+ ioc->reply_free = NULL;
+ }
+
+ if (ioc->reply_post_free) {
+ pci_pool_free(ioc->reply_post_free_dma_pool,
+ ioc->reply_post_free, ioc->reply_post_free_dma);
+ if (ioc->reply_post_free_dma_pool)
+ pci_pool_destroy(ioc->reply_post_free_dma_pool);
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_pool(0x%p): free\n", ioc->name,
+ ioc->reply_post_free));
+ ioc->reply_post_free = NULL;
+ }
+
+ if (ioc->config_page) {
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config_page(0x%p): free\n", ioc->name,
+ ioc->config_page));
+ pci_free_consistent(ioc->pdev, ioc->config_page_sz,
+ ioc->config_page, ioc->config_page_dma);
+ }
+
+ if (ioc->scsi_lookup) {
+ free_pages((ulong)ioc->scsi_lookup, ioc->scsi_lookup_pages);
+ ioc->scsi_lookup = NULL;
+ }
+ kfree(ioc->hpr_lookup);
+ kfree(ioc->internal_lookup);
+ if (ioc->chain_lookup) {
+ for (i = 0; i < ioc->chain_depth; i++) {
+ if (ioc->chain_lookup[i].chain_buffer)
+ pci_pool_free(ioc->chain_dma_pool,
+ ioc->chain_lookup[i].chain_buffer,
+ ioc->chain_lookup[i].chain_buffer_dma);
+ }
+ if (ioc->chain_dma_pool)
+ pci_pool_destroy(ioc->chain_dma_pool);
+ free_pages((ulong)ioc->chain_lookup, ioc->chain_pages);
+ ioc->chain_lookup = NULL;
+ }
+}
+
+/**
+ * _base_allocate_memory_pools - allocate start of day memory pools
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, anything else for error.
+ */
+static int
+_base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ struct mpt3sas_facts *facts;
+ u16 max_sge_elements;
+ u16 chains_needed_per_io;
+ u32 sz, total_sz, reply_post_free_sz;
+ u32 retry_sz;
+ u16 max_request_credit;
+ unsigned short sg_tablesize;
+ u16 sge_size;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ retry_sz = 0;
+ facts = &ioc->facts;
+
+ /* command line tunables for max sgl entries */
+ if (max_sgl_entries != -1)
+ sg_tablesize = max_sgl_entries;
+ else
+ sg_tablesize = MPT3SAS_SG_DEPTH;
+
+ if (sg_tablesize < MPT3SAS_MIN_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MIN_PHYS_SEGMENTS;
+ else if (sg_tablesize > MPT3SAS_MAX_PHYS_SEGMENTS)
+ sg_tablesize = MPT3SAS_MAX_PHYS_SEGMENTS;
+ ioc->shost->sg_tablesize = sg_tablesize;
+
+ ioc->hi_priority_depth = facts->HighPriorityCredit;
+	ioc->internal_depth = ioc->hi_priority_depth + 5;
+ /* command line tunables for max controller queue depth */
+ if (max_queue_depth != -1 && max_queue_depth != 0) {
+ max_request_credit = min_t(u16, max_queue_depth +
+ ioc->hi_priority_depth + ioc->internal_depth,
+ facts->RequestCredit);
+ if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+ max_request_credit = MAX_HBA_QUEUE_DEPTH;
+ } else
+ max_request_credit = min_t(u16, facts->RequestCredit,
+ MAX_HBA_QUEUE_DEPTH);
+
+ ioc->hba_queue_depth = max_request_credit;
+
+ /* request frame size */
+ ioc->request_sz = facts->IOCRequestFrameSize * 4;
+
+ /* reply frame size */
+ ioc->reply_sz = facts->ReplyFrameSize * 4;
+
+ /* calculate the max scatter element size */
+ sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
+
+ retry_allocation:
+ total_sz = 0;
+ /* calculate number of sg elements left over in the 1st frame */
+ max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
+ sizeof(Mpi2SGEIOUnion_t)) + sge_size);
+ ioc->max_sges_in_main_message = max_sge_elements/sge_size;
+
+ /* now do the same for a chain buffer */
+ max_sge_elements = ioc->request_sz - sge_size;
+ ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
+
+ /*
+ * MPT3SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
+ */
+ chains_needed_per_io = ((ioc->shost->sg_tablesize -
+ ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
+ + 1;
+ if (chains_needed_per_io > facts->MaxChainDepth) {
+ chains_needed_per_io = facts->MaxChainDepth;
+ ioc->shost->sg_tablesize = min_t(u16,
+ ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
+ * chains_needed_per_io), ioc->shost->sg_tablesize);
+ }
+ ioc->chains_needed_per_io = chains_needed_per_io;
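+	/*
+	 * Worked example (values assumed for illustration): with
+	 * sg_tablesize = 128, max_sges_in_main_message = 3 and
+	 * max_sges_in_chain_message = 7, this computes
+	 * ((128 - 3) / 7) + 1 = 17 + 1 = 18 chains per IO; if that
+	 * exceeded MaxChainDepth, sg_tablesize would be trimmed instead.
+	 */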
+
+	/* reply free queue sizing - taking into account 64 FW events */
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+
+ /* calculate reply descriptor post queue depth */
+ ioc->reply_post_queue_depth = ioc->hba_queue_depth +
+	    ioc->reply_free_queue_depth + 1;
+ /* align the reply post queue on the next 16 count boundary */
+ if (ioc->reply_post_queue_depth % 16)
+ ioc->reply_post_queue_depth += 16 -
+ (ioc->reply_post_queue_depth % 16);
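+	/*
+	 * Example (depth assumed for illustration): a computed depth of
+	 * 1037 gives 1037 % 16 = 13, so 16 - 13 = 3 is added, rounding
+	 * the queue depth up to 1040.
+	 */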
+
+ if (ioc->reply_post_queue_depth >
+ facts->MaxReplyDescriptorPostQueueDepth) {
+ ioc->reply_post_queue_depth =
+ facts->MaxReplyDescriptorPostQueueDepth -
+ (facts->MaxReplyDescriptorPostQueueDepth % 16);
+ ioc->hba_queue_depth =
+ ((ioc->reply_post_queue_depth - 64) / 2) - 1;
+ ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scatter gather: " \
+ "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
+ "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
+ ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
+ ioc->chains_needed_per_io));
+
+ ioc->scsiio_depth = ioc->hba_queue_depth -
+ ioc->hi_priority_depth - ioc->internal_depth;
+
+ /* set the scsi host can_queue depth
+ * with some internal commands that could be outstanding
+ */
+ ioc->shost->can_queue = ioc->scsiio_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "scsi host: can_queue depth (%d)\n",
+ ioc->name, ioc->shost->can_queue));
+
+	/* contiguous pool for request and chains, 16 byte align, one extra
+	 * frame for smid=0
+	 */
+ ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
+ sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
+
+ /* hi-priority queue */
+ sz += (ioc->hi_priority_depth * ioc->request_sz);
+
+ /* internal queue */
+ sz += (ioc->internal_depth * ioc->request_sz);
+
+ ioc->request_dma_sz = sz;
+ ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
+ if (!ioc->request) {
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+ if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
+ goto out;
+ retry_sz += 64;
+ ioc->hba_queue_depth = max_request_credit - retry_sz;
+ goto retry_allocation;
+ }
+
+ if (retry_sz)
+ pr_err(MPT3SAS_FMT "request pool: pci_alloc_consistent " \
+ "succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
+ "total(%d kb)\n", ioc->name, ioc->hba_queue_depth,
+ ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
+
+ /* hi-priority queue */
+ ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+ ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
+ ioc->request_sz);
+
+ /* internal queue */
+ ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
+ ioc->request_sz);
+ ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
+ ioc->request_sz);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->request, ioc->hba_queue_depth, ioc->request_sz,
+ (ioc->hba_queue_depth * ioc->request_sz)/1024));
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "request pool: dma(0x%llx)\n",
+ ioc->name, (unsigned long long) ioc->request_dma));
+ total_sz += sz;
+
+ sz = ioc->scsiio_depth * sizeof(struct scsiio_tracker);
+ ioc->scsi_lookup_pages = get_order(sz);
+ ioc->scsi_lookup = (struct scsiio_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->scsi_lookup_pages);
+ if (!ioc->scsi_lookup) {
+ pr_err(MPT3SAS_FMT "scsi_lookup: get_free_pages failed, sz(%d)\n",
+ ioc->name, (int)sz);
+ goto out;
+ }
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "scsiio(0x%p): depth(%d)\n",
+ ioc->name, ioc->request, ioc->scsiio_depth));
+
+ ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
+ sz = ioc->chain_depth * sizeof(struct chain_tracker);
+ ioc->chain_pages = get_order(sz);
+ ioc->chain_lookup = (struct chain_tracker *)__get_free_pages(
+ GFP_KERNEL, ioc->chain_pages);
+ if (!ioc->chain_lookup) {
+ pr_err(MPT3SAS_FMT "chain_lookup: __get_free_pages failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->chain_dma_pool = pci_pool_create("chain pool", ioc->pdev,
+ ioc->request_sz, 16, 0);
+ if (!ioc->chain_dma_pool) {
+ pr_err(MPT3SAS_FMT "chain_dma_pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+ for (i = 0; i < ioc->chain_depth; i++) {
+ ioc->chain_lookup[i].chain_buffer = pci_pool_alloc(
+	    ioc->chain_dma_pool, GFP_KERNEL,
+ &ioc->chain_lookup[i].chain_buffer_dma);
+ if (!ioc->chain_lookup[i].chain_buffer) {
+ ioc->chain_depth = i;
+ goto chain_done;
+ }
+ total_sz += ioc->request_sz;
+ }
+ chain_done:
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->chain_depth, ioc->request_sz,
+ ((ioc->chain_depth * ioc->request_sz))/1024));
+
+ /* initialize hi-priority queue smid's */
+ ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->hpr_lookup) {
+ pr_err(MPT3SAS_FMT "hpr_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->hi_priority_smid = ioc->scsiio_depth + 1;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hi_priority(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->hi_priority,
+ ioc->hi_priority_depth, ioc->hi_priority_smid));
+
+ /* initialize internal queue smid's */
+ ioc->internal_lookup = kcalloc(ioc->internal_depth,
+ sizeof(struct request_tracker), GFP_KERNEL);
+ if (!ioc->internal_lookup) {
+ pr_err(MPT3SAS_FMT "internal_lookup: kcalloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "internal(0x%p): depth(%d), start smid(%d)\n",
+ ioc->name, ioc->internal,
+ ioc->internal_depth, ioc->internal_smid));
+
+ /* sense buffers, 4 byte align */
+ sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
+ ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->sense_dma_pool) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
+ &ioc->sense_dma);
+ if (!ioc->sense) {
+ pr_err(MPT3SAS_FMT "sense pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
+ "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
+ SCSI_SENSE_BUFFERSIZE, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "sense_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->sense_dma));
+ total_sz += sz;
+
+ /* reply pool, 4 byte align */
+ sz = ioc->reply_free_queue_depth * ioc->reply_sz;
+ ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
+ 0);
+ if (!ioc->reply_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
+ &ioc->reply_dma);
+ if (!ioc->reply) {
+ pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
+ ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply,
+ ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_dma));
+ total_sz += sz;
+
+ /* reply free queue, 16 byte align */
+ sz = ioc->reply_free_queue_depth * 4;
+ ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_free_dma_pool) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool, GFP_KERNEL,
+ &ioc->reply_free_dma);
+ if (!ioc->reply_free) {
+ pr_err(MPT3SAS_FMT "reply_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply_free pool(0x%p): " \
+ "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
+ ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_free_dma (0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->reply_free_dma));
+ total_sz += sz;
+
+ /* reply post queue, 16 byte align */
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ if (_base_is_controller_msix_enabled(ioc))
+ sz = reply_post_free_sz * ioc->reply_queue_count;
+ else
+ sz = reply_post_free_sz;
+ ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+ ioc->pdev, sz, 16, 0);
+ if (!ioc->reply_post_free_dma_pool) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_create failed\n",
+ ioc->name);
+ goto out;
+ }
+	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool,
+ GFP_KERNEL, &ioc->reply_post_free_dma);
+ if (!ioc->reply_post_free) {
+ pr_err(MPT3SAS_FMT
+ "reply_post_free pool: pci_pool_alloc failed\n",
+ ioc->name);
+ goto out;
+ }
+ memset(ioc->reply_post_free, 0, sz);
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
+ "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
+ ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
+ sz/1024));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "reply_post_free_dma = (0x%llx)\n",
+ ioc->name, (unsigned long long)
+ ioc->reply_post_free_dma));
+ total_sz += sz;
+
+ ioc->config_page_sz = 512;
+ ioc->config_page = pci_alloc_consistent(ioc->pdev,
+ ioc->config_page_sz, &ioc->config_page_dma);
+ if (!ioc->config_page) {
+ pr_err(MPT3SAS_FMT
+		    "config page: pci_alloc_consistent failed\n",
+ ioc->name);
+ goto out;
+ }
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "config page(0x%p): size(%d)\n",
+ ioc->name, ioc->config_page, ioc->config_page_sz));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "config_page_dma(0x%llx)\n",
+ ioc->name, (unsigned long long)ioc->config_page_dma));
+ total_sz += ioc->config_page_sz;
+
+ pr_info(MPT3SAS_FMT "Allocated physical memory: size(%d kB)\n",
+ ioc->name, total_sz/1024);
+ pr_info(MPT3SAS_FMT
+ "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
+ ioc->name, ioc->shost->can_queue, facts->RequestCredit);
+ pr_info(MPT3SAS_FMT "Scatter Gather Elements per IO(%d)\n",
+ ioc->name, ioc->shost->sg_tablesize);
+ return 0;
+
+ out:
+ return -ENOMEM;
+}
+
+/**
+ * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
+ * @ioc: Pointer to MPT3SAS_ADAPTER structure
+ * @cooked: Request raw or cooked IOC state
+ *
+ * Returns all IOC Doorbell register bits if cooked==0, else just the
+ * Doorbell bits in MPI2_IOC_STATE_MASK.
+ */
+u32
+mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
+{
+ u32 s, sc;
+
+ s = readl(&ioc->chip->Doorbell);
+ sc = s & MPI2_IOC_STATE_MASK;
+ return cooked ? sc : s;
+}
+
+/**
+ * _base_wait_on_iocstate - waiting on a particular ioc state
+ * @ioc: per adapter object
+ * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
+ int sleep_flag)
+{
+ u32 count, cntdn;
+ u32 current_state;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
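+	/*
+	 * e.g. timeout = 10: CAN_SLEEP polls 1000 * 10 = 10000 times at
+	 * roughly 1 msec each, NO_SLEEP polls 2000 * 10 times at 500 usec
+	 * each; both budget approximately 10 seconds.
+	 */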
+ do {
+ current_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (current_state == ioc_state)
+ return 0;
+ if (count && current_state == MPI2_IOC_STATE_FAULT)
+ break;
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ return current_state;
+}
+
+/**
+ * _base_wait_for_doorbell_int - waiting for controller interrupt (generated
+ * by a write to the doorbell)
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
+ */
+static int
+_base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
+ * doorbell.
+ */
+static int
+_base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 int_status;
+ u32 doorbell;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ int_status = readl(&ioc->chip->HostInterruptStatus);
+ if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
+ doorbell = readl(&ioc->chip->Doorbell);
+ if ((doorbell & MPI2_IOC_STATE_MASK) ==
+ MPI2_IOC_STATE_FAULT) {
+				mpt3sas_base_fault_info(ioc, doorbell);
+ return -EFAULT;
+ }
+ } else if (int_status == 0xFFFFFFFF)
+ goto out;
+
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ out:
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), int_status(%x)!\n",
+ ioc->name, __func__, count, int_status);
+ return -EFAULT;
+}
+
+/**
+ * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
+ * @ioc: per adapter object
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout,
+ int sleep_flag)
+{
+ u32 cntdn, count;
+ u32 doorbell_reg;
+
+ count = 0;
+ cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
+ do {
+ doorbell_reg = readl(&ioc->chip->Doorbell);
+ if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: successful count(%d), timeout(%d)\n",
+ ioc->name, __func__, count, timeout));
+ return 0;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ udelay(500);
+ count++;
+ } while (--cntdn);
+
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
+ ioc->name, __func__, count, doorbell_reg);
+ return -EFAULT;
+}
+
+/**
+ * _base_send_ioc_reset - send doorbell reset
+ * @ioc: per adapter object
+ * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout,
+ int sleep_flag)
+{
+ u32 ioc_state;
+ int r = 0;
+
+ if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
+ pr_err(MPT3SAS_FMT "%s: unknown reset_type\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if (!(ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
+ return -EFAULT;
+
+ pr_info(MPT3SAS_FMT "sending message unit reset !!\n", ioc->name);
+
+ writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
+ &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
+ r = -EFAULT;
+ goto out;
+ }
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+ timeout, sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ r = -EFAULT;
+ goto out;
+ }
+ out:
+ pr_info(MPT3SAS_FMT "message unit reset: %s\n",
+ ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * _base_handshake_req_reply_wait - send request through doorbell interface
+ * @ioc: per adapter object
+ * @request_bytes: request length
+ * @request: pointer to the request payload
+ * @reply_bytes: reply length
+ * @reply: pointer to the reply payload
+ * @timeout: timeout in seconds
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
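+/*
+ * Handshake sequence, as implemented below: the host posts a HANDSHAKE
+ * function to the Doorbell along with the request length in dwords, waits
+ * for the IOC to acknowledge, writes the request one 32-bit dword at a
+ * time (each write individually acked), then reads the reply back one
+ * 16-bit word at a time from the same Doorbell register, clearing
+ * HostInterruptStatus between reads.
+ */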
+static int
+_base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
+ u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
+{
+ MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
+ int i;
+ u8 failed;
+ u16 dummy;
+ __le32 *mfp;
+
+ /* make sure doorbell is not in use */
+ if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
+ pr_err(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* clear pending doorbell interrupts from previous state changes */
+ if (readl(&ioc->chip->HostInterruptStatus) &
+ MPI2_HIS_IOC2SYS_DB_STATUS)
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ /* send message to ioc */
+ writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
+ ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
+ &ioc->chip->Doorbell);
+
+ if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake ack failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* send message 32-bits at a time */
+ for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
+ writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
+ if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
+ failed = 1;
+ }
+
+ if (failed) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake sending request failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+ /* now wait for the reply */
+ if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+
+	/* read the first two 16-bit words; they include the total reply length */
+ reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ for (i = 2; i < default_reply->MsgLength * 2; i++) {
+ if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
+ pr_err(MPT3SAS_FMT
+ "doorbell handshake int failed (line=%d)\n",
+ ioc->name, __LINE__);
+ return -EFAULT;
+ }
+ if (i >= reply_bytes/2) /* overflow case */
+ dummy = readl(&ioc->chip->Doorbell);
+ else
+ reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
+ & MPI2_DOORBELL_DATA_MASK);
+ writel(0, &ioc->chip->HostInterruptStatus);
+ }
+
+ _base_wait_for_doorbell_int(ioc, 5, sleep_flag);
+ if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "doorbell is in use (line=%d)\n", ioc->name, __LINE__));
+ }
+ writel(0, &ioc->chip->HostInterruptStatus);
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ mfp = (__le32 *)reply;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < reply_bytes/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SAS IO Unit Control Request message allows the host to perform
+ * low-level operations, such as resets on the PHYs of the IO Unit. It
+ * also allows the host to obtain the IOC-assigned device handle for a
+ * device, given other identifying information about it, and to remove
+ * IOC resources associated with the device.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+	u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
+ if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
+ ioc->ioc_link_reset_in_progress = 1;
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
+ mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
+ ioc->ioc_link_reset_in_progress)
+ ioc->ioc_link_reset_in_progress = 0;
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SasIoUnitControlRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SasIoUnitControlReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
+ * @ioc: per adapter object
+ * @mpi_reply: the reply payload from FW
+ * @mpi_request: the request payload sent to FW
+ *
+ * The SCSI Enclosure Processor request message causes the IOC to
+ * communicate with SES devices to control LED status signals.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+	u8 issue_reset = 0;
+ int rc;
+ void *request;
+ u16 wait_state_count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mutex_lock(&ioc->base_cmds.mutex);
+
+ if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: base_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
+ msecs_to_jiffies(10000));
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SepRequest_t)/4);
+ if (!(ioc->base_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
+ memcpy(mpi_reply, ioc->base_cmds.reply,
+ sizeof(Mpi2SepReply_t));
+ else
+ memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EFAULT;
+ out:
+ mutex_unlock(&ioc->base_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _base_get_port_facts - obtain port facts reply and save in ioc
+ * @ioc: per adapter object
+ * @port: port number
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port, int sleep_flag)
+{
+ Mpi2PortFactsRequest_t mpi_request;
+ Mpi2PortFactsReply_t mpi_reply;
+ struct mpt3sas_port_facts *pfacts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
+ mpi_request.PortNumber = port;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ pfacts = &ioc->pfacts[port];
+ memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
+ pfacts->PortNumber = mpi_reply.PortNumber;
+ pfacts->VP_ID = mpi_reply.VP_ID;
+ pfacts->VF_ID = mpi_reply.VF_ID;
+ pfacts->MaxPostedCmdBuffers =
+ le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
+
+ return 0;
+}
+
+/**
+ * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCFactsRequest_t mpi_request;
+ Mpi2IOCFactsReply_t mpi_reply;
+ struct mpt3sas_facts *facts;
+ int mpi_reply_sz, mpi_request_sz, r;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
+ mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
+ memset(&mpi_request, 0, mpi_request_sz);
+ mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
+ r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
+ (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ facts = &ioc->facts;
+ memset(facts, 0, sizeof(struct mpt3sas_facts));
+ facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
+ facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
+ facts->VP_ID = mpi_reply.VP_ID;
+ facts->VF_ID = mpi_reply.VF_ID;
+ facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
+ facts->MaxChainDepth = mpi_reply.MaxChainDepth;
+ facts->WhoInit = mpi_reply.WhoInit;
+ facts->NumberOfPorts = mpi_reply.NumberOfPorts;
+ facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
+ facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
+ facts->MaxReplyDescriptorPostQueueDepth =
+ le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
+ facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
+ facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
+ if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
+ ioc->ir_firmware = 1;
+ facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
+ facts->IOCRequestFrameSize =
+ le16_to_cpu(mpi_reply.IOCRequestFrameSize);
+ facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
+ facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
+ ioc->shost->max_id = -1;
+ facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
+ facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
+ facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
+ facts->HighPriorityCredit =
+ le16_to_cpu(mpi_reply.HighPriorityCredit);
+ facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
+ facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "hba queue depth(%d), max chains per io(%d)\n",
+ ioc->name, facts->RequestCredit,
+ facts->MaxChainDepth));
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "request frame size(%d), reply frame size(%d)\n", ioc->name,
+ facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
+ return 0;
+}
+
+/**
+ * _base_send_ioc_init - send ioc_init to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2IOCInitRequest_t mpi_request;
+ Mpi2IOCInitReply_t mpi_reply;
+ int r;
+ struct timeval current_time;
+ u16 ioc_status;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
+ mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
+ mpi_request.VF_ID = 0; /* TODO */
+ mpi_request.VP_ID = 0;
+ mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
+ mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
+
+ if (_base_is_controller_msix_enabled(ioc))
+ mpi_request.HostMSIxVectors = ioc->reply_queue_count;
+ mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
+ mpi_request.ReplyDescriptorPostQueueDepth =
+ cpu_to_le16(ioc->reply_post_queue_depth);
+ mpi_request.ReplyFreeQueueDepth =
+ cpu_to_le16(ioc->reply_free_queue_depth);
+
+ mpi_request.SenseBufferAddressHigh =
+ cpu_to_le32((u64)ioc->sense_dma >> 32);
+ mpi_request.SystemReplyAddressHigh =
+ cpu_to_le32((u64)ioc->reply_dma >> 32);
+ mpi_request.SystemRequestFrameBaseAddress =
+ cpu_to_le64((u64)ioc->request_dma);
+ mpi_request.ReplyFreeQueueAddress =
+ cpu_to_le64((u64)ioc->reply_free_dma);
+ mpi_request.ReplyDescriptorPostQueueAddress =
+ cpu_to_le64((u64)ioc->reply_post_free_dma);
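+	/*
+	 * Example (address assumed for illustration): for
+	 * sense_dma = 0x000000012345f000, SenseBufferAddressHigh is
+	 * programmed with the upper dword 0x1; the queue base addresses
+	 * above are passed as full 64-bit values.
+	 */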
+
+ /* This time stamp specifies number of milliseconds
+ * since epoch ~ midnight January 1, 1970.
+ */
+ do_gettimeofday(&current_time);
+ mpi_request.TimeStamp = cpu_to_le64((u64)current_time.tv_sec * 1000 +
+ (current_time.tv_usec / 1000));
+
+ if (ioc->logging_level & MPT_DEBUG_INIT) {
+ __le32 *mfp;
+ int i;
+
+ mfp = (__le32 *)&mpi_request;
+ pr_info("\toffset:data\n");
+ for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
+ pr_info("\t[0x%02x]:%08x\n", i*4,
+ le32_to_cpu(mfp[i]));
+ }
+
+ r = _base_handshake_req_reply_wait(ioc,
+ sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
+ sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
+ sleep_flag);
+
+ if (r != 0) {
+ pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
+ ioc->name, __func__, r);
+ return r;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
+ mpi_reply.IOCLogInfo) {
+ pr_err(MPT3SAS_FMT "%s: failed\n", ioc->name, __func__);
+ r = -EIO;
+ }
+
+	return r;
+}
+
+/**
+ * mpt3sas_port_enable_done - command completion routine for port enable
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32-bit address)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt,
+ * 0 meaning the mf is freed from this function.
+ */
+u8
+mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ u16 ioc_status;
+
+ if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (!mpi_reply)
+ return 1;
+
+ if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
+ return 1;
+
+ ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
+ ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
+ ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ ioc->port_enable_failed = 1;
+
+ if (ioc->is_driver_loading) {
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ mpt3sas_port_enable_complete(ioc);
+ return 1;
+ } else {
+ ioc->start_scan_failed = ioc_status;
+ ioc->start_scan = 0;
+ return 1;
+ }
+ }
+ complete(&ioc->port_enable_cmds.done);
+ return 1;
+}
+
+/**
+ * _base_send_port_enable - send port_enable (starts device discovery) to firmware
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_send_port_enable(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ Mpi2PortEnableReply_t *mpi_reply;
+ unsigned long timeleft;
+ int r = 0;
+ u16 smid;
+ u16 ioc_status;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ init_completion(&ioc->port_enable_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->port_enable_cmds.done,
+ 300*HZ);
+ if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2PortEnableRequest_t)/4);
+ if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ goto out;
+ }
+
+ mpi_reply = ioc->port_enable_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "%s: failed with (ioc_status=0x%08x)\n",
+ ioc->name, __func__, ioc_status);
+ r = -EFAULT;
+ goto out;
+ }
+
+ out:
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT "port enable: %s\n", ioc->name, ((r == 0) ?
+ "SUCCESS" : "FAILED"));
+ return r;
+}
+
+/**
+ * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2PortEnableRequest_t *mpi_request;
+ u16 smid;
+
+ pr_info(MPT3SAS_FMT "sending port enable !!\n", ioc->name);
+
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->port_enable_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
+
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+}
+
+/**
+ * _base_determine_wait_on_discovery - decide whether to wait for discovery
+ * @ioc: per adapter object
+ *
+ * Decide whether to wait on discovery to complete. Used to either
+ * locate boot device, or report volumes ahead of physical devices.
+ *
+ * Returns 1 for wait, 0 for don't wait
+ */
+static int
+_base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
+{
+ /* We wait for discovery to complete if IR firmware is loaded.
+ * The sas topology events arrive before PD events, so we need time to
+	 * turn on the bit in ioc->pd_handles to indicate a PD.
+	 * Also, it may be required to report Volumes ahead of physical
+ * devices when MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING is set.
+ */
+ if (ioc->ir_firmware)
+ return 1;
+
+	/* if there is no BIOS, then we don't need to wait */
+ if (!ioc->bios_pg3.BiosVersion)
+ return 0;
+
+	/* The BIOS is present, so we drop down here.
+	 *
+	 * If there are any entries in BIOS Page 2, then we wait
+	 * for discovery to complete.
+	 */
+
+ /* Current Boot Device */
+ if ((ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Request Boot Device */
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
+ /* Alternate Request Boot Device */
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK) ==
+ MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
+ return 0;
+
+ return 1;
+}
+
+/**
+ * _base_unmask_events - turn on notification for this event
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The mask is stored in ioc->event_masks.
+ */
+static void
+_base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u32 desired_event;
+
+ if (event >= 128)
+ return;
+
+ desired_event = (1 << (event % 32));
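+	/*
+	 * Example (event value assumed for illustration): event 0x21 (33)
+	 * selects word event_masks[1], bit 33 % 32 = 1; clearing that bit
+	 * below unmasks the event.
+	 */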
+
+ if (event < 32)
+ ioc->event_masks[0] &= ~desired_event;
+ else if (event < 64)
+ ioc->event_masks[1] &= ~desired_event;
+ else if (event < 96)
+ ioc->event_masks[2] &= ~desired_event;
+ else if (event < 128)
+ ioc->event_masks[3] &= ~desired_event;
+}
+
+/**
+ * _base_event_notification - send event notification
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_event_notification(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ Mpi2EventNotificationRequest_t *mpi_request;
+ unsigned long timeleft;
+ u16 smid;
+ int r = 0;
+ int i;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ pr_err(MPT3SAS_FMT "%s: internal command already in use\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return -EAGAIN;
+ }
+ ioc->base_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->base_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ mpi_request->EventMasks[i] =
+ cpu_to_le32(ioc->event_masks[i]);
+ init_completion(&ioc->base_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
+ if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2EventNotificationRequest_t)/4);
+ if (ioc->base_cmds.status & MPT3_CMD_RESET)
+ r = -EFAULT;
+ else
+ r = -ETIME;
+ } else
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s: complete\n",
+ ioc->name, __func__));
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ return r;
+}
+
+/**
+ * mpt3sas_base_validate_event_type - validating event types
+ * @ioc: per adapter object
+ * @event_type: event type bitmask requested by the application
+ *
+ * This will turn on firmware event notification when an application
+ * asks for that event. We don't mask events that are already enabled.
+ */
+void
+mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
+{
+ int i, j;
+ u32 event_mask, desired_event;
+ u8 send_update_to_fw;
+
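+	/*
+	 * event_type is the bitmask the application wants enabled; a clear
+	 * bit in ioc->event_masks means the event is already unmasked.
+	 * Example (values assumed for illustration): if event_type[0] has
+	 * bit 5 set while ioc->event_masks[0] still has bit 5 set (masked),
+	 * the bit is cleared and the updated masks are sent to firmware.
+	 */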
+ for (i = 0, send_update_to_fw = 0; i <
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
+ event_mask = ~event_type[i];
+ desired_event = 1;
+ for (j = 0; j < 32; j++) {
+ if (!(event_mask & desired_event) &&
+ (ioc->event_masks[i] & desired_event)) {
+ ioc->event_masks[i] &= ~desired_event;
+ send_update_to_fw = 1;
+ }
+ desired_event = (desired_event << 1);
+ }
+ }
+
+ if (!send_update_to_fw)
+ return;
+
+ mutex_lock(&ioc->base_cmds.mutex);
+ _base_event_notification(ioc, CAN_SLEEP);
+ mutex_unlock(&ioc->base_cmds.mutex);
+}
+
+/**
+ * _base_diag_reset - the "big hammer" start of day reset
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_diag_reset(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 host_diagnostic;
+ u32 ioc_state;
+ u32 count;
+ u32 hcb_size;
+
+ pr_info(MPT3SAS_FMT "sending diag reset !!\n", ioc->name);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "clear interrupts\n",
+ ioc->name));
+
+ count = 0;
+ do {
+ /* Write magic sequence to WriteSequence register
+ * Loop until in diagnostic mode
+ */
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "write magic sequence\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
+ writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ /* wait 100 msec */
+ if (sleep_flag == CAN_SLEEP)
+ msleep(100);
+ else
+ mdelay(100);
+
+ if (count++ > 20)
+ goto out;
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
+ ioc->name, count, host_diagnostic));
+
+ } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
+
+ hcb_size = readl(&ioc->chip->HCBSize);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "diag reset: issued\n",
+ ioc->name));
+ writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
+ &ioc->chip->HostDiagnostic);
+
+ /* don't access any registers for 50 milliseconds */
+ msleep(50);
+
+	/* wait for the reset to complete: up to 3000000 polls of ~1 msec each */
+	for (count = 0; count < 3000000; count++) {
+
+ host_diagnostic = readl(&ioc->chip->HostDiagnostic);
+
+ if (host_diagnostic == 0xFFFFFFFF)
+ goto out;
+ if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
+ break;
+
+ /* wait 1 msec */
+ if (sleep_flag == CAN_SLEEP)
+ usleep_range(1000, 1500);
+ else
+ mdelay(1);
+ }
+
+ if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "restart the adapter assuming the HCB Address points to good F/W\n",
+ ioc->name));
+ host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
+ host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
+ writel(host_diagnostic, &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "re-enable the HCDW\n", ioc->name));
+ writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
+ &ioc->chip->HCBSize);
+ }
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT "restart the adapter\n",
+ ioc->name));
+ writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
+ &ioc->chip->HostDiagnostic);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "disable writes to the diagnostic register\n", ioc->name));
+ writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
+
+ drsprintk(ioc, pr_info(MPT3SAS_FMT
+ "Wait for FW to go to the READY state\n", ioc->name));
+ ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
+ sleep_flag);
+ if (ioc_state) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ goto out;
+ }
+
+ pr_info(MPT3SAS_FMT "diag reset: SUCCESS\n", ioc->name);
+ return 0;
+
+ out:
+ pr_err(MPT3SAS_FMT "diag reset: FAILED\n", ioc->name);
+ return -EFAULT;
+}
+
+/**
+ * _base_make_ioc_ready - put controller in READY state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ u32 ioc_state;
+ int rc;
+ int count;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery)
+ return 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT "%s: ioc_state(0x%08x)\n",
+ ioc->name, __func__, ioc_state));
+
+ /* if in RESET state, it should move to READY state shortly */
+ count = 0;
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
+ while ((ioc_state & MPI2_IOC_STATE_MASK) !=
+ MPI2_IOC_STATE_READY) {
+ if (count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed going to ready state (ioc_state=0x%x)\n",
+ ioc->name, __func__, ioc_state);
+ return -EFAULT;
+ }
+ if (sleep_flag == CAN_SLEEP)
+ ssleep(1);
+ else
+ mdelay(1000);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ }
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
+ return 0;
+
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n",
+ ioc->name));
+ goto issue_diag_reset;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ goto issue_diag_reset;
+ }
+
+ if (type == FORCE_BIG_HAMMER)
+ goto issue_diag_reset;
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
+ if (!(_base_send_ioc_reset(ioc,
+ MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP))) {
+ return 0;
+ }
+
+ issue_diag_reset:
+ rc = _base_diag_reset(ioc, CAN_SLEEP);
+ return rc;
+}
+
+/**
+ * _base_make_ioc_operational - put controller in OPERATIONAL state
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ int r, i;
+ unsigned long flags;
+ u32 reply_address;
+ u16 smid;
+ struct _tr_list *delayed_tr, *delayed_tr_next;
+ struct adapter_reply_queue *reply_q;
+ long reply_post_free;
+ u32 reply_post_free_sz;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* clean the delayed target reset list */
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ list_for_each_entry_safe(delayed_tr, delayed_tr_next,
+ &ioc->delayed_tr_volume_list, list) {
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ }
+
+ /* initialize the scsi lookup free list */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ INIT_LIST_HEAD(&ioc->free_list);
+ smid = 1;
+ for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
+ INIT_LIST_HEAD(&ioc->scsi_lookup[i].chain_list);
+ ioc->scsi_lookup[i].cb_idx = 0xFF;
+ ioc->scsi_lookup[i].smid = smid;
+ ioc->scsi_lookup[i].scmd = NULL;
+ list_add_tail(&ioc->scsi_lookup[i].tracker_list,
+ &ioc->free_list);
+ }
+
+ /* hi-priority queue */
+ INIT_LIST_HEAD(&ioc->hpr_free_list);
+ smid = ioc->hi_priority_smid;
+ for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
+ ioc->hpr_lookup[i].cb_idx = 0xFF;
+ ioc->hpr_lookup[i].smid = smid;
+ list_add_tail(&ioc->hpr_lookup[i].tracker_list,
+ &ioc->hpr_free_list);
+ }
+
+ /* internal queue */
+ INIT_LIST_HEAD(&ioc->internal_free_list);
+ smid = ioc->internal_smid;
+ for (i = 0; i < ioc->internal_depth; i++, smid++) {
+ ioc->internal_lookup[i].cb_idx = 0xFF;
+ ioc->internal_lookup[i].smid = smid;
+ list_add_tail(&ioc->internal_lookup[i].tracker_list,
+ &ioc->internal_free_list);
+ }
+
+ /* chain pool */
+ INIT_LIST_HEAD(&ioc->free_chain_list);
+ for (i = 0; i < ioc->chain_depth; i++)
+ list_add_tail(&ioc->chain_lookup[i].tracker_list,
+ &ioc->free_chain_list);
+
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ /* initialize Reply Free Queue */
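+ /* each entry carries the dma address of one reply frame */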
+ for (i = 0, reply_address = (u32)ioc->reply_dma ;
+ i < ioc->reply_free_queue_depth ; i++, reply_address +=
+ ioc->reply_sz)
+ ioc->reply_free[i] = cpu_to_le32(reply_address);
+
+ /* initialize reply queues */
+ if (ioc->is_driver_loading)
+ _base_assign_reply_queues(ioc);
+
+ /* initialize Reply Post Free Queue */
+ reply_post_free = (long)ioc->reply_post_free;
+ reply_post_free_sz = ioc->reply_post_queue_depth *
+ sizeof(Mpi2DefaultReplyDescriptor_t);
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ reply_q->reply_post_host_index = 0;
+ reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
+ reply_post_free;
+ for (i = 0; i < ioc->reply_post_queue_depth; i++)
+ reply_q->reply_post_free[i].Words =
+ cpu_to_le64(ULLONG_MAX);
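+ /* without MSI-X there is only a single reply post queue, so stop
+ * after initializing the first one
+ */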
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_free_queue;
+ reply_post_free += reply_post_free_sz;
+ }
+ skip_init_reply_post_free_queue:
+
+ r = _base_send_ioc_init(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ /* initialize reply free host index */
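+ /* every reply frame starts out free, so the tail points at the last entry */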
+ ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
+ writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
+
+ /* initialize reply post host index */
+ list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
+ writel(reply_q->msix_index << MPI2_RPHI_MSIX_INDEX_SHIFT,
+ &ioc->chip->ReplyPostHostIndex);
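+ /* a single host index register write covers the non-MSI-X case */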
+ if (!_base_is_controller_msix_enabled(ioc))
+ goto skip_init_reply_post_host_index;
+ }
+
+ skip_init_reply_post_host_index:
+
+ _base_unmask_interrupts(ioc);
+ r = _base_event_notification(ioc, sleep_flag);
+ if (r)
+ return r;
+
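+ /* config page requests wait on completions, so they need a sleeping context */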
+ if (sleep_flag == CAN_SLEEP)
+ _base_static_config_pages(ioc);
+
+
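+ /* during initial driver load, port enable is deferred to the
+ * scan_start/scan_finished path
+ */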
+ if (ioc->is_driver_loading) {
+ ioc->wait_for_discovery_to_complete =
+ _base_determine_wait_on_discovery(ioc);
+
+ return r; /* scan_start and scan_finished support */
+ }
+
+ r = _base_send_port_enable(ioc, sleep_flag);
+ if (r)
+ return r;
+
+ return r;
+}
+
+/**
+ * mpt3sas_base_free_resources - free controller resources
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct pci_dev *pdev = ioc->pdev;
+
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ _base_mask_interrupts(ioc);
+ ioc->shost_recovery = 1;
+ _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ ioc->shost_recovery = 0;
+ _base_free_irq(ioc);
+ _base_disable_msix(ioc);
+ if (ioc->chip_phys)
+ iounmap(ioc->chip);
+ ioc->chip_phys = 0;
+ pci_release_selected_regions(ioc->pdev, ioc->bars);
+ pci_disable_pcie_error_reporting(pdev);
+ pci_disable_device(pdev);
+ return;
+}
+
+/**
+ * mpt3sas_base_attach - attach controller instance
+ * @ioc: per adapter object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
+{
+ int r, i;
+ int cpu_id, last_cpu_id = 0;
+
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ /* setup cpu_msix_table */
+ ioc->cpu_count = num_online_cpus();
+ for_each_online_cpu(cpu_id)
+ last_cpu_id = cpu_id;
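+ /* size the table by the highest cpu id, since ids may be sparse */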
+ ioc->cpu_msix_table_sz = last_cpu_id + 1;
+ ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
+ ioc->reply_queue_count = 1;
+ if (!ioc->cpu_msix_table) {
+ dfailprintk(ioc, pr_info(MPT3SAS_FMT
+ "allocation for cpu_msix_table failed!!!\n",
+ ioc->name));
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ goto out_free_resources;
+
+
+ pci_set_drvdata(ioc->pdev, ioc->shost);
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ /*
+ * In SAS3.0,
+ * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
+ * Target Status all require IEEE-formatted scatter gather
+ * elements.
+ */
+
+ ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
+ ioc->build_sg = &_base_build_sg_ieee;
+ ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
+ ioc->mpi25 = 1;
+ ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
+
+ /*
+ * These function pointers are for requests that don't require
+ * IEEE scatter gather elements.
+ *
+ * For example, Configuration Pages and SAS IOUNIT Control don't.
+ */
+ ioc->build_sg_mpi = &_base_build_sg;
+ ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;
+
+ r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
+ if (r)
+ goto out_free_resources;
+
+ ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
+ sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
+ if (!ioc->pfacts) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
+ r = _base_get_port_facts(ioc, i, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+ }
+
+ r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ init_waitqueue_head(&ioc->reset_wq);
+
+ /* allocate memory for the pd handle bitmask */
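+ /* one bit per device handle, rounded up to whole bytes */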
+ ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
+ if (ioc->facts.MaxDevHandle % 8)
+ ioc->pd_handles_sz++;
+ ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->pd_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+ ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
+ GFP_KERNEL);
+ if (!ioc->blocking_handles) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
+ ioc->fwfault_debug = mpt3sas_fwfault_debug;
+
+ /* base internal command bits */
+ mutex_init(&ioc->base_cmds.mutex);
+ ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* port_enable command bits */
+ ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
+
+ /* transport internal command bits */
+ ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->transport_cmds.mutex);
+
+ /* scsih internal command bits */
+ ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->scsih_cmds.mutex);
+
+ /* task management internal command bits */
+ ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->tm_cmds.mutex);
+
+ /* config page internal command bits */
+ ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->config_cmds.mutex);
+
+ /* ctl module internal command bits */
+ ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
+ ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_init(&ioc->ctl_cmds.mutex);
+
+ if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply ||
+ !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply ||
+ !ioc->config_cmds.reply || !ioc->ctl_cmds.reply ||
+ !ioc->ctl_cmds.sense) {
+ r = -ENOMEM;
+ goto out_free_resources;
+ }
+
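+ /* mask all events by default; unmask the interesting ones below */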
+ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
+ ioc->event_masks[i] = -1;
+
+ /* here we enable the events we care about */
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
+ _base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
+ _base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
+
+ r = _base_make_ioc_operational(ioc, CAN_SLEEP);
+ if (r)
+ goto out_free_resources;
+
+ return 0;
+
+ out_free_resources:
+
+ ioc->remove_host = 1;
+
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->pfacts);
+ ioc->ctl_cmds.reply = NULL;
+ ioc->base_cmds.reply = NULL;
+ ioc->tm_cmds.reply = NULL;
+ ioc->scsih_cmds.reply = NULL;
+ ioc->transport_cmds.reply = NULL;
+ ioc->config_cmds.reply = NULL;
+ ioc->pfacts = NULL;
+ return r;
+}
+
+
+/**
+ * mpt3sas_base_detach - remove controller instance
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
+{
+ dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ _base_release_memory_pools(ioc);
+ pci_set_drvdata(ioc->pdev, NULL);
+ kfree(ioc->cpu_msix_table);
+ kfree(ioc->pd_handles);
+ kfree(ioc->blocking_handles);
+ kfree(ioc->pfacts);
+ kfree(ioc->ctl_cmds.reply);
+ kfree(ioc->ctl_cmds.sense);
+ kfree(ioc->base_cmds.reply);
+ kfree(ioc->port_enable_cmds.reply);
+ kfree(ioc->tm_cmds.reply);
+ kfree(ioc->transport_cmds.reply);
+ kfree(ioc->scsih_cmds.reply);
+ kfree(ioc->config_cmds.reply);
+}
+
+/**
+ * _base_reset_handler - reset callback handler (for base)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * or MPT3_IOC_DONE_RESET.
+ *
+ * Return nothing.
+ */
+static void
+_base_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ mpt3sas_scsih_reset_handler(ioc, reset_phase);
+ mpt3sas_ctl_reset_handler(ioc, reset_phase);
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
+ ioc->transport_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
+ complete(&ioc->transport_cmds.done);
+ }
+ if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
+ ioc->base_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
+ complete(&ioc->base_cmds.done);
+ }
+ if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
+ ioc->port_enable_failed = 1;
+ ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
+ if (ioc->is_driver_loading) {
+ ioc->start_scan_failed =
+ MPI2_IOCSTATUS_INTERNAL_ERROR;
+ ioc->start_scan = 0;
+ ioc->port_enable_cmds.status =
+ MPT3_CMD_NOT_USED;
+ } else
+ complete(&ioc->port_enable_cmds.done);
+ }
+ if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
+ ioc->config_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ break;
+ }
+}
+
+/**
+ * _wait_for_commands_to_complete - wait for pending commands to complete
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * This function waits (up to 10 seconds) for all pending commands to
+ * complete prior to putting the controller in reset.
+ */
+static void
+_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc, int sleep_flag)
+{
+ u32 ioc_state;
+ unsigned long flags;
+ u16 i;
+
+ ioc->pending_io_count = 0;
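+ /* waiting on the reset_wq is only possible from a sleeping context */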
+ if (sleep_flag != CAN_SLEEP)
+ return;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
+ return;
+
+ /* pending command count */
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = 0; i < ioc->scsiio_depth; i++)
+ if (ioc->scsi_lookup[i].cb_idx != 0xFF)
+ ioc->pending_io_count++;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!ioc->pending_io_count)
+ return;
+
+ /* wait for pending commands to complete */
+ wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
+}
+
+/**
+ * mpt3sas_base_hard_reset_handler - reset controller
+ * @ioc: per adapter object
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ * @type: FORCE_BIG_HAMMER or SOFT_RESET
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type)
+{
+ int r;
+ unsigned long flags;
+ u32 ioc_state;
+ u8 is_fault = 0, is_trigger = 0;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ if (ioc->pci_error_recovery) {
+ pr_err(MPT3SAS_FMT "%s: pci error recovery reset\n",
+ ioc->name, __func__);
+ r = 0;
+ goto out_unlocked;
+ }
+
+ if (mpt3sas_fwfault_debug)
+ mpt3sas_halt_firmware(ioc);
+
+ /* TODO - What we really should be doing is pulling
+ * out all the code associated with NO_SLEEP; it's never used.
+ * That is legacy code from the mpt fusion driver, ported over.
+ * I will leave this BUG_ON here for now until it's been resolved.
+ */
+ BUG_ON(sleep_flag == NO_SLEEP);
+
+ /* wait for an active reset in progress to complete */
+ if (!mutex_trylock(&ioc->reset_in_progress_mutex)) {
+ do {
+ ssleep(1);
+ } while (ioc->shost_recovery == 1);
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return ioc->ioc_reset_in_progress_status;
+ }
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->shost_recovery = 1;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
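+ /* record whether a trace buffer is registered, and whether the ioc
+ * is faulted, so the right trigger can fire after a successful reset
+ */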
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))) {
+ is_trigger = 1;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
+ is_fault = 1;
+ }
+ _base_reset_handler(ioc, MPT3_IOC_PRE_RESET);
+ _wait_for_commands_to_complete(ioc, sleep_flag);
+ _base_mask_interrupts(ioc);
+ r = _base_make_ioc_ready(ioc, sleep_flag, type);
+ if (r)
+ goto out;
+ _base_reset_handler(ioc, MPT3_IOC_AFTER_RESET);
+
+ /* If this hard reset is called while port enable is active, then
+ * there is no reason to call make_ioc_operational
+ */
+ if (ioc->is_driver_loading && ioc->port_enable_failed) {
+ ioc->remove_host = 1;
+ r = -EFAULT;
+ goto out;
+ }
+ r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+ if (r)
+ goto out;
+ r = _base_make_ioc_operational(ioc, sleep_flag);
+ if (!r)
+ _base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
+
+ out:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: %s\n",
+ ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
+
+ spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_in_progress_status = r;
+ ioc->shost_recovery = 0;
+ spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+ ioc->ioc_reset_count++;
+ mutex_unlock(&ioc->reset_in_progress_mutex);
+
+ out_unlocked:
+ if ((r == 0) && is_trigger) {
+ if (is_fault)
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
+ else
+ mpt3sas_trigger_master(ioc,
+ MASTER_TRIGGER_ADAPTER_RESET);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+ return r;
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
new file mode 100644
index 000000000000..994656cbfac9
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -0,0 +1,1139 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * for access to MPT (Message Passing Technology) firmware.
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_BASE_H_INCLUDED
+#define MPT3SAS_BASE_H_INCLUDED
+
+#include "mpi/mpi2_type.h"
+#include "mpi/mpi2.h"
+#include "mpi/mpi2_ioc.h"
+#include "mpi/mpi2_cnfg.h"
+#include "mpi/mpi2_init.h"
+#include "mpi/mpi2_raid.h"
+#include "mpi/mpi2_tool.h"
+#include "mpi/mpi2_sas.h"
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "mpt3sas_debug.h"
+#include "mpt3sas_trigger_diag.h"
+
+/* driver versioning info */
+#define MPT3SAS_DRIVER_NAME "mpt3sas"
+#define MPT3SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
+#define MPT3SAS_DESCRIPTION "LSI MPT Fusion SAS 3.0 Device Driver"
+#define MPT3SAS_DRIVER_VERSION "01.100.01.00"
+#define MPT3SAS_MAJOR_VERSION 1
+#define MPT3SAS_MINOR_VERSION 100
+#define MPT3SAS_BUILD_VERSION 1
+#define MPT3SAS_RELEASE_VERSION 00
+
+/*
+ * Set MPT3SAS_SG_DEPTH value based on user input.
+ */
+#define MPT3SAS_MAX_PHYS_SEGMENTS SCSI_MAX_SG_SEGMENTS
+#define MPT3SAS_MIN_PHYS_SEGMENTS 16
+#ifdef CONFIG_SCSI_MPT3SAS_MAX_SGE
+#define MPT3SAS_SG_DEPTH CONFIG_SCSI_MPT3SAS_MAX_SGE
+#else
+#define MPT3SAS_SG_DEPTH MPT3SAS_MAX_PHYS_SEGMENTS
+#endif
+
+
+/*
+ * Generic Defines
+ */
+#define MPT3SAS_SATA_QUEUE_DEPTH 32
+#define MPT3SAS_SAS_QUEUE_DEPTH 254
+#define MPT3SAS_RAID_QUEUE_DEPTH 128
+
+#define MPT_NAME_LENGTH 32 /* generic length of strings */
+#define MPT_STRING_LENGTH 64
+
+#define MPT_MAX_CALLBACKS 32
+
+
+#define CAN_SLEEP 1
+#define NO_SLEEP 0
+
+#define INTERNAL_CMDS_COUNT 10 /* reserved cmds */
+
+#define MPI3_HIM_MASK 0xFFFFFFFF /* mask every bit */
+
+#define MPT3SAS_INVALID_DEVICE_HANDLE 0xFFFF
+
+/*
+ * reset phases
+ */
+#define MPT3_IOC_PRE_RESET 1 /* prior to host reset */
+#define MPT3_IOC_AFTER_RESET 2 /* just after host reset */
+#define MPT3_IOC_DONE_RESET 3 /* links re-initialized */
+
+/*
+ * logging format
+ */
+#define MPT3SAS_FMT "%s: "
+
+/*
+ * per target private data
+ */
+#define MPT_TARGET_FLAGS_RAID_COMPONENT 0x01
+#define MPT_TARGET_FLAGS_VOLUME 0x02
+#define MPT_TARGET_FLAGS_DELETED 0x04
+#define MPT_TARGET_FASTPATH_IO 0x08
+
+
+
+/*
+ * status bits for ioc->diag_buffer_status
+ */
+#define MPT3_DIAG_BUFFER_IS_REGISTERED (0x01)
+#define MPT3_DIAG_BUFFER_IS_RELEASED (0x02)
+#define MPT3_DIAG_BUFFER_IS_DIAG_RESET (0x04)
+
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID (0x00000000)
+#define MFG10_OEM_ID_DELL (0x00000001)
+#define MFG10_OEM_ID_FSC (0x00000002)
+#define MFG10_OEM_ID_SUN (0x00000003)
+#define MFG10_OEM_ID_IBM (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT (0x00000002)
+#define MFG10_GF0_R10_DISPLAY (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0 (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+struct Mpi2ManufacturingPage10_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ U8 OEMIdentifier; /* 04h */
+ U8 Reserved1; /* 05h */
+ U16 Reserved2; /* 06h */
+ U32 Reserved3; /* 08h */
+ U32 GenericFlags0; /* 0Ch */
+ U32 GenericFlags1; /* 10h */
+ U32 Reserved4; /* 14h */
+ U32 OEMSpecificFlags0; /* 18h */
+ U32 OEMSpecificFlags1; /* 1Ch */
+ U32 Reserved5[18]; /* 20h - 64h */
+};
+
+
+/* Miscellaneous options */
+struct Mpi2ManufacturingPage11_t {
+ MPI2_CONFIG_PAGE_HEADER Header; /* 00h */
+ __le32 Reserved1; /* 04h */
+ u8 Reserved2; /* 08h */
+ u8 EEDPTagMode; /* 09h */
+ u8 Reserved3; /* 0Ah */
+ u8 Reserved4; /* 0Bh */
+ __le32 Reserved5[23]; /* 0Ch - 64h */
+};
+
+/**
+ * struct MPT3SAS_TARGET - starget private hostdata
+ * @starget: starget object
+ * @sas_address: target sas address
+ * @handle: device handle
+ * @num_luns: number of luns
+ * @flags: MPT_TARGET_FLAGS_XXX flags
+ * @deleted: target flagged for deletion
+ * @tm_busy: target is busy with TM request.
+ */
+struct MPT3SAS_TARGET {
+ struct scsi_target *starget;
+ u64 sas_address;
+ u16 handle;
+ int num_luns;
+ u32 flags;
+ u8 deleted;
+ u8 tm_busy;
+};
+
+
+/*
+ * per device private data
+ */
+#define MPT_DEVICE_FLAGS_INIT 0x01
+#define MPT_DEVICE_TLR_ON 0x02
+
+/**
+ * struct MPT3SAS_DEVICE - sdev private hostdata
+ * @sas_target: starget private hostdata
+ * @lun: lun number
+ * @flags: MPT_DEVICE_XXX flags
+ * @configured_lun: lun is configured
+ * @block: device is in SDEV_BLOCK state
+ * @tlr_snoop_check: flag used in determining whether to disable TLR
+ */
+struct MPT3SAS_DEVICE {
+ struct MPT3SAS_TARGET *sas_target;
+ unsigned int lun;
+ u32 flags;
+ u8 configured_lun;
+ u8 block;
+ u8 tlr_snoop_check;
+};
+
+#define MPT3_CMD_NOT_USED 0x8000 /* free */
+#define MPT3_CMD_COMPLETE 0x0001 /* completed */
+#define MPT3_CMD_PENDING 0x0002 /* pending */
+#define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
+#define MPT3_CMD_RESET 0x0008 /* host reset dropped the command */
+
+/**
+ * struct _internal_cmd - internal commands struct
+ * @mutex: mutex
+ * @done: completion
+ * @reply: reply message pointer
+ * @sense: sense data
+ * @status: MPT3_CMD_XXX status
+ * @smid: system message id
+ */
+struct _internal_cmd {
+ struct mutex mutex;
+ struct completion done;
+ void *reply;
+ void *sense;
+ u16 status;
+ u16 smid;
+};
+
+
+
+/**
+ * struct _sas_device - attached device information
+ * @list: sas device list
+ * @starget: starget object
+ * @sas_address: device sas address
+ * @device_name: retrieved from the SAS IDENTIFY frame.
+ * @handle: device handle
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: enclosure handle
+ * @enclosure_logical_id: enclosure logical identifier
+ * @volume_handle: volume handle (valid when hidden raid member)
+ * @volume_wwid: volume unique identifier
+ * @device_info: bitfield provides detailed info about the device
+ * @id: target id
+ * @channel: target channel
+ * @slot: slot number
+ * @phy: phy identifier provided in sas device page 0
+ * @fast_path: fast path feature enable bit
+ * @responding: used in _scsih_sas_device_mark_responding
+ */
+struct _sas_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ u64 sas_address;
+ u64 device_name;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u16 volume_handle;
+ u64 volume_wwid;
+ u32 device_info;
+ int id;
+ int channel;
+ u16 slot;
+ u8 phy;
+ u8 responding;
+ u8 fast_path;
+};
+
+/**
+ * struct _raid_device - raid volume link list
+ * @list: sas device list
+ * @starget: starget object
+ * @sdev: scsi device struct (volumes are single lun)
+ * @wwid: unique identifier for the volume
+ * @handle: device handle
+ * @id: target id
+ * @channel: target channel
+ * @volume_type: the raid level
+ * @device_info: bitfield provides detailed info about the hidden components
+ * @num_pds: number of hidden raid components
+ * @responding: used in _scsih_raid_device_mark_responding
+ * @percent_complete: resync percent complete
+ */
+#define MPT_MAX_WARPDRIVE_PDS 8
+struct _raid_device {
+ struct list_head list;
+ struct scsi_target *starget;
+ struct scsi_device *sdev;
+ u64 wwid;
+ u16 handle;
+ int id;
+ int channel;
+ u8 volume_type;
+ u8 num_pds;
+ u8 responding;
+ u8 percent_complete;
+ u32 device_info;
+};
+
+/**
+ * struct _boot_device - boot device info
+ * @is_raid: flag to indicate whether this is a volume
+ * @device: holds pointer for either struct _sas_device or
+ * struct _raid_device
+ */
+struct _boot_device {
+ u8 is_raid;
+ void *device;
+};
+
+/**
+ * struct _sas_port - wide/narrow sas port information
+ * @port_list: list of ports belonging to expander
+ * @num_phys: number of phys belonging to this port
+ * @remote_identify: attached device identification
+ * @rphy: sas transport rphy object
+ * @port: sas transport wide/narrow port object
+ * @phy_list: _sas_phy list objects belonging to this port
+ */
+struct _sas_port {
+ struct list_head port_list;
+ u8 num_phys;
+ struct sas_identify remote_identify;
+ struct sas_rphy *rphy;
+ struct sas_port *port;
+ struct list_head phy_list;
+};
+
+/**
+ * struct _sas_phy - phy information
+ * @port_siblings: list of phys belonging to a port
+ * @identify: phy identification
+ * @remote_identify: attached device identification
+ * @phy: sas transport phy object
+ * @phy_id: unique phy id
+ * @handle: device handle for this phy
+ * @attached_handle: device handle for attached device
+ * @phy_belongs_to_port: port has been created for this phy
+ */
+struct _sas_phy {
+ struct list_head port_siblings;
+ struct sas_identify identify;
+ struct sas_identify remote_identify;
+ struct sas_phy *phy;
+ u8 phy_id;
+ u16 handle;
+ u16 attached_handle;
+ u8 phy_belongs_to_port;
+};
+
+/**
+ * struct _sas_node - sas_host/expander information
+ * @list: list of expanders
+ * @parent_dev: parent device class
+ * @num_phys: number of phys belonging to this sas_host/expander
+ * @sas_address: sas address of this sas_host/expander
+ * @handle: handle for this sas_host/expander
+ * @sas_address_parent: sas address of parent expander or sas host
+ * @enclosure_handle: handle of the enclosure this node is a member of
+ * @enclosure_logical_id: enclosure logical identifier
+ * @responding: used in _scsih_expander_device_mark_responding
+ * @phy: a list of phys that make up this sas_host/expander
+ * @sas_port_list: list of ports attached to this sas_host/expander
+ */
+struct _sas_node {
+ struct list_head list;
+ struct device *parent_dev;
+ u8 num_phys;
+ u64 sas_address;
+ u16 handle;
+ u64 sas_address_parent;
+ u16 enclosure_handle;
+ u64 enclosure_logical_id;
+ u8 responding;
+ struct _sas_phy *phy;
+ struct list_head sas_port_list;
+};
+
+/**
+ * enum reset_type - reset state
+ * @FORCE_BIG_HAMMER: issue diagnostic reset
+ * @SOFT_RESET: issue message_unit_reset; if that fails, escalate to big hammer
+ */
+enum reset_type {
+ FORCE_BIG_HAMMER,
+ SOFT_RESET,
+};
+
+/**
+ * struct chain_tracker - firmware chain tracker
+ * @chain_buffer: chain buffer
+ * @chain_buffer_dma: physical address
+ * @tracker_list: list of free requests (ioc->free_chain_list)
+ */
+struct chain_tracker {
+ void *chain_buffer;
+ dma_addr_t chain_buffer_dma;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct scsiio_tracker - scsi mf request tracker
+ * @smid: system message id
+ * @scmd: scsi request pointer
+ * @cb_idx: callback index
+ * @chain_list: list of chains associated with this request
+ * @tracker_list: list of free requests (ioc->free_list)
+ */
+struct scsiio_tracker {
+ u16 smid;
+ struct scsi_cmnd *scmd;
+ u8 cb_idx;
+ struct list_head chain_list;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct request_tracker - firmware request tracker
+ * @smid: system message id
+ * @cb_idx: callback index
+ * @tracker_list: list of free requests (ioc->free_list)
+ */
+struct request_tracker {
+ u16 smid;
+ u8 cb_idx;
+ struct list_head tracker_list;
+};
+
+/**
+ * struct _tr_list - target reset list
+ * @handle: device handle
+ * @state: state machine
+ */
+struct _tr_list {
+ struct list_head list;
+ u16 handle;
+ u16 state;
+};
+
+
+/**
+ * struct adapter_reply_queue - the reply queue struct
+ * @ioc: per adapter object
+ * @msix_index: msix index into vector table
+ * @vector: irq vector
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @reply_post_free: reply post base virt address
+ * @name: the name registered to request_irq()
+ * @busy: isr is actively processing replies on another cpu
+ * @list: link in ioc->reply_queue_list
+ */
+struct adapter_reply_queue {
+ struct MPT3SAS_ADAPTER *ioc;
+ u8 msix_index;
+ unsigned int vector;
+ u32 reply_post_host_index;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ char name[MPT_NAME_LENGTH];
+ atomic_t busy;
+ struct list_head list;
+};
+
+typedef void (*MPT_ADD_SGE)(void *paddr, u32 flags_length, dma_addr_t dma_addr);
+
+/* SAS3.0 support */
+typedef int (*MPT_BUILD_SG_SCMD)(struct MPT3SAS_ADAPTER *ioc,
+ struct scsi_cmnd *scmd, u16 smid);
+typedef void (*MPT_BUILD_SG)(struct MPT3SAS_ADAPTER *ioc, void *psge,
+ dma_addr_t data_out_dma, size_t data_out_sz,
+ dma_addr_t data_in_dma, size_t data_in_sz);
+typedef void (*MPT_BUILD_ZERO_LEN_SGE)(struct MPT3SAS_ADAPTER *ioc,
+ void *paddr);
+
+
+
+/* IOC Facts and Port Facts converted from little endian to cpu */
+union mpi3_version_union {
+ MPI2_VERSION_STRUCT Struct;
+ u32 Word;
+};
+
+struct mpt3sas_facts {
+ u16 MsgVersion;
+ u16 HeaderVersion;
+ u8 IOCNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u16 IOCExceptions;
+ u16 IOCStatus;
+ u32 IOCLogInfo;
+ u8 MaxChainDepth;
+ u8 WhoInit;
+ u8 NumberOfPorts;
+ u8 MaxMSIxVectors;
+ u16 RequestCredit;
+ u16 ProductID;
+ u32 IOCCapabilities;
+ union mpi3_version_union FWVersion;
+ u16 IOCRequestFrameSize;
+ u16 Reserved3;
+ u16 MaxInitiators;
+ u16 MaxTargets;
+ u16 MaxSasExpanders;
+ u16 MaxEnclosures;
+ u16 ProtocolFlags;
+ u16 HighPriorityCredit;
+ u16 MaxReplyDescriptorPostQueueDepth;
+ u8 ReplyFrameSize;
+ u8 MaxVolumes;
+ u16 MaxDevHandle;
+ u16 MaxPersistentEntries;
+ u16 MinDevHandle;
+};
+
+struct mpt3sas_port_facts {
+ u8 PortNumber;
+ u8 VP_ID;
+ u8 VF_ID;
+ u8 PortType;
+ u16 MaxPostedCmdBuffers;
+};
+
+/**
+ * enum mutex_type - task management mutex type
+ * @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it
+ * @TM_MUTEX_ON: mutex is required
+ */
+enum mutex_type {
+ TM_MUTEX_OFF = 0,
+ TM_MUTEX_ON = 1,
+};
+
+typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
+/**
+ * struct MPT3SAS_ADAPTER - per adapter struct
+ * @list: ioc_list
+ * @shost: shost object
+ * @id: unique adapter id
+ * @cpu_count: number online cpus
+ * @name: generic ioc string
+ * @tmp_string: tmp string used for logging
+ * @pdev: pci pdev object
+ * @pio_chip: physical io register space
+ * @chip: memory mapped register space
+ * @chip_phys: physical address prior to mapping
+ * @logging_level: see mpt3sas_debug.h
+ * @fwfault_debug: debugging FW timeouts
+ * @ir_firmware: IR firmware present
+ * @bars: bitmask of BAR's that must be configured
+ * @mask_interrupts: ignore interrupt
+ * @fault_reset_work_q_name: fw fault work queue
+ * @fault_reset_work_q: ""
+ * @fault_reset_work: ""
+ * @firmware_event_name: fw event work queue
+ * @firmware_event_thread: ""
+ * @fw_event_lock:
+ * @fw_event_list: list of fw events
+ * @aen_event_read_flag: event log was read
+ * @broadcast_aen_busy: broadcast aen waiting to be serviced
+ * @shost_recovery: host reset in progress
+ * @ioc_reset_in_progress_lock:
+ * @ioc_link_reset_in_progress: phy/hard reset in progress
+ * @ignore_loginfos: ignore loginfos during task management
+ * @remove_host: flag for when driver unloads, to avoid sending dev resets
+ * @pci_error_recovery: flag to prevent ioc access until slot reset completes
+ * @wait_for_discovery_to_complete: flag set at driver load time when
+ * waiting on reporting devices
+ * @is_driver_loading: flag set at driver load time
+ * @port_enable_failed: flag set when port enable has failed
+ * @start_scan: flag set from scan_start callback, cleared from _mpt3sas_fw_work
+ * @start_scan_failed: means port enable failed, returns the ioc_status
+ * @msix_enable: flag indicating msix is enabled
+ * @msix_vector_count: number msix vectors
+ * @cpu_msix_table: table for mapping cpus to msix index
+ * @cpu_msix_table_sz: table size
+ * @schedule_dead_ioc_flush_running_cmds: callback to flush pending commands
+ * @scsi_io_cb_idx: shost generated commands
+ * @tm_cb_idx: task management commands
+ * @scsih_cb_idx: scsih internal commands
+ * @transport_cb_idx: transport internal commands
+ * @ctl_cb_idx: ctl internal commands
+ * @base_cb_idx: base internal commands
+ * @config_cb_idx: config internal commands
+ * @tm_tr_cb_idx : device removal target reset handshake
+ * @tm_tr_volume_cb_idx : volume removal target reset
+ * @base_cmds:
+ * @transport_cmds:
+ * @scsih_cmds:
+ * @tm_cmds:
+ * @ctl_cmds:
+ * @config_cmds:
+ * @base_add_sg_single: handler for either 32/64 bit sgl's
+ * @event_type: bits indicating which events to log
+ * @event_context: unique id for each logged event
+ * @event_log: event log pointer
+ * @event_masks: events that are masked
+ * @facts: static facts data
+ * @pfacts: static port facts data
+ * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
+ * @manu_pg11: static manufacturing page 11
+ * @bios_pg2: static bios page 2
+ * @bios_pg3: static bios page 3
+ * @ioc_pg8: static ioc page 8
+ * @iounit_pg0: static iounit page 0
+ * @iounit_pg1: static iounit page 1
+ * @sas_hba: sas host object
+ * @sas_expander_list: expander object list
+ * @sas_node_lock:
+ * @sas_device_list: sas device object list
+ * @sas_device_init_list: sas device object list (used only at init time)
+ * @sas_device_lock:
+ * @io_missing_delay: time for IO completed by fw when PDR enabled
+ * @device_missing_delay: time for device missing by fw when PDR enabled
+ * @sas_id : used for setting volume target IDs
+ * @blocking_handles: bitmask used to identify which devices need blocking
+ * @pd_handles : bitmask for PD handles
+ * @pd_handles_sz : size of pd_handle bitmask
+ * @config_page_sz: config page size
+ * @config_page: reserve memory for config page payload
+ * @config_page_dma:
+ * @hba_queue_depth: hba request queue depth
+ * @sge_size: sg element size for either 32/64 bit
+ * @scsiio_depth: SCSI_IO queue depth
+ * @request_sz: per request frame size
+ * @request: pool of request frames
+ * @request_dma:
+ * @request_dma_sz:
+ * @scsi_lookup: firmware request tracker list
+ * @scsi_lookup_lock:
+ * @free_list: free list of request
+ * @pending_io_count:
+ * @reset_wq:
+ * @chain: pool of chains
+ * @chain_dma:
+ * @max_sges_in_main_message: number sg elements in main message
+ * @max_sges_in_chain_message: number sg elements per chain
+ * @chains_needed_per_io: max chains per io
+ * @chain_depth: total chains allocated
+ * @hi_priority_smid:
+ * @hi_priority:
+ * @hi_priority_dma:
+ * @hi_priority_depth:
+ * @hpr_lookup:
+ * @hpr_free_list:
+ * @internal_smid:
+ * @internal:
+ * @internal_dma:
+ * @internal_depth:
+ * @internal_lookup:
+ * @internal_free_list:
+ * @sense: pool of sense
+ * @sense_dma:
+ * @sense_dma_pool:
+ * @reply_depth: hba reply queue depth
+ * @reply_sz: per reply frame size
+ * @reply: pool of replies
+ * @reply_dma:
+ * @reply_dma_pool:
+ * @reply_free_queue_depth: reply free depth
+ * @reply_free: pool for reply free queue (32 bit addr)
+ * @reply_free_dma:
+ * @reply_free_dma_pool:
+ * @reply_free_host_index: tail index in pool to insert free replies
+ * @reply_post_queue_depth: reply post queue depth
+ * @reply_post_free: pool for reply post (64bit descriptor)
+ * @reply_post_free_dma:
+ * @reply_queue_count: number of reply queues
+ * @reply_queue_list: linked list containing the reply queue info
+ * @reply_post_host_index: head index in the pool where FW completes IO
+ * @delayed_tr_list: target reset linked list
+ * @delayed_tr_volume_list: volume target reset linked list
+ */
+struct MPT3SAS_ADAPTER {
+ struct list_head list;
+ struct Scsi_Host *shost;
+ u8 id;
+ int cpu_count;
+ char name[MPT_NAME_LENGTH];
+ char tmp_string[MPT_STRING_LENGTH];
+ struct pci_dev *pdev;
+ Mpi2SystemInterfaceRegs_t __iomem *chip;
+ resource_size_t chip_phys;
+ int logging_level;
+ int fwfault_debug;
+ u8 ir_firmware;
+ int bars;
+ u8 mask_interrupts;
+
+ /* fw fault handler */
+ char fault_reset_work_q_name[20];
+ struct workqueue_struct *fault_reset_work_q;
+ struct delayed_work fault_reset_work;
+
+ /* fw event handler */
+ char firmware_event_name[20];
+ struct workqueue_struct *firmware_event_thread;
+ spinlock_t fw_event_lock;
+ struct list_head fw_event_list;
+
+ /* misc flags */
+ int aen_event_read_flag;
+ u8 broadcast_aen_busy;
+ u16 broadcast_aen_pending;
+ u8 shost_recovery;
+
+ struct mutex reset_in_progress_mutex;
+ spinlock_t ioc_reset_in_progress_lock;
+ u8 ioc_link_reset_in_progress;
+ u8 ioc_reset_in_progress_status;
+
+ u8 ignore_loginfos;
+ u8 remove_host;
+ u8 pci_error_recovery;
+ u8 wait_for_discovery_to_complete;
+ u8 is_driver_loading;
+ u8 port_enable_failed;
+ u8 start_scan;
+ u16 start_scan_failed;
+
+ u8 msix_enable;
+ u16 msix_vector_count;
+ u8 *cpu_msix_table;
+ u16 cpu_msix_table_sz;
+ u32 ioc_reset_count;
+ MPT3SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
+
+ /* internal commands, callback index */
+ u8 scsi_io_cb_idx;
+ u8 tm_cb_idx;
+ u8 transport_cb_idx;
+ u8 scsih_cb_idx;
+ u8 ctl_cb_idx;
+ u8 base_cb_idx;
+ u8 port_enable_cb_idx;
+ u8 config_cb_idx;
+ u8 tm_tr_cb_idx;
+ u8 tm_tr_volume_cb_idx;
+ u8 tm_sas_control_cb_idx;
+ struct _internal_cmd base_cmds;
+ struct _internal_cmd port_enable_cmds;
+ struct _internal_cmd transport_cmds;
+ struct _internal_cmd scsih_cmds;
+ struct _internal_cmd tm_cmds;
+ struct _internal_cmd ctl_cmds;
+ struct _internal_cmd config_cmds;
+
+ MPT_ADD_SGE base_add_sg_single;
+
+ /* function ptr for either IEEE or MPI sg elements */
+ MPT_BUILD_SG_SCMD build_sg_scmd;
+ MPT_BUILD_SG build_sg;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge;
+ u8 mpi25;
+ u16 sge_size_ieee;
+
+ /* function ptr for MPI sg elements only */
+ MPT_BUILD_SG build_sg_mpi;
+ MPT_BUILD_ZERO_LEN_SGE build_zero_len_sge_mpi;
+
+ /* event log */
+ u32 event_type[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+ u32 event_context;
+ void *event_log;
+ u32 event_masks[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+
+ /* static config pages */
+ struct mpt3sas_facts facts;
+ struct mpt3sas_port_facts *pfacts;
+ Mpi2ManufacturingPage0_t manu_pg0;
+ struct Mpi2ManufacturingPage10_t manu_pg10;
+ struct Mpi2ManufacturingPage11_t manu_pg11;
+ Mpi2BiosPage2_t bios_pg2;
+ Mpi2BiosPage3_t bios_pg3;
+ Mpi2IOCPage8_t ioc_pg8;
+ Mpi2IOUnitPage0_t iounit_pg0;
+ Mpi2IOUnitPage1_t iounit_pg1;
+
+ struct _boot_device req_boot_device;
+ struct _boot_device req_alt_boot_device;
+ struct _boot_device current_boot_device;
+
+ /* sas hba, expander, and device list */
+ struct _sas_node sas_hba;
+ struct list_head sas_expander_list;
+ spinlock_t sas_node_lock;
+ struct list_head sas_device_list;
+ struct list_head sas_device_init_list;
+ spinlock_t sas_device_lock;
+ struct list_head raid_device_list;
+ spinlock_t raid_device_lock;
+ u8 io_missing_delay;
+ u16 device_missing_delay;
+ int sas_id;
+
+ void *blocking_handles;
+ void *pd_handles;
+ u16 pd_handles_sz;
+
+ /* config page */
+ u16 config_page_sz;
+ void *config_page;
+ dma_addr_t config_page_dma;
+
+ /* scsiio request */
+ u16 hba_queue_depth;
+ u16 sge_size;
+ u16 scsiio_depth;
+ u16 request_sz;
+ u8 *request;
+ dma_addr_t request_dma;
+ u32 request_dma_sz;
+ struct scsiio_tracker *scsi_lookup;
+ ulong scsi_lookup_pages;
+ spinlock_t scsi_lookup_lock;
+ struct list_head free_list;
+ int pending_io_count;
+ wait_queue_head_t reset_wq;
+
+ /* chain */
+ struct chain_tracker *chain_lookup;
+ struct list_head free_chain_list;
+ struct dma_pool *chain_dma_pool;
+ ulong chain_pages;
+ u16 max_sges_in_main_message;
+ u16 max_sges_in_chain_message;
+ u16 chains_needed_per_io;
+ u32 chain_depth;
+
+ /* hi-priority queue */
+ u16 hi_priority_smid;
+ u8 *hi_priority;
+ dma_addr_t hi_priority_dma;
+ u16 hi_priority_depth;
+ struct request_tracker *hpr_lookup;
+ struct list_head hpr_free_list;
+
+ /* internal queue */
+ u16 internal_smid;
+ u8 *internal;
+ dma_addr_t internal_dma;
+ u16 internal_depth;
+ struct request_tracker *internal_lookup;
+ struct list_head internal_free_list;
+
+ /* sense */
+ u8 *sense;
+ dma_addr_t sense_dma;
+ struct dma_pool *sense_dma_pool;
+
+ /* reply */
+ u16 reply_sz;
+ u8 *reply;
+ dma_addr_t reply_dma;
+ u32 reply_dma_max_address;
+ u32 reply_dma_min_address;
+ struct dma_pool *reply_dma_pool;
+
+ /* reply free queue */
+ u16 reply_free_queue_depth;
+ __le32 *reply_free;
+ dma_addr_t reply_free_dma;
+ struct dma_pool *reply_free_dma_pool;
+ u32 reply_free_host_index;
+
+ /* reply post queue */
+ u16 reply_post_queue_depth;
+ Mpi2ReplyDescriptorsUnion_t *reply_post_free;
+ dma_addr_t reply_post_free_dma;
+ struct dma_pool *reply_post_free_dma_pool;
+ u8 reply_queue_count;
+ struct list_head reply_queue_list;
+
+ struct list_head delayed_tr_list;
+ struct list_head delayed_tr_volume_list;
+
+ /* diag buffer support */
+ u8 *diag_buffer[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 diag_buffer_sz[MPI2_DIAG_BUF_TYPE_COUNT];
+ dma_addr_t diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
+ u8 diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
+ u32 diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
+ u32 ring_buffer_offset;
+ u32 ring_buffer_sz;
+ spinlock_t diag_trigger_lock;
+ u8 diag_trigger_active;
+ struct SL_WH_MASTER_TRIGGER_T diag_trigger_master;
+ struct SL_WH_EVENT_TRIGGERS_T diag_trigger_event;
+ struct SL_WH_SCSI_TRIGGERS_T diag_trigger_scsi;
+ struct SL_WH_MPI_TRIGGERS_T diag_trigger_mpi;
+};
+
+typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+
+
+/* base shared API */
+extern struct list_head mpt3sas_ioc_list;
+void mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc);
+
+int mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc);
+int mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc, int sleep_flag,
+ enum reset_type type);
+
+void *mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void *mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+__le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
+ u16 smid);
+void mpt3sas_base_flush_reply_queues(struct MPT3SAS_ADAPTER *ioc);
+
+/* hi-priority queue */
+u16 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+u16 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
+ struct scsi_cmnd *scmd);
+
+u16 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx);
+void mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u16 handle);
+void mpt3sas_base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+void mpt3sas_base_initialize_callback_handler(void);
+u8 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func);
+void mpt3sas_base_release_callback_handler(u8 cb_idx);
+
+u8 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+u8 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply);
+void *mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc,
+ u32 phys_addr);
+
+u32 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked);
+
+void mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code);
+int mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SasIoUnitControlReply_t *mpi_reply,
+ Mpi2SasIoUnitControlRequest_t *mpi_request);
+int mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request);
+
+void mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc,
+ u32 *event_type);
+
+void mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc);
+
+void mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
+ u16 device_missing_delay, u8 io_missing_delay);
+
+int mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc);
+
+
+/* scsih shared API */
+u8 mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply);
+void mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ uint channel, uint id, uint lun, u8 type, u16 smid_task,
+ ulong timeout, unsigned long serial_number, enum mutex_type m_type);
+void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
+void mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+void mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address);
+
+struct _sas_node *mpt3sas_scsih_expander_find_by_handle(
+ struct MPT3SAS_ADAPTER *ioc, u16 handle);
+struct _sas_node *mpt3sas_scsih_expander_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+struct _sas_device *mpt3sas_scsih_sas_device_find_by_sas_address(
+ struct MPT3SAS_ADAPTER *ioc, u64 sas_address);
+
+void mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc);
+
+/* config shared API */
+u8 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+int mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc,
+ u8 *num_phys);
+int mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page);
+
+int mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+int mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page);
+
+int mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage2_t *config_page);
+int mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page);
+int mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage0_t *config_page);
+int mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOUnitPage1_t *config_page);
+int mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz);
+int mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2IOCPage8_t *config_page);
+int mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ExpanderPage1_t *config_page,
+ u32 phy_number, u16 handle);
+int mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasEnclosurePage0_t *config_page,
+ u32 form, u32 handle);
+int mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number);
+int mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number);
+int mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle);
+int mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds);
+int mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz);
+int mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page,
+ u32 form, u32 form_specific);
+int mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle);
+int mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc,
+ u16 volume_handle, u64 *wwid);
+
+/* ctl shared API */
+extern struct device_attribute *mpt3sas_host_attrs[];
+extern struct device_attribute *mpt3sas_dev_attrs[];
+void mpt3sas_ctl_init(void);
+void mpt3sas_ctl_exit(void);
+u8 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+void mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase);
+u8 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc,
+ u8 msix_index, u32 reply);
+void mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply);
+
+void mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc,
+ u8 bits_to_register);
+int mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset);
+
+/* transport shared API */
+u8 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply);
+struct _sas_port *mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, u64 sas_address);
+void mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent);
+int mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
+int mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_phy *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev);
+void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
+extern struct sas_function_template mpt3sas_transport_functions;
+extern struct scsi_transport_template *mpt3sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev,
+ enum scsi_device_state new_state);
+/* trigger data externs */
+void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
+void mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc,
+ u32 trigger_bitmask);
+void mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier);
+void mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key,
+ u8 asc, u8 ascq);
+void mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status,
+ u32 loginfo);
+#endif /* MPT3SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
new file mode 100644
index 000000000000..1df9ed4f371d
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -0,0 +1,1649 @@
+/*
+ * This module provides common API for accessing firmware configuration pages
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include "mpt3sas_base.h"
+
+/* local definitions */
+
+/* Timeout for config page request (in seconds) */
+#define MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT 15
+
+/* Common sgl flags for READING a config page. */
+#define MPT3_CONFIG_COMMON_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST) << MPI2_SGE_FLAGS_SHIFT)
+
+/* Common sgl flags for WRITING a config page. */
+#define MPT3_CONFIG_COMMON_WRITE_SGLFLAGS ((MPI2_SGE_FLAGS_SIMPLE_ELEMENT | \
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER \
+ | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC) \
+ << MPI2_SGE_FLAGS_SHIFT)
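+
+/*
+ * Illustrative use of the masks above (this mirrors _config_request()
+ * below): the flags are ORed with the buffer length and handed to the
+ * per-adapter SGE builder, e.g.
+ *
+ *   ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ *       MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ */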
+
+/**
+ * struct config_request - dma memory descriptor for a config page request
+ * @sz: buffer size in bytes
+ * @page: virtual address of the buffer
+ * @page_dma: dma (bus) address of the buffer
+ */
+struct config_request {
+ u16 sz;
+ void *page;
+ dma_addr_t page_dma;
+};
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _config_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_config_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_CONFIG))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Header.PageType & MPI2_CONFIG_PAGETYPE_MASK) {
+ case MPI2_CONFIG_PAGETYPE_IO_UNIT:
+ desc = "io_unit";
+ break;
+ case MPI2_CONFIG_PAGETYPE_IOC:
+ desc = "ioc";
+ break;
+ case MPI2_CONFIG_PAGETYPE_BIOS:
+ desc = "bios";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_VOLUME:
+ desc = "raid_volume";
+ break;
+ case MPI2_CONFIG_PAGETYPE_MANUFACTURING:
+ desc = "manufaucturing";
+ break;
+ case MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK:
+ desc = "physdisk";
+ break;
+ case MPI2_CONFIG_PAGETYPE_EXTENDED:
+ switch (mpi_request->ExtPageType) {
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT:
+ desc = "sas_io_unit";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER:
+ desc = "sas_expander";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE:
+ desc = "sas_device";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_SAS_PHY:
+ desc = "sas_phy";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_LOG:
+ desc = "log";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE:
+ desc = "enclosure";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG:
+ desc = "raid_config";
+ break;
+ case MPI2_CONFIG_EXTPAGETYPE_DRIVER_MAPPING:
+ desc = "driver_mappping";
+ break;
+ }
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT
+ "%s: %s(%d), action(%d), form(0x%08x), smid(%d)\n",
+ ioc->name, calling_function_name, desc,
+ mpi_request->Header.PageNumber, mpi_request->Action,
+ le32_to_cpu(mpi_request->PageAddress), smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+}
+#endif
+
+/**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for a config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ int r = 0;
+
+ if (mem->sz > ioc->config_page_sz) {
+ mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+ &mem->page_dma, GFP_KERNEL);
+ if (!mem->page) {
+ pr_err(MPT3SAS_FMT
+ "%s: dma_alloc_coherent failed asking for (%d) bytes!!\n",
+ ioc->name, __func__, mem->sz);
+ r = -ENOMEM;
+ }
+ } else { /* request fits in the pre-allocated config page buffer */
+ mem->page = ioc->config_page;
+ mem->page_dma = ioc->config_page_dma;
+ }
+ return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free the dma-able memory obtained via
+ * _config_alloc_config_dma_memory.
+ */
+static void
+_config_free_config_dma_memory(struct MPT3SAS_ADAPTER *ioc,
+ struct config_request *mem)
+{
+ if (mem->sz > ioc->config_page_sz)
+ dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+ mem->page_dma);
+}
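+
+/*
+ * Typical pairing of the two helpers above, sketched from their only
+ * caller, _config_request():
+ *
+ *   struct config_request mem;
+ *
+ *   memset(&mem, 0, sizeof(struct config_request));
+ *   mem.sz = mpi_request->Header.PageLength * 4;
+ *   if (_config_alloc_config_dma_memory(ioc, &mem))
+ *       return -ENOMEM;
+ *   ... build the SGE from mem.page_dma and issue the request ...
+ *   _config_free_config_dma_memory(ioc, &mem);
+ */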
+
+/**
+ * mpt3sas_config_done - config page completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using _config_request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt,
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->config_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->config_cmds.smid != smid)
+ return 1;
+ ioc->config_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ ioc->config_cmds.status |= MPT3_CMD_REPLY_VALID;
+ memcpy(ioc->config_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ }
+ ioc->config_cmds.status &= ~MPT3_CMD_PENDING;
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_done", mpi_reply);
+#endif
+ ioc->config_cmds.smid = USHRT_MAX;
+ complete(&ioc->config_cmds.done);
+ return 1;
+}
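+
+/*
+ * Note: this completion routine pairs with the wait in _config_request()
+ * below; _config_request() marks config_cmds MPT3_CMD_PENDING and sleeps
+ * on config_cmds.done, while this routine copies the reply frame into
+ * config_cmds.reply and signals the completion.
+ */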
+
+/**
+ * _config_request - main routine for sending config page requests
+ * @ioc: per adapter object
+ * @mpi_request: request message frame
+ * @mpi_reply: reply mf payload returned from firmware
+ * @timeout: timeout in seconds
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
+ *
+ * A generic API for config page requests to firmware.
+ *
+ * The ioc->config_cmds.status flag should be MPT3_CMD_NOT_USED before calling
+ * this API.
+ *
+ * The callback index is set inside ioc->config_cb_idx.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
+ *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+ void *config_page, u16 config_page_sz)
+{
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ Mpi2ConfigRequest_t *config_request;
+ int r;
+ u8 retry_count, issue_host_reset = 0;
+ u16 wait_state_count;
+ struct config_request mem;
+ u32 ioc_status = UINT_MAX;
+
+ mutex_lock(&ioc->config_cmds.mutex);
+ if (ioc->config_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: config_cmd in use\n",
+ ioc->name, __func__);
+ mutex_unlock(&ioc->config_cmds.mutex);
+ return -EAGAIN;
+ }
+
+ retry_count = 0;
+ memset(&mem, 0, sizeof(struct config_request));
+
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ if (config_page) {
+ mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+ mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+ mpi_request->Header.PageType = mpi_reply->Header.PageType;
+ mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+ mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+ mpi_request->ExtPageType = mpi_reply->ExtPageType;
+ if (mpi_request->Header.PageLength)
+ mem.sz = mpi_request->Header.PageLength * 4;
+ else
+ mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+ r = _config_alloc_config_dma_memory(ioc, &mem);
+ if (r != 0)
+ goto out;
+ if (mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT ||
+ mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM) {
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+ mem.page_dma);
+ memcpy(mem.page, config_page, min_t(u16, mem.sz,
+ config_page_sz));
+ } else {
+ memset(config_page, 0, config_page_sz);
+ ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+ MPT3_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+ memset(mem.page, 0, min_t(u16, mem.sz, config_page_sz));
+ }
+ }
+
+ retry_config:
+ if (retry_count) {
+ if (retry_count > 2) { /* attempt only 2 retries */
+ r = -EFAULT;
+ goto free_mem;
+ }
+ pr_info(MPT3SAS_FMT "%s: attempting retry (%d)\n",
+ ioc->name, __func__, retry_count);
+ }
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EFAULT;
+ goto free_mem;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->config_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ r = -EAGAIN;
+ goto free_mem;
+ }
+
+ r = 0;
+ memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+ ioc->config_cmds.status = MPT3_CMD_PENDING;
+ config_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->config_cmds.smid = smid;
+ memcpy(config_request, mpi_request, sizeof(Mpi2ConfigRequest_t));
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _config_display_some_debug(ioc, smid, "config_request", NULL);
+#endif
+ init_completion(&ioc->config_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
+ timeout*HZ);
+ if (!(ioc->config_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2ConfigRequest_t)/4);
+ retry_count++;
+ if (ioc->config_cmds.smid == smid)
+ mpt3sas_base_free_smid(ioc, smid);
+ if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+ MPT3_CMD_RESET) || ioc->pci_error_recovery)
+ goto retry_config;
+ issue_host_reset = 1;
+ r = -EFAULT;
+ goto free_mem;
+ }
+
+ if (ioc->config_cmds.status & MPT3_CMD_REPLY_VALID) {
+ memcpy(mpi_reply, ioc->config_cmds.reply,
+ sizeof(Mpi2ConfigReply_t));
+
+ /* Reply Frame Sanity Checks to workaround FW issues */
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (mpi_reply->Header.PageType & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG: mpi_reply mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (mpi_reply->Header.PageType & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ mpi_request->ExtPageType != mpi_reply->ExtPageType) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG: mpi_reply mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__, mpi_request->ExtPageType,
+ mpi_reply->ExtPageType);
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ }
+
+ if (retry_count)
+ pr_info(MPT3SAS_FMT "%s: retry (%d) completed!!\n", \
+ ioc->name, __func__, retry_count);
+
+ if ((ioc_status == MPI2_IOCSTATUS_SUCCESS) &&
+ config_page && mpi_request->Action ==
+ MPI2_CONFIG_ACTION_PAGE_READ_CURRENT) {
+ u8 *p = (u8 *)mem.page;
+
+ /* Config Page Sanity Checks to workaround FW issues */
+ if (p) {
+ if ((mpi_request->Header.PageType & 0xF) !=
+ (p[3] & 0xF)) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG: config page mismatch: Requested PageType(0x%02x) Reply PageType(0x%02x)\n",
+ ioc->name, __func__,
+ (mpi_request->Header.PageType & 0xF),
+ (p[3] & 0xF));
+ }
+
+ if (((mpi_request->Header.PageType & 0xF) ==
+ MPI2_CONFIG_PAGETYPE_EXTENDED) &&
+ (mpi_request->ExtPageType != p[6])) {
+ _debug_dump_mf(mpi_request, ioc->request_sz/4);
+ _debug_dump_reply(mpi_reply, ioc->request_sz/4);
+ _debug_dump_config(p, min_t(u16, mem.sz,
+ config_page_sz)/4);
+ panic(KERN_WARNING MPT3SAS_FMT
+ "%s: Firmware BUG: config page mismatch: Requested ExtPageType(0x%02x) Reply ExtPageType(0x%02x)\n",
+ ioc->name, __func__,
+ mpi_request->ExtPageType, p[6]);
+ }
+ }
+ memcpy(config_page, mem.page, min_t(u16, mem.sz,
+ config_page_sz));
+ }
+
+ free_mem:
+ if (config_page)
+ _config_free_config_dma_memory(ioc, &mem);
+ out:
+ ioc->config_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->config_cmds.mutex);
+
+ if (issue_host_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return r;
+}
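+
+/*
+ * Every page accessor below follows the same two-step template: first a
+ * MPI2_CONFIG_ACTION_PAGE_HEADER request with a zero-length SGE to fetch
+ * the page header, then the real read (or write) against a caller
+ * buffer. A minimal sketch, assuming a valid ioc and a page buffer:
+ *
+ *   Mpi2ConfigRequest_t mpi_request;
+ *   Mpi2ConfigReply_t mpi_reply;
+ *   int r;
+ *
+ *   memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *   mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ *   mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ *   ... fill in Header.PageType/PageNumber/PageVersion ...
+ *   ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ *   r = _config_request(ioc, &mpi_request, &mpi_reply,
+ *       MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ *   if (!r) {
+ *       mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *       r = _config_request(ioc, &mpi_request, &mpi_reply,
+ *           MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &page, sizeof(page));
+ *   }
+ */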
+
+/**
+ * mpt3sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg7 - obtain manufacturing page 7
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg7(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage7_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 7;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING7_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg10(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage10_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 10;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_manufacturing_pg11 - obtain manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_manufacturing_pg11 - set manufacturing page 11
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_manufacturing_pg11(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply,
+ struct Mpi2ManufacturingPage11_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+ mpi_request.Header.PageNumber = 11;
+ mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg2 - obtain bios page 2
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg2(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2BiosPage2_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 2;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_bios_pg3 - obtain bios page 3
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_bios_pg3(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2BiosPage3_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
+ mpi_request.Header.PageNumber = 3;
+ mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg0 - obtain iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage0_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_iounit_pg1 - obtain iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_iounit_pg1 - set iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
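+
+/*
+ * Illustrative read-modify-write use of the iounit pg1 pair above, e.g.
+ * clearing a flag (a sketch; the flag name is from mpi2_cnfg.h):
+ *
+ *   Mpi2ConfigReply_t mpi_reply;
+ *   Mpi2IOUnitPage1_t iounit_pg1;
+ *
+ *   if (!mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &iounit_pg1)) {
+ *       iounit_pg1.Flags &= cpu_to_le32(
+ *           ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING);
+ *       mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &iounit_pg1);
+ *   }
+ */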
+
+/**
+ * mpt3sas_config_get_ioc_pg8 - obtain ioc page 8
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_ioc_pg8(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2IOCPage8_t *config_page)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_IOC;
+ mpi_request.Header.PageNumber = 8;
+ mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_device_pg0 - obtain sas device page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage0_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
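+
+/*
+ * Example lookup by firmware device handle (illustrative; the form
+ * constant is from mpi2_cnfg.h):
+ *
+ *   Mpi2ConfigReply_t mpi_reply;
+ *   Mpi2SasDevicePage0_t sas_device_pg0;
+ *
+ *   r = mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ *       &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle);
+ */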
+
+/**
+ * mpt3sas_config_get_sas_device_pg1 - obtain sas device page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: device handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_device_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasDevicePage1_t *config_page,
+ u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_DEVICE;
+ mpi_request.Header.PageVersion = MPI2_SASDEVICE1_PAGEVERSION;
+ mpi_request.Header.PageNumber = 1;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_hba_phys - obtain number of phys on the host
+ * @ioc: per adapter object
+ * @num_phys: pointer returned with the number of phys
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_hba_phys(struct MPT3SAS_ADAPTER *ioc, u8 *num_phys)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+ u16 ioc_status;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t config_page;
+
+ *num_phys = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2SasIOUnitPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_phys = config_page.NumPhys;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg0 - obtain sas iounit page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys
+ * prior to this function, so that enough memory is allocated for
+ * config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage0_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
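+
+/*
+ * Buffer sizing sketch for the variable-length sas iounit pages
+ * (assumes the Mpi2SasIOUnit0PhyData_t layout from mpi2_cnfg.h):
+ *
+ *   u8 num_phys;
+ *   u16 sz;
+ *   Mpi2SasIOUnitPage0_t *sas_iounit_pg0;
+ *
+ *   mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
+ *   sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) +
+ *       (num_phys * sizeof(Mpi2SasIOUnit0PhyData_t));
+ *   sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ *   if (sas_iounit_pg0) {
+ *       mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ *           sas_iounit_pg0, sz);
+ *       kfree(sas_iounit_pg0);
+ *   }
+ */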
+
+/**
+ * mpt3sas_config_get_sas_iounit_pg1 - obtain sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys
+ * prior to this function, so that enough memory is allocated for
+ * config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_set_sas_iounit_pg1 - send sas iounit page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * The calling function should call mpt3sas_config_get_number_hba_phys
+ * prior to this function, so that enough memory is allocated for
+ * config_page.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_set_sas_iounit_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2SasIOUnitPage1_t *config_page,
+ u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+ _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg0 - obtain expander page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_expander_pg1 - obtain expander page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * @handle: expander handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_expander_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2ExpanderPage1_t *config_page, u32 phy_number,
+ u16 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_EXPANDER;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
+ (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_enclosure_pg0 - obtain enclosure page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: enclosure handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_enclosure_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasEnclosurePage0_t *config_page, u32 form, u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_ENCLOSURE;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg0 - obtain phy page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage0_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phy_pg1 - obtain phy page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @phy_number: phy number
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phy_pg1(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2SasPhyPage1_t *config_page, u32 phy_number)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_SAS_PHY;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg1 - obtain raid volume page 1
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg1(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage1_t *config_page, u32 form,
+ u32 handle)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 1;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_number_pds - obtain number of phys disks assigned to volume
+ * @ioc: per adapter object
+ * @handle: volume handle
+ * @num_pds: returned count of physical disks
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_number_pds(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 *num_pds)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2RaidVolPage0_t config_page;
+ Mpi2ConfigReply_t mpi_reply;
+ int r;
+ u16 ioc_status;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ *num_pds = 0;
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress =
+ cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+ sizeof(Mpi2RaidVolPage0_t));
+ if (!r) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+ *num_pds = config_page.NumPhysDisks;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_raid_volume_pg0 - obtain raid volume page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_HANDLE or HANDLE
+ * @handle: volume handle
+ * @sz: size of buffer passed in config_page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_raid_volume_pg0(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2ConfigReply_t *mpi_reply, Mpi2RaidVolPage0_t *config_page, u32 form,
+ u32 handle, u16 sz)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | handle);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_phys_disk_pg0 - obtain phys disk page 0
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * @form: GET_NEXT_PHYSDISKNUM, PHYSDISKNUM, DEVHANDLE
+ * @form_specific: specific to the form
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_phys_disk_pg0(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigReply_t
+ *mpi_reply, Mpi2RaidPhysDiskPage0_t *config_page, u32 form,
+ u32 form_specific)
+{
+ Mpi2ConfigRequest_t mpi_request;
+ int r;
+
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
+ mpi_request.Header.PageNumber = 0;
+ mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.PageAddress = cpu_to_le32(form | form_specific);
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ r = _config_request(ioc, &mpi_request, mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ sizeof(*config_page));
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_handle - returns volume handle for a given
+ * hidden raid component
+ * @ioc: per adapter object
+ * @pd_handle: phys disk handle
+ * @volume_handle: volume handle
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_handle(struct MPT3SAS_ADAPTER *ioc, u16 pd_handle,
+ u16 *volume_handle)
+{
+ Mpi2RaidConfigurationPage0_t *config_page = NULL;
+ Mpi2ConfigRequest_t mpi_request;
+ Mpi2ConfigReply_t mpi_reply;
+ int r, i, config_page_sz;
+ u16 ioc_status;
+ int config_num;
+ u16 element_type;
+ u16 phys_disk_dev_handle;
+
+ *volume_handle = 0;
+ memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_CONFIG;
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+ mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
+ mpi_request.ExtPageType = MPI2_CONFIG_EXTPAGETYPE_RAID_CONFIG;
+ mpi_request.Header.PageVersion = MPI2_RAIDCONFIG0_PAGEVERSION;
+ mpi_request.Header.PageNumber = 0;
+ ioc->build_zero_len_sge_mpi(ioc, &mpi_request.PageBufferSGE);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+ if (r)
+ goto out;
+
+ mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+ config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+ config_page = kmalloc(config_page_sz, GFP_KERNEL);
+ if (!config_page) {
+ r = -ENOMEM;
+ goto out;
+ }
+
+ config_num = 0xff;
+ while (1) {
+ mpi_request.PageAddress = cpu_to_le32(config_num +
+ MPI2_RAID_PGAD_FORM_GET_NEXT_CONFIGNUM);
+ r = _config_request(ioc, &mpi_request, &mpi_reply,
+ MPT3_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+ config_page_sz);
+ if (r)
+ goto out;
+ r = -1;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < config_page->NumElements; i++) {
+ element_type = le16_to_cpu(config_page->
+ ConfigElement[i].ElementFlags) &
+ MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE;
+ if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_VOL_PHYS_DISK_ELEMENT ||
+ element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_OCE_ELEMENT) {
+ phys_disk_dev_handle =
+ le16_to_cpu(config_page->ConfigElement[i].
+ PhysDiskDevHandle);
+ if (phys_disk_dev_handle == pd_handle) {
+ *volume_handle =
+ le16_to_cpu(config_page->
+ ConfigElement[i].VolDevHandle);
+ r = 0;
+ goto out;
+ }
+ } else if (element_type ==
+ MPI2_RAIDCONFIG0_EFLAGS_HOT_SPARE_ELEMENT) {
+ *volume_handle = 0;
+ r = 0;
+ goto out;
+ }
+ }
+ config_num = config_page->ConfigNum;
+ }
+ out:
+ kfree(config_page);
+ return r;
+}
+
+/**
+ * mpt3sas_config_get_volume_wwid - returns wwid given the volume handle
+ * @ioc: per adapter object
+ * @volume_handle: volume handle
+ * @wwid: volume wwid
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_config_get_volume_wwid(struct MPT3SAS_ADAPTER *ioc, u16 volume_handle,
+ u64 *wwid)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2RaidVolPage1_t raid_vol_pg1;
+
+ *wwid = 0;
+ if (mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &raid_vol_pg1, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE,
+ volume_handle))
+ return -1;
+
+ *wwid = le64_to_cpu(raid_vol_pg1.WWID);
+ return 0;
+}
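+
+/*
+ * The two helpers above are typically chained to map a hidden raid
+ * component to the wwid of its volume (a sketch):
+ *
+ *   u16 volume_handle;
+ *   u64 volume_wwid;
+ *
+ *   if (!mpt3sas_config_get_volume_handle(ioc, pd_handle,
+ *       &volume_handle) && volume_handle)
+ *       mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ *           &volume_wwid);
+ */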
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
new file mode 100644
index 000000000000..054d5231c974
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -0,0 +1,3296 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+#include "mpt3sas_ctl.h"
+
+
+static struct fasync_struct *async_queue;
+static DECLARE_WAIT_QUEUE_HEAD(ctl_poll_wait);
+
+
+/**
+ * enum block_state - blocking state
+ * @NON_BLOCKING: non blocking
+ * @BLOCKING: blocking
+ *
+ * These states are for ioctls that need to wait for a response
+ * from firmware, so they probably require sleep.
+ */
+enum block_state {
+ NON_BLOCKING,
+ BLOCKING,
+};
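+
+/*
+ * Callers typically derive the state from the file flags, e.g.
+ * (illustrative):
+ *
+ *   enum block_state state;
+ *
+ *   state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ */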
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _ctl_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on the handle, then returns the
+ * sas_device object.
+ */
+static struct _sas_device *
+_ctl_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->handle != handle)
+ continue;
+ r = sas_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _ctl_display_some_debug - debug routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @calling_function_name: string pass from calling function
+ * @mpi_reply: reply message frame
+ * Context: none.
+ *
+ * Function for displaying debug info helpful when debugging issues
+ * in this module.
+ */
+static void
+_ctl_display_some_debug(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ char *calling_function_name, MPI2DefaultReply_t *mpi_reply)
+{
+ Mpi2ConfigRequest_t *mpi_request;
+ char *desc = NULL;
+
+ if (!(ioc->logging_level & MPT_DEBUG_IOCTL))
+ return;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "scsi_io, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ desc = "task_mgmt";
+ break;
+ case MPI2_FUNCTION_IOC_INIT:
+ desc = "ioc_init";
+ break;
+ case MPI2_FUNCTION_IOC_FACTS:
+ desc = "ioc_facts";
+ break;
+ case MPI2_FUNCTION_CONFIG:
+ {
+ Mpi2ConfigRequest_t *config_request =
+ (Mpi2ConfigRequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "config, type(0x%02x), ext_type(0x%02x), number(%d)",
+ (config_request->Header.PageType &
+ MPI2_CONFIG_PAGETYPE_MASK), config_request->ExtPageType,
+ config_request->Header.PageNumber);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_PORT_FACTS:
+ desc = "port_facts";
+ break;
+ case MPI2_FUNCTION_PORT_ENABLE:
+ desc = "port_enable";
+ break;
+ case MPI2_FUNCTION_EVENT_NOTIFICATION:
+ desc = "event_notification";
+ break;
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ desc = "fw_download";
+ break;
+ case MPI2_FUNCTION_FW_UPLOAD:
+ desc = "fw_upload";
+ break;
+ case MPI2_FUNCTION_RAID_ACTION:
+ desc = "raid_action";
+ break;
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsi_request =
+ (Mpi2SCSIIORequest_t *)mpi_request;
+
+ snprintf(ioc->tmp_string, MPT_STRING_LENGTH,
+ "raid_pass, cmd(0x%02x), cdb_len(%d)",
+ scsi_request->CDB.CDB32[0],
+ le16_to_cpu(scsi_request->IoFlags) & 0xF);
+ desc = ioc->tmp_string;
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ desc = "sas_iounit_cntl";
+ break;
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ desc = "sata_pass";
+ break;
+ case MPI2_FUNCTION_DIAG_BUFFER_POST:
+ desc = "diag_buffer_post";
+ break;
+ case MPI2_FUNCTION_DIAG_RELEASE:
+ desc = "diag_release";
+ break;
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ desc = "smp_passthrough";
+ break;
+ }
+
+ if (!desc)
+ return;
+
+ pr_info(MPT3SAS_FMT "%s: %s, smid(%d)\n",
+ ioc->name, calling_function_name, desc, smid);
+
+ if (!mpi_reply)
+ return;
+
+ if (mpi_reply->IOCStatus || mpi_reply->IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "\tiocstatus(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ Mpi2SCSIIOReply_t *scsi_reply =
+ (Mpi2SCSIIOReply_t *)mpi_reply;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _ctl_sas_device_find_by_handle(ioc,
+ le16_to_cpu(scsi_reply->DevHandle));
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (scsi_reply->SCSIState || scsi_reply->SCSIStatus)
+ pr_info(MPT3SAS_FMT
+ "\tscsi_state(0x%02x), scsi_status"
+ "(0x%02x)\n", ioc->name,
+ scsi_reply->SCSIState,
+ scsi_reply->SCSIStatus);
+ }
+}
+
+#endif
+
+/**
+ * mpt3sas_ctl_done - ctl module completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using ioc->ctl_cb_idx.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt;
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+ Mpi2SCSIIOReply_t *scsiio_reply;
+ const void *sense_data;
+ u32 sz;
+
+ if (ioc->ctl_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->ctl_cmds.smid != smid)
+ return 1;
+ ioc->ctl_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->ctl_cmds.status |= MPT3_CMD_REPLY_VALID;
+ /* get sense data */
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_reply->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ scsiio_reply = (Mpi2SCSIIOReply_t *)mpi_reply;
+ if (scsiio_reply->SCSIState &
+ MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(scsiio_reply->SenseCount));
+ sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ memcpy(ioc->ctl_cmds.sense, sense_data, sz);
+ }
+ }
+ }
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_done", mpi_reply);
+#endif
+ ioc->ctl_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->ctl_cmds.done);
+ return 1;
+}
+
+/**
+ * _ctl_check_event_type - determines when an event needs logging
+ * @ioc: per adapter object
+ * @event: firmware event
+ *
+ * The bitmask in ioc->event_type[] indicates which events should be
+ * saved in the driver event_log. This bitmask is set by the application.
+ *
+ * Returns a nonzero value when the event should be captured, or zero
+ * when there is no match.
+ */
+static int
+_ctl_check_event_type(struct MPT3SAS_ADAPTER *ioc, u16 event)
+{
+ u16 i;
+ u32 desired_event;
+
+ if (event >= 128 || !event || !ioc->event_log)
+ return 0;
+
+ desired_event = (1 << (event % 32));
+ if (!desired_event)
+ desired_event = 1;
+ i = event / 32;
+ return desired_event & ioc->event_type[i];
+}
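+
+/*
+ * Worked example (illustrative only): event 0x21 maps to word
+ * 0x21 / 32 = 1 and bit 0x21 % 32 = 1, so that event is captured when
+ * the application has set bit 1 (0x00000002) of ioc->event_type[1]
+ * via the MPT3EVENTENABLE ioctl.
+ */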
+
+/**
+ * mpt3sas_ctl_add_to_event_log - add event
+ * @ioc: per adapter object
+ * @mpi_reply: reply message frame
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_ctl_add_to_event_log(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventNotificationReply_t *mpi_reply)
+{
+ struct MPT3_IOCTL_EVENTS *event_log;
+ u16 event;
+ int i;
+ u32 sz, event_data_sz;
+ u8 send_aen = 0;
+
+ if (!ioc->event_log)
+ return;
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (_ctl_check_event_type(ioc, event)) {
+
+ /* insert entry into circular event_log */
+ i = ioc->event_context % MPT3SAS_CTL_EVENT_LOG_SIZE;
+ event_log = ioc->event_log;
+ event_log[i].event = event;
+ event_log[i].context = ioc->event_context++;
+
+ event_data_sz = le16_to_cpu(mpi_reply->EventDataLength)*4;
+ sz = min_t(u32, event_data_sz, MPT3_EVENT_DATA_SIZE);
+ memset(event_log[i].data, 0, MPT3_EVENT_DATA_SIZE);
+ memcpy(event_log[i].data, mpi_reply->EventData, sz);
+ send_aen = 1;
+ }
+
+ /* This aen_event_read_flag flag is set until the
+ * application has read the event log.
+ * For MPI2_EVENT_LOG_ENTRY_ADDED, we always notify.
+ */
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED ||
+ (send_aen && !ioc->aen_event_read_flag)) {
+ ioc->aen_event_read_flag = 1;
+ wake_up_interruptible(&ctl_poll_wait);
+ if (async_queue)
+ kill_fasync(&async_queue, SIGIO, POLL_IN);
+ }
+}
+
+/**
+ * mpt3sas_ctl_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely copies the firmware event into the driver event
+ * log via mpt3sas_ctl_add_to_event_log.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt;
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ return 1;
+}
+
+/**
+ * _ctl_verify_adapter - validates ioc_number passed from application
+ * @ioc_number: ioc number requested by the application
+ * @iocpp: The matching ioc pointer is returned in this.
+ *
+ * Return (-1) means error, else ioc_number.
+ */
+static int
+_ctl_verify_adapter(int ioc_number, struct MPT3SAS_ADAPTER **iocpp)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->id != ioc_number)
+ continue;
+ *iocpp = ioc;
+ return ioc_number;
+ }
+ *iocpp = NULL;
+ return -1;
+}
+
+/**
+ * mpt3sas_ctl_reset_handler - reset callback handler (for ctl)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * or MPT3_IOC_DONE_RESET.
+ */
+void
+mpt3sas_ctl_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ int i;
+ u8 issue_reset;
+
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ mpt3sas_send_diag_release(ioc, i, &issue_reset);
+ }
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->ctl_cmds.status & MPT3_CMD_PENDING) {
+ ioc->ctl_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->ctl_cmds.smid);
+ complete(&ioc->ctl_cmds.done);
+ }
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ ioc->diag_buffer_status[i] |=
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ }
+ break;
+ }
+}
+
+/**
+ * _ctl_fasync - fasync handler
+ * @fd - file descriptor
+ * @filep - file object
+ * @mode - on/off flag for the fasync notification
+ *
+ * Called when an application requests the fasync callback handler.
+ */
+static int
+_ctl_fasync(int fd, struct file *filep, int mode)
+{
+ return fasync_helper(fd, filep, mode, &async_queue);
+}
+
+/**
+ * _ctl_release - release handler
+ * @inode - inode object
+ * @filep - file object
+ *
+ * Called when an application releases the fasync callback handler.
+ */
+static int
+_ctl_release(struct inode *inode, struct file *filep)
+{
+ return fasync_helper(-1, filep, 0, &async_queue);
+}
+
+/**
+ * _ctl_poll - poll handler
+ * @filep - file object
+ * @wait - poll table
+ *
+ * Returns POLLIN | POLLRDNORM when an unread AEN event is pending.
+ */
+static unsigned int
+_ctl_poll(struct file *filep, poll_table *wait)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+
+ poll_wait(filep, &ctl_poll_wait, wait);
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+ if (ioc->aen_event_read_flag)
+ return POLLIN | POLLRDNORM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_set_task_mid - assign an active smid to tm request
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @tm_request - pointer to mf from user space
+ *
+ * Returns 0 when an smid is found; returns 1 on failure, in which
+ * case the reply frame is filled out and copied back to the application.
+ */
+static int
+_ctl_set_task_mid(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command *karg,
+ Mpi2SCSITaskManagementRequest_t *tm_request)
+{
+ u8 found = 0;
+ u16 i;
+ u16 handle;
+ struct scsi_cmnd *scmd;
+ struct MPT3SAS_DEVICE *priv_data;
+ unsigned long flags;
+ Mpi2SCSITaskManagementReply_t *tm_reply;
+ u32 sz;
+ u32 lun;
+ char *desc = NULL;
+
+ if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ desc = "abort_task";
+ else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+ desc = "query_task";
+ else
+ return 0;
+
+ lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
+
+ handle = le16_to_cpu(tm_request->DevHandle);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ for (i = ioc->scsiio_depth; i && !found; i--) {
+ scmd = ioc->scsi_lookup[i - 1].scmd;
+ if (scmd == NULL || scmd->device == NULL ||
+ scmd->device->hostdata == NULL)
+ continue;
+ if (lun != scmd->device->lun)
+ continue;
+ priv_data = scmd->device->hostdata;
+ if (priv_data->sas_target == NULL)
+ continue;
+ if (priv_data->sas_target->handle != handle)
+ continue;
+ tm_request->TaskMID = cpu_to_le16(ioc->scsi_lookup[i - 1].smid);
+ found = 1;
+ }
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ if (!found) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), no active mid!!\n",
+ ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun));
+ tm_reply = ioc->ctl_cmds.reply;
+ tm_reply->DevHandle = tm_request->DevHandle;
+ tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ tm_reply->TaskType = tm_request->TaskType;
+ tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
+ tm_reply->VP_ID = tm_request->VP_ID;
+ tm_reply->VF_ID = tm_request->VF_ID;
+ sz = min_t(u32, karg->max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz))
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ return 1;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+ desc, le16_to_cpu(tm_request->DevHandle), lun,
+ le16_to_cpu(tm_request->TaskMID)));
+ return 0;
+}
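+
+/*
+ * Note (derived from the lookup above): ABORT_TASK and QUERY_TASK
+ * requests must carry the message index (TaskMID) of the command they
+ * target; the scan walks the outstanding scsi_lookup entries from the
+ * deepest queue slot down, matching on device handle and lun, and
+ * fills in the first active smid found.
+ */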
+
+/**
+ * _ctl_do_mpt_command - main handler for MPT3COMMAND opcode
+ * @ioc: per adapter object
+ * @karg - (struct mpt3_ioctl_command)
+ * @mf - pointer to mf in user space
+ */
+static long
+_ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
+ void __user *mf)
+{
+ MPI2RequestHeader_t *mpi_request = NULL, *request;
+ MPI2DefaultReply_t *mpi_reply;
+ u32 ioc_state;
+ u16 ioc_status;
+ u16 smid;
+ unsigned long timeout, timeleft;
+ u8 issue_reset;
+ u32 sz;
+ void *psge;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma = 0;
+ size_t data_out_sz = 0;
+ void *data_in = NULL;
+ dma_addr_t data_in_dma = 0;
+ size_t data_in_sz = 0;
+ long ret;
+ u16 wait_state_count;
+
+ issue_reset = 0;
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name,
+ __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ mpi_request = kzalloc(ioc->request_sz, GFP_KERNEL);
+ if (!mpi_request) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed obtaining a memory for mpi_request\n",
+ ioc->name, __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* Check for overflow and wraparound */
+ if (karg.data_sge_offset * 4 > ioc->request_sz ||
+ karg.data_sge_offset > (UINT_MAX / 4)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* copy in request message frame from user */
+ if (copy_from_user(mpi_request, mf, karg.data_sge_offset*4)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__, __LINE__,
+ __func__);
+ ret = -EFAULT;
+ goto out;
+ }
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ } else {
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->ctl_cb_idx, NULL);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ret = -EAGAIN;
+ goto out;
+ }
+ }
+
+ ret = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memcpy(request, mpi_request, karg.data_sge_offset*4);
+ ioc->ctl_cmds.smid = smid;
+ data_out_sz = karg.data_out_size;
+ data_in_sz = karg.data_in_size;
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
+ if (!le16_to_cpu(mpi_request->FunctionDependent1) ||
+ le16_to_cpu(mpi_request->FunctionDependent1) >
+ ioc->facts.MaxDevHandle) {
+ ret = -EINVAL;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ /* obtain dma-able memory for data transfer */
+ if (data_out_sz) /* WRITE */ {
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz,
+ &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ if (copy_from_user(data_out, karg.data_out_buf_ptr,
+ data_out_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -EFAULT;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ if (data_in_sz) /* READ */ {
+ data_in = pci_alloc_consistent(ioc->pdev, data_in_sz,
+ &data_in_dma);
+ if (!data_in) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ psge = (void *)request + (karg.data_sge_offset*4);
+
+ /* send command to firmware */
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ _ctl_display_some_debug(ioc, smid, "ctl_request", NULL);
+#endif
+
+ init_completion(&ioc->ctl_cmds.done);
+ switch (mpi_request->Function) {
+ case MPI2_FUNCTION_SCSI_IO_REQUEST:
+ case MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+ {
+ Mpi2SCSIIORequest_t *scsiio_request =
+ (Mpi2SCSIIORequest_t *)request;
+ scsiio_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ scsiio_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ memset(ioc->ctl_cmds.sense, 0, SCSI_SENSE_BUFFERSIZE);
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)
+ mpt3sas_base_put_smid_scsi_io(ioc, smid,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SCSI_TASK_MGMT:
+ {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)request;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "TASK_MGMT: handle(0x%04x), task_type(0x%02x)\n",
+ ioc->name,
+ le16_to_cpu(tm_request->DevHandle), tm_request->TaskType));
+
+ if (tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+ tm_request->TaskType ==
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+ if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ }
+
+ mpt3sas_scsih_set_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SMP_PASSTHROUGH:
+ {
+ Mpi2SmpPassthroughRequest_t *smp_request =
+ (Mpi2SmpPassthroughRequest_t *)mpi_request;
+ u8 *data;
+
+ /* ioc determines which port to use */
+ smp_request->PhysicalPort = 0xFF;
+ if (smp_request->PassthroughFlags &
+ MPI2_SMP_PT_REQ_PT_FLAGS_IMMEDIATE)
+ data = (u8 *)&smp_request->SGL;
+ else {
+ if (unlikely(data_out == NULL)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ mpt3sas_base_free_smid(ioc, smid);
+ ret = -EINVAL;
+ goto out;
+ }
+ data = data_out;
+ }
+
+ if (data[1] == 0x91 && (data[10] == 1 || data[10] == 2)) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SATA_PASSTHROUGH:
+ case MPI2_FUNCTION_FW_DOWNLOAD:
+ case MPI2_FUNCTION_FW_UPLOAD:
+ {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_TOOLBOX:
+ {
+ Mpi2ToolboxCleanRequest_t *toolbox_request =
+ (Mpi2ToolboxCleanRequest_t *)mpi_request;
+
+ if (toolbox_request->Tool == MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL) {
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ } else {
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ }
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+ case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
+ {
+ Mpi2SasIoUnitControlRequest_t *sasiounit_request =
+ (Mpi2SasIoUnitControlRequest_t *)mpi_request;
+
+ if (sasiounit_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET
+ || sasiounit_request->Operation ==
+ MPI2_SAS_OP_PHY_LINK_RESET) {
+ ioc->ioc_link_reset_in_progress = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ /* drop to default case for posting the request */
+ }
+ default:
+ ioc->build_sg_mpi(ioc, psge, data_out_dma, data_out_sz,
+ data_in_dma, data_in_sz);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ break;
+ }
+
+ if (karg.timeout < MPT3_IOCTL_DEFAULT_TIMEOUT)
+ timeout = MPT3_IOCTL_DEFAULT_TIMEOUT;
+ else
+ timeout = karg.timeout;
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ timeout*HZ);
+ if (mpi_request->Function == MPI2_FUNCTION_SCSI_TASK_MGMT) {
+ Mpi2SCSITaskManagementRequest_t *tm_request =
+ (Mpi2SCSITaskManagementRequest_t *)mpi_request;
+ mpt3sas_scsih_clear_tm_flag(ioc, le16_to_cpu(
+ tm_request->DevHandle));
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ } else if ((mpi_request->Function == MPI2_FUNCTION_SMP_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SAS_IO_UNIT_CONTROL) &&
+ ioc->ioc_link_reset_in_progress) {
+ ioc->ioc_link_reset_in_progress = 0;
+ ioc->ignore_loginfos = 0;
+ }
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request, karg.data_sge_offset);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (mpi_reply->Function == MPI2_FUNCTION_SCSI_TASK_MGMT &&
+ (ioc->logging_level & MPT_DEBUG_TM)) {
+ Mpi2SCSITaskManagementReply_t *tm_reply =
+ (Mpi2SCSITaskManagementReply_t *)mpi_reply;
+
+ pr_info(MPT3SAS_FMT "TASK_MGMT: " \
+ "IOCStatus(0x%04x), IOCLogInfo(0x%08x), "
+ "TerminationCount(0x%08x)\n", ioc->name,
+ le16_to_cpu(tm_reply->IOCStatus),
+ le32_to_cpu(tm_reply->IOCLogInfo),
+ le32_to_cpu(tm_reply->TerminationCount));
+ }
+#endif
+ /* copy out xdata to user */
+ if (data_in_sz) {
+ if (copy_to_user(karg.data_in_buf_ptr, data_in,
+ data_in_sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out reply message frame to user */
+ if (karg.max_reply_bytes) {
+ sz = min_t(u32, karg.max_reply_bytes, ioc->reply_sz);
+ if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ /* copy out sense to user */
+ if (karg.max_sense_bytes && (mpi_request->Function ==
+ MPI2_FUNCTION_SCSI_IO_REQUEST || mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+ sz = min_t(u32, karg.max_sense_bytes, SCSI_SENSE_BUFFERSIZE);
+ if (copy_to_user(karg.sense_data_ptr, ioc->ctl_cmds.sense,
+ sz)) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ ret = -ENODATA;
+ goto out;
+ }
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ ret = -ENODATA;
+ if ((mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
+ mpi_request->Function ==
+ MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
+ mpi_request->Function == MPI2_FUNCTION_SATA_PASSTHROUGH)) {
+ pr_info(MPT3SAS_FMT "issue target reset: handle = (0x%04x)\n",
+ ioc->name,
+ le16_to_cpu(mpi_request->FunctionDependent1));
+ mpt3sas_halt_firmware(ioc);
+ mpt3sas_scsih_issue_tm(ioc,
+ le16_to_cpu(mpi_request->FunctionDependent1), 0, 0,
+ 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 30,
+ 0, TM_MUTEX_ON);
+ } else
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ }
+
+ out:
+
+ /* free memory associated with sg buffers */
+ if (data_in)
+ pci_free_consistent(ioc->pdev, data_in_sz, data_in,
+ data_in_dma);
+
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz, data_out,
+ data_out_dma);
+
+ kfree(mpi_request);
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return ret;
+}
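+
+/*
+ * Note on the data flow above: data_out carries the host-to-controller
+ * (WRITE) payload copied in from user space, data_in carries the
+ * controller-to-host (READ) payload copied back to user space on
+ * completion, and both are chained into the request frame at
+ * data_sge_offset*4 by the adapter-specific ioc->build_sg() or
+ * ioc->build_sg_mpi() callback.
+ */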
+
+/**
+ * _ctl_getiocinfo - main handler for MPT3IOCINFO opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_getiocinfo(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_iocinfo karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memset(&karg, 0, sizeof(karg));
+ karg.adapter_type = MPT3_IOCTL_INTERFACE_SAS3;
+ if (ioc->pfacts)
+ karg.port_number = ioc->pfacts[0].PortNumber;
+ karg.hw_rev = ioc->pdev->revision;
+ karg.pci_id = ioc->pdev->device;
+ karg.subsystem_device = ioc->pdev->subsystem_device;
+ karg.subsystem_vendor = ioc->pdev->subsystem_vendor;
+ karg.pci_information.u.bits.bus = ioc->pdev->bus->number;
+ karg.pci_information.u.bits.device = PCI_SLOT(ioc->pdev->devfn);
+ karg.pci_information.u.bits.function = PCI_FUNC(ioc->pdev->devfn);
+ karg.pci_information.segment_id = pci_domain_nr(ioc->pdev->bus);
+ karg.firmware_version = ioc->facts.FWVersion.Word;
+ strcpy(karg.driver_version, MPT3SAS_DRIVER_NAME);
+ strcat(karg.driver_version, "-");
+ strcat(karg.driver_version, MPT3SAS_DRIVER_VERSION);
+ karg.bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventquery - main handler for MPT3EVENTQUERY opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventquery(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventquery karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ karg.event_entries = MPT3SAS_CTL_EVENT_LOG_SIZE;
+ memcpy(karg.event_types, ioc->event_type,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventenable - main handler for MPT3EVENTENABLE opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventenable(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventenable karg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ memcpy(ioc->event_type, karg.event_types,
+ MPI2_EVENT_NOTIFY_EVENTMASK_WORDS * sizeof(u32));
+ mpt3sas_base_validate_event_type(ioc, ioc->event_type);
+
+ if (ioc->event_log)
+ return 0;
+ /* initialize event_log */
+ ioc->event_context = 0;
+ ioc->aen_event_read_flag = 0;
+ ioc->event_log = kcalloc(MPT3SAS_CTL_EVENT_LOG_SIZE,
+ sizeof(struct MPT3_IOCTL_EVENTS), GFP_KERNEL);
+ if (!ioc->event_log) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_eventreport - main handler for MPT3EVENTREPORT opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_eventreport(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_eventreport karg;
+ u32 number_bytes, max_events, max;
+ struct mpt3_ioctl_eventreport __user *uarg = arg;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ number_bytes = karg.hdr.max_data_size -
+ sizeof(struct mpt3_ioctl_header);
+ max_events = number_bytes/sizeof(struct MPT3_IOCTL_EVENTS);
+ max = min_t(u32, MPT3SAS_CTL_EVENT_LOG_SIZE, max_events);
+
+ /* If there is no room for even one event, or the event log was
+ * never allocated, there must have been some type of error.
+ */
+ if (!max || !ioc->event_log)
+ return -ENODATA;
+
+ number_bytes = max * sizeof(struct MPT3_IOCTL_EVENTS);
+ if (copy_to_user(uarg->event_data, ioc->event_log, number_bytes)) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ /* reset flag so SIGIO can restart */
+ ioc->aen_event_read_flag = 0;
+ return 0;
+}
+
+/**
+ * _ctl_do_reset - main handler for MPT3HARDRESET opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_diag_reset karg;
+ int retval;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n", ioc->name,
+ __func__));
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ pr_info(MPT3SAS_FMT "host reset: %s\n",
+ ioc->name, ((!retval) ? "SUCCESS" : "FAILED"));
+ return 0;
+}
+
+/**
+ * _ctl_btdh_search_sas_device - searching for sas device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_sas_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->sas_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == sas_device->handle) {
+ btdh->bus = sas_device->channel;
+ btdh->id = sas_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == sas_device->channel && btdh->id ==
+ sas_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = sas_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_btdh_search_raid_device - searching for raid device
+ * @ioc: per adapter object
+ * @btdh: btdh ioctl payload
+ */
+static int
+_ctl_btdh_search_raid_device(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_ioctl_btdh_mapping *btdh)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int rc = 0;
+
+ if (list_empty(&ioc->raid_device_list))
+ return rc;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (btdh->bus == 0xFFFFFFFF && btdh->id == 0xFFFFFFFF &&
+ btdh->handle == raid_device->handle) {
+ btdh->bus = raid_device->channel;
+ btdh->id = raid_device->id;
+ rc = 1;
+ goto out;
+ } else if (btdh->bus == raid_device->channel && btdh->id ==
+ raid_device->id && btdh->handle == 0xFFFF) {
+ btdh->handle = raid_device->handle;
+ rc = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return rc;
+}
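+
+/*
+ * Mapping semantics (as implemented by the two lookups above): the
+ * application fills in either the firmware handle (with bus/id set to
+ * 0xFFFFFFFF) to learn the bus and target id, or the bus/id pair (with
+ * handle set to 0xFFFF) to learn the firmware handle.
+ */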
+
+/**
+ * _ctl_btdh_mapping - main handler for MPT3BTDHMAPPING opcode
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_btdh_mapping(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_ioctl_btdh_mapping karg;
+ int rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = _ctl_btdh_search_sas_device(ioc, &karg);
+ if (!rc)
+ _ctl_btdh_search_raid_device(ioc, &karg);
+
+ if (copy_to_user(arg, &karg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * _ctl_diag_capability - return diag buffer capability
+ * @ioc: per adapter object
+ * @buffer_type: specifies either TRACE, SNAPSHOT, or EXTENDED
+ *
+ * returns 1 when diag buffer support is enabled in firmware
+ */
+static u8
+_ctl_diag_capability(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type)
+{
+ u8 rc = 0;
+
+ switch (buffer_type) {
+ case MPI2_DIAG_BUF_TYPE_TRACE:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_SNAPSHOT:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
+ rc = 1;
+ break;
+ case MPI2_DIAG_BUF_TYPE_EXTENDED:
+ if (ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
+ rc = 1;
+ }
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_register_2 - wrapper for registering diag buffer support
+ * @ioc: per adapter object
+ * @diag_register: the diag_register struct passed in from user space
+ *
+ */
+static long
+_ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
+ struct mpt3_diag_register *diag_register)
+{
+ int rc, i;
+ void *request_data = NULL;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz = 0;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ u8 buffer_type;
+ unsigned long timeleft;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ u8 issue_reset = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ buffer_type = diag_register->buffer_type;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) {
+ pr_err(MPT3SAS_FMT
+ "%s: already has a registered buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__,
+ buffer_type);
+ return -EINVAL;
+ }
+
+ if (diag_register->requested_buffer_size % 4) {
+ pr_err(MPT3SAS_FMT
+ "%s: the requested_buffer_size is not 4 byte aligned\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ request_data = ioc->diag_buffer[buffer_type];
+ request_data_sz = diag_register->requested_buffer_size;
+ ioc->unique_id[buffer_type] = diag_register->unique_id;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ memcpy(ioc->product_specific[buffer_type],
+ diag_register->product_specific, MPT3_PRODUCT_SPECIFIC_DWORDS);
+ ioc->diagnostic_flags[buffer_type] = diag_register->diagnostic_flags;
+
+ if (request_data) {
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ if (request_data_sz != ioc->diag_buffer_sz[buffer_type]) {
+ pci_free_consistent(ioc->pdev,
+ ioc->diag_buffer_sz[buffer_type],
+ request_data, request_data_dma);
+ request_data = NULL;
+ }
+ }
+
+ if (request_data == NULL) {
+ ioc->diag_buffer_sz[buffer_type] = 0;
+ ioc->diag_buffer_dma[buffer_type] = 0;
+ request_data = pci_alloc_consistent(
+ ioc->pdev, request_data_sz, &request_data_dma);
+ if (request_data == NULL) {
+ pr_err(MPT3SAS_FMT "%s: failed allocating memory" \
+ " for diag buffers, requested size(%d)\n",
+ ioc->name, __func__, request_data_sz);
+ mpt3sas_base_free_smid(ioc, smid);
+ return -ENOMEM;
+ }
+ ioc->diag_buffer[buffer_type] = request_data;
+ ioc->diag_buffer_sz[buffer_type] = request_data_sz;
+ ioc->diag_buffer_dma[buffer_type] = request_data_dma;
+ }
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = diag_register->buffer_type;
+ mpi_request->Flags = cpu_to_le32(diag_register->diagnostic_flags);
+ mpi_request->BufferAddress = cpu_to_le64(request_data_dma);
+ mpi_request->BufferLength = cpu_to_le32(request_data_sz);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(0x%p), dma(0x%llx), sz(%d)\n",
+ ioc->name, __func__, request_data,
+ (unsigned long long)request_data_dma,
+ le32_to_cpu(mpi_request->BufferLength)));
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ if (rc && request_data)
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * mpt3sas_enable_diag_buffer - enable diag buffer support at driver load time
+ * @ioc: per adapter object
+ * @bits_to_register: bitwise field where trace is bit 0, snapshot is bit 1,
+ * and extended is bit 2
+ *
+ * This is called when the command line option diag_buffer_enable is set
+ * at driver load time.
+ */
+void
+mpt3sas_enable_diag_buffer(struct MPT3SAS_ADAPTER *ioc, u8 bits_to_register)
+{
+ struct mpt3_diag_register diag_register;
+
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+
+ if (bits_to_register & 1) {
+ pr_info(MPT3SAS_FMT "registering trace buffer support\n",
+ ioc->name);
+ ioc->diag_trigger_master.MasterData =
+ (MASTER_TRIGGER_FW_FAULT + MASTER_TRIGGER_ADAPTER_RESET);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 2) {
+ pr_info(MPT3SAS_FMT "registering snapshot buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_SNAPSHOT;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075901;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+
+ if (bits_to_register & 4) {
+ pr_info(MPT3SAS_FMT "registering extended buffer support\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_EXTENDED;
+ /* register for 2MB buffers */
+ diag_register.requested_buffer_size = 2 * (1024 * 1024);
+ diag_register.unique_id = 0x7075902;
+ _ctl_diag_register_2(ioc, &diag_register);
+ }
+}
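+
+/*
+ * Usage sketch (illustrative, assuming the diag_buffer_enable option
+ * named above): loading the driver with diag_buffer_enable=3 sets bits
+ * 0 and 1, registering 2MB trace and snapshot buffers, while a value
+ * of 7 also registers the extended buffer (bit 2).
+ */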
+
+/**
+ * _ctl_diag_register - application register with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to setup any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+static long
+_ctl_diag_register(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_register karg;
+ long rc;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ rc = _ctl_diag_register_2(ioc, &karg);
+ return rc;
+}
+
+/**
+ * _ctl_diag_unregister - application unregister with driver
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This will allow the driver to cleanup any memory allocated for diag
+ * messages and to free up any resources.
+ */
+static long
+_ctl_diag_unregister(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_unregister karg;
+ void *request_data;
+ dma_addr_t request_data_dma;
+ u32 request_data_sz;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) has not been released\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_data_sz = ioc->diag_buffer_sz[buffer_type];
+ request_data_dma = ioc->diag_buffer_dma[buffer_type];
+ pci_free_consistent(ioc->pdev, request_data_sz,
+ request_data, request_data_dma);
+ ioc->diag_buffer[buffer_type] = NULL;
+ ioc->diag_buffer_status[buffer_type] = 0;
+ return 0;
+}
+
+/**
+ * _ctl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * The application sends only buffer_type and unique_id. The driver
+ * inspects unique_id first; if valid, it fills in all the info. If
+ * unique_id is 0x00, the driver returns info specified by buffer_type.
+ */
+static long
+_ctl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_query karg;
+ void *request_data;
+ int i;
+ u8 buffer_type;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ karg.application_flags = 0;
+ buffer_type = karg.buffer_type;
+
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id & 0xffffff00) {
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] & MPT3_DIAG_BUFFER_IS_RELEASED)
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID);
+ else
+ karg.application_flags = (MPT3_APP_FLAGS_APP_OWNED |
+ MPT3_APP_FLAGS_BUFFER_VALID |
+ MPT3_APP_FLAGS_FW_BUFFER_ACCESS);
+
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ karg.product_specific[i] =
+ ioc->product_specific[buffer_type][i];
+
+ karg.total_buffer_size = ioc->diag_buffer_sz[buffer_type];
+ karg.driver_added_buffer_size = 0;
+ karg.unique_id = ioc->unique_id[buffer_type];
+ karg.diagnostic_flags = ioc->diagnostic_flags[buffer_type];
+
+ if (copy_to_user(arg, &karg, sizeof(struct mpt3_diag_query))) {
+ pr_err(MPT3SAS_FMT
+ "%s: unable to write mpt3_diag_query data @ %p\n",
+ ioc->name, __func__, arg);
+ return -EFAULT;
+ }
+ return 0;
+}
+
+/**
+ * mpt3sas_send_diag_release - Diag Release Message
+ * @ioc: per adapter object
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @issue_reset - set to 1 when the caller must issue a host reset
+ *
+ */
+int
+mpt3sas_send_diag_release(struct MPT3SAS_ADAPTER *ioc, u8 buffer_type,
+ u8 *issue_reset)
+{
+ Mpi2DiagReleaseRequest_t *mpi_request;
+ Mpi2DiagReleaseReply_t *mpi_reply;
+ u16 smid;
+ u16 ioc_status;
+ u32 ioc_state;
+ int rc;
+ unsigned long timeleft;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ rc = 0;
+ *issue_reset = 0;
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED)
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: skipping due to FAULT state\n", ioc->name,
+ __func__));
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_RELEASE;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagReleaseRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ *issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ out:
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
+/**
+ * _ctl_diag_release - request to send Diag Release Message to firmware
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ *
+ * This allows ownership of the specified buffer to be returned to the
+ * driver, so an application can read the buffer without fear that firmware
+ * is overwriting information in the buffer.
+ */
+static long
+_ctl_diag_release(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_release karg;
+ void *request_data;
+ int rc;
+ u8 buffer_type;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is not registered\n",
+ ioc->name, __func__, buffer_type);
+ return -EINVAL;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ if (ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is already released\n",
+ ioc->name, __func__,
+ buffer_type);
+ return 0;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have memory allocated for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ /* buffers were released due to host reset */
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_DIAG_RESET)) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_RELEASED;
+ ioc->diag_buffer_status[buffer_type] &=
+ ~MPT3_DIAG_BUFFER_IS_DIAG_RESET;
+ pr_err(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) was released due to host reset\n",
+ ioc->name, __func__, buffer_type);
+ return 0;
+ }
+
+ rc = mpt3sas_send_diag_release(ioc, buffer_type, &issue_reset);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ return rc;
+}
+
+/**
+ * _ctl_diag_read_buffer - request for copy of the diag buffer
+ * @ioc: per adapter object
+ * @arg - user space buffer containing ioctl content
+ */
+static long
+_ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+ struct mpt3_diag_read_buffer karg;
+ struct mpt3_diag_read_buffer __user *uarg = arg;
+ void *request_data, *diag_data;
+ Mpi2DiagBufferPostRequest_t *mpi_request;
+ Mpi2DiagBufferPostReply_t *mpi_reply;
+ int rc, i;
+ u8 buffer_type;
+ unsigned long timeleft, request_size, copy_size;
+ u16 smid;
+ u16 ioc_status;
+ u8 issue_reset = 0;
+
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+ __func__));
+
+ buffer_type = karg.unique_id & 0x000000ff;
+ if (!_ctl_diag_capability(ioc, buffer_type)) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have capability for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -EPERM;
+ }
+
+ if (karg.unique_id != ioc->unique_id[buffer_type]) {
+ pr_err(MPT3SAS_FMT
+ "%s: unique_id(0x%08x) is not registered\n",
+ ioc->name, __func__, karg.unique_id);
+ return -EINVAL;
+ }
+
+ request_data = ioc->diag_buffer[buffer_type];
+ if (!request_data) {
+ pr_err(MPT3SAS_FMT
+ "%s: doesn't have buffer for buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type);
+ return -ENOMEM;
+ }
+
+ request_size = ioc->diag_buffer_sz[buffer_type];
+
+ if ((karg.starting_offset % 4) || (karg.bytes_to_read % 4)) {
+ pr_err(MPT3SAS_FMT "%s: either the starting_offset " \
+ "or bytes_to_read are not 4 byte aligned\n", ioc->name,
+ __func__);
+ return -EINVAL;
+ }
+
+ if (karg.starting_offset > request_size)
+ return -EINVAL;
+
+ diag_data = (void *)(request_data + karg.starting_offset);
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: diag_buffer(%p), offset(%d), sz(%d)\n",
+ ioc->name, __func__,
+ diag_data, karg.starting_offset, karg.bytes_to_read));
+
+ /* Truncate data on requests that are too large */
+ if ((diag_data + karg.bytes_to_read < diag_data) ||
+ (diag_data + karg.bytes_to_read > request_data + request_size))
+ copy_size = request_size - karg.starting_offset;
+ else
+ copy_size = karg.bytes_to_read;
+
+ if (copy_to_user((void __user *)uarg->diagnostic_data,
+ diag_data, copy_size)) {
+ pr_err(MPT3SAS_FMT
+ "%s: Unable to write mpt_diag_read_buffer_t data @ %p\n",
+ ioc->name, __func__, diag_data);
+ return -EFAULT;
+ }
+
+ if ((karg.flags & MPT3_FLAGS_REREGISTER) == 0)
+ return 0;
+
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: Reregister buffer_type(0x%02x)\n",
+ ioc->name, __func__, buffer_type));
+ if ((ioc->diag_buffer_status[buffer_type] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: buffer_type(0x%02x) is still registered\n",
+ ioc->name, __func__, buffer_type));
+ return 0;
+ }
+ /* Get a free request frame and save the message context.
+ */
+
+ if (ioc->ctl_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: ctl_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->ctl_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ ioc->ctl_cmds.status = MPT3_CMD_PENDING;
+ memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz);
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->ctl_cmds.smid = smid;
+
+ mpi_request->Function = MPI2_FUNCTION_DIAG_BUFFER_POST;
+ mpi_request->BufferType = buffer_type;
+ mpi_request->BufferLength =
+ cpu_to_le32(ioc->diag_buffer_sz[buffer_type]);
+ mpi_request->BufferAddress =
+ cpu_to_le64(ioc->diag_buffer_dma[buffer_type]);
+ for (i = 0; i < MPT3_PRODUCT_SPECIFIC_DWORDS; i++)
+ mpi_request->ProductSpecific[i] =
+ cpu_to_le32(ioc->product_specific[buffer_type][i]);
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+
+ init_completion(&ioc->ctl_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->ctl_cmds.done,
+ MPT3_IOCTL_DEFAULT_TIMEOUT*HZ);
+
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n", ioc->name,
+ __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2DiagBufferPostRequest_t)/4);
+ if (!(ioc->ctl_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ /* process the completed Reply Message Frame */
+ if ((ioc->ctl_cmds.status & MPT3_CMD_REPLY_VALID) == 0) {
+ pr_err(MPT3SAS_FMT "%s: no reply message\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+
+ mpi_reply = ioc->ctl_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
+
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ ioc->diag_buffer_status[buffer_type] |=
+ MPT3_DIAG_BUFFER_IS_REGISTERED;
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s: success\n",
+ ioc->name, __func__));
+ } else {
+ pr_info(MPT3SAS_FMT
+ "%s: ioc_status(0x%04x) log_info(0x%08x)\n",
+ ioc->name, __func__,
+ ioc_status, le32_to_cpu(mpi_reply->IOCLogInfo));
+ rc = -EFAULT;
+ }
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ out:
+
+ ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
+ return rc;
+}
+
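+/*
+ * Lifecycle summary (derived from the handlers above): an application
+ * registers a buffer (MPT3DIAGREGISTER), firmware fills it, the
+ * application releases ownership (MPT3DIAGRELEASE) so firmware stops
+ * writing, reads the contents back (MPT3DIAGREADBUFFER), and may
+ * re-register within the same read by setting MPT3_FLAGS_REREGISTER.
+ */
+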
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_compat_mpt_command - convert 32bit pointers to 64bit.
+ * @ioc: per adapter object
+ * @cmd - ioctl opcode
+ * @arg - (struct mpt3_ioctl_command32)
+ *
+ * MPT3COMMAND32 - Handle 32bit applications running on 64bit os.
+ */
+static long
+_ctl_compat_mpt_command(struct MPT3SAS_ADAPTER *ioc, unsigned cmd,
+ void __user *arg)
+{
+ struct mpt3_ioctl_command32 karg32;
+ struct mpt3_ioctl_command32 __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+ if (_IOC_SIZE(cmd) != sizeof(struct mpt3_ioctl_command32))
+ return -EINVAL;
+
+ uarg = (struct mpt3_ioctl_command32 __user *) arg;
+
+ if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ memset(&karg, 0, sizeof(struct mpt3_ioctl_command));
+ karg.hdr.ioc_number = karg32.hdr.ioc_number;
+ karg.hdr.port_number = karg32.hdr.port_number;
+ karg.hdr.max_data_size = karg32.hdr.max_data_size;
+ karg.timeout = karg32.timeout;
+ karg.max_reply_bytes = karg32.max_reply_bytes;
+ karg.data_in_size = karg32.data_in_size;
+ karg.data_out_size = karg32.data_out_size;
+ karg.max_sense_bytes = karg32.max_sense_bytes;
+ karg.data_sge_offset = karg32.data_sge_offset;
+ karg.reply_frame_buf_ptr = compat_ptr(karg32.reply_frame_buf_ptr);
+ karg.data_in_buf_ptr = compat_ptr(karg32.data_in_buf_ptr);
+ karg.data_out_buf_ptr = compat_ptr(karg32.data_out_buf_ptr);
+ karg.sense_data_ptr = compat_ptr(karg32.sense_data_ptr);
+ return _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+}
+#endif
+
+/**
+ * _ctl_ioctl_main - main ioctl entry point
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data payload
+ * @compat - handles 32 bit applications in 64bit os
+ */
+static long
+_ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
+ u8 compat)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct mpt3_ioctl_header ioctl_header;
+ enum block_state state;
+ long ret = -EINVAL;
+
+ /* get IOCTL header */
+ if (copy_from_user(&ioctl_header, (char __user *)arg,
+ sizeof(struct mpt3_ioctl_header))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ return -EFAULT;
+ }
+
+ if (_ctl_verify_adapter(ioctl_header.ioc_number, &ioc) == -1 || !ioc)
+ return -ENODEV;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery ||
+ ioc->is_driver_loading)
+ return -EAGAIN;
+
+ state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING : BLOCKING;
+ if (state == NON_BLOCKING) {
+ if (!mutex_trylock(&ioc->ctl_cmds.mutex))
+ return -EAGAIN;
+ } else if (mutex_lock_interruptible(&ioc->ctl_cmds.mutex))
+ return -ERESTARTSYS;
+
+
+ switch (cmd) {
+ case MPT3IOCINFO:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_iocinfo))
+ ret = _ctl_getiocinfo(ioc, arg);
+ break;
+#ifdef CONFIG_COMPAT
+ case MPT3COMMAND32:
+#endif
+ case MPT3COMMAND:
+ {
+ struct mpt3_ioctl_command __user *uarg;
+ struct mpt3_ioctl_command karg;
+
+#ifdef CONFIG_COMPAT
+ if (compat) {
+ ret = _ctl_compat_mpt_command(ioc, cmd, arg);
+ break;
+ }
+#endif
+ if (copy_from_user(&karg, arg, sizeof(karg))) {
+ pr_err("failure at %s:%d/%s()!\n",
+ __FILE__, __LINE__, __func__);
+ ret = -EFAULT;
+ break;
+ }
+
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_command)) {
+ uarg = arg;
+ ret = _ctl_do_mpt_command(ioc, karg, &uarg->mf);
+ }
+ break;
+ }
+ case MPT3EVENTQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventquery))
+ ret = _ctl_eventquery(ioc, arg);
+ break;
+ case MPT3EVENTENABLE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_eventenable))
+ ret = _ctl_eventenable(ioc, arg);
+ break;
+ case MPT3EVENTREPORT:
+ ret = _ctl_eventreport(ioc, arg);
+ break;
+ case MPT3HARDRESET:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_diag_reset))
+ ret = _ctl_do_reset(ioc, arg);
+ break;
+ case MPT3BTDHMAPPING:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_ioctl_btdh_mapping))
+ ret = _ctl_btdh_mapping(ioc, arg);
+ break;
+ case MPT3DIAGREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_register))
+ ret = _ctl_diag_register(ioc, arg);
+ break;
+ case MPT3DIAGUNREGISTER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_unregister))
+ ret = _ctl_diag_unregister(ioc, arg);
+ break;
+ case MPT3DIAGQUERY:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_query))
+ ret = _ctl_diag_query(ioc, arg);
+ break;
+ case MPT3DIAGRELEASE:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_release))
+ ret = _ctl_diag_release(ioc, arg);
+ break;
+ case MPT3DIAGREADBUFFER:
+ if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
+ ret = _ctl_diag_read_buffer(ioc, arg);
+ break;
+ default:
+ dctlprintk(ioc, pr_info(MPT3SAS_FMT
+ "unsupported ioctl opcode(0x%08x)\n", ioc->name, cmd));
+ break;
+ }
+
+ mutex_unlock(&ioc->ctl_cmds.mutex);
+ return ret;
+}
+
+/**
+ * _ctl_ioctl - main ioctl entry point (unlocked)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data buffer
+ */
+static long
+_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 0);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/**
+ * _ctl_ioctl_compat - main ioctl entry point (compat)
+ * @file - (struct file)
+ * @cmd - ioctl opcode
+ * @arg - user space data buffer
+ *
+ * This routine handles 32-bit applications running on a 64-bit OS.
+ */
+static long
+_ctl_ioctl_compat(struct file *file, unsigned cmd, unsigned long arg)
+{
+ long ret;
+
+ ret = _ctl_ioctl_main(file, cmd, (void __user *)arg, 1);
+ return ret;
+}
+#endif
+
+/* scsi host attributes */
+/**
+ * _ctl_version_fw_show - firmware version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_fw_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
+ (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
+ (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
+ ioc->facts.FWVersion.Word & 0x000000FF);
+}
+static DEVICE_ATTR(version_fw, S_IRUGO, _ctl_version_fw_show, NULL);
+
+/**
+ * _ctl_version_bios_show - bios version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_bios_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ u32 version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
+
+ return snprintf(buf, PAGE_SIZE, "%02d.%02d.%02d.%02d\n",
+ (version & 0xFF000000) >> 24,
+ (version & 0x00FF0000) >> 16,
+ (version & 0x0000FF00) >> 8,
+ version & 0x000000FF);
+}
+static DEVICE_ATTR(version_bios, S_IRUGO, _ctl_version_bios_show, NULL);
+
+/**
+ * _ctl_version_mpi_show - MPI (message passing interface) version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_mpi_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%03x.%02x\n",
+ ioc->facts.MsgVersion, ioc->facts.HeaderVersion >> 8);
+}
+static DEVICE_ATTR(version_mpi, S_IRUGO, _ctl_version_mpi_show, NULL);
+
+/**
+ * _ctl_version_product_show - product name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_product_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.ChipName);
+}
+static DEVICE_ATTR(version_product, S_IRUGO, _ctl_version_product_show, NULL);
+
+/**
+ * _ctl_version_nvdata_persistent_show - nvdata persistent version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_persistent_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionPersistent.Word));
+}
+static DEVICE_ATTR(version_nvdata_persistent, S_IRUGO,
+ _ctl_version_nvdata_persistent_show, NULL);
+
+/**
+ * _ctl_version_nvdata_default_show - nvdata default version
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_version_nvdata_default_show(struct device *cdev, struct device_attribute
+ *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n",
+ le32_to_cpu(ioc->iounit_pg0.NvdataVersionDefault.Word));
+}
+static DEVICE_ATTR(version_nvdata_default, S_IRUGO,
+ _ctl_version_nvdata_default_show, NULL);
+
+/**
+ * _ctl_board_name_show - board name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_name_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardName);
+}
+static DEVICE_ATTR(board_name, S_IRUGO, _ctl_board_name_show, NULL);
+
+/**
+ * _ctl_board_assembly_show - board assembly name
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_assembly_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardAssembly);
+}
+static DEVICE_ATTR(board_assembly, S_IRUGO, _ctl_board_assembly_show, NULL);
+
+/**
+ * _ctl_board_tracer_show - board tracer number
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_board_tracer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, 16, "%s\n", ioc->manu_pg0.BoardTracerNumber);
+}
+static DEVICE_ATTR(board_tracer, S_IRUGO, _ctl_board_tracer_show, NULL);
+
+/**
+ * _ctl_io_delay_show - io missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_io_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->io_missing_delay);
+}
+static DEVICE_ATTR(io_delay, S_IRUGO, _ctl_io_delay_show, NULL);
+
+/**
+ * _ctl_device_delay_show - device missing delay
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware implementation for debouncing device
+ * removal events.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_device_delay_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->device_missing_delay);
+}
+static DEVICE_ATTR(device_delay, S_IRUGO, _ctl_device_delay_show, NULL);
+
+/**
+ * _ctl_fw_queue_depth_show - global credits
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the firmware queue depth limit.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_fw_queue_depth_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%02d\n", ioc->facts.RequestCredit);
+}
+static DEVICE_ATTR(fw_queue_depth, S_IRUGO, _ctl_fw_queue_depth_show, NULL);
+
+/**
+ * _ctl_host_sas_address_show - sas address
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the controller sas address
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_sas_address_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)ioc->sas_hba.sas_address);
+}
+static DEVICE_ATTR(host_sas_address, S_IRUGO,
+ _ctl_host_sas_address_show, NULL);
+
+/**
+ * _ctl_logging_level_show - logging level
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_logging_level_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%08xh\n", ioc->logging_level);
+}
+static ssize_t
+_ctl_logging_level_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%x", &val) != 1)
+ return -EINVAL;
+
+ ioc->logging_level = val;
+ pr_info(MPT3SAS_FMT "logging_level=%08xh\n", ioc->name,
+ ioc->logging_level);
+ return strlen(buf);
+}
+static DEVICE_ATTR(logging_level, S_IRUGO | S_IWUSR, _ctl_logging_level_show,
+ _ctl_logging_level_store);
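+
+/*
+ * Usage sketch (hypothetical sysfs path): the store handler parses the
+ * value as hex, so enabling ioctl debugging (MPT_DEBUG_IOCTL, 0x8000)
+ * from the shell looks like:
+ *
+ *	# echo 8000 > /sys/class/scsi_host/host0/logging_level
+ */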
+
+/**
+ * _ctl_fwfault_debug_show - show/store fwfault_debug
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * mpt3sas_fwfault_debug is a command line option.
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_fwfault_debug_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->fwfault_debug);
+}
+static ssize_t
+_ctl_fwfault_debug_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->fwfault_debug = val;
+ pr_info(MPT3SAS_FMT "fwfault_debug=%d\n", ioc->name,
+ ioc->fwfault_debug);
+ return strlen(buf);
+}
+static DEVICE_ATTR(fwfault_debug, S_IRUGO | S_IWUSR,
+ _ctl_fwfault_debug_show, _ctl_fwfault_debug_store);
+
+/**
+ * _ctl_ioc_reset_count_show - ioc reset count
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of times the IOC has been reset.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reset_count_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", ioc->ioc_reset_count);
+}
+static DEVICE_ATTR(ioc_reset_count, S_IRUGO, _ctl_ioc_reset_count_show, NULL);
+
+/**
+ * _ctl_ioc_reply_queue_count_show - number of reply queues
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * This is the number of reply queues.
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_ioc_reply_queue_count_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 reply_queue_count;
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((ioc->facts.IOCCapabilities &
+ MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable)
+ reply_queue_count = ioc->reply_queue_count;
+ else
+ reply_queue_count = 1;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", reply_queue_count);
+}
+static DEVICE_ATTR(reply_queue_count, S_IRUGO, _ctl_ioc_reply_queue_count_show,
+ NULL);
+
+struct DIAG_BUFFER_START {
+ __le32 Size;
+ __le32 DiagVersion;
+ u8 BufferType;
+ u8 Reserved[3];
+ __le32 Reserved1;
+ __le32 Reserved2;
+ __le32 Reserved3;
+};
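+
+/*
+ * Note: in the size handler below, the Reserved3 value 0x4742444c is
+ * evidently used as a signature (the bytes spell "LDBG" when read from a
+ * little-endian dword) marking a firmware-written buffer header as valid.
+ */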
+
+/**
+ * _ctl_host_trace_buffer_size_show - host buffer size (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read-only' shost attribute.
+ */
+static ssize_t
+_ctl_host_trace_buffer_size_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ u32 size = 0;
+ struct DIAG_BUFFER_START *request_data;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ request_data = (struct DIAG_BUFFER_START *)
+ ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE];
+ if ((le32_to_cpu(request_data->DiagVersion) == 0x00000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01000000 ||
+ le32_to_cpu(request_data->DiagVersion) == 0x01010000) &&
+ le32_to_cpu(request_data->Reserved3) == 0x4742444c)
+ size = le32_to_cpu(request_data->Size);
+
+ ioc->ring_buffer_sz = size;
+ return snprintf(buf, PAGE_SIZE, "%d\n", size);
+}
+static DEVICE_ATTR(host_trace_buffer_size, S_IRUGO,
+ _ctl_host_trace_buffer_size_show, NULL);
+
+/**
+ * _ctl_host_trace_buffer_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * You will only be able to read 4k bytes of the ring buffer at a time.
+ * To read beyond the first 4k bytes, write the desired offset back to
+ * this same attribute; that moves the read pointer (see the usage
+ * sketch following the attribute definition below).
+ */
+static ssize_t
+_ctl_host_trace_buffer_show(struct device *cdev, struct device_attribute *attr,
+ char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ void *request_data;
+ u32 size;
+
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ pr_err(MPT3SAS_FMT
+ "%s: host_trace_buffer is not registered\n",
+ ioc->name, __func__);
+ return 0;
+ }
+
+ if (ioc->ring_buffer_offset > ioc->ring_buffer_sz)
+ return 0;
+
+ size = ioc->ring_buffer_sz - ioc->ring_buffer_offset;
+ size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+ request_data = ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] +
+ ioc->ring_buffer_offset;
+ memcpy(buf, request_data, size);
+ return size;
+}
+
+static ssize_t
+_ctl_host_trace_buffer_store(struct device *cdev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int val = 0;
+
+ if (sscanf(buf, "%d", &val) != 1)
+ return -EINVAL;
+
+ ioc->ring_buffer_offset = val;
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_show, _ctl_host_trace_buffer_store);
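+
+/*
+ * Usage sketch (hypothetical sysfs path): reading past the first 4k of
+ * the trace buffer requires writing the next offset back to this same
+ * attribute before each read:
+ *
+ *	# cat /sys/class/scsi_host/host0/host_trace_buffer > chunk.0
+ *	# echo 4096 > /sys/class/scsi_host/host0/host_trace_buffer
+ *	# cat /sys/class/scsi_host/host0/host_trace_buffer > chunk.1
+ */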
+
+
+/*****************************************/
+
+/**
+ * _ctl_host_trace_buffer_enable_show - firmware ring buffer (trace only)
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ *
+ * This is a mechanism for posting/releasing the host trace buffers.
+ */
+static ssize_t
+_ctl_host_trace_buffer_enable_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if ((!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) ||
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+ else if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ return snprintf(buf, PAGE_SIZE, "release\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "post\n");
+}
+
+static ssize_t
+_ctl_host_trace_buffer_enable_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ char str[10] = "";
+ struct mpt3_diag_register diag_register;
+ u8 issue_reset = 0;
+
+ /* don't allow post/release to occur while recovery is active */
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery || ioc->is_driver_loading)
+ return -EBUSY;
+
+ if (sscanf(buf, "%9s", str) != 1)
+ return -EINVAL;
+
+ if (!strcmp(str, "post")) {
+ /* exit out if host buffers are already posted */
+ if ((ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE]) &&
+ (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) &&
+ ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0))
+ goto out;
+ memset(&diag_register, 0, sizeof(struct mpt3_diag_register));
+ pr_info(MPT3SAS_FMT "posting host trace buffers\n",
+ ioc->name);
+ diag_register.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ diag_register.requested_buffer_size = (1024 * 1024);
+ diag_register.unique_id = 0x7075900;
+ ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] = 0;
+ _ctl_diag_register_2(ioc, &diag_register);
+ } else if (!strcmp(str, "release")) {
+ /* exit out if host buffers are already released */
+ if (!ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE])
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0)
+ goto out;
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ goto out;
+ pr_info(MPT3SAS_FMT "releasing host trace buffer\n",
+ ioc->name);
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ out:
+ return strlen(buf);
+}
+static DEVICE_ATTR(host_trace_buffer_enable, S_IRUGO | S_IWUSR,
+ _ctl_host_trace_buffer_enable_show,
+ _ctl_host_trace_buffer_enable_store);
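+
+/*
+ * Usage sketch (hypothetical sysfs path): posting and releasing the host
+ * trace buffer from user space:
+ *
+ *	# echo post > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ *	# echo release > /sys/class/scsi_host/host0/host_trace_buffer_enable
+ */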
+
+/*********** diagnostic trigger support **********************************/
+
+/**
+ * _ctl_diag_trigger_master_show - show the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MASTER_TRIGGER_T);
+ memcpy(buf, &ioc->diag_trigger_master, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_master_store - store the diag_trigger_master attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_master_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = min(sizeof(struct SL_WH_MASTER_TRIGGER_T), count);
+ memset(&ioc->diag_trigger_master, 0,
+ sizeof(struct SL_WH_MASTER_TRIGGER_T));
+ memcpy(&ioc->diag_trigger_master, buf, rc);
+ ioc->diag_trigger_master.MasterData |=
+ (MASTER_TRIGGER_FW_FAULT | MASTER_TRIGGER_ADAPTER_RESET);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+static DEVICE_ATTR(diag_trigger_master, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_master_show, _ctl_diag_trigger_master_store);
+
+
+/**
+ * _ctl_diag_trigger_event_show - show the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_EVENT_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_event, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_event_store - store the diag_trigger_event attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_event_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_EVENT_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_event, 0,
+ sizeof(struct SL_WH_EVENT_TRIGGERS_T));
+ memcpy(&ioc->diag_trigger_event, buf, sz);
+ if (ioc->diag_trigger_event.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_event.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_event, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_event_show, _ctl_diag_trigger_event_store);
+
+
+/**
+ * _ctl_diag_trigger_scsi_show - show the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_SCSI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_scsi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_scsi_store - store the diag_trigger_scsi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_scsi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_SCSI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_scsi, 0,
+ sizeof(ioc->diag_trigger_scsi));
+ memcpy(&ioc->diag_trigger_scsi, buf, sz);
+ if (ioc->diag_trigger_scsi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_scsi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+static DEVICE_ATTR(diag_trigger_scsi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_scsi_show, _ctl_diag_trigger_scsi_store);
+
+
+/**
+ * _ctl_diag_trigger_mpi_show - show the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_show(struct device *cdev,
+ struct device_attribute *attr, char *buf)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t rc;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ rc = sizeof(struct SL_WH_MPI_TRIGGERS_T);
+ memcpy(buf, &ioc->diag_trigger_mpi, rc);
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return rc;
+}
+
+/**
+ * _ctl_diag_trigger_mpi_store - store the diag_trigger_mpi attribute
+ * @cdev - pointer to embedded class device
+ * @buf - the buffer returned
+ *
+ * A sysfs 'read/write' shost attribute.
+ */
+static ssize_t
+_ctl_diag_trigger_mpi_store(struct device *cdev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct Scsi_Host *shost = class_to_shost(cdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ unsigned long flags;
+ ssize_t sz;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
+ memset(&ioc->diag_trigger_mpi, 0,
+ sizeof(ioc->diag_trigger_mpi));
+ memcpy(&ioc->diag_trigger_mpi, buf, sz);
+ if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
+ ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return sz;
+}
+
+static DEVICE_ATTR(diag_trigger_mpi, S_IRUGO | S_IWUSR,
+ _ctl_diag_trigger_mpi_show, _ctl_diag_trigger_mpi_store);
+
+/*********** diagnostic trigger support *** END *****************************/
+
+
+
+/*****************************************/
+
+struct device_attribute *mpt3sas_host_attrs[] = {
+ &dev_attr_version_fw,
+ &dev_attr_version_bios,
+ &dev_attr_version_mpi,
+ &dev_attr_version_product,
+ &dev_attr_version_nvdata_persistent,
+ &dev_attr_version_nvdata_default,
+ &dev_attr_board_name,
+ &dev_attr_board_assembly,
+ &dev_attr_board_tracer,
+ &dev_attr_io_delay,
+ &dev_attr_device_delay,
+ &dev_attr_logging_level,
+ &dev_attr_fwfault_debug,
+ &dev_attr_fw_queue_depth,
+ &dev_attr_host_sas_address,
+ &dev_attr_ioc_reset_count,
+ &dev_attr_host_trace_buffer_size,
+ &dev_attr_host_trace_buffer,
+ &dev_attr_host_trace_buffer_enable,
+ &dev_attr_reply_queue_count,
+ &dev_attr_diag_trigger_master,
+ &dev_attr_diag_trigger_event,
+ &dev_attr_diag_trigger_scsi,
+ &dev_attr_diag_trigger_mpi,
+ NULL,
+};
+
+/* device attributes */
+
+/**
+ * _ctl_device_sas_address_show - sas address
+ * @dev - pointer to embedded device
+ * @buf - the buffer returned
+ *
+ * This is the sas address for the target
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_sas_address_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%016llx\n",
+ (unsigned long long)sas_device_priv_data->sas_target->sas_address);
+}
+static DEVICE_ATTR(sas_address, S_IRUGO, _ctl_device_sas_address_show, NULL);
+
+/**
+ * _ctl_device_handle_show - device handle
+ * @dev - pointer to embedded device
+ * @buf - the buffer returned
+ *
+ * This is the firmware assigned device handle
+ *
+ * A sysfs 'read-only' sdev attribute.
+ */
+static ssize_t
+_ctl_device_handle_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_DEVICE *sas_device_priv_data = sdev->hostdata;
+
+ return snprintf(buf, PAGE_SIZE, "0x%04x\n",
+ sas_device_priv_data->sas_target->handle);
+}
+static DEVICE_ATTR(sas_device_handle, S_IRUGO, _ctl_device_handle_show, NULL);
+
+struct device_attribute *mpt3sas_dev_attrs[] = {
+ &dev_attr_sas_address,
+ &dev_attr_sas_device_handle,
+ NULL,
+};
+
+static const struct file_operations ctl_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = _ctl_ioctl,
+ .release = _ctl_release,
+ .poll = _ctl_poll,
+ .fasync = _ctl_fasync,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = _ctl_ioctl_compat,
+#endif
+};
+
+static struct miscdevice ctl_dev = {
+ .minor = MPT3SAS_MINOR,
+ .name = MPT3SAS_DEV_NAME,
+ .fops = &ctl_fops,
+};
+
+/**
+ * mpt3sas_ctl_init - main entry point for ctl.
+ *
+ */
+void
+mpt3sas_ctl_init(void)
+{
+ async_queue = NULL;
+ if (misc_register(&ctl_dev) < 0)
+ pr_err("%s can't register misc device [minor=%d]\n",
+ MPT3SAS_DRIVER_NAME, MPT3SAS_MINOR);
+
+ init_waitqueue_head(&ctl_poll_wait);
+}
+
+/**
+ * mpt3sas_ctl_exit - exit point for ctl
+ *
+ */
+void
+mpt3sas_ctl_exit(void)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ int i;
+
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list) {
+
+ /* free memory associated to diag buffers */
+ for (i = 0; i < MPI2_DIAG_BUF_TYPE_COUNT; i++) {
+ if (!ioc->diag_buffer[i])
+ continue;
+ if (!(ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED))
+ continue;
+ if ((ioc->diag_buffer_status[i] &
+ MPT3_DIAG_BUFFER_IS_RELEASED))
+ continue;
+ pci_free_consistent(ioc->pdev, ioc->diag_buffer_sz[i],
+ ioc->diag_buffer[i], ioc->diag_buffer_dma[i]);
+ ioc->diag_buffer[i] = NULL;
+ ioc->diag_buffer_status[i] = 0;
+ }
+
+ kfree(ioc->event_log);
+ }
+ misc_deregister(&ctl_dev);
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
new file mode 100644
index 000000000000..bd89f4f00550
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -0,0 +1,418 @@
+/*
+ * Management Module Support for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_CTL_H_INCLUDED
+#define MPT3SAS_CTL_H_INCLUDED
+
+#ifdef __KERNEL__
+#include <linux/miscdevice.h>
+#endif
+
+
+#ifndef MPT3SAS_MINOR
+#define MPT3SAS_MINOR (MPT_MINOR + 2)
+#endif
+#define MPT3SAS_DEV_NAME "mpt3ctl"
+#define MPT3_MAGIC_NUMBER 'L'
+#define MPT3_IOCTL_DEFAULT_TIMEOUT (10) /* in seconds */
+
+/**
+ * IOCTL opcodes
+ */
+#define MPT3IOCINFO _IOWR(MPT3_MAGIC_NUMBER, 17, \
+ struct mpt3_ioctl_iocinfo)
+#define MPT3COMMAND _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command)
+#ifdef CONFIG_COMPAT
+#define MPT3COMMAND32 _IOWR(MPT3_MAGIC_NUMBER, 20, \
+ struct mpt3_ioctl_command32)
+#endif
+#define MPT3EVENTQUERY _IOWR(MPT3_MAGIC_NUMBER, 21, \
+ struct mpt3_ioctl_eventquery)
+#define MPT3EVENTENABLE _IOWR(MPT3_MAGIC_NUMBER, 22, \
+ struct mpt3_ioctl_eventenable)
+#define MPT3EVENTREPORT _IOWR(MPT3_MAGIC_NUMBER, 23, \
+ struct mpt3_ioctl_eventreport)
+#define MPT3HARDRESET _IOWR(MPT3_MAGIC_NUMBER, 24, \
+ struct mpt3_ioctl_diag_reset)
+#define MPT3BTDHMAPPING _IOWR(MPT3_MAGIC_NUMBER, 31, \
+ struct mpt3_ioctl_btdh_mapping)
+
+/* diag buffer support */
+#define MPT3DIAGREGISTER _IOWR(MPT3_MAGIC_NUMBER, 26, \
+ struct mpt3_diag_register)
+#define MPT3DIAGRELEASE _IOWR(MPT3_MAGIC_NUMBER, 27, \
+ struct mpt3_diag_release)
+#define MPT3DIAGUNREGISTER _IOWR(MPT3_MAGIC_NUMBER, 28, \
+ struct mpt3_diag_unregister)
+#define MPT3DIAGQUERY _IOWR(MPT3_MAGIC_NUMBER, 29, \
+ struct mpt3_diag_query)
+#define MPT3DIAGREADBUFFER _IOWR(MPT3_MAGIC_NUMBER, 30, \
+ struct mpt3_diag_read_buffer)
+
+/**
+ * struct mpt3_ioctl_header - main header structure
+ * @ioc_number - IOC unit number
+ * @port_number - IOC port number
+ * @max_data_size - maximum number of bytes to transfer on read
+ */
+struct mpt3_ioctl_header {
+ uint32_t ioc_number;
+ uint32_t port_number;
+ uint32_t max_data_size;
+};
+
+/**
+ * struct mpt3_ioctl_diag_reset - diagnostic reset
+ * @hdr - generic header
+ */
+struct mpt3_ioctl_diag_reset {
+ struct mpt3_ioctl_header hdr;
+};
+
+
+/**
+ * struct mpt3_ioctl_pci_info - pci device info
+ * @device - pci device id
+ * @function - pci function id
+ * @bus - pci bus id
+ * @segment_id - pci segment id
+ */
+struct mpt3_ioctl_pci_info {
+ union {
+ struct {
+ uint32_t device:5;
+ uint32_t function:3;
+ uint32_t bus:24;
+ } bits;
+ uint32_t word;
+ } u;
+ uint32_t segment_id;
+};
+
+
+#define MPT2_IOCTL_INTERFACE_SCSI (0x00)
+#define MPT2_IOCTL_INTERFACE_FC (0x01)
+#define MPT2_IOCTL_INTERFACE_FC_IP (0x02)
+#define MPT2_IOCTL_INTERFACE_SAS (0x03)
+#define MPT2_IOCTL_INTERFACE_SAS2 (0x04)
+#define MPT3_IOCTL_INTERFACE_SAS3 (0x06)
+#define MPT2_IOCTL_VERSION_LENGTH (32)
+
+/**
+ * struct mpt3_ioctl_iocinfo - generic controller info
+ * @hdr - generic header
+ * @adapter_type - type of adapter (spi, fc, sas)
+ * @port_number - port number
+ * @pci_id - PCI Id
+ * @hw_rev - hardware revision
+ * @subsystem_device - PCI subsystem Device ID
+ * @subsystem_vendor - PCI subsystem Vendor ID
+ * @rsvd0 - reserved
+ * @firmware_version - firmware version
+ * @bios_version - BIOS version
+ * @driver_version - driver version - 32 ASCII characters
+ * @rsvd1 - reserved
+ * @scsi_id - scsi id of adapter 0
+ * @rsvd2 - reserved
+ * @pci_information - pci info (2nd revision)
+ */
+struct mpt3_ioctl_iocinfo {
+ struct mpt3_ioctl_header hdr;
+ uint32_t adapter_type;
+ uint32_t port_number;
+ uint32_t pci_id;
+ uint32_t hw_rev;
+ uint32_t subsystem_device;
+ uint32_t subsystem_vendor;
+ uint32_t rsvd0;
+ uint32_t firmware_version;
+ uint32_t bios_version;
+ uint8_t driver_version[MPT2_IOCTL_VERSION_LENGTH];
+ uint8_t rsvd1;
+ uint8_t scsi_id;
+ uint16_t rsvd2;
+ struct mpt3_ioctl_pci_info pci_information;
+};
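+
+/*
+ * Usage sketch (hypothetical user space code): querying controller info
+ * through the MPT3IOCINFO opcode; fd and error handling are assumed:
+ *
+ *	struct mpt3_ioctl_iocinfo info;
+ *
+ *	memset(&info, 0, sizeof(info));
+ *	info.hdr.ioc_number = 0;
+ *	fd = open("/dev/mpt3ctl", O_RDWR);
+ *	if (fd >= 0 && !ioctl(fd, MPT3IOCINFO, &info))
+ *		printf("firmware version 0x%08x\n", info.firmware_version);
+ */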
+
+
+/* number of event log entries */
+#define MPT3SAS_CTL_EVENT_LOG_SIZE (50)
+
+/**
+ * struct mpt3_ioctl_eventquery - query event count and type
+ * @hdr - generic header
+ * @event_entries - number of events returned by get_event_report
+ * @rsvd - reserved
+ * @event_types - type of events currently being captured
+ */
+struct mpt3_ioctl_eventquery {
+ struct mpt3_ioctl_header hdr;
+ uint16_t event_entries;
+ uint16_t rsvd;
+ uint32_t event_types[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS];
+};
+
+/**
+ * struct mpt3_ioctl_eventenable - enable/disable event capturing
+ * @hdr - generic header
+ * @event_types - toggle off/on type of events to be captured
+ */
+struct mpt3_ioctl_eventenable {
+ struct mpt3_ioctl_header hdr;
+ uint32_t event_types[4];
+};
+
+#define MPT3_EVENT_DATA_SIZE (192)
+/**
+ * struct MPT3_IOCTL_EVENTS - event log entry
+ * @event - the event that was reported
+ * @context - unique value for each event assigned by driver
+ * @data - event data returned in fw reply message
+ */
+struct MPT3_IOCTL_EVENTS {
+ uint32_t event;
+ uint32_t context;
+ uint8_t data[MPT3_EVENT_DATA_SIZE];
+};
+
+/**
+ * struct mpt3_ioctl_eventreport - returning event log
+ * @hdr - generic header
+ * @event_data - (see struct MPT3_IOCTL_EVENTS)
+ */
+struct mpt3_ioctl_eventreport {
+ struct mpt3_ioctl_header hdr;
+ struct MPT3_IOCTL_EVENTS event_data[1];
+};
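+
+/*
+ * A typical event flow (sketch): MPT3EVENTENABLE selects the event types
+ * to capture, MPT3EVENTQUERY reports how many entries the driver keeps
+ * (MPT3SAS_CTL_EVENT_LOG_SIZE) along with the current event_types mask,
+ * and MPT3EVENTREPORT then copies out up to hdr.max_data_size bytes of
+ * the captured log.
+ */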
+
+/**
+ * struct mpt3_ioctl_command - generic mpt firmware passthru ioctl
+ * @hdr - generic header
+ * @timeout - command timeout in seconds. (if zero then use driver default
+ * value).
+ * @reply_frame_buf_ptr - reply location
+ * @data_in_buf_ptr - destination for read
+ * @data_out_buf_ptr - data source for write
+ * @sense_data_ptr - sense data location
+ * @max_reply_bytes - maximum number of reply bytes to be sent to app.
+ * @data_in_size - number of bytes for data transfer in (read)
+ * @data_out_size - number of bytes for data transfer out (write)
+ * @max_sense_bytes - maximum number of bytes for auto sense buffers
+ * @data_sge_offset - offset in words from the start of the request message to
+ * the first SGL
+ * @mf - request message frame
+ */
+struct mpt3_ioctl_command {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ void __user *reply_frame_buf_ptr;
+ void __user *data_in_buf_ptr;
+ void __user *data_out_buf_ptr;
+ void __user *sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+
+#ifdef CONFIG_COMPAT
+struct mpt3_ioctl_command32 {
+ struct mpt3_ioctl_header hdr;
+ uint32_t timeout;
+ uint32_t reply_frame_buf_ptr;
+ uint32_t data_in_buf_ptr;
+ uint32_t data_out_buf_ptr;
+ uint32_t sense_data_ptr;
+ uint32_t max_reply_bytes;
+ uint32_t data_in_size;
+ uint32_t data_out_size;
+ uint32_t max_sense_bytes;
+ uint32_t data_sge_offset;
+ uint8_t mf[1];
+};
+#endif
+
+/**
+ * struct mpt3_ioctl_btdh_mapping - mapping info
+ * @hdr - generic header
+ * @id - target device identification number
+ * @bus - SCSI bus number that the target device exists on
+ * @handle - device handle for the target device
+ * @rsvd - reserved
+ *
+ * To obtain the bus/id, the application sets @handle to a valid device
+ * handle and sets @bus and @id to 0xFFFF.
+ *
+ * To obtain the device handle, the application sets @bus and @id to
+ * valid values and sets @handle to 0xFFFF (see the usage sketch
+ * following this structure).
+ */
+struct mpt3_ioctl_btdh_mapping {
+ struct mpt3_ioctl_header hdr;
+ uint32_t id;
+ uint32_t bus;
+ uint16_t handle;
+ uint16_t rsvd;
+};
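+
+/*
+ * Usage sketch (hypothetical user space code, fd being the /dev/mpt3ctl
+ * descriptor): translating a known firmware handle into its bus/id pair:
+ *
+ *	struct mpt3_ioctl_btdh_mapping map;
+ *
+ *	memset(&map, 0, sizeof(map));
+ *	map.handle = handle;
+ *	map.bus = 0xFFFF;
+ *	map.id = 0xFFFF;
+ *	if (!ioctl(fd, MPT3BTDHMAPPING, &map))
+ *		printf("bus %u id %u\n", map.bus, map.id);
+ */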
+
+
+
+/* application flags for mpt3_diag_register, mpt3_diag_query */
+#define MPT3_APP_FLAGS_APP_OWNED (0x0001)
+#define MPT3_APP_FLAGS_BUFFER_VALID (0x0002)
+#define MPT3_APP_FLAGS_FW_BUFFER_ACCESS (0x0004)
+
+/* flags for mpt3_diag_read_buffer */
+#define MPT3_FLAGS_REREGISTER (0x0001)
+
+#define MPT3_PRODUCT_SPECIFIC_DWORDS 23
+
+/**
+ * struct mpt3_diag_register - application register with driver
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @requested_buffer_size - buffer size in bytes
+ * @unique_id - tag specified by application that is used to signal ownership
+ * of the buffer.
+ *
+ * This will allow the driver to set up any required buffers that will be
+ * needed by firmware to communicate with the driver.
+ */
+struct mpt3_diag_register {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t requested_buffer_size;
+ uint32_t unique_id;
+};
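+
+/*
+ * Usage sketch: the values below mirror the driver's own "post" path in
+ * the host_trace_buffer_enable handler, registering a 1 MB trace buffer
+ * (fd being the /dev/mpt3ctl descriptor):
+ *
+ *	struct mpt3_diag_register reg;
+ *
+ *	memset(&reg, 0, sizeof(reg));
+ *	reg.buffer_type = MPI2_DIAG_BUF_TYPE_TRACE;
+ *	reg.requested_buffer_size = 1024 * 1024;
+ *	reg.unique_id = 0x7075900;
+ *	ioctl(fd, MPT3DIAGREGISTER, &reg);
+ */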
+
+/**
+ * struct mpt3_diag_unregister - application unregister with driver
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be unregistered
+ *
+ * This will allow the driver to clean up any memory allocated for diag
+ * messages and to free up any resources.
+ */
+struct mpt3_diag_unregister {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_query - query relevant info associated with diag buffers
+ * @hdr - generic header
+ * @reserved -
+ * @buffer_type - specifies either TRACE, SNAPSHOT, or EXTENDED
+ * @application_flags - misc flags
+ * @diagnostic_flags - specifies flags affecting command processing
+ * @product_specific - product specific information
+ * @total_buffer_size - diag buffer size in bytes
+ * @driver_added_buffer_size - size of extra space appended to end of buffer
+ * @unique_id - unique id associated with this buffer.
+ *
+ * The application will send only buffer_type and unique_id. The driver
+ * inspects unique_id first and, if valid, fills in all the info. If
+ * unique_id is 0x00, the driver returns the info specified by buffer_type.
+ */
+struct mpt3_diag_query {
+ struct mpt3_ioctl_header hdr;
+ uint8_t reserved;
+ uint8_t buffer_type;
+ uint16_t application_flags;
+ uint32_t diagnostic_flags;
+ uint32_t product_specific[MPT3_PRODUCT_SPECIFIC_DWORDS];
+ uint32_t total_buffer_size;
+ uint32_t driver_added_buffer_size;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_release - request to send Diag Release Message to firmware
+ * @hdr - generic header
+ * @unique_id - tag uniquely identifies the buffer to be released
+ *
+ * This allows ownership of the specified buffer to be returned to the
+ * driver, allowing an application to read the buffer without fear that
+ * firmware is overwriting information in the buffer.
+ */
+struct mpt3_diag_release {
+ struct mpt3_ioctl_header hdr;
+ uint32_t unique_id;
+};
+
+/**
+ * struct mpt3_diag_read_buffer - request for copy of the diag buffer
+ * @hdr - generic header
+ * @status -
+ * @reserved -
+ * @flags - misc flags
+ * @starting_offset - starting offset within the driver's buffer from which
+ *	to start copying data into the specified application buffer
+ * @bytes_to_read - number of bytes to copy from the driver's buffer into
+ *	the application buffer, starting at starting_offset.
+ * @unique_id - unique id associated with this buffer.
+ * @diagnostic_data - data payload
+ */
+struct mpt3_diag_read_buffer {
+ struct mpt3_ioctl_header hdr;
+ uint8_t status;
+ uint8_t reserved;
+ uint16_t flags;
+ uint32_t starting_offset;
+ uint32_t bytes_to_read;
+ uint32_t unique_id;
+ uint32_t diagnostic_data[1];
+};
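+
+/*
+ * Usage sketch (hypothetical user space code): since @diagnostic_data is
+ * a variable-length payload, the application allocates the header plus
+ * the number of bytes it wants to read:
+ *
+ *	sz = offsetof(struct mpt3_diag_read_buffer, diagnostic_data) + len;
+ *	karg = calloc(1, sz);
+ *	karg->unique_id = unique_id;
+ *	karg->starting_offset = 0;
+ *	karg->bytes_to_read = len;
+ *	ioctl(fd, MPT3DIAGREADBUFFER, karg);
+ */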
+
+#endif /* MPT3SAS_CTL_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
new file mode 100644
index 000000000000..35405e7044f8
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -0,0 +1,219 @@
+/*
+ * Logging Support for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#ifndef MPT3SAS_DEBUG_H_INCLUDED
+#define MPT3SAS_DEBUG_H_INCLUDED
+
+#define MPT_DEBUG 0x00000001
+#define MPT_DEBUG_MSG_FRAME 0x00000002
+#define MPT_DEBUG_SG 0x00000004
+#define MPT_DEBUG_EVENTS 0x00000008
+#define MPT_DEBUG_EVENT_WORK_TASK 0x00000010
+#define MPT_DEBUG_INIT 0x00000020
+#define MPT_DEBUG_EXIT 0x00000040
+#define MPT_DEBUG_FAIL 0x00000080
+#define MPT_DEBUG_TM 0x00000100
+#define MPT_DEBUG_REPLY 0x00000200
+#define MPT_DEBUG_HANDSHAKE 0x00000400
+#define MPT_DEBUG_CONFIG 0x00000800
+#define MPT_DEBUG_DL 0x00001000
+#define MPT_DEBUG_RESET 0x00002000
+#define MPT_DEBUG_SCSI 0x00004000
+#define MPT_DEBUG_IOCTL 0x00008000
+#define MPT_DEBUG_SAS_WIDE 0x00010000 /* assumed value; used by dsastransport() */
+#define MPT_DEBUG_SAS 0x00020000
+#define MPT_DEBUG_TRANSPORT 0x00040000
+#define MPT_DEBUG_TASK_SET_FULL 0x00080000
+
+#define MPT_DEBUG_TRIGGER_DIAG 0x00200000
+
+
+/*
+ * CONFIG_SCSI_MPT3SAS_LOGGING - enabled in Kconfig
+ */
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS) \
+{ \
+ if (IOC->logging_level & BITS) \
+ CMD; \
+}
+#else
+#define MPT_CHECK_LOGGING(IOC, CMD, BITS)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+
+/*
+ * debug macros
+ */
+
+#define dprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG)
+
+#define dsgprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SG)
+
+#define devtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENTS)
+
+#define dewtprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EVENT_WORK_TASK)
+
+#define dinitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_INIT)
+
+#define dexitprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_EXIT)
+
+#define dfailprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_FAIL)
+
+#define dtmprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TM)
+
+#define dreplyprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_REPLY)
+
+#define dhsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_HANDSHAKE)
+
+#define dcprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_CONFIG)
+
+#define ddlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_DL)
+
+#define drsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_RESET)
+
+#define dsprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SCSI)
+
+#define dctlprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_IOCTL)
+
+#define dsasprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS)
+
+#define dsastransport(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
+
+#define dmfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_MSG_FRAME)
+
+#define dtsfprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TASK_SET_FULL)
+
+#define dtransportprintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRANSPORT)
+
+#define dTriggerDiagPrintk(IOC, CMD) \
+ MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_TRIGGER_DIAG)
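+
+/*
+ * Example: a statement such as
+ *
+ *	dctlprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name, __func__));
+ *
+ * compiles away entirely unless CONFIG_SCSI_MPT3SAS_LOGGING is set, and
+ * at run time prints only when the MPT_DEBUG_IOCTL bit is set in
+ * ioc->logging_level.
+ */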
+
+
+
+/* inline functions for dumping debug data*/
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _debug_dump_mf - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_mf(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("mf:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_reply - print message frame contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_reply(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("reply:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+/**
+ * _debug_dump_config - print config page contents
+ * @mpi_request: pointer to message frame
+ * @sz: number of dwords
+ */
+static inline void
+_debug_dump_config(void *mpi_request, int sz)
+{
+ int i;
+ __le32 *mfp = (__le32 *)mpi_request;
+
+ pr_info("config:\n\t");
+ for (i = 0; i < sz; i++) {
+ if (i && ((i % 8) == 0))
+ pr_info("\n\t");
+ pr_info("%08x ", le32_to_cpu(mfp[i]));
+ }
+ pr_info("\n");
+}
+#else
+#define _debug_dump_mf(mpi_request, sz)
+#define _debug_dump_reply(mpi_request, sz)
+#define _debug_dump_config(mpi_request, sz)
+#endif /* CONFIG_SCSI_MPT3SAS_LOGGING */
+
+#endif /* MPT3SAS_DEBUG_H_INCLUDED */
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
new file mode 100644
index 000000000000..dcbf7c880cb2
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -0,0 +1,8163 @@
+/*
+ * Scsi Host Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/blkdev.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/aer.h>
+#include <linux/raid_class.h>
+
+#include "mpt3sas_base.h"
+
+MODULE_AUTHOR(MPT3SAS_AUTHOR);
+MODULE_DESCRIPTION(MPT3SAS_DESCRIPTION);
+MODULE_LICENSE("GPL");
+MODULE_VERSION(MPT3SAS_DRIVER_VERSION);
+
+#define RAID_CHANNEL 1
+/* forward proto's */
+static void _scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander);
+static void _firmware_event_work(struct work_struct *work);
+
+static void _scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device);
+static int _scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u8 retry_count, u8 is_pd);
+
+static u8 _scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid);
+
+static void _scsih_scan_start(struct Scsi_Host *shost);
+static int _scsih_scan_finished(struct Scsi_Host *shost, unsigned long time);
+
+/* global parameters */
+LIST_HEAD(mpt3sas_ioc_list);
+
+/* local parameters */
+static u8 scsi_io_cb_idx = -1;
+static u8 tm_cb_idx = -1;
+static u8 ctl_cb_idx = -1;
+static u8 base_cb_idx = -1;
+static u8 port_enable_cb_idx = -1;
+static u8 transport_cb_idx = -1;
+static u8 scsih_cb_idx = -1;
+static u8 config_cb_idx = -1;
+static int mpt_ids;
+
+static u8 tm_tr_cb_idx = -1;
+static u8 tm_tr_volume_cb_idx = -1;
+static u8 tm_sas_control_cb_idx = -1;
+
+/* command line options */
+static u32 logging_level;
+MODULE_PARM_DESC(logging_level,
+ " bits for enabling additional logging info (default=0)");
+
+
+static ushort max_sectors = 0xFFFF;
+module_param(max_sectors, ushort, 0);
+MODULE_PARM_DESC(max_sectors, "max sectors, range 64 to 32767 default=32767");
+
+
+static int missing_delay[2] = {-1, -1};
+module_param_array(missing_delay, int, NULL, 0);
+MODULE_PARM_DESC(missing_delay, " device missing delay, io missing delay");
+
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
+#define MPT3SAS_MAX_LUN (16895)
+static int max_lun = MPT3SAS_MAX_LUN;
+module_param(max_lun, int, 0);
+MODULE_PARM_DESC(max_lun, " max lun, default=16895 ");
+
+
+
+
+/* diag_buffer_enable is bitwise
+ * bit 0 set = TRACE
+ * bit 1 set = SNAPSHOT
+ * bit 2 set = EXTENDED
+ *
+ * Any combination of these bits may be set.
+ */
+static int diag_buffer_enable = -1;
+module_param(diag_buffer_enable, int, 0);
+MODULE_PARM_DESC(diag_buffer_enable,
+ " post diag buffers (TRACE=1/SNAPSHOT=2/EXTENDED=4/default=0)");
+static int disable_discovery = -1;
+module_param(disable_discovery, int, 0);
+MODULE_PARM_DESC(disable_discovery, " disable discovery ");
+
+
+/* permit overriding the host protection capabilities mask (EEDP/T10 PI) */
+static int prot_mask = -1;
+module_param(prot_mask, int, 0);
+MODULE_PARM_DESC(prot_mask, " host protection capabilities mask, def=7 ");
+
+
+/* raid transport support */
+
+static struct raid_template *mpt3sas_raid_template;
+
+
+/**
+ * struct sense_info - common structure for obtaining sense keys
+ * @skey: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ */
+struct sense_info {
+ u8 skey;
+ u8 asc;
+ u8 ascq;
+};
+
+#define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
+#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
+#define MPT3SAS_ABRT_TASK_SET (0xFFFE)
+#define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
+/**
+ * struct fw_event_work - firmware event struct
+ * @list: link list framework
+ * @work: work object (ioc->fault_reset_work_q)
+ * @cancel_pending_work: flag set during reset handling
+ * @delayed_work: delayed work object
+ * @ioc: per adapter object
+ * @device_handle: device handle
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @ignore: flag meaning this event has been marked to ignore
+ * @event: firmware event MPI2_EVENT_XXX defined in mpi2_ioc.h
+ * @event_data: reply event data payload follows
+ *
+ * This object is stored on ioc->fw_event_list.
+ */
+struct fw_event_work {
+ struct list_head list;
+ struct work_struct work;
+ u8 cancel_pending_work;
+ struct delayed_work delayed_work;
+
+ struct MPT3SAS_ADAPTER *ioc;
+ u16 device_handle;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 ignore;
+ u16 event;
+ void *event_data;
+};
+
+/**
+ * struct _scsi_io_transfer - scsi io transfer
+ * @handle: sas device handle (assigned by firmware)
+ * @is_raid: flag set for hidden raid components
+ * @dir: DMA_TO_DEVICE or DMA_FROM_DEVICE
+ * @data_length: data transfer length
+ * @data_dma: dma pointer to data
+ * @sense: sense data
+ * @lun: lun number
+ * @cdb_length: cdb length
+ * @cdb: cdb contents
+ * @timeout: timeout for this command
+ * @VF_ID: virtual function id
+ * @VP_ID: virtual port id
+ * @valid_reply: flag set for reply message
+ * @sense_length: sense length
+ * @ioc_status: ioc status
+ * @scsi_state: scsi state
+ * @scsi_status: scsi status
+ * @log_info: log information
+ * @transfer_length: data length transfer when there is a reply message
+ *
+ * Used for sending internal scsi commands to devices within this module.
+ * Refer to _scsi_send_scsi_io().
+ */
+struct _scsi_io_transfer {
+ u16 handle;
+ u8 is_raid;
+ enum dma_data_direction dir;
+ u32 data_length;
+ dma_addr_t data_dma;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
+ u32 lun;
+ u8 cdb_length;
+ u8 cdb[32];
+ u8 timeout;
+ u8 VF_ID;
+ u8 VP_ID;
+ u8 valid_reply;
+ /* the following bits are only valid when 'valid_reply = 1' */
+ u32 sense_length;
+ u16 ioc_status;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ u32 transfer_length;
+};
+
+/*
+ * The pci device ids are defined in mpi/mpi2_cnfg.h.
+ */
+static DEFINE_PCI_DEVICE_TABLE(scsih_pci_table) = {
+ /* Fury ~ 3004 and 3008 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008,
+ PCI_ANY_ID, PCI_ANY_ID },
+ /* Invader ~ 3108 */
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5,
+ PCI_ANY_ID, PCI_ANY_ID },
+ { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6,
+ PCI_ANY_ID, PCI_ANY_ID },
+ {0} /* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, scsih_pci_table);
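+
+/*
+ * MODULE_DEVICE_TABLE() exports the ids above as module aliases so that
+ * userspace hotplug can autoload this driver; e.g. "modinfo mpt3sas"
+ * should list one pci: alias per table entry (illustrative; output may
+ * vary by kernel and tooling).
+ */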
+
+/**
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
+ * @val: value supplied on the command line or written via sysfs
+ * @kp: kernel parameter descriptor
+ *
+ * Note: The logging levels are defined in mpt3sas_debug.h.
+ */
+static int
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
+{
+ int ret = param_set_int(val, kp);
+ struct MPT3SAS_ADAPTER *ioc;
+
+ if (ret)
+ return ret;
+
+ pr_info("setting logging_level(0x%08x)\n", logging_level);
+ list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
+ ioc->logging_level = logging_level;
+ return 0;
+}
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
+ &logging_level, 0644);
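+
+/*
+ * Because logging_level is registered above with mode 0644, it can also
+ * be changed at runtime through sysfs, for example (illustrative):
+ *   echo 0x3f8 > /sys/module/mpt3sas/parameters/logging_level
+ * _scsih_set_debug_level() then propagates the new value to every ioc.
+ */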
+
+/**
+ * _scsih_srch_boot_sas_address - search based on sas_address
+ * @sas_address: sas address
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_sas_address(u64 sas_address,
+ Mpi2BootDeviceSasWwid_t *boot_device)
+{
+ return (sas_address == le64_to_cpu(boot_device->SASAddress)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_device_name - search based on device name
+ * @device_name: device name specified in the IDENTIFY frame
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_device_name(u64 device_name,
+ Mpi2BootDeviceDeviceName_t *boot_device)
+{
+ return (device_name == le64_to_cpu(boot_device->DeviceName)) ? 1 : 0;
+}
+
+/**
+ * _scsih_srch_boot_encl_slot - search based on enclosure_logical_id/slot
+ * @enclosure_logical_id: enclosure logical id
+ * @slot_number: slot number
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static inline int
+_scsih_srch_boot_encl_slot(u64 enclosure_logical_id, u16 slot_number,
+ Mpi2BootDeviceEnclosureSlot_t *boot_device)
+{
+ return (enclosure_logical_id ==
+ le64_to_cpu(boot_device->EnclosureLogicalID) &&
+ slot_number == le16_to_cpu(boot_device->SlotNumber)) ? 1 : 0;
+}
+
+/**
+ * _scsih_is_boot_device - search for matching boot device.
+ * @sas_address: sas address
+ * @device_name: device name specified in the IDENTIFY frame
+ * @enclosure_logical_id: enclosure logical id
+ * @slot: slot number
+ * @form: specifies boot device form
+ * @boot_device: boot device object from bios page 2
+ *
+ * Returns 1 when there's a match, 0 means no match.
+ */
+static int
+_scsih_is_boot_device(u64 sas_address, u64 device_name,
+ u64 enclosure_logical_id, u16 slot, u8 form,
+ Mpi2BiosPage2BootDevice_t *boot_device)
+{
+ int rc = 0;
+
+ switch (form) {
+ case MPI2_BIOSPAGE2_FORM_SAS_WWID:
+ if (!sas_address)
+ break;
+ rc = _scsih_srch_boot_sas_address(
+ sas_address, &boot_device->SasWwid);
+ break;
+ case MPI2_BIOSPAGE2_FORM_ENCLOSURE_SLOT:
+ if (!enclosure_logical_id)
+ break;
+ rc = _scsih_srch_boot_encl_slot(
+ enclosure_logical_id,
+ slot, &boot_device->EnclosureSlot);
+ break;
+ case MPI2_BIOSPAGE2_FORM_DEVICE_NAME:
+ if (!device_name)
+ break;
+ rc = _scsih_srch_boot_device_name(
+ device_name, &boot_device->DeviceName);
+ break;
+ case MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED:
+ break;
+ }
+
+ return rc;
+}
+
+/**
+ * _scsih_get_sas_address - set the sas_address for given device handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sas_address: sas address
+ *
+ * Returns 0 on success, non-zero on failure.
+ */
+static int
+_scsih_get_sas_address(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 *sas_address)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 ioc_status;
+
+ *sas_address = 0;
+
+ if (handle <= ioc->sas_hba.num_phys) {
+ *sas_address = ioc->sas_hba.sas_address;
+ return 0;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
+ *sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ return 0;
+ }
+
+ /* we hit this because the given parent handle doesn't exist */
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ return -ENXIO;
+
+ /* else error case */
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x), failure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+}
+
+/**
+ * _scsih_determine_boot_device - determine boot device.
+ * @ioc: per adapter object
+ * @device: either sas_device or raid_device object
+ * @is_raid: [flag] 1 = raid object, 0 = sas object
+ *
+ * Determines whether this device should be the first device reported to
+ * scsi-ml or the sas transport; this supports persistent boot devices.
+ * There are primary, alternate, and current entries in bios page 2. The
+ * priority order is primary, alternate, then current. This routine saves
+ * the corresponding device object and is_raid flag in the ioc object.
+ * The saved data is used later in _scsih_probe_boot_devices().
+ */
+static void
+_scsih_determine_boot_device(struct MPT3SAS_ADAPTER *ioc,
+ void *device, u8 is_raid)
+{
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u64 sas_address;
+ u64 device_name;
+ u64 enclosure_logical_id;
+ u16 slot;
+
+ /* only process this function when driver loads */
+ if (!ioc->is_driver_loading)
+ return;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ if (!is_raid) {
+ sas_device = device;
+ sas_address = sas_device->sas_address;
+ device_name = sas_device->device_name;
+ enclosure_logical_id = sas_device->enclosure_logical_id;
+ slot = sas_device->slot;
+ } else {
+ raid_device = device;
+ sas_address = raid_device->wwid;
+ device_name = 0;
+ enclosure_logical_id = 0;
+ slot = 0;
+ }
+
+ if (!ioc->req_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_boot_device.device = device;
+ ioc->req_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->req_alt_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.ReqAltBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.RequestedAltBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: req_alt_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->req_alt_boot_device.device = device;
+ ioc->req_alt_boot_device.is_raid = is_raid;
+ }
+ }
+
+ if (!ioc->current_boot_device.device) {
+ if (_scsih_is_boot_device(sas_address, device_name,
+ enclosure_logical_id, slot,
+ (ioc->bios_pg2.CurrentBootDeviceForm &
+ MPI2_BIOSPAGE2_FORM_MASK),
+ &ioc->bios_pg2.CurrentBootDevice)) {
+ dinitprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: current_boot_device(0x%016llx)\n",
+ ioc->name, __func__,
+ (unsigned long long)sas_address));
+ ioc->current_boot_device.device = device;
+ ioc->current_boot_device.is_raid = is_raid;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_sas_device_find_by_sas_address - sas device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on sas_address, then returns the
+ * sas_device object.
+ */
+struct _sas_device *
+mpt3sas_scsih_sas_device_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->sas_address == sas_address)
+ return sas_device;
+
+ return NULL;
+}
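+
+/*
+ * Illustrative caller pattern for the lookup above; per the Context note,
+ * the returned pointer is only stable while ioc->sas_device_lock is held:
+ *
+ *   spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ *   sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc, addr);
+ *   if (sas_device)
+ *           ... use sas_device under the lock ...
+ *   spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ */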
+
+/**
+ * _scsih_sas_device_find_by_handle - sas device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_device_lock
+ *
+ * This searches for sas_device based on handle, then returns the
+ * sas_device object.
+ */
+static struct _sas_device *
+_scsih_sas_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ list_for_each_entry(sas_device, &ioc->sas_device_init_list, list)
+ if (sas_device->handle == handle)
+ return sas_device;
+
+ return NULL;
+}
+
+/**
+ * _scsih_sas_device_remove - remove sas_device from list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Removes the object from the ioc->sas_device_list and frees the
+ * associated memory.
+ */
+static void
+_scsih_sas_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ if (!sas_device)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_device_remove_by_handle - removing device object by handle
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * Return nothing.
+ */
+static void
+_scsih_device_remove_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * mpt3sas_device_remove_by_sas_address - removing device object by sas address
+ * @ioc: per adapter object
+ * @sas_address: device sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_device_remove_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_device)
+ list_del(&sas_device->list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ _scsih_remove_device(ioc, sas_device);
+}
+
+/**
+ * _scsih_sas_device_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object to the ioc->sas_device_list.
+ */
+static void
+_scsih_sas_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to remove
+ * devices while scanning is turned on due to an oops in
+ * scsi_sysfs_add_sdev()->add_device()->sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+}
+
+/**
+ * _scsih_sas_device_init_add - insert sas_device to the list.
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ * Context: This function will acquire ioc->sas_device_lock.
+ *
+ * Adding new object at driver load time to the ioc->sas_device_init_list.
+ */
+static void
+_scsih_sas_device_init_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ __func__, sas_device->handle,
+ (unsigned long long)sas_device->sas_address));
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_add_tail(&sas_device->list, &ioc->sas_device_init_list);
+ _scsih_determine_boot_device(ioc, sas_device, 0);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_find_by_id - raid device search
+ * @ioc: per adapter object
+ * @id: sas device target id
+ * @channel: sas device channel
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on target id, then returns the
+ * raid_device object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_id(struct MPT3SAS_ADAPTER *ioc, int id, int channel)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->id == id && raid_device->channel == channel) {
+ r = raid_device;
+ goto out;
+ }
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_handle - raid device search
+ * @ioc: per adapter object
+ * @handle: sas device handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on handle, then returns the
+ * raid_device object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->handle != handle)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_find_by_wwid - raid device search
+ * @ioc: per adapter object
+ * @wwid: world wide identifier (assigned by firmware)
+ * Context: Calling function should acquire ioc->raid_device_lock
+ *
+ * This searches for raid_device based on wwid, then returns the
+ * raid_device object.
+ */
+static struct _raid_device *
+_scsih_raid_device_find_by_wwid(struct MPT3SAS_ADAPTER *ioc, u64 wwid)
+{
+ struct _raid_device *raid_device, *r;
+
+ r = NULL;
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid != wwid)
+ continue;
+ r = raid_device;
+ goto out;
+ }
+
+ out:
+ return r;
+}
+
+/**
+ * _scsih_raid_device_add - add raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ *
+ * This is added to the raid_device_list linked list.
+ */
+static void
+_scsih_raid_device_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), wwid(0x%016llx)\n", ioc->name, __func__,
+ raid_device->handle, (unsigned long long)raid_device->wwid));
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_add_tail(&raid_device->list, &ioc->raid_device_list);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_raid_device_remove - delete raid_device object
+ * @ioc: per adapter object
+ * @raid_device: raid_device object
+ */
+static void
+_scsih_raid_device_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_handle - expander device search
+ * @ioc: per adapter object
+ * @handle: expander handle (assigned by firmware)
+ * Context: Calling function should acquire ioc->sas_node_lock
+ *
+ * This searches for expander device based on handle, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_handle(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->handle != handle)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * mpt3sas_scsih_expander_find_by_sas_address - expander device search
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * This searches for expander device based on sas_address, then returns the
+ * sas_node object.
+ */
+struct _sas_node *
+mpt3sas_scsih_expander_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ struct _sas_node *sas_expander, *r;
+
+ r = NULL;
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ r = sas_expander;
+ goto out;
+ }
+ out:
+ return r;
+}
+
+/**
+ * _scsih_expander_node_add - insert expander device to the list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new object to the ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_add(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&sas_expander->list, &ioc->sas_expander_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_is_end_device - determines if device is an end device
+ * @device_info: bitfield providing information about the device.
+ * Context: none
+ *
+ * Returns 1 if end device.
+ */
+static int
+_scsih_is_end_device(u32 device_info)
+{
+ if (device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE &&
+ ((device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET) |
+ (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))
+ return 1;
+ else
+ return 0;
+}
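+
+/*
+ * Note: the single '|' operators above are intentional bitwise ORs of the
+ * already-masked DeviceInfo bits; the test reads "end device AND (SSP
+ * target, STP target, or SATA device)".
+ */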
+
+/**
+ * _scsih_scsi_lookup_get - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for the given smid.
+ */
+static struct scsi_cmnd *
+_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ return ioc->scsi_lookup[smid - 1].scmd;
+}
+
+/**
+ * _scsih_scsi_lookup_get_clear - returns scmd entry
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * Returns the scmd pointer stored for the given smid, then clears the
+ * stored entry (under ioc->scsi_lookup_lock) so it cannot be reused.
+ */
+static inline struct scsi_cmnd *
+_scsih_scsi_lookup_get_clear(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ unsigned long flags;
+ struct scsi_cmnd *scmd;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ scmd = ioc->scsi_lookup[smid - 1].scmd;
+ ioc->scsi_lookup[smid - 1].scmd = NULL;
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+
+ return scmd;
+}
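+
+/*
+ * Note: unlike _scsih_scsi_lookup_get(), the _get_clear() variant above
+ * takes ioc->scsi_lookup_lock and NULLs the stored entry, so a given
+ * smid's scmd can be claimed for completion exactly once even under
+ * concurrent reply processing.
+ */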
+
+/**
+ * _scsih_scsi_lookup_find_by_scmd - scmd lookup
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a scmd pointer in the scsi_lookup array,
+ * returning the relevant smid. A returned value of zero means invalid.
+ */
+static u16
+_scsih_scsi_lookup_find_by_scmd(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd
+ *scmd)
+{
+ u16 smid;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ smid = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd == scmd) {
+ smid = ioc->scsi_lookup[i].smid;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return smid;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+ int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+ unsigned int lun, int channel)
+{
+ u8 found;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ found = 0;
+ for (i = 0; i < ioc->scsiio_depth; i++) {
+ if (ioc->scsi_lookup[i].scmd &&
+ (ioc->scsi_lookup[i].scmd->device->id == id &&
+ ioc->scsi_lookup[i].scmd->device->channel == channel &&
+ ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+ found = 1;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ return found;
+}
+
+
+static void
+_scsih_adjust_queue_depth(struct scsi_device *sdev, int qdepth)
+{
+ struct Scsi_Host *shost = sdev->host;
+ int max_depth;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ max_depth = shost->can_queue;
+
+ /* limit max device queue for SATA to 32 */
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ goto not_sata;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ if (!sas_target_priv_data)
+ goto not_sata;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))
+ goto not_sata;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (sas_device && sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ max_depth = MPT3SAS_SATA_QUEUE_DEPTH;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ not_sata:
+
+ if (!sdev->tagged_supported)
+ max_depth = 1;
+ if (qdepth > max_depth)
+ qdepth = max_depth;
+ scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+}
+
+/**
+ * _scsih_change_queue_depth - setting device queue depth
+ * @sdev: scsi device struct
+ * @qdepth: requested queue depth
+ * @reason: SCSI_QDEPTH_DEFAULT/SCSI_QDEPTH_QFULL/SCSI_QDEPTH_RAMP_UP
+ * (see include/scsi/scsi_host.h for definition)
+ *
+ * Returns queue depth.
+ */
+static int
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
+{
+ if (reason == SCSI_QDEPTH_DEFAULT || reason == SCSI_QDEPTH_RAMP_UP)
+ _scsih_adjust_queue_depth(sdev, qdepth);
+ else if (reason == SCSI_QDEPTH_QFULL)
+ scsi_track_queue_full(sdev, qdepth);
+ else
+ return -EOPNOTSUPP;
+
+ if (sdev->inquiry_len > 7)
+ sdev_printk(KERN_INFO, sdev, "qdepth(%d), tagged(%d), " \
+ "simple(%d), ordered(%d), scsi_level(%d), cmd_que(%d)\n",
+ sdev->queue_depth, sdev->tagged_supported, sdev->simple_tags,
+ sdev->ordered_tags, sdev->scsi_level,
+ (sdev->inquiry[7] & 2) >> 1);
+
+ return sdev->queue_depth;
+}
+
+/**
+ * _scsih_change_queue_type - changing device queue tag type
+ * @sdev: scsi device struct
+ * @tag_type: requested tag type
+ *
+ * Returns queue tag type.
+ */
+static int
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+{
+ if (sdev->tagged_supported) {
+ scsi_set_tag_type(sdev, tag_type);
+ if (tag_type)
+ scsi_activate_tcq(sdev, sdev->queue_depth);
+ else
+ scsi_deactivate_tcq(sdev, sdev->queue_depth);
+ } else
+ tag_type = 0;
+
+ return tag_type;
+}
+
+
+/**
+ * _scsih_target_alloc - target add routine
+ * @starget: scsi target struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_target_alloc(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = kzalloc(sizeof(*sas_target_priv_data), GFP_KERNEL);
+ if (!sas_target_priv_data)
+ return -ENOMEM;
+
+ starget->hostdata = sas_target_priv_data;
+ sas_target_priv_data->starget = starget;
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+
+ /* RAID volumes */
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ sas_target_priv_data->handle = raid_device->handle;
+ sas_target_priv_data->sas_address = raid_device->wwid;
+ sas_target_priv_data->flags |= MPT_TARGET_FLAGS_VOLUME;
+ raid_device->starget = starget;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return 0;
+ }
+
+ /* sas/sata devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+
+ if (sas_device) {
+ sas_target_priv_data->handle = sas_device->handle;
+ sas_target_priv_data->sas_address = sas_device->sas_address;
+ sas_device->starget = starget;
+ sas_device->id = starget->id;
+ sas_device->channel = starget->channel;
+ if (test_bit(sas_device->handle, ioc->pd_handles))
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ if (sas_device->fast_path)
+ sas_target_priv_data->flags |= MPT_TARGET_FASTPATH_IO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ return 0;
+}
+
+/**
+ * _scsih_target_destroy - target destroy routine
+ * @starget: scsi target struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_target_destroy(struct scsi_target *starget)
+{
+ struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct sas_rphy *rphy;
+
+ sas_target_priv_data = starget->hostdata;
+ if (!sas_target_priv_data)
+ return;
+
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, starget->id,
+ starget->channel);
+ if (raid_device) {
+ raid_device->starget = NULL;
+ raid_device->sdev = NULL;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ goto out;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ rphy = dev_to_rphy(starget->dev.parent);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device && (sas_device->starget == starget) &&
+ (sas_device->id == starget->id) &&
+ (sas_device->channel == starget->channel))
+ sas_device->starget = NULL;
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ out:
+ kfree(sas_target_priv_data);
+ starget->hostdata = NULL;
+}
+
+/**
+ * _scsih_slave_alloc - device add routine
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_alloc(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ sas_device_priv_data = kzalloc(sizeof(*sas_device_priv_data), GFP_KERNEL);
+ if (!sas_device_priv_data)
+ return -ENOMEM;
+
+ sas_device_priv_data->lun = sdev->lun;
+ sas_device_priv_data->flags = MPT_DEVICE_FLAGS_INIT;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns++;
+ sas_device_priv_data->sas_target = sas_target_priv_data;
+ sdev->hostdata = sas_device_priv_data;
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT))
+ sdev->no_uld_attach = 1;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+ if (starget->channel == RAID_CHANNEL) {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc,
+ starget->id, starget->channel);
+ if (raid_device)
+ raid_device->sdev = sdev; /* raid is single lun */
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_slave_destroy - device destroy routine
+ * @sdev: scsi device struct
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_slave_destroy(struct scsi_device *sdev)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct Scsi_Host *shost;
+ struct MPT3SAS_ADAPTER *ioc;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
+ if (!sdev->hostdata)
+ return;
+
+ starget = scsi_target(sdev);
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->num_luns--;
+
+ shost = dev_to_shost(&starget->dev);
+ ioc = shost_priv(shost);
+
+ if (!(sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME)) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_target_priv_data->sas_address);
+ if (sas_device && !sas_target_priv_data->num_luns)
+ sas_device->starget = NULL;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ kfree(sdev->hostdata);
+ sdev->hostdata = NULL;
+}
+
+/**
+ * _scsih_display_sata_capabilities - sata capabilities
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @sdev: scsi device struct
+ */
+static void
+_scsih_display_sata_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ u16 handle, struct scsi_device *sdev)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u16 flags;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ flags = le16_to_cpu(sas_device_pg0.Flags);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ sdev_printk(KERN_INFO, sdev,
+ "atapi(%s), ncq(%s), asyn_notify(%s), smart(%s), fua(%s), "
+ "sw_preserve(%s)\n",
+ (device_info & MPI2_SAS_DEVICE_INFO_ATAPI_DEVICE) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_NCQ_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_ASYNCHRONOUS_NOTIFY) ? "y" :
+ "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SMART_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_FUA_SUPPORTED) ? "y" : "n",
+ (flags & MPI2_SAS_DEVICE0_FLAGS_SATA_SW_PRESERVE) ? "y" : "n");
+}
+
+/*
+ * raid transport support -
+ * Enabled for SLES11 and newer; on older kernels the driver would panic
+ * when unloading the driver followed by a load - I believe that the
+ * subroutine raid_class_release() is not cleaning up properly.
+ */
+
+/**
+ * _scsih_is_raid - return boolean indicating device is raid volume
+ * @dev: the device struct object
+ */
+static int
+_scsih_is_raid(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+
+ return (sdev->channel == RAID_CHANNEL) ? 1 : 0;
+}
+
+/**
+ * _scsih_get_resync - get raid volume resync percent complete
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_resync(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volume_status_flags;
+ u8 percent_complete;
+ u16 handle;
+
+ percent_complete = 0;
+ handle = 0;
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device) {
+ handle = raid_device->handle;
+ percent_complete = raid_device->percent_complete;
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!handle)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ percent_complete = 0;
+ goto out;
+ }
+
+ volume_status_flags = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (!(volume_status_flags &
+ MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS))
+ percent_complete = 0;
+
+ out:
+ raid_set_resync(mpt3sas_raid_template, dev, percent_complete);
+}
+
+/**
+ * _scsih_get_state - get raid volume state
+ * @dev: the device struct object
+ */
+static void
+_scsih_get_state(struct device *dev)
+{
+ struct scsi_device *sdev = to_scsi_device(dev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(sdev->host);
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ Mpi2RaidVolPage0_t vol_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 volstate;
+ enum raid_state state = RAID_STATE_UNKNOWN;
+ u16 handle = 0;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_id(ioc, sdev->id,
+ sdev->channel);
+ if (raid_device)
+ handle = raid_device->handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (!raid_device)
+ goto out;
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, &vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ volstate = le32_to_cpu(vol_pg0.VolumeStatusFlags);
+ if (volstate & MPI2_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS) {
+ state = RAID_STATE_RESYNCING;
+ goto out;
+ }
+
+ switch (vol_pg0.VolumeState) {
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ state = RAID_STATE_ACTIVE;
+ break;
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ state = RAID_STATE_DEGRADED;
+ break;
+ case MPI2_RAID_VOL_STATE_FAILED:
+ case MPI2_RAID_VOL_STATE_MISSING:
+ state = RAID_STATE_OFFLINE;
+ break;
+ }
+ out:
+ raid_set_state(mpt3sas_raid_template, dev, state);
+}
+
+/**
+ * _scsih_set_level - set raid level
+ * @sdev: scsi device struct
+ * @volume_type: volume type
+ */
+static void
+_scsih_set_level(struct scsi_device *sdev, u8 volume_type)
+{
+ enum raid_level level = RAID_LEVEL_UNKNOWN;
+
+ switch (volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ level = RAID_LEVEL_0;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ level = RAID_LEVEL_10;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ level = RAID_LEVEL_1E;
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ level = RAID_LEVEL_1;
+ break;
+ }
+
+ raid_set_level(mpt3sas_raid_template, &sdev->sdev_gendev, level);
+}
+
+
+/**
+ * _scsih_get_volume_capabilities - volume capabilities
+ * @ioc: per adapter object
+ * @raid_device: the raid_device object
+ *
+ * Returns 0 for success, else 1
+ */
+static int
+_scsih_get_volume_capabilities(struct MPT3SAS_ADAPTER *ioc,
+ struct _raid_device *raid_device)
+{
+ Mpi2RaidVolPage0_t *vol_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 sz;
+ u8 num_pds;
+
+ if ((mpt3sas_config_get_number_pds(ioc, raid_device->handle,
+ &num_pds)) || !num_pds) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ raid_device->num_pds = num_pds;
+ sz = offsetof(Mpi2RaidVolPage0_t, PhysDisk) + (num_pds *
+ sizeof(Mpi2RaidVol0PhysDisk_t));
+ vol_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!vol_pg0) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ if ((mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply, vol_pg0,
+ MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, raid_device->handle, sz))) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ kfree(vol_pg0);
+ return 1;
+ }
+
+ raid_device->volume_type = vol_pg0->VolumeType;
+
+ /* figure out what the underlying devices are by
+ * obtaining the device_info bits for the 1st device
+ */
+ if (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_PHYSDISKNUM,
+ vol_pg0->PhysDisk[0].PhysDiskNum))) {
+ if (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ le16_to_cpu(pd_pg0.DevHandle)))) {
+ raid_device->device_info =
+ le32_to_cpu(sas_device_pg0.DeviceInfo);
+ }
+ }
+
+ kfree(vol_pg0);
+ return 0;
+}
+
+
+
+/**
+ * _scsih_enable_tlr - setting TLR flags
+ * @ioc: per adapter object
+ * @sdev: scsi device struct
+ *
+ * Enabling Transaction Layer Retries for tape devices when
+ * vpd page 0x90 is present
+ *
+ */
+static void
+_scsih_enable_tlr(struct MPT3SAS_ADAPTER *ioc, struct scsi_device *sdev)
+{
+
+ /* only for TAPE */
+ if (sdev->type != TYPE_TAPE)
+ return;
+
+ if (!(ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR))
+ return;
+
+ sas_enable_tlr(sdev);
+ sdev_printk(KERN_INFO, sdev, "TLR %s\n",
+ sas_is_tlr_enabled(sdev) ? "Enabled" : "Disabled");
+}
+
+/**
+ * _scsih_slave_configure - device configure routine.
+ * @sdev: scsi device struct
+ *
+ * Returns 0 if ok. Any other return is assumed to be an error and
+ * the device is ignored.
+ */
+static int
+_scsih_slave_configure(struct scsi_device *sdev)
+{
+ struct Scsi_Host *shost = sdev->host;
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ int qdepth;
+ u8 ssp_target = 0;
+ char *ds = "";
+ char *r_level = "";
+ u16 handle, volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ qdepth = 1;
+ sas_device_priv_data = sdev->hostdata;
+ sas_device_priv_data->configured_lun = 1;
+ sas_device_priv_data->flags &= ~MPT_DEVICE_FLAGS_INIT;
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+ handle = sas_target_priv_data->handle;
+
+ /* raid volume handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (!raid_device) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+ if (_scsih_get_volume_capabilities(ioc, raid_device)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__,
+ __LINE__, __func__));
+ return 1;
+ }
+
+
+ /* RAID Queue Depth Support
+ * IS volume = underlying qdepth of drive type, either
+ * MPT3SAS_SAS_QUEUE_DEPTH or MPT3SAS_SATA_QUEUE_DEPTH
+ * IM/IME/R10 = 128 (MPT3SAS_RAID_QUEUE_DEPTH)
+ */
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (raid_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ else
+ ds = "STP";
+ }
+
+ switch (raid_device->volume_type) {
+ case MPI2_RAID_VOL_TYPE_RAID0:
+ r_level = "RAID0";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1E:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ if (ioc->manu_pg10.OEMIdentifier &&
+ (le32_to_cpu(ioc->manu_pg10.GenericFlags0) &
+ MFG10_GF0_R10_DISPLAY) &&
+ !(raid_device->num_pds % 2))
+ r_level = "RAID10";
+ else
+ r_level = "RAID1E";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID1:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID1";
+ break;
+ case MPI2_RAID_VOL_TYPE_RAID10:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAID10";
+ break;
+ case MPI2_RAID_VOL_TYPE_UNKNOWN:
+ default:
+ qdepth = MPT3SAS_RAID_QUEUE_DEPTH;
+ r_level = "RAIDX";
+ break;
+ }
+
+ sdev_printk(KERN_INFO, sdev,
+ "%s: handle(0x%04x), wwid(0x%016llx), pd_count(%d), type(%s)\n",
+ r_level, raid_device->handle,
+ (unsigned long long)raid_device->wwid,
+ raid_device->num_pds, ds);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+ /* raid transport support */
+ _scsih_set_level(sdev, raid_device->volume_type);
+ return 0;
+ }
+
+ /* non-raid handling */
+ if (sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ if (mpt3sas_config_get_volume_handle(ioc, handle,
+ &volume_handle)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ if (volume_handle && mpt3sas_config_get_volume_wwid(ioc,
+ volume_handle, &volume_wwid)) {
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__));
+ return 1;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_device_priv_data->sas_target->sas_address);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ dfailprintk(ioc, pr_warn(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__,
+ __func__));
+ return 1;
+ }
+
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET) {
+ qdepth = MPT3SAS_SAS_QUEUE_DEPTH;
+ ssp_target = 1;
+ ds = "SSP";
+ } else {
+ qdepth = MPT3SAS_SATA_QUEUE_DEPTH;
+ if (sas_device->device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ ds = "STP";
+ else if (sas_device->device_info &
+ MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ ds = "SATA";
+ }
+
+ sdev_printk(KERN_INFO, sdev, "%s: handle(0x%04x), " \
+ "sas_addr(0x%016llx), phy(%d), device_name(0x%016llx)\n",
+ ds, handle, (unsigned long long)sas_device->sas_address,
+ sas_device->phy, (unsigned long long)sas_device->device_name);
+ sdev_printk(KERN_INFO, sdev,
+ "%s: enclosure_logical_id(0x%016llx), slot(%d)\n",
+ ds, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!ssp_target)
+ _scsih_display_sata_capabilities(ioc, handle, sdev);
+
+
+ _scsih_change_queue_depth(sdev, qdepth, SCSI_QDEPTH_DEFAULT);
+
+ if (ssp_target) {
+ sas_read_port_mode_page(sdev);
+ _scsih_enable_tlr(ioc, sdev);
+ }
+
+ return 0;
+}
+
+/**
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * @sdev: scsi device struct
+ * @bdev: pointer to block device context
+ * @capacity: device size (in 512 byte sectors)
+ * @params: three element array to place output:
+ * params[0] number of heads (max 255)
+ * params[1] number of sectors (max 63)
+ * params[2] number of cylinders
+ *
+ * Returns 0.
+ */
+static int
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+ sector_t capacity, int params[])
+{
+ int heads;
+ int sectors;
+ sector_t cylinders;
+ ulong dummy;
+
+ heads = 64;
+ sectors = 32;
+
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+
+ /*
+ * Handle extended translation size for logical drives
+ * > 1 GB
+ */
+ if ((ulong)capacity >= 0x200000) {
+ heads = 255;
+ sectors = 63;
+ dummy = heads * sectors;
+ cylinders = capacity;
+ sector_div(cylinders, dummy);
+ }
+
+ /* return result */
+ params[0] = heads;
+ params[1] = sectors;
+ params[2] = cylinders;
+
+ return 0;
+}
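+
+/*
+ * Worked example for the geometry above (illustrative): a 64 GiB disk has
+ * capacity = 134217728 sectors >= 0x200000, so heads = 255, sectors = 63,
+ * and cylinders = 134217728 / (255 * 63) = 8354 (integer division).
+ */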
+
+/**
+ * _scsih_response_code - translation of device response code
+ * @ioc: per adapter object
+ * @response_code: response code returned by the device
+ *
+ * Return nothing.
+ */
+static void
+_scsih_response_code(struct MPT3SAS_ADAPTER *ioc, u8 response_code)
+{
+ char *desc;
+
+ switch (response_code) {
+ case MPI2_SCSITASKMGMT_RSP_TM_COMPLETE:
+ desc = "task management request completed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_INVALID_FRAME:
+ desc = "invalid frame";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_NOT_SUPPORTED:
+ desc = "task management request not supported";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_FAILED:
+ desc = "task management request failed";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED:
+ desc = "task management request succeeded";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_TM_INVALID_LUN:
+ desc = "invalid lun";
+ break;
+ case 0xA:
+ desc = "overlapped tag attempted";
+ break;
+ case MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC:
+ desc = "task queued, however not sent to target";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+ pr_warn(MPT3SAS_FMT "response_code(0x%01x): %s\n",
+ ioc->name, response_code, desc);
+}
+
+/**
+ * _scsih_tm_done - tm completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ * Context: none.
+ *
+ * The callback handler when using scsih_issue_tm.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ if (ioc->tm_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->tm_cmds.smid != smid)
+ return 1;
+ mpt3sas_base_flush_reply_queues(ioc);
+ ioc->tm_cmds.status |= MPT3_CMD_COMPLETE;
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (mpi_reply) {
+ memcpy(ioc->tm_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
+ ioc->tm_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->tm_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->tm_cmds.done);
+ return 1;
+}
+
+/**
+ * mpt3sas_scsih_set_tm_flag - set per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 1;
+ skip = 1;
+ ioc->ignore_loginfos = 1;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_clear_tm_flag - clear per target tm_busy
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During a task management request, we need to freeze the device queue.
+ */
+void
+mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+ u8 skip = 0;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ if (skip)
+ continue;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle == handle) {
+ sas_device_priv_data->sas_target->tm_busy = 0;
+ skip = 1;
+ ioc->ignore_loginfos = 0;
+ }
+ }
+}
+
+/**
+ * mpt3sas_scsih_issue_tm - main routine for sending tm requests
+ * @ioc: per adapter struct
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE_XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ * @timeout: timeout in seconds
+ * @serial_number: the serial_number from scmd
+ * @m_type: TM_MUTEX_ON or TM_MUTEX_OFF
+ * Context: user
+ *
+ * A generic API for sending task management requests to firmware.
+ *
+ * The callback index is set inside ioc->tm_cb_idx.
+ *
+ * Return SUCCESS or FAILED.
+ */
+int
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+ uint id, uint lun, u8 type, u16 smid_task, ulong timeout,
+ unsigned long serial_number, enum mutex_type m_type)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ u16 smid = 0;
+ u32 ioc_state;
+ unsigned long timeleft;
+ struct scsiio_tracker *scsi_lookup = NULL;
+ int rc;
+
+ if (m_type == TM_MUTEX_ON)
+ mutex_lock(&ioc->tm_cmds.mutex);
+ if (ioc->tm_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_info(MPT3SAS_FMT "%s: tm_cmd busy!!!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ __func__, ioc->name);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
+ if (ioc_state & MPI2_DOORBELL_USED) {
+ dhsprintk(ioc, pr_info(MPT3SAS_FMT
+ "unexpected doorbell active!\n", ioc->name));
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
+ mpt3sas_base_fault_info(ioc, ioc_state &
+ MPI2_DOORBELL_DATA_MASK);
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ goto err_out;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = FAILED;
+ goto err_out;
+ }
+
+ if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+ scsi_lookup = &ioc->scsi_lookup[smid_task - 1];
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "sending tm: handle(0x%04x), task_type(0x%02x), smid(%d)\n",
+ ioc->name, handle, type, smid_task));
+ ioc->tm_cmds.status = MPT3_CMD_PENDING;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->tm_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ memset(ioc->tm_cmds.reply, 0, sizeof(Mpi2SCSITaskManagementReply_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = type;
+ mpi_request->TaskMID = cpu_to_le16(smid_task);
+ int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
+ mpt3sas_scsih_set_tm_flag(ioc, handle);
+ init_completion(&ioc->tm_cmds.done);
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ if (!(ioc->tm_cmds.status & MPT3_CMD_RESET)) {
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = (!rc) ? SUCCESS : FAILED;
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ goto err_out;
+ }
+ }
+
+ if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ mpi_reply = ioc->tm_cmds.reply;
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "complete tm: " \
+ "ioc_status(0x%04x), loginfo(0x%08x), term_count(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+ if (ioc->logging_level & MPT_DEBUG_TM) {
+ _scsih_response_code(ioc, mpi_reply->ResponseCode);
+ if (mpi_reply->IOCStatus)
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SCSITaskManagementRequest_t)/4);
+ }
+ }
+
+ switch (type) {
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+ rc = SUCCESS;
+ if (scsi_lookup->scmd == NULL)
+ break;
+ rc = FAILED;
+ break;
+
+ case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+ if (_scsih_scsi_lookup_find_by_target(ioc, id, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+ case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+ if (_scsih_scsi_lookup_find_by_lun(ioc, id, lun, channel))
+ rc = FAILED;
+ else
+ rc = SUCCESS;
+ break;
+ case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+ rc = SUCCESS;
+ break;
+ default:
+ rc = FAILED;
+ break;
+ }
+
+ mpt3sas_scsih_clear_tm_flag(ioc, handle);
+ ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+
+ return rc;
+
+ err_out:
+ if (m_type == TM_MUTEX_ON)
+ mutex_unlock(&ioc->tm_cmds.mutex);
+ return rc;
+}
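+
+/*
+ * Illustrative use of the TM API above (mirrors the eh callbacks below):
+ * a LUN reset against a device handle with a 30 second timeout,
+ * serialized on the internal mutex:
+ *
+ *   r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ *       sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30,
+ *       0, TM_MUTEX_ON);
+ */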
+
+/**
+ * _scsih_tm_display_info - displays info about the device
+ * @ioc: per adapter struct
+ * @scmd: pointer to scsi command object
+ *
+ * Called by task management callback handlers.
+ */
+static void
+_scsih_tm_display_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd)
+{
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ scsi_print_command(scmd);
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ starget_printk(KERN_INFO, starget,
+ "%s handle(0x%04x), %s wwid(0x%016llx)\n",
+ device_str, priv_target->handle,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ if (priv_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ starget_printk(KERN_INFO, starget,
+ "volume handle(0x%04x), "
+ "volume wwid(0x%016llx)\n",
+ sas_device->volume_handle,
+ (unsigned long long)sas_device->volume_wwid);
+ }
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_address(0x%016llx), phy(%d)\n",
+ sas_device->handle,
+ (unsigned long long)sas_device->sas_address,
+ sas_device->phy);
+ starget_printk(KERN_INFO, starget,
+ "enclosure_logical_id(0x%016llx), slot(%d)\n",
+ (unsigned long long)sas_device->enclosure_logical_id,
+ sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_abort - eh thread's main abort routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if command aborted else FAILED
+ */
+static int
+_scsih_abort(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u16 smid;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting task abort! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* search for the command */
+ smid = _scsih_scsi_lookup_find_by_scmd(ioc, scmd);
+ if (!smid) {
+ scmd->result = DID_RESET << 16;
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components and volumes this is not supported */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT ||
+ sas_device_priv_data->sas_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ mpt3sas_halt_firmware(ioc);
+
+ handle = sas_device_priv_data->sas_target->handle;
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "task abort: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_dev_reset - eh thread's main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the device was reset else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+
+ sdev_printk(KERN_INFO, scmd->device,
+ "attempting device reset! scmd(%p)\n", scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ sdev_printk(KERN_INFO, scmd->device,
+ "device been deleted! scmd(%p)\n", scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, scmd->device->lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 30, 0,
+ TM_MUTEX_ON);
+
+ out:
+ sdev_printk(KERN_INFO, scmd->device, "device reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+/**
+ * _scsih_target_reset - eh thread's main target reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the target was reset else FAILED
+ */
+static int
+_scsih_target_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle;
+ int r;
+ struct scsi_target *starget = scmd->device->sdev_target;
+
+ starget_printk(KERN_INFO, starget, "attempting target reset! scmd(%p)\n",
+ scmd);
+ _scsih_tm_display_info(ioc, scmd);
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ starget_printk(KERN_INFO, starget, "target been deleted! scmd(%p)\n",
+ scmd);
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ r = SUCCESS;
+ goto out;
+ }
+
+ /* for hidden raid components obtain the volume_handle */
+ handle = 0;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc,
+ sas_device_priv_data->sas_target->handle);
+ if (sas_device)
+ handle = sas_device->volume_handle;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ } else
+ handle = sas_device_priv_data->sas_target->handle;
+
+ if (!handle) {
+ scmd->result = DID_RESET << 16;
+ r = FAILED;
+ goto out;
+ }
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, scmd->device->channel,
+ scmd->device->id, 0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+ 30, 0, TM_MUTEX_ON);
+
+ out:
+ starget_printk(KERN_INFO, starget, "target reset: %s scmd(%p)\n",
+ ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+ return r;
+}
+
+
+/**
+ * _scsih_host_reset - eh thread's main host reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the host was reset else FAILED
+ */
+static int
+_scsih_host_reset(struct scsi_cmnd *scmd)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ int r, retval;
+
+ pr_info(MPT3SAS_FMT "attempting host reset! scmd(%p)\n",
+ ioc->name, scmd);
+ scsi_print_command(scmd);
+
+ retval = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ r = (retval < 0) ? FAILED : SUCCESS;
+ pr_info(MPT3SAS_FMT "host reset: %s scmd(%p)\n",
+ ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+
+ return r;
+}
+
+/**
+ * _scsih_fw_event_add - insert and queue up fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This adds the firmware event object to the linked list, then queues it up
+ * to be processed from user context.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_add(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ unsigned long flags;
+
+ if (ioc->firmware_event_thread == NULL)
+ return;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ INIT_LIST_HEAD(&fw_event->list);
+ list_add_tail(&fw_event->list, &ioc->fw_event_list);
+ INIT_WORK(&fw_event->work, _firmware_event_work);
+ queue_work(ioc->firmware_event_thread, &fw_event->work);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_fw_event_free - delete fw_event
+ * @ioc: per adapter object
+ * @fw_event: object describing the event
+ * Context: This function will acquire ioc->fw_event_lock.
+ *
+ * This removes the firmware event object from the linked list and frees
+ * the associated memory.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_free(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work
+ *fw_event)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_del(&fw_event->list);
+ kfree(fw_event->event_data);
+ kfree(fw_event);
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+
+/**
+ * mpt3sas_send_trigger_data_event - send event for processing trigger data
+ * @ioc: per adapter object
+ * @event_data: trigger event data
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event_data = kzalloc(sizeof(*event_data), GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ kfree(fw_event);
+ return;
+ }
+ fw_event->event = MPT3SAS_PROCESS_TRIGGER_DIAG;
+ fw_event->ioc = ioc;
+ memcpy(fw_event->event_data, event_data, sizeof(*event_data));
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_error_recovery_delete_devices - remove devices not responding
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_error_recovery_delete_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ if (ioc->is_driver_loading)
+ return;
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_REMOVE_UNRESPONDING_DEVICES;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * mpt3sas_port_enable_complete - port enable completed (fake event)
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_port_enable_complete(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_PORT_ENABLE_COMPLETE;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_fw_event_cleanup_queue - cleanup event queue
+ * @ioc: per adapter object
+ *
+ * Walk the firmware event queue, either killing timers, or waiting
+ * for outstanding events to complete
+ *
+ * Return nothing.
+ */
+static void
+_scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct fw_event_work *fw_event, *next;
+
+ if (list_empty(&ioc->fw_event_list) ||
+ !ioc->firmware_event_thread || in_interrupt())
+ return;
+
+ list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+ if (cancel_delayed_work(&fw_event->delayed_work)) {
+ _scsih_fw_event_free(ioc, fw_event);
+ continue;
+ }
+ fw_event->cancel_pending_work = 1;
+ }
+}
+
+/**
+ * _scsih_ublock_io_all_device - unblock every device
+ * @ioc: per adapter object
+ *
+ * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
+ */
+static void
+_scsih_ublock_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (!sas_device_priv_data->block)
+ continue;
+
+ sas_device_priv_data->block = 0;
+ dewtprintk(ioc, sdev_printk(KERN_INFO, sdev,
+ "device_running, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle));
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+}
+
+
+/**
+ * _scsih_ublock_io_device - unblock devices matching a sas address
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ *
+ * Change the device state from SDEV_BLOCK to SDEV_RUNNING.
+ */
+static void
+_scsih_ublock_io_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->sas_address
+ != sas_address)
+ continue;
+ if (sas_device_priv_data->block) {
+ sas_device_priv_data->block = 0;
+ scsi_internal_device_unblock(sdev, SDEV_RUNNING);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_all_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_all_device(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev, "device_blocked, handle(0x%04x)\n",
+ sas_device_priv_data->sas_target->handle);
+ }
+}
+
+/**
+ * _scsih_block_io_device - set the device state to SDEV_BLOCK
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * During device pull we need to appropriately set the sdev state.
+ */
+static void
+_scsih_block_io_device(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data)
+ continue;
+ if (sas_device_priv_data->sas_target->handle != handle)
+ continue;
+ if (sas_device_priv_data->block)
+ continue;
+ sas_device_priv_data->block = 1;
+ scsi_internal_device_block(sdev);
+ sdev_printk(KERN_INFO, sdev,
+ "device_blocked, handle(0x%04x)\n", handle);
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_to_ex - block devices behind an expander
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all devices
+ * attached to this expander. It is called when the expander is
+ * pulled.
+ */
+static void
+_scsih_block_io_to_children_attached_to_ex(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_sibling;
+ unsigned long flags;
+
+ if (!sas_expander)
+ return;
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE) {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device =
+ mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ if (sas_device)
+ set_bit(sas_device->handle,
+ ioc->blocking_handles);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+ }
+
+ list_for_each_entry(mpt3sas_port,
+ &sas_expander->sas_port_list, port_list) {
+
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE) {
+ expander_sibling =
+ mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, mpt3sas_port->remote_identify.sas_address);
+ _scsih_block_io_to_children_attached_to_ex(ioc,
+ expander_sibling);
+ }
+ }
+}
+
+/**
+ * _scsih_block_io_to_children_attached_directly - block directly attached devices
+ * @ioc: per adapter object
+ * @event_data: topology change event data
+ *
+ * This routine sets the sdev state to SDEV_BLOCK for all directly
+ * attached devices during device pull.
+ */
+static void
+_scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
+ _scsih_block_io_device(ioc, handle);
+ }
+}
+
+/**
+ * _scsih_tm_tr_send - send task management request
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This code is to initiate the device removal handshake protocol
+ * with controller firmware. This function will issue target reset
+ * using high priority request queue. It will send a sas iounit
+ * control request (MPI2_SAS_OP_REMOVE_DEVICE) from this completion.
+ *
+ * This is designed to send multiple task management requests at the
+ * same time to the fifo. If the fifo is full, we will append the
+ * request, and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _sas_device *sas_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ u64 sas_address = 0;
+ unsigned long flags;
+ struct _tr_list *delayed_tr;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed: handle(0x%04x)\n",
+ ioc->name, __func__, handle));
+ return;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery: handle(0x%04x)\n",
+ ioc->name, __func__,
+ handle));
+ return;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational: handle(0x%04x)\n",
+ ioc->name, __func__,
+ handle));
+ return;
+ }
+
+ /* if PD, then return */
+ if (test_bit(handle, ioc->pd_handles))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device && sas_device->starget &&
+ sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ sas_address = sas_device->sas_address;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_target_priv_data) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle,
+ (unsigned long long)sas_address));
+ _scsih_ublock_io_device(ioc, sas_address);
+ sas_target_priv_data->handle = MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_cb_idx);
+ if (!smid) {
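+		/* out of hi-priority smids: queue the target reset;
+		 * it is reissued from _scsih_check_for_pending_tm */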
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_DEVICE_REMOVAL);
+}
+
+/**
+ * _scsih_tm_tr_complete - target reset completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the target reset completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ * It will send a sas iounit control request (MPI2_SAS_OP_REMOVE_DEVICE)
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ Mpi2SasIoUnitControlRequest_t *mpi_request;
+ u16 smid_sas_ctrl;
+ u32 ioc_state;
+
+ if (ioc->remove_host) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host has been removed\n", __func__, ioc->name));
+ return 1;
+ } else if (ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host in pci error recovery\n", __func__,
+ ioc->name));
+ return 1;
+ }
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ if (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host is not operational\n", __func__, ioc->name));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
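+	/* second half of the device removal handshake: send the
+	 * SAS IO unit control request (MPI2_SAS_OP_REMOVE_DEVICE) */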
+ smid_sas_ctrl = mpt3sas_base_get_smid(ioc, ioc->tm_sas_control_cb_idx);
+ if (!smid_sas_ctrl) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ return 1;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid_sas_ctrl,
+ ioc->tm_sas_control_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid_sas_ctrl);
+ memset(mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request->Operation = MPI2_SAS_OP_REMOVE_DEVICE;
+ mpi_request->DevHandle = mpi_request_tm->DevHandle;
+ mpt3sas_base_put_smid_default(ioc, smid_sas_ctrl);
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_sas_control_complete - completion routine
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * This is the sas iounit control completion routine.
+ * This code is part of the code to initiate the device removal
+ * handshake protocol with controller firmware.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_sas_control_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ Mpi2SasIoUnitControlReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (likely(mpi_reply)) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "sc_complete:handle(0x%04x), (open) "
+ "smid(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle), smid,
+ le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo)));
+ } else {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ return 1;
+}
+
+/**
+ * _scsih_tm_tr_volume_send - send target reset request for volumes
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt time.
+ *
+ * This is designed to send multiple task management requests at the
+ * same time to the fifo. If the fifo is full, we will append the
+ * request, and process it in a future completion.
+ */
+static void
+_scsih_tm_tr_volume_send(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SCSITaskManagementRequest_t *mpi_request;
+ u16 smid;
+ struct _tr_list *delayed_tr;
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ ioc->name, __func__));
+ return;
+ }
+
+ smid = mpt3sas_base_get_smid_hpr(ioc, ioc->tm_tr_volume_cb_idx);
+ if (!smid) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ if (!delayed_tr)
+ return;
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_volume_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n",
+ ioc->name, handle));
+ return;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_send:handle(0x%04x), (open), smid(%d), cb(%d)\n",
+ ioc->name, handle, smid,
+ ioc->tm_tr_volume_cb_idx));
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSITaskManagementRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
+ mpt3sas_base_put_smid_hi_priority(ioc, smid);
+}
+
+/**
+ * _scsih_tm_volume_tr_complete - target reset completion
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt time.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_tm_volume_tr_complete(struct MPT3SAS_ADAPTER *ioc, u16 smid,
+ u8 msix_index, u32 reply)
+{
+ u16 handle;
+ Mpi2SCSITaskManagementRequest_t *mpi_request_tm;
+ Mpi2SCSITaskManagementReply_t *mpi_reply =
+ mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (ioc->shost_recovery || ioc->remove_host ||
+ ioc->pci_error_recovery) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: host reset in progress!\n",
+ ioc->name, __func__));
+ return 1;
+ }
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ mpi_request_tm = mpt3sas_base_get_msg_frame(ioc, smid);
+ handle = le16_to_cpu(mpi_request_tm->DevHandle);
+ if (handle != le16_to_cpu(mpi_reply->DevHandle)) {
+ dewtprintk(ioc, pr_err(MPT3SAS_FMT
+ "spurious interrupt: handle(0x%04x:0x%04x), smid(%d)!!!\n",
+ ioc->name, handle,
+ le16_to_cpu(mpi_reply->DevHandle), smid));
+ return 0;
+ }
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "tr_complete:handle(0x%04x), (open) smid(%d), ioc_status(0x%04x), "
+ "loginfo(0x%08x), completed(%d)\n", ioc->name,
+ handle, smid, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo),
+ le32_to_cpu(mpi_reply->TerminationCount)));
+
+ return _scsih_check_for_pending_tm(ioc, smid);
+}
+
+
+/**
+ * _scsih_check_for_pending_tm - check for pending task management
+ * @ioc: per adapter object
+ * @smid: system request message index
+ *
+ * This will check the delayed target reset list, and feed the
+ * next request.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_check_for_pending_tm(struct MPT3SAS_ADAPTER *ioc, u16 smid)
+{
+ struct _tr_list *delayed_tr;
+
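+	/* delayed volume target resets are serviced before delayed
+	 * device target resets */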
+ if (!list_empty(&ioc->delayed_tr_volume_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_volume_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_volume_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ if (!list_empty(&ioc->delayed_tr_list)) {
+ delayed_tr = list_entry(ioc->delayed_tr_list.next,
+ struct _tr_list, list);
+ mpt3sas_base_free_smid(ioc, smid);
+ _scsih_tm_tr_send(ioc, delayed_tr->handle);
+ list_del(&delayed_tr->list);
+ kfree(delayed_tr);
+ return 0;
+ }
+
+ return 1;
+}
+
+/**
+ * _scsih_check_topo_delete_events - sanity check on topo events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ *
+ * This routine was added to better handle cable breakers.
+ *
+ * This handles the case where driver receives multiple expander
+ * add and delete events in a single shot. When there is a delete event
+ * the routine will void any pending add events waiting in the event queue.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_topo_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventDataSasTopologyChangeList_t *local_event_data;
+ u16 expander_handle;
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i, reason_code;
+ u16 handle;
+
+ for (i = 0 ; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)
+ _scsih_tm_tr_send(ioc, handle);
+ }
+
+ expander_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+ if (expander_handle < ioc->sas_hba.num_phys) {
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+ return;
+ }
+ if (event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING) {
+ /* put expander attached devices into blocking state */
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ expander_handle);
+ _scsih_block_io_to_children_attached_to_ex(ioc, sas_expander);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
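+		/* block each device whose bit was set in blocking_handles,
+		 * clearing bits until the bitmap is empty */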
+ do {
+ handle = find_first_bit(ioc->blocking_handles,
+ ioc->facts.MaxDevHandle);
+ if (handle < ioc->facts.MaxDevHandle)
+ _scsih_block_io_device(ioc, handle);
+ } while (test_and_clear_bit(handle, ioc->blocking_handles));
+ } else if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_RESPONDING)
+ _scsih_block_io_to_children_attached_directly(ioc, event_data);
+
+ if (event_data->ExpStatus != MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING)
+ return;
+
+ /* mark ignore flag for pending events */
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ list_for_each_entry(fw_event, &ioc->fw_event_list, list) {
+ if (fw_event->event != MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST ||
+ fw_event->ignore)
+ continue;
+ local_event_data = fw_event->event_data;
+ if (local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_ADDED ||
+ local_event_data->ExpStatus ==
+ MPI2_EVENT_SAS_TOPO_ES_RESPONDING) {
+ if (le16_to_cpu(local_event_data->ExpanderDevHandle) ==
+ expander_handle) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting ignoring flag\n", ioc->name));
+ fw_event->ignore = 1;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_delete_flag - setting volume delete flag
+ * @ioc: per adapter object
+ * @handle: device handle
+ *
+ * This returns nothing.
+ */
+static void
+_scsih_set_volume_delete_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device && raid_device->starget &&
+ raid_device->starget->hostdata) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "setting delete flag: handle(0x%04x), "
+ "wwid(0x%016llx)\n", ioc->name, handle,
+ (unsigned long long) raid_device->wwid));
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_set_volume_handle_for_tr - set handle for target reset to volume
+ * @handle: input handle
+ * @a: handle for volume a
+ * @b: handle for volume b
+ *
+ * IR firmware only supports two raid volumes. The purpose of this
+ * routine is to store the volume handle in either a or b: a non-zero
+ * handle that is not already recorded is placed in the first free slot.
+ */
+static void
+_scsih_set_volume_handle_for_tr(u16 handle, u16 *a, u16 *b)
+{
+ if (!handle || handle == *a || handle == *b)
+ return;
+ if (!*a)
+ *a = handle;
+ else if (!*b)
+ *b = handle;
+}
+
+/**
+ * _scsih_check_ir_config_unhide_events - check for UNHIDE events
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This routine will send target reset to volume, followed by target
+ * resets to the PDs. This is called when a PD has been removed, or
+ * volume has been deleted or removed. When the target reset is sent
+ * to volume, the PD target resets need to be queued to start upon
+ * completion of the volume target reset.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_ir_config_unhide_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u16 handle, volume_handle, a, b;
+ struct _tr_list *delayed_tr;
+
+ a = 0;
+ b = 0;
+
+ /* Volume Resets for Deleted or Removed */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED ||
+ element->ReasonCode ==
+ MPI2_EVENT_IR_CHANGE_RC_REMOVED) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_delete_flag(ioc, volume_handle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ /* Volume Resets for UNHIDE events */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG)
+ continue;
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_UNHIDE) {
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ _scsih_set_volume_handle_for_tr(volume_handle, &a, &b);
+ }
+ }
+
+ if (a)
+ _scsih_tm_tr_volume_send(ioc, a);
+ if (b)
+ _scsih_tm_tr_volume_send(ioc, b);
+
+ /* PD target resets */
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_UNHIDE)
+ continue;
+ handle = le16_to_cpu(element->PhysDiskDevHandle);
+ volume_handle = le16_to_cpu(element->VolDevHandle);
+ clear_bit(handle, ioc->pd_handles);
+ if (!volume_handle)
+ _scsih_tm_tr_send(ioc, handle);
+ else if (volume_handle == a || volume_handle == b) {
+ delayed_tr = kzalloc(sizeof(*delayed_tr), GFP_ATOMIC);
+ BUG_ON(!delayed_tr);
+ INIT_LIST_HEAD(&delayed_tr->list);
+ delayed_tr->handle = handle;
+ list_add_tail(&delayed_tr->list, &ioc->delayed_tr_list);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "DELAYED:tr:handle(0x%04x), (open)\n", ioc->name,
+ handle));
+ } else
+ _scsih_tm_tr_send(ioc, handle);
+ }
+}
+
+
+/**
+ * _scsih_check_volume_delete_events - set delete flag for volumes
+ * @ioc: per adapter object
+ * @event_data: the event data payload
+ * Context: interrupt time.
+ *
+ * This will handle the case when the cable connected to an entire volume
+ * is pulled. We will take care of setting the deleted flag so normal IO
+ * will not be sent.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_check_volume_delete_events(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrVolume_t *event_data)
+{
+ u32 state;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+ state = le32_to_cpu(event_data->NewValue);
+ if (state == MPI2_RAID_VOL_STATE_MISSING || state ==
+ MPI2_RAID_VOL_STATE_FAILED)
+ _scsih_set_volume_delete_flag(ioc,
+ le16_to_cpu(event_data->VolDevHandle));
+}
+
+/**
+ * _scsih_flush_running_cmds - complete outstanding commands
+ * @ioc: per adapter object
+ *
+ * Flushes out all pending scmds following a host reset; all outstanding
+ * IO is dropped to the floor.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct scsi_cmnd *scmd;
+ u16 smid;
+ u16 count = 0;
+
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (!scmd)
+ continue;
+ count++;
+ mpt3sas_base_free_smid(ioc, smid);
+ scsi_dma_unmap(scmd);
+ if (ioc->pci_error_recovery)
+ scmd->result = DID_NO_CONNECT << 16;
+ else
+ scmd->result = DID_RESET << 16;
+ scmd->scsi_done(scmd);
+ }
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT "completing %d cmds\n",
+ ioc->name, count));
+}
+
+/**
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ *
+ * Supports protection types 1, 2 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIORequest_t *mpi_request)
+{
+ u16 eedp_flags;
+ unsigned char prot_op = scsi_get_prot_op(scmd);
+ unsigned char prot_type = scsi_get_prot_type(scmd);
+ Mpi25SCSIIORequest_t *mpi_request_3v =
+ (Mpi25SCSIIORequest_t *)mpi_request;
+
+ if (prot_type == SCSI_PROT_DIF_TYPE0 || prot_op == SCSI_PROT_NORMAL)
+ return;
+
+ if (prot_op == SCSI_PROT_READ_STRIP)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+ else if (prot_op == SCSI_PROT_WRITE_INSERT)
+ eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+ else
+ return;
+
+ switch (prot_type) {
+ case SCSI_PROT_DIF_TYPE1:
+ case SCSI_PROT_DIF_TYPE2:
+
+ /*
+ * enable ref/guard checking
+ * auto increment ref tag
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+ MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+ mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+ cpu_to_be32(scsi_get_lba(scmd));
+ break;
+
+ case SCSI_PROT_DIF_TYPE3:
+
+ /*
+ * enable guard checking
+ */
+ eedp_flags |= MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+ break;
+ }
+
+ mpi_request_3v->EEDPBlockSize =
+ cpu_to_le16(scmd->device->sector_size);
+ mpi_request->EEDPFlags = cpu_to_le16(eedp_flags);
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+ u8 ascq;
+
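+	/* T10 DIF sense data: ASC 0x10 with ASCQ 1/2/3 maps to guard,
+	 * application tag, and reference tag check failures */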
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ ascq = 0x01;
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ ascq = 0x02;
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ ascq = 0x03;
+ break;
+ default:
+ ascq = 0x00;
+ break;
+ }
+ scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x10,
+ ascq);
+ scmd->result = DRIVER_SENSE << 24 | (DID_ABORT << 16) |
+ SAM_STAT_CHECK_CONDITION;
+}
+
+
+/**
+ * _scsih_qcmd_lck - main scsi request entry point
+ * @scmd: pointer to scsi command object
+ * @done: function pointer to be invoked on completion
+ *
+ * The callback index is set inside `ioc->scsi_io_cb_idx`.
+ *
+ * Returns 0 on success. If there's a failure, return either:
+ * SCSI_MLQUEUE_DEVICE_BUSY if the device queue is full, or
+ * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
+ */
+static int
+_scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2SCSIIORequest_t *mpi_request;
+ u32 mpi_control;
+ u16 smid;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_SCSI)
+ scsi_print_command(scmd);
+#endif
+
+ scmd->scsi_done = done;
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ if (ioc->pci_error_recovery || ioc->remove_host) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+ sas_target_priv_data = sas_device_priv_data->sas_target;
+
+ /* invalid device handle */
+ handle = sas_target_priv_data->handle;
+ if (handle == MPT3SAS_INVALID_DEVICE_HANDLE) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+
+
+ /* host recovery or link resets sent via IOCTLs */
+ if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
+ return SCSI_MLQUEUE_HOST_BUSY;
+
+ /* device has been deleted */
+ else if (sas_target_priv_data->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scmd->scsi_done(scmd);
+ return 0;
+ /* device busy with task management */
+ } else if (sas_target_priv_data->tm_busy ||
+ sas_device_priv_data->block)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+
+ if (scmd->sc_data_direction == DMA_FROM_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_READ;
+ else if (scmd->sc_data_direction == DMA_TO_DEVICE)
+ mpi_control = MPI2_SCSIIO_CONTROL_WRITE;
+ else
+ mpi_control = MPI2_SCSIIO_CONTROL_NODATATRANSFER;
+
+ /* set tags */
+ if (!(sas_device_priv_data->flags & MPT_DEVICE_FLAGS_INIT)) {
+ if (scmd->device->tagged_supported) {
+ if (scmd->device->ordered_tags)
+ mpi_control |= MPI2_SCSIIO_CONTROL_ORDEREDQ;
+ else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+ } else
+ mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ;
+
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ scmd->cmd_len != 32)
+ mpi_control |= MPI2_SCSIIO_CONTROL_TLR_ON;
+
+ smid = mpt3sas_base_get_smid_scsiio(ioc, ioc->scsi_io_cb_idx, scmd);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+ _scsih_setup_eedp(ioc, scmd, mpi_request);
+
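+	/* 32 byte CDBs need the additional CDB length field, expressed
+	 * in 4-byte units */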
+ if (scmd->cmd_len == 32)
+ mpi_control |= 4 << MPI2_SCSIIO_CONTROL_ADDCDBLEN_SHIFT;
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ mpi_request->Function = MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+ else
+ mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
+ mpi_request->DevHandle = cpu_to_le16(handle);
+ mpi_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
+ mpi_request->Control = cpu_to_le32(mpi_control);
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len);
+ mpi_request->MsgFlags = MPI2_SCSIIO_MSGFLAGS_SYSTEM_SENSE_ADDR;
+ mpi_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE;
+ mpi_request->SenseBufferLowAddress =
+ mpt3sas_base_get_sense_buffer_dma(ioc, smid);
+ mpi_request->SGLOffset0 = offsetof(Mpi2SCSIIORequest_t, SGL) / 4;
+ int_to_scsilun(sas_device_priv_data->lun, (struct scsi_lun *)
+ mpi_request->LUN);
+ memcpy(mpi_request->CDB.CDB32, scmd->cmnd, scmd->cmd_len);
+
+ if (mpi_request->DataLength) {
+ if (ioc->build_sg_scmd(ioc, scmd, smid)) {
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+ } else
+ ioc->build_zero_len_sge(ioc, &mpi_request->SGL);
+
+ if (likely(mpi_request->Function == MPI2_FUNCTION_SCSI_IO_REQUEST)) {
+ if (sas_target_priv_data->flags & MPT_TARGET_FASTPATH_IO) {
+ mpi_request->IoFlags = cpu_to_le16(scmd->cmd_len |
+ MPI25_SCSIIO_IOFLAGS_FAST_PATH);
+ mpt3sas_base_put_smid_fast_path(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_scsi_io(ioc, smid, handle);
+ } else
+ mpt3sas_base_put_smid_default(ioc, smid);
+ return 0;
+
+ out:
+ return SCSI_MLQUEUE_HOST_BUSY;
+}
+static DEF_SCSI_QCMD(_scsih_qcmd)
+
+
+/**
+ * _scsih_normalize_sense - normalize descriptor and fixed format sense data
+ * @sense_buffer: sense data returned by target
+ * @data: normalized skey/asc/ascq
+ *
+ * Return nothing.
+ */
+static void
+_scsih_normalize_sense(char *sense_buffer, struct sense_info *data)
+{
+ if ((sense_buffer[0] & 0x7F) >= 0x72) {
+ /* descriptor format */
+ data->skey = sense_buffer[1] & 0x0F;
+ data->asc = sense_buffer[2];
+ data->ascq = sense_buffer[3];
+ } else {
+ /* fixed format */
+ data->skey = sense_buffer[2] & 0x0F;
+ data->asc = sense_buffer[12];
+ data->ascq = sense_buffer[13];
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_scsi_ioc_info - translate non-successful SCSI_IO request
+ * @ioc: per adapter object
+ * @scmd: pointer to scsi command object
+ * @mpi_reply: reply mf payload returned from firmware
+ *
+ * scsi_status - SCSI Status code returned from target device
+ * scsi_state - state info associated with SCSI_IO determined by ioc
+ * ioc_status - ioc supplied status info
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scsi_ioc_info(struct MPT3SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
+ Mpi2SCSIIOReply_t *mpi_reply, u16 smid)
+{
+ u32 response_info;
+ u8 *response_bytes;
+ u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ u8 scsi_state = mpi_reply->SCSIState;
+ u8 scsi_status = mpi_reply->SCSIStatus;
+ char *desc_ioc_state = NULL;
+ char *desc_scsi_status = NULL;
+ char *desc_scsi_state = ioc->tmp_string;
+ u32 log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ struct _sas_device *sas_device = NULL;
+ unsigned long flags;
+ struct scsi_target *starget = scmd->device->sdev_target;
+ struct MPT3SAS_TARGET *priv_target = starget->hostdata;
+ char *device_str = NULL;
+
+ if (!priv_target)
+ return;
+ device_str = "volume";
+
+ if (log_info == 0x31170000)
+ return;
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_SUCCESS:
+ desc_ioc_state = "success";
+ break;
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ desc_ioc_state = "invalid function";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ desc_ioc_state = "scsi recovered error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
+ desc_ioc_state = "scsi invalid dev handle";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ desc_ioc_state = "scsi device not there";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ desc_ioc_state = "scsi data overrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ desc_ioc_state = "scsi data underrun";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ desc_ioc_state = "scsi io data error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ desc_ioc_state = "scsi protocol error";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ desc_ioc_state = "scsi task terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ desc_ioc_state = "scsi residual mismatch";
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ desc_ioc_state = "scsi task mgmt failed";
+ break;
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ desc_ioc_state = "scsi ioc terminated";
+ break;
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ desc_ioc_state = "scsi ext terminated";
+ break;
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ desc_ioc_state = "eedp guard error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ desc_ioc_state = "eedp ref tag error";
+ break;
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ desc_ioc_state = "eedp app tag error";
+ break;
+ default:
+ desc_ioc_state = "unknown";
+ break;
+ }
+
+ switch (scsi_status) {
+ case MPI2_SCSI_STATUS_GOOD:
+ desc_scsi_status = "good";
+ break;
+ case MPI2_SCSI_STATUS_CHECK_CONDITION:
+ desc_scsi_status = "check condition";
+ break;
+ case MPI2_SCSI_STATUS_CONDITION_MET:
+ desc_scsi_status = "condition met";
+ break;
+ case MPI2_SCSI_STATUS_BUSY:
+ desc_scsi_status = "busy";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE:
+ desc_scsi_status = "intermediate";
+ break;
+ case MPI2_SCSI_STATUS_INTERMEDIATE_CONDMET:
+ desc_scsi_status = "intermediate condmet";
+ break;
+ case MPI2_SCSI_STATUS_RESERVATION_CONFLICT:
+ desc_scsi_status = "reservation conflict";
+ break;
+ case MPI2_SCSI_STATUS_COMMAND_TERMINATED:
+ desc_scsi_status = "command terminated";
+ break;
+ case MPI2_SCSI_STATUS_TASK_SET_FULL:
+ desc_scsi_status = "task set full";
+ break;
+ case MPI2_SCSI_STATUS_ACA_ACTIVE:
+ desc_scsi_status = "aca active";
+ break;
+ case MPI2_SCSI_STATUS_TASK_ABORTED:
+ desc_scsi_status = "task aborted";
+ break;
+ default:
+ desc_scsi_status = "unknown";
+ break;
+ }
+
+ desc_scsi_state[0] = '\0';
+ if (!scsi_state)
+ desc_scsi_state = " ";
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ strcat(desc_scsi_state, "response info ");
+ if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ strcat(desc_scsi_state, "state terminated ");
+ if (scsi_state & MPI2_SCSI_STATE_NO_SCSI_STATUS)
+ strcat(desc_scsi_state, "no status ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_FAILED)
+ strcat(desc_scsi_state, "autosense failed ");
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID)
+ strcat(desc_scsi_state, "autosense valid ");
+
+ scsi_print_command(scmd);
+
+ if (priv_target->flags & MPT_TARGET_FLAGS_VOLUME) {
+ pr_warn(MPT3SAS_FMT "\t%s wwid(0x%016llx)\n", ioc->name,
+ device_str, (unsigned long long)priv_target->sas_address);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ priv_target->sas_address);
+ if (sas_device) {
+ pr_warn(MPT3SAS_FMT
+ "\tsas_address(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->sas_address, sas_device->phy);
+ pr_warn(MPT3SAS_FMT
+ "\tenclosure_logical_id(0x%016llx), slot(%d)\n",
+ ioc->name, (unsigned long long)
+ sas_device->enclosure_logical_id, sas_device->slot);
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+
+ pr_warn(MPT3SAS_FMT
+ "\thandle(0x%04x), ioc_status(%s)(0x%04x), smid(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->DevHandle),
+ desc_ioc_state, ioc_status, smid);
+ pr_warn(MPT3SAS_FMT
+ "\trequest_len(%d), underflow(%d), resid(%d)\n",
+ ioc->name, scsi_bufflen(scmd), scmd->underflow,
+ scsi_get_resid(scmd));
+ pr_warn(MPT3SAS_FMT
+ "\ttag(%d), transfer_count(%d), sc->result(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->TaskTag),
+ le32_to_cpu(mpi_reply->TransferCount), scmd->result);
+ pr_warn(MPT3SAS_FMT
+ "\tscsi_status(%s)(0x%02x), scsi_state(%s)(0x%02x)\n",
+ ioc->name, desc_scsi_status,
+ scsi_status, desc_scsi_state, scsi_state);
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ pr_warn(MPT3SAS_FMT
+ "\t[sense_key,asc,ascq]: [0x%02x,0x%02x,0x%02x], count(%d)\n",
+ ioc->name, data.skey,
+ data.asc, data.ascq, le32_to_cpu(mpi_reply->SenseCount));
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID) {
+ response_info = le32_to_cpu(mpi_reply->ResponseInfo);
+ response_bytes = (u8 *)&response_info;
+ _scsih_response_code(ioc, response_bytes[0]);
+ }
+}
+#endif
+
+/**
+ * _scsih_turn_on_fault_led - illuminate Fault LED
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ Mpi2SepReply_t mpi_reply;
+ Mpi2SepRequest_t mpi_request;
+
+ memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+ mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+ mpi_request.SlotStatus =
+ cpu_to_le32(MPI2_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT);
+ mpi_request.DevHandle = cpu_to_le16(handle);
+ mpi_request.Flags = MPI2_SEP_REQ_FLAGS_DEVHANDLE_ADDRESS;
+ if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+ &mpi_request)) != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo)));
+ return;
+ }
+}
+
+/**
+ * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct fw_event_work *fw_event;
+
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event)
+ return;
+ fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+ fw_event->device_handle = handle;
+ fw_event->ioc = ioc;
+ _scsih_fw_event_add(ioc, fw_event);
+}
+
+/**
+ * _scsih_smart_predicted_fault - process smart errors
+ * @ioc: per adapter object
+ * @handle: device handle
+ * Context: interrupt.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_smart_predicted_fault(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ Mpi2EventNotificationReply_t *event_reply;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data;
+ struct _sas_device *sas_device;
+ ssize_t sz;
+ unsigned long flags;
+
+ /* only handle non-raid devices */
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+
+ if ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_RAID_COMPONENT) ||
+ ((sas_target_priv_data->flags & MPT_TARGET_FLAGS_VOLUME))) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+ starget_printk(KERN_WARNING, starget, "predicted fault\n");
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
+ _scsih_send_event_to_turn_on_fault_led(ioc, handle);
+
+ /* insert into event log */
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(Mpi2EventDataSasDeviceStatusChange_t);
+ event_reply = kzalloc(sz, GFP_KERNEL);
+ if (!event_reply) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ event_reply->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
+ event_reply->Event =
+ cpu_to_le16(MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
+ event_reply->MsgLength = sz/4;
+ event_reply->EventDataLength =
+ cpu_to_le16(sizeof(Mpi2EventDataSasDeviceStatusChange_t)/4);
+ event_data = (Mpi2EventDataSasDeviceStatusChange_t *)
+ event_reply->EventData;
+ event_data->ReasonCode = MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA;
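+	/* ASC 0x5D: failure prediction threshold exceeded */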
+ event_data->ASC = 0x5D;
+ event_data->DevHandle = cpu_to_le16(handle);
+ event_data->SASAddress = cpu_to_le64(sas_target_priv_data->sas_address);
+ mpt3sas_ctl_add_to_event_log(ioc, event_reply);
+ kfree(event_reply);
+}
+
+/**
+ * _scsih_io_done - scsi request callback
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when using _scsih_qcmd.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+static u8
+_scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ Mpi2SCSIIORequest_t *mpi_request;
+ Mpi2SCSIIOReply_t *mpi_reply;
+ struct scsi_cmnd *scmd;
+ u16 ioc_status;
+ u32 xfer_cnt;
+ u8 scsi_state;
+ u8 scsi_status;
+ u32 log_info;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 response_code = 0;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
+ if (scmd == NULL)
+ return 1;
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+
+ if (mpi_reply == NULL) {
+ scmd->result = DID_OK << 16;
+ goto out;
+ }
+
+ sas_device_priv_data = scmd->device->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target ||
+ sas_device_priv_data->sas_target->deleted) {
+ scmd->result = DID_NO_CONNECT << 16;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+
+ /* turn off TLR if the target rejected the first TLR-enabled frame */
+ scsi_state = mpi_reply->SCSIState;
+ if (scsi_state & MPI2_SCSI_STATE_RESPONSE_INFO_VALID)
+ response_code =
+ le32_to_cpu(mpi_reply->ResponseInfo) & 0xFF;
+ if (!sas_device_priv_data->tlr_snoop_check) {
+ sas_device_priv_data->tlr_snoop_check++;
+ if ((sas_device_priv_data->flags & MPT_DEVICE_TLR_ON) &&
+ response_code == MPI2_SCSITASKMGMT_RSP_INVALID_FRAME)
+ sas_device_priv_data->flags &=
+ ~MPT_DEVICE_TLR_ON;
+ }
+
+ xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
+ scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ scsi_status = mpi_reply->SCSIStatus;
+
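+	/* treat a zero-length underrun reported with busy, reservation
+	 * conflict, or task set full status as success, so the raw
+	 * scsi_status is returned to the midlayer */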
+ if (ioc_status == MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN && xfer_cnt == 0 &&
+ (scsi_status == MPI2_SCSI_STATUS_BUSY ||
+ scsi_status == MPI2_SCSI_STATUS_RESERVATION_CONFLICT ||
+ scsi_status == MPI2_SCSI_STATUS_TASK_SET_FULL)) {
+ ioc_status = MPI2_IOCSTATUS_SUCCESS;
+ }
+
+ if (scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID) {
+ struct sense_info data;
+ const void *sense_data = mpt3sas_base_get_sense_buffer(ioc,
+ smid);
+ u32 sz = min_t(u32, SCSI_SENSE_BUFFERSIZE,
+ le32_to_cpu(mpi_reply->SenseCount));
+ memcpy(scmd->sense_buffer, sense_data, sz);
+ _scsih_normalize_sense(scmd->sense_buffer, &data);
+ /* failure prediction threshold exceeded */
+ if (data.asc == 0x5D)
+ _scsih_smart_predicted_fault(ioc,
+ le16_to_cpu(mpi_reply->DevHandle));
+ mpt3sas_trigger_scsi(ioc, data.skey, data.asc, data.ascq);
+ }
+
+ switch (ioc_status) {
+ case MPI2_IOCSTATUS_BUSY:
+ case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
+ scmd->result = SAM_STAT_BUSY;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
+ scmd->result = DID_NO_CONNECT << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
+ if (sas_device_priv_data->block) {
+ scmd->result = DID_TRANSPORT_DISRUPTED << 16;
+ goto out;
+ }
+ if (log_info == 0x31110630) {
+ if (scmd->retries > 2) {
+ scmd->result = DID_NO_CONNECT << 16;
+ scsi_device_set_state(scmd->device,
+ SDEV_OFFLINE);
+ } else {
+ scmd->result = DID_SOFT_ERROR << 16;
+ scmd->device->expecting_cc_ua = 1;
+ }
+ break;
+ }
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+ case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
+ case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
+ if ((xfer_cnt == 0) || (scmd->underflow > xfer_cnt))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else
+ scmd->result = (DID_OK << 16) | scsi_status;
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
+ scmd->result = (DID_OK << 16) | scsi_status;
+
+ if ((scsi_state & MPI2_SCSI_STATE_AUTOSENSE_VALID))
+ break;
+
+ if (xfer_cnt < scmd->underflow) {
+ if (scsi_status == SAM_STAT_BUSY)
+ scmd->result = SAM_STAT_BUSY;
+ else
+ scmd->result = DID_SOFT_ERROR << 16;
+ } else if (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
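+		/* fake a check condition with illegal request/invalid opcode
+		 * sense for a zero-length REPORT LUNS response
+		 */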
+ else if (!xfer_cnt && scmd->cmnd[0] == REPORT_LUNS) {
+ mpi_reply->SCSIState = MPI2_SCSI_STATE_AUTOSENSE_VALID;
+ mpi_reply->SCSIStatus = SAM_STAT_CHECK_CONDITION;
+ scmd->result = (DRIVER_SENSE << 24) |
+ SAM_STAT_CHECK_CONDITION;
+ scmd->sense_buffer[0] = 0x70;
+ scmd->sense_buffer[2] = ILLEGAL_REQUEST;
+ scmd->sense_buffer[12] = 0x20;
+ scmd->sense_buffer[13] = 0;
+ }
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
+ scsi_set_resid(scmd, 0);
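+		/* fall through */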
+ case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
+ case MPI2_IOCSTATUS_SUCCESS:
+ scmd->result = (DID_OK << 16) | scsi_status;
+ if (response_code ==
+ MPI2_SCSITASKMGMT_RSP_INVALID_FRAME ||
+ (scsi_state & (MPI2_SCSI_STATE_AUTOSENSE_FAILED |
+ MPI2_SCSI_STATE_NO_SCSI_STATUS)))
+ scmd->result = DID_SOFT_ERROR << 16;
+ else if (scsi_state & MPI2_SCSI_STATE_TERMINATED)
+ scmd->result = DID_RESET << 16;
+ break;
+
+ case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+ case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+ case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+ _scsih_eedp_error_handling(scmd, ioc_status);
+ break;
+
+ case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FUNCTION:
+ case MPI2_IOCSTATUS_INVALID_SGL:
+ case MPI2_IOCSTATUS_INTERNAL_ERROR:
+ case MPI2_IOCSTATUS_INVALID_FIELD:
+ case MPI2_IOCSTATUS_INVALID_STATE:
+ case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
+ case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
+ default:
+ scmd->result = DID_SOFT_ERROR << 16;
+ break;
+
+ }
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (scmd->result && (ioc->logging_level & MPT_DEBUG_REPLY))
+ _scsih_scsi_ioc_info(ioc , scmd, mpi_reply, smid);
+#endif
+
+ out:
+
+ scsi_dma_unmap(scmd);
+
+ scmd->scsi_done(scmd);
+ return 1;
+}
+
+/**
+ * _scsih_sas_host_refresh - refreshing sas host object contents
+ * @ioc: per adapter object
+ * Context: user
+ *
+ * During port enable, the firmware sends topology events for every device.
+ * It's possible that the handles have changed from their previous values,
+ * so this code refreshes any handle that changed.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_refresh(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 sz;
+ u16 ioc_status;
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ u16 attached_handle;
+ u8 link_rate;
+
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "updating handles for sas_host(0x%016llx)\n",
+ ioc->name, (unsigned long long)ioc->sas_hba.sas_address));
+
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys
+ * sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz)) != 0)
+ goto out;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ goto out;
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ link_rate = sas_iounit_pg0->PhyData[i].NegotiatedLinkRate >> 4;
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ attached_handle = le16_to_cpu(sas_iounit_pg0->PhyData[i].
+ AttachedDevHandle);
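+		/* clamp: report an attached device at no less than 1.5 Gbps */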
+ if (attached_handle && link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ link_rate = MPI2_SAS_NEG_LINK_RATE_1_5;
+ mpt3sas_transport_update_links(ioc, ioc->sas_hba.sas_address,
+ attached_handle, i, link_rate);
+ }
+ out:
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_sas_host_add - create sas host object
+ * @ioc: per adapter object
+ *
+ * Creating host side data object, stored in ioc->sas_hba
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_host_add(struct MPT3SAS_ADAPTER *ioc)
+{
+ int i;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u16 ioc_status;
+ u16 sz;
+ u8 device_missing_delay;
+
+ mpt3sas_config_get_number_hba_phys(ioc, &ioc->sas_hba.num_phys);
+ if (!ioc->sas_hba.num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ /* sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ ioc->io_missing_delay =
+ sas_iounit_pg1->IODeviceMissingDelay;
+ device_missing_delay =
+ sas_iounit_pg1->ReportDeviceMissingDelay;
+ if (device_missing_delay & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
+ ioc->device_missing_delay = (device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
+ else
+ ioc->device_missing_delay = device_missing_delay &
+ MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
+
+ ioc->sas_hba.parent_dev = &ioc->shost->shost_gendev;
+ ioc->sas_hba.phy = kcalloc(ioc->sas_hba.num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!ioc->sas_hba.phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if ((mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ i))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+
+ if (i == 0)
+ ioc->sas_hba.handle = le16_to_cpu(sas_iounit_pg0->
+ PhyData[0].ControllerDevHandle);
+ ioc->sas_hba.phy[i].handle = ioc->sas_hba.handle;
+ ioc->sas_hba.phy[i].phy_id = i;
+ mpt3sas_transport_add_host_phy(ioc, &ioc->sas_hba.phy[i],
+ phy_pg0, ioc->sas_hba.parent_dev);
+ }
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, ioc->sas_hba.handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out;
+ }
+ ioc->sas_hba.enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ ioc->sas_hba.sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ pr_info(MPT3SAS_FMT
+ "host_add: handle(0x%04x), sas_addr(0x%016llx), phys(%d)\n",
+ ioc->name, ioc->sas_hba.handle,
+ (unsigned long long) ioc->sas_hba.sas_address,
+	    ioc->sas_hba.num_phys);
+
+ if (ioc->sas_hba.enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ ioc->sas_hba.enclosure_handle)))
+ ioc->sas_hba.enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+}
+
+/**
+ * _scsih_expander_add - creating expander object
+ * @ioc: per adapter object
+ * @handle: expander handle
+ *
+ * Creating expander object, stored in ioc->sas_expander_list.
+ *
+ * Return 0 for success, else error.
+ */
+static int
+_scsih_expander_add(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _sas_node *sas_expander;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ u32 ioc_status;
+ u16 parent_handle;
+ u64 sas_address, sas_address_parent = 0;
+ int i;
+ unsigned long flags;
+	struct _sas_port *mpt3sas_port = NULL;
+	int rc = 0;
+
+ if (!handle)
+ return -1;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return -1;
+
+ if ((mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* handle out of order topology events */
+ parent_handle = le16_to_cpu(expander_pg0.ParentDevHandle);
+ if (_scsih_get_sas_address(ioc, parent_handle, &sas_address_parent)
+ != 0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if (sas_address_parent != ioc->sas_hba.sas_address) {
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address_parent);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (!sas_expander) {
+ rc = _scsih_expander_add(ioc, parent_handle);
+ if (rc != 0)
+ return rc;
+ }
+ }
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (sas_expander)
+ return 0;
+
+ sas_expander = kzalloc(sizeof(struct _sas_node),
+ GFP_KERNEL);
+ if (!sas_expander) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ sas_expander->handle = handle;
+ sas_expander->num_phys = expander_pg0.NumPhys;
+ sas_expander->sas_address_parent = sas_address_parent;
+ sas_expander->sas_address = sas_address;
+
+ pr_info(MPT3SAS_FMT "expander_add: handle(0x%04x)," \
+ " parent(0x%04x), sas_addr(0x%016llx), phys(%d)\n", ioc->name,
+ handle, parent_handle, (unsigned long long)
+ sas_expander->sas_address, sas_expander->num_phys);
+
+ if (!sas_expander->num_phys)
+ goto out_fail;
+ sas_expander->phy = kcalloc(sas_expander->num_phys,
+ sizeof(struct _sas_phy), GFP_KERNEL);
+ if (!sas_expander->phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+
+ INIT_LIST_HEAD(&sas_expander->sas_port_list);
+ mpt3sas_port = mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->parent_dev = &mpt3sas_port->rphy->dev;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ sas_expander->phy[i].handle = handle;
+ sas_expander->phy[i].phy_id = i;
+
+ if ((mpt3sas_transport_add_expander_phy(ioc,
+ &sas_expander->phy[i], expander_pg1,
+ sas_expander->parent_dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -1;
+ goto out_fail;
+ }
+ }
+
+ if (sas_expander->enclosure_handle) {
+ if (!(mpt3sas_config_get_enclosure_pg0(ioc, &mpi_reply,
+ &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_expander->enclosure_handle)))
+ sas_expander->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+ }
+
+ _scsih_expander_node_add(ioc, sas_expander);
+ return 0;
+
+ out_fail:
+
+ if (mpt3sas_port)
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_address_parent);
+ kfree(sas_expander);
+ return rc;
+}
+
+/**
+ * mpt3sas_expander_remove - removing expander object
+ * @ioc: per adapter object
+ * @sas_address: expander sas_address
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_expander_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+
+ if (ioc->shost_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+ if (sas_expander)
+ list_del(&sas_expander->list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (sas_expander)
+ _scsih_expander_node_remove(ioc, sas_expander);
+}
+
+/**
+ * _scsih_done - internal SCSI_IO callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame (lower 32bit addr)
+ *
+ * Callback handler used when sending internally generated SCSI_IO.
+ * The callback index passed is ioc->scsih_cb_idx.
+ *
+ * Return 1 when the mf should be freed by _base_interrupt,
+ * 0 when the mf is freed by this function.
+ */
+static u8
+_scsih_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->scsih_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->scsih_cmds.smid != smid)
+ return 1;
+ ioc->scsih_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->scsih_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->scsih_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->scsih_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->scsih_cmds.done);
+ return 1;
+}
+
+#define MPT3_MAX_LUNS (255)
+
+/**
+ * _scsih_check_access_status - check access flags
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: sas device handle
+ * @access_status: errors returned during discovery of the device
+ *
+ * Return 0 for success, else failure
+ */
+static u8
+_scsih_check_access_status(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle, u8 access_status)
+{
+ u8 rc = 1;
+ char *desc = NULL;
+
+ switch (access_status) {
+ case MPI2_SAS_DEVICE0_ASTATUS_NO_ERRORS:
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_NEEDS_INITIALIZATION:
+ rc = 0;
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_CAPABILITY_FAILED:
+ desc = "sata capability failed";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_AFFILIATION_CONFLICT:
+ desc = "sata affiliation conflict";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_ROUTE_NOT_ADDRESSABLE:
+ desc = "route not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_SMP_ERROR_NOT_ADDRESSABLE:
+ desc = "smp error not addressable";
+ break;
+ case MPI2_SAS_DEVICE0_ASTATUS_DEVICE_BLOCKED:
+ desc = "device blocked";
+ break;
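+	/* SATA_INIT_FAILED and the SIF_* codes all map to one message */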
+ case MPI2_SAS_DEVICE0_ASTATUS_SATA_INIT_FAILED:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UNKNOWN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_AFFILIATION_CONFLICT:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_DIAG:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_IDENTIFICATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_CHECK_POWER:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_PIO_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_UDMA_SN:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_ZONING_VIOLATION:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_NOT_ADDRESSABLE:
+ case MPI2_SAS_DEVICE0_ASTATUS_SIF_MAX:
+ desc = "sata initialization failed";
+ break;
+ default:
+ desc = "unknown";
+ break;
+ }
+
+ if (!rc)
+ return 0;
+
+ pr_err(MPT3SAS_FMT
+ "discovery errors(%s): sas_address(0x%016llx), handle(0x%04x)\n",
+ ioc->name, desc, (unsigned long long)sas_address, handle);
+ return rc;
+}
+
+/**
+ * _scsih_check_device - checking device responsiveness
+ * @ioc: per adapter object
+ * @parent_sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+static void
+_scsih_check_device(struct MPT3SAS_ADAPTER *ioc,
+ u64 parent_sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ unsigned long flags;
+ u64 sas_address;
+ struct scsi_target *starget;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ u32 device_info;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle)))
+ return;
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
+ return;
+
+	/* wide port handling: the device need only be handled once, for the
+	 * phy that matches the one reported in sas device page zero
+	 */
+ if (phy_number != sas_device_pg0.PhyNum)
+ return;
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ if (unlikely(sas_device->handle != handle)) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ starget_printk(KERN_INFO, starget,
+ "handle changed from(0x%04x) to (0x%04x)!!!\n",
+ sas_device->handle, handle);
+ sas_target_priv_data->handle = handle;
+ sas_device->handle = handle;
+ }
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT
+ "device is not present handle(0x%04x), flags!!!\n",
+ ioc->name, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus)) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ _scsih_ublock_io_device(ioc, sas_address);
+}
+
+/**
+ * _scsih_add_device - creating sas device object
+ * @ioc: per adapter object
+ * @handle: sas device handle
+ * @phy_num: phy number end device attached to
+ * @is_pd: is this hidden raid component
+ *
+ * Creating end device object, stored in ioc->sas_device_list.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_scsih_add_device(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phy_num,
+ u8 is_pd)
+{
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2SasEnclosurePage0_t enclosure_pg0;
+ struct _sas_device *sas_device;
+ u32 ioc_status;
+ u64 sas_address;
+ u32 device_info;
+ unsigned long flags;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+
+ /* check if this is end device */
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ return -1;
+ sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* check if device is present */
+ if (!(le16_to_cpu(sas_device_pg0.Flags) &
+ MPI2_SAS_DEVICE0_FLAGS_DEVICE_PRESENT)) {
+ pr_err(MPT3SAS_FMT "device is not present handle(0x04%x)!!!\n",
+ ioc->name, handle);
+ return -1;
+ }
+
+ /* check if there were any issues with discovery */
+ if (_scsih_check_access_status(ioc, sas_address, handle,
+ sas_device_pg0.AccessStatus))
+ return -1;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return -1;
+
+ sas_device = kzalloc(sizeof(struct _sas_device),
+ GFP_KERNEL);
+ if (!sas_device) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+		return -1;
+ }
+
+ sas_device->handle = handle;
+ if (_scsih_get_sas_address(ioc,
+ le16_to_cpu(sas_device_pg0.ParentDevHandle),
+ &sas_device->sas_address_parent) != 0)
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_device->enclosure_handle =
+ le16_to_cpu(sas_device_pg0.EnclosureHandle);
+ sas_device->slot =
+ le16_to_cpu(sas_device_pg0.Slot);
+ sas_device->device_info = device_info;
+ sas_device->sas_address = sas_address;
+ sas_device->phy = sas_device_pg0.PhyNum;
+ sas_device->fast_path = (le16_to_cpu(sas_device_pg0.Flags) &
+ MPI25_SAS_DEVICE0_FLAGS_FAST_PATH_CAPABLE) ? 1 : 0;
+
+ /* get enclosure_logical_id */
+ if (sas_device->enclosure_handle && !(mpt3sas_config_get_enclosure_pg0(
+ ioc, &mpi_reply, &enclosure_pg0, MPI2_SAS_ENCLOS_PGAD_FORM_HANDLE,
+ sas_device->enclosure_handle)))
+ sas_device->enclosure_logical_id =
+ le64_to_cpu(enclosure_pg0.EnclosureLogicalID);
+
+ /* get device name */
+ sas_device->device_name = le64_to_cpu(sas_device_pg0.DeviceName);
+
+ if (ioc->wait_for_discovery_to_complete)
+ _scsih_sas_device_init_add(ioc, sas_device);
+ else
+ _scsih_sas_device_add(ioc, sas_device);
+
+ return 0;
+}
+
+/**
+ * _scsih_remove_device - removing sas device object
+ * @ioc: per adapter object
+ * @sas_device: the sas_device object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_device(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_device *sas_device)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ sas_target_priv_data = sas_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ _scsih_ublock_io_device(ioc, sas_device->sas_address);
+ sas_target_priv_data->handle =
+ MPT3SAS_INVALID_DEVICE_HANDLE;
+ }
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "removing handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, sas_device->handle,
+ (unsigned long long) sas_device->sas_address);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: exit: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, __func__,
+ sas_device->handle, (unsigned long long)
+ sas_device->sas_address));
+
+ kfree(sas_device);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_topology_change_event_debug - debug for topology event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ */
+static void
+_scsih_sas_topology_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasTopologyChangeList_t *event_data)
+{
+ int i;
+ u16 handle;
+ u16 reason_code;
+ u8 phy_number;
+ char *status_str = NULL;
+ u8 link_rate, prev_link_rate;
+
+ switch (event_data->ExpStatus) {
+ case MPI2_EVENT_SAS_TOPO_ES_ADDED:
+ status_str = "add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING:
+ status_str = "remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_RESPONDING:
+ case 0:
+ status_str = "responding";
+ break;
+ case MPI2_EVENT_SAS_TOPO_ES_DELAY_NOT_RESPONDING:
+ status_str = "remove delay";
+ break;
+ default:
+ status_str = "unknown status";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "sas topology change: (%s)\n",
+ ioc->name, status_str);
+ pr_info("\thandle(0x%04x), enclosure_handle(0x%04x) " \
+ "start_phy(%02d), count(%d)\n",
+ le16_to_cpu(event_data->ExpanderDevHandle),
+ le16_to_cpu(event_data->EnclosureHandle),
+ event_data->StartPhyNum, event_data->NumEntries);
+ for (i = 0; i < event_data->NumEntries; i++) {
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ phy_number = event_data->StartPhyNum + i;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+ status_str = "target add";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+ status_str = "target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING:
+ status_str = "delay target remove";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+ status_str = "link rate change";
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_NO_CHANGE:
+ status_str = "target responding";
+ break;
+ default:
+ status_str = "unknown";
+ break;
+ }
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ pr_info("\tphy(%02d), attached_handle(0x%04x): %s:" \
+ " link rate: new(0x%02x), old(0x%02x)\n", phy_number,
+ handle, status_str, link_rate, prev_link_rate);
+
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_topology_change_event - handle topology changes
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ */
+static int
+_scsih_sas_topology_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ int i;
+ u16 parent_handle, handle;
+ u16 reason_code;
+ u8 phy_number, max_phys;
+ struct _sas_node *sas_expander;
+ u64 sas_address;
+ unsigned long flags;
+ u8 link_rate, prev_link_rate;
+ Mpi2EventDataSasTopologyChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_topology_change_event_debug(ioc, event_data);
+#endif
+
+ if (ioc->shost_recovery || ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+
+ if (!ioc->sas_hba.num_phys)
+ _scsih_sas_host_add(ioc);
+ else
+ _scsih_sas_host_refresh(ioc);
+
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+
+ parent_handle = le16_to_cpu(event_data->ExpanderDevHandle);
+
+ /* handle expander add */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_ADDED)
+ if (_scsih_expander_add(ioc, parent_handle) != 0)
+ return 0;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_expander = mpt3sas_scsih_expander_find_by_handle(ioc,
+ parent_handle);
+ if (sas_expander) {
+ sas_address = sas_expander->sas_address;
+ max_phys = sas_expander->num_phys;
+ } else if (parent_handle < ioc->sas_hba.num_phys) {
+ sas_address = ioc->sas_hba.sas_address;
+ max_phys = ioc->sas_hba.num_phys;
+ } else {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle siblings events */
+ for (i = 0; i < event_data->NumEntries; i++) {
+ if (fw_event->ignore) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "ignoring expander event\n", ioc->name));
+ return 0;
+ }
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 0;
+ phy_number = event_data->StartPhyNum + i;
+ if (phy_number >= max_phys)
+ continue;
+ reason_code = event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_RC_MASK;
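+		/* skip vacant phys unless a target removal is pending on them */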
+ if ((event_data->PHY[i].PhyStatus &
+ MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT) && (reason_code !=
+ MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING))
+ continue;
+ handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
+ if (!handle)
+ continue;
+ link_rate = event_data->PHY[i].LinkRate >> 4;
+ prev_link_rate = event_data->PHY[i].LinkRate & 0xF;
+ switch (reason_code) {
+ case MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ if (link_rate == prev_link_rate)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ if (link_rate < MPI2_SAS_NEG_LINK_RATE_1_5)
+ break;
+
+ _scsih_check_device(ioc, sas_address, handle,
+ phy_number, link_rate);
+
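+			/* fall through - the phy change may have exposed a
+			 * new target on this phy
+			 */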
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
+
+ if (ioc->shost_recovery)
+ break;
+
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, phy_number, link_rate);
+
+ _scsih_add_device(ioc, handle, phy_number, 0);
+
+ break;
+ case MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING:
+
+ _scsih_device_remove_by_handle(ioc, handle);
+ break;
+ }
+ }
+
+ /* handle expander removal */
+ if (event_data->ExpStatus == MPI2_EVENT_SAS_TOPO_ES_NOT_RESPONDING &&
+ sas_expander)
+ mpt3sas_expander_remove(ioc, sas_address);
+
+ return 0;
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_device_status_change_event_debug - debug for device event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasDeviceStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
+ reason_str = "smart data";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+ reason_str = "unsupported device discovered";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
+ reason_str = "internal device reset";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
+ reason_str = "internal task abort set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
+ reason_str = "internal clear task set";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+ reason_str = "internal query task";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_SATA_INIT_FAILURE:
+ reason_str = "sata init failure";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET:
+ reason_str = "internal device reset complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_TASK_ABORT_INTERNAL:
+ reason_str = "internal task abort complete";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_ASYNC_NOTIFICATION:
+ reason_str = "internal async notification";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality";
+ break;
+ case MPI2_EVENT_SAS_DEV_STAT_RC_CMP_EXPANDER_REDUCED_FUNCTIONALITY:
+ reason_str = "expander reduced functionality complete";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ pr_info(MPT3SAS_FMT "device status change: (%s)\n"
+ "\thandle(0x%04x), sas address(0x%016llx), tag(%d)",
+ ioc->name, reason_str, le16_to_cpu(event_data->DevHandle),
+ (unsigned long long)le64_to_cpu(event_data->SASAddress),
+ le16_to_cpu(event_data->TaskTag));
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DEV_STAT_RC_SMART_DATA)
+ pr_info(MPT3SAS_FMT ", ASC(0x%x), ASCQ(0x%x)\n", ioc->name,
+ event_data->ASC, event_data->ASCQ);
+ pr_info("\n");
+}
+#endif
+
+/**
+ * _scsih_sas_device_status_change_event - handle device status change
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_device_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct MPT3SAS_TARGET *target_priv_data;
+ struct _sas_device *sas_device;
+ u64 sas_address;
+ unsigned long flags;
+ Mpi2EventDataSasDeviceStatusChange_t *event_data =
+ fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_device_status_change_event_debug(ioc,
+ event_data);
+#endif
+
+ /* In MPI Revision K (0xC), the internal device reset complete was
+ * implemented, so avoid setting tm_busy flag for older firmware.
+ */
+ if ((ioc->facts.HeaderVersion >> 8) < 0xC)
+ return;
+
+ if (event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET &&
+ event_data->ReasonCode !=
+ MPI2_EVENT_SAS_DEV_STAT_RC_CMP_INTERNAL_DEV_RESET)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_address = le64_to_cpu(event_data->SASAddress);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ sas_address);
+
+ if (!sas_device || !sas_device->starget) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
+ target_priv_data = sas_device->starget->hostdata;
+ if (!target_priv_data) {
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return;
+ }
+
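+	/* tm_busy holds off new I/O while the firmware runs its internal
+	 * device reset
+	 */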
+ if (event_data->ReasonCode ==
+ MPI2_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET)
+ target_priv_data->tm_busy = 1;
+ else
+ target_priv_data->tm_busy = 0;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_enclosure_dev_status_change_event_debug - debug for enclosure
+ * event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataSasEnclDevStatusChange_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->ReasonCode) {
+ case MPI2_EVENT_SAS_ENCL_RC_ADDED:
+ reason_str = "enclosure add";
+ break;
+ case MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING:
+ reason_str = "enclosure remove";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+
+ pr_info(MPT3SAS_FMT "enclosure status change: (%s)\n"
+ "\thandle(0x%04x), enclosure logical id(0x%016llx)"
+ " number slots(%d)\n", ioc->name, reason_str,
+ le16_to_cpu(event_data->EnclosureHandle),
+ (unsigned long long)le64_to_cpu(event_data->EnclosureLogicalID),
+ le16_to_cpu(event_data->StartSlot));
+}
+#endif
+
+/**
+ * _scsih_sas_enclosure_dev_status_change_event - handle enclosure events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_enclosure_dev_status_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_enclosure_dev_status_change_event_debug(ioc,
+ fw_event->event_data);
+#endif
+}
+
+/**
+ * _scsih_sas_broadcast_primitive_event - handle broadcast events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ struct scsi_cmnd *scmd;
+ struct scsi_device *sdev;
+ u16 smid, handle;
+ u32 lun;
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ u32 termination_count;
+ u32 query_count;
+ Mpi2SCSITaskManagementReply_t *mpi_reply;
+ Mpi2EventDataSasBroadcastPrimitive_t *event_data = fw_event->event_data;
+ u16 ioc_status;
+ unsigned long flags;
+ int r;
+ u8 max_retries = 0;
+ u8 task_abort_retries;
+
+ mutex_lock(&ioc->tm_cmds.mutex);
+ pr_info(MPT3SAS_FMT
+ "%s: enter: phy number(%d), width(%d)\n",
+ ioc->name, __func__, event_data->PhyNum,
+ event_data->PortWidth);
+
+ _scsih_block_io_all_device(ioc);
+
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ mpi_reply = ioc->tm_cmds.reply;
+ broadcast_aen_retry:
+
+ /* sanity checks for retrying this loop */
+ if (max_retries++ == 5) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: giving up\n",
+ ioc->name, __func__));
+ goto out;
+ } else if (max_retries > 1)
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "%s: %d retry\n",
+ ioc->name, __func__, max_retries - 1));
+
+ termination_count = 0;
+ query_count = 0;
+ for (smid = 1; smid <= ioc->scsiio_depth; smid++) {
+ if (ioc->shost_recovery)
+ goto out;
+ scmd = _scsih_scsi_lookup_get(ioc, smid);
+ if (!scmd)
+ continue;
+ sdev = scmd->device;
+ sas_device_priv_data = sdev->hostdata;
+ if (!sas_device_priv_data || !sas_device_priv_data->sas_target)
+ continue;
+ /* skip hidden raid components */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_RAID_COMPONENT)
+ continue;
+ /* skip volumes */
+ if (sas_device_priv_data->sas_target->flags &
+ MPT_TARGET_FLAGS_VOLUME)
+ continue;
+
+ handle = sas_device_priv_data->sas_target->handle;
+ lun = sas_device_priv_data->lun;
+ query_count++;
+
+ if (ioc->shost_recovery)
+ goto out;
+
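+		/* mpt3sas_scsih_issue_tm() sleeps, so the lookup lock is
+		 * dropped here and retaken after each TM completes
+		 */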
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
+ MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, smid, 30, 0,
+ TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: FAILED when sending "
+ "QUERY_TASK: scmd(%p)\n", scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus)
+ & MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ sdev_printk(KERN_WARNING, sdev,
+ "query task: FAILED with IOCSTATUS(0x%04x), scmd(%p)\n",
+ ioc_status, scmd);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ /* see if IO is still owned by IOC and target */
+ if (mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+ mpi_reply->ResponseCode ==
+ MPI2_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC) {
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ continue;
+ }
+ task_abort_retries = 0;
+ tm_retry:
+ if (task_abort_retries++ == 60) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: ABORT_TASK: giving up\n", ioc->name,
+ __func__));
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ goto broadcast_aen_retry;
+ }
+
+ if (ioc->shost_recovery)
+ goto out_no_lock;
+
+ r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+ sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, smid, 30,
+ scmd->serial_number, TM_MUTEX_OFF);
+ if (r == FAILED) {
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
+ "scmd(%p)\n", scmd);
+ goto tm_retry;
+ }
+
+ if (task_abort_retries > 1)
+ sdev_printk(KERN_WARNING, sdev,
+ "mpt3sas_scsih_issue_tm: ABORT_TASK: RETRIES (%d):"
+ " scmd(%p)\n",
+ task_abort_retries - 1, scmd);
+
+ termination_count += le32_to_cpu(mpi_reply->TerminationCount);
+ spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+ }
+
+ if (ioc->broadcast_aen_pending) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: loop back due to pending AEN\n",
+ ioc->name, __func__));
+ ioc->broadcast_aen_pending = 0;
+ goto broadcast_aen_retry;
+ }
+
+ out:
+ spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+ out_no_lock:
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - exit, query_count = %d termination_count = %d\n",
+ ioc->name, __func__, query_count, termination_count));
+
+ ioc->broadcast_aen_busy = 0;
+ if (!ioc->shost_recovery)
+ _scsih_ublock_io_all_device(ioc);
+ mutex_unlock(&ioc->tm_cmds.mutex);
+}
+
+/**
+ * _scsih_sas_discovery_event - handle discovery events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_discovery_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataSasDiscovery_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK) {
+ pr_info(MPT3SAS_FMT "discovery event: (%s)", ioc->name,
+ (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED) ?
+ "start" : "stop");
+ if (event_data->DiscoveryStatus)
+ pr_info("discovery_status(0x%08x)",
+ le32_to_cpu(event_data->DiscoveryStatus));
+ pr_info("\n");
+ }
+#endif
+
+ if (event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED &&
+ !ioc->sas_hba.num_phys) {
+ if (disable_discovery > 0 && ioc->shost_recovery) {
+ /* Wait for the reset to complete */
+ while (ioc->shost_recovery)
+ ssleep(1);
+ }
+ _scsih_sas_host_add(ioc);
+ }
+}
+
+/**
+ * _scsih_ir_fastpath - turn on fastpath for IR physdisk
+ * @ioc: per adapter object
+ * @handle: device handle for physical disk
+ * @phys_disk_num: physical disk number
+ *
+ * Return 0 for success, else failure.
+ */
+static int
+_scsih_ir_fastpath(struct MPT3SAS_ADAPTER *ioc, u16 handle, u8 phys_disk_num)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+ u8 issue_reset = 0;
+ int rc = 0;
+ u16 ioc_status;
+ u32 log_info;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_PHYSDISK_HIDDEN;
+ mpi_request->PhysDiskNum = phys_disk_num;
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT "IR RAID_ACTION: turning fast "\
+ "path on for handle(0x%04x), phys_disk_num (0x%02x)\n", ioc->name,
+ handle, phys_disk_num));
+
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ rc = -EFAULT;
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->scsih_cmds.reply;
+ ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
+ if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
+ log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
+ else
+ log_info = 0;
+ ioc_status &= MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: failed: ioc_status(0x%04x), "
+ "loginfo(0x%08x)!!!\n", ioc->name, ioc_status,
+ log_info));
+ rc = -EFAULT;
+ } else
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "IR RAID_ACTION: completed successfully\n",
+ ioc->name));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ return rc;
+}
+
+/**
+ * _scsih_reprobe_lun - reprobing lun
+ * @sdev: scsi device struct
+ * @no_uld_attach: sdev->no_uld_attach flag setting
+ *
+ */
+static void
+_scsih_reprobe_lun(struct scsi_device *sdev, void *no_uld_attach)
+{
+ int rc;
+ sdev->no_uld_attach = no_uld_attach ? 1 : 0;
+ sdev_printk(KERN_INFO, sdev, "%s raid component\n",
+ sdev->no_uld_attach ? "hidding" : "exposing");
+ rc = scsi_device_reprobe(sdev);
+}
+
+/**
+ * _scsih_sas_volume_add - add new volume
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ u64 wwid;
+ u16 handle = le16_to_cpu(element->VolDevHandle);
+ int rc;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc, wwid);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ return;
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
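+	/* while initial discovery is still running, exposing the volume is
+	 * deferred; only the boot device bookkeeping is updated here
+	 */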
+ if (!ioc->wait_for_discovery_to_complete) {
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ _scsih_determine_boot_device(ioc, raid_device, 1);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_sas_volume_delete - delete volume
+ * @ioc: per adapter object
+ * @handle: volume device handle
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_volume_delete(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+{
+ struct _raid_device *raid_device;
+ unsigned long flags;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget = NULL;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device) {
+ if (raid_device->starget) {
+ starget = raid_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ list_del(&raid_device->list);
+ kfree(raid_device);
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (starget)
+ scsi_remove_target(&starget->dev);
+}
+
+/**
+ * _scsih_sas_pd_expose - expose pd component to /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_expose(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ sas_device->volume_handle = 0;
+ sas_device->volume_wwid = 0;
+ clear_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags &=
+ ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* exposing raid component */
+ if (starget)
+ starget_for_each_device(starget, NULL, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_hide - hide pd component from /dev/sdX
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_hide(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ struct scsi_target *starget = NULL;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ u16 volume_handle = 0;
+ u64 volume_wwid = 0;
+
+ mpt3sas_config_get_volume_handle(ioc, handle, &volume_handle);
+ if (volume_handle)
+ mpt3sas_config_get_volume_wwid(ioc, volume_handle,
+ &volume_wwid);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ if (sas_device) {
+ set_bit(handle, ioc->pd_handles);
+ if (sas_device->starget && sas_device->starget->hostdata) {
+ starget = sas_device->starget;
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->flags |=
+ MPT_TARGET_FLAGS_RAID_COMPONENT;
+ sas_device->volume_handle = volume_handle;
+ sas_device->volume_wwid = volume_wwid;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (!sas_device)
+ return;
+
+ /* hiding raid component */
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ if (starget)
+ starget_for_each_device(starget, (void *)1, _scsih_reprobe_lun);
+}
+
+/**
+ * _scsih_sas_pd_delete - delete pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_delete(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+
+ _scsih_device_remove_by_handle(ioc, handle);
+}
+
+/**
+ * _scsih_sas_pd_add - add pd component
+ * @ioc: per adapter object
+ * @element: IR config element data
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_pd_add(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventIrConfigElement_t *element)
+{
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ u16 handle = le16_to_cpu(element->PhysDiskDevHandle);
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ u64 sas_address;
+ u16 parent_handle;
+
+ set_bit(handle, ioc->pd_handles);
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device) {
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ return;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_ir_fastpath(ioc, handle, element->PhysDiskNum);
+ _scsih_add_device(ioc, handle, 0, 1);
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_config_change_event_debug - debug for IR Config Change events
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrConfigChangeList_t *event_data)
+{
+ Mpi2EventIrConfigElement_t *element;
+ u8 element_type;
+ int i;
+ char *reason_str = NULL, *element_str = NULL;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+
+ pr_info(MPT3SAS_FMT "raid config change: (%s), elements(%d)\n",
+ ioc->name, (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ?
+ "foreign" : "native", event_data->NumElements);
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ reason_str = "add";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ reason_str = "remove";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE:
+ reason_str = "no change";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ reason_str = "hide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ reason_str = "unhide";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ reason_str = "volume_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ reason_str = "volume_deleted";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ reason_str = "pd_created";
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ reason_str = "pd_deleted";
+ break;
+ default:
+ reason_str = "unknown reason";
+ break;
+ }
+ element_type = le16_to_cpu(element->ElementFlags) &
+ MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK;
+ switch (element_type) {
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT:
+ element_str = "volume";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_VOLPHYSDISK_ELEMENT:
+ element_str = "phys disk";
+ break;
+ case MPI2_EVENT_IR_CHANGE_EFLAGS_HOTSPARE_ELEMENT:
+ element_str = "hot spare";
+ break;
+ default:
+ element_str = "unknown element";
+ break;
+ }
+ pr_info("\t(%s:%s), vol handle(0x%04x), " \
+ "pd handle(0x%04x), pd num(0x%02x)\n", element_str,
+ reason_str, le16_to_cpu(element->VolDevHandle),
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+}
+#endif
+
+/**
+ * _scsih_sas_ir_config_change_event - handle ir configuration change events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_config_change_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventIrConfigElement_t *element;
+ int i;
+ u8 foreign_config;
+ Mpi2EventDataIrConfigChangeList_t *event_data = fw_event->event_data;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_config_change_event_debug(ioc, event_data);
+
+#endif
+
+ foreign_config = (le32_to_cpu(event_data->Flags) &
+ MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) ? 1 : 0;
+
+ element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0];
+ if (ioc->shost_recovery) {
+
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+ if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_HIDE)
+ _scsih_ir_fastpath(ioc,
+ le16_to_cpu(element->PhysDiskDevHandle),
+ element->PhysDiskNum);
+ }
+ return;
+ }
+ for (i = 0; i < event_data->NumElements; i++, element++) {
+
+ switch (element->ReasonCode) {
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED:
+ case MPI2_EVENT_IR_CHANGE_RC_ADDED:
+ if (!foreign_config)
+ _scsih_sas_volume_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED:
+ case MPI2_EVENT_IR_CHANGE_RC_REMOVED:
+ if (!foreign_config)
+ _scsih_sas_volume_delete(ioc,
+ le16_to_cpu(element->VolDevHandle));
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_CREATED:
+ _scsih_sas_pd_hide(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_PD_DELETED:
+ _scsih_sas_pd_expose(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_HIDE:
+ _scsih_sas_pd_add(ioc, element);
+ break;
+ case MPI2_EVENT_IR_CHANGE_RC_UNHIDE:
+ _scsih_sas_pd_delete(ioc, element);
+ break;
+ }
+ }
+}
+
+/**
+ * _scsih_sas_ir_volume_event - IR volume event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_volume_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u64 wwid;
+ unsigned long flags;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u32 state;
+ int rc;
+ Mpi2EventDataIrVolume_t *event_data = fw_event->event_data;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_VOLUME_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_VOL_STATE_MISSING:
+ case MPI2_RAID_VOL_STATE_FAILED:
+ _scsih_sas_volume_delete(ioc, handle);
+ break;
+
+ case MPI2_RAID_VOL_STATE_ONLINE:
+ case MPI2_RAID_VOL_STATE_DEGRADED:
+ case MPI2_RAID_VOL_STATE_OPTIMAL:
+
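+		/*
+		 * Volume is accessible: nothing to do if it is already
+		 * tracked, otherwise read its WWID and register a new
+		 * raid device with scsi-ml.
+		 */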
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+
+ if (raid_device)
+ break;
+
+ mpt3sas_config_get_volume_wwid(ioc, handle, &wwid);
+ if (!wwid) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device = kzalloc(sizeof(struct _raid_device), GFP_KERNEL);
+ if (!raid_device) {
+ pr_err(MPT3SAS_FMT
+ "failure at %s:%d/%s()!\n", ioc->name,
+ __FILE__, __LINE__, __func__);
+ break;
+ }
+
+ raid_device->id = ioc->sas_id++;
+ raid_device->channel = RAID_CHANNEL;
+ raid_device->handle = handle;
+ raid_device->wwid = wwid;
+ _scsih_raid_device_add(ioc, raid_device);
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ break;
+
+ case MPI2_RAID_VOL_STATE_INITIALIZING:
+ default:
+ break;
+ }
+}
+
+/**
+ * _scsih_sas_ir_physical_disk_event - PD event
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_physical_disk_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ u16 handle, parent_handle;
+ u32 state;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ u32 ioc_status;
+ Mpi2EventDataIrPhysicalDisk_t *event_data = fw_event->event_data;
+ u64 sas_address;
+
+ if (ioc->shost_recovery)
+ return;
+
+ if (event_data->ReasonCode != MPI2_EVENT_IR_PHYSDISK_RC_STATE_CHANGED)
+ return;
+
+ handle = le16_to_cpu(event_data->PhysDiskDevHandle);
+ state = le32_to_cpu(event_data->NewValue);
+
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: handle(0x%04x), old(0x%08x), new(0x%08x)\n",
+ ioc->name, __func__, handle,
+ le32_to_cpu(event_data->PreviousValue), state));
+ switch (state) {
+ case MPI2_RAID_PD_STATE_ONLINE:
+ case MPI2_RAID_PD_STATE_DEGRADED:
+ case MPI2_RAID_PD_STATE_REBUILDING:
+ case MPI2_RAID_PD_STATE_OPTIMAL:
+ case MPI2_RAID_PD_STATE_HOT_SPARE:
+
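+		/*
+		 * Record the handle in the pd_handles bitmap so the drive
+		 * is treated as a RAID member from now on.
+		 */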
+ set_bit(handle, ioc->pd_handles);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (sas_device)
+ return;
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address))
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+
+ _scsih_add_device(ioc, handle, 0, 1);
+
+ break;
+
+ case MPI2_RAID_PD_STATE_OFFLINE:
+ case MPI2_RAID_PD_STATE_NOT_CONFIGURED:
+ case MPI2_RAID_PD_STATE_NOT_COMPATIBLE:
+ default:
+ break;
+ }
+}
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+/**
+ * _scsih_sas_ir_operation_status_event_debug - debug for IR op event
+ * @ioc: per adapter object
+ * @event_data: event data payload
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event_debug(struct MPT3SAS_ADAPTER *ioc,
+ Mpi2EventDataIrOperationStatus_t *event_data)
+{
+ char *reason_str = NULL;
+
+ switch (event_data->RAIDOperation) {
+ case MPI2_EVENT_IR_RAIDOP_RESYNC:
+ reason_str = "resync";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_ONLINE_CAP_EXPANSION:
+ reason_str = "online capacity expansion";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_CONSISTENCY_CHECK:
+ reason_str = "consistency check";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_BACKGROUND_INIT:
+ reason_str = "background init";
+ break;
+ case MPI2_EVENT_IR_RAIDOP_MAKE_DATA_CONSISTENT:
+ reason_str = "make data consistent";
+ break;
+ }
+
+ if (!reason_str)
+ return;
+
+ pr_info(MPT3SAS_FMT "raid operational status: (%s)" \
+ "\thandle(0x%04x), percent complete(%d)\n",
+ ioc->name, reason_str,
+ le16_to_cpu(event_data->VolDevHandle),
+ event_data->PercentComplete);
+}
+#endif
+
+/**
+ * _scsih_sas_ir_operation_status_event - handle RAID operation events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_sas_ir_operation_status_event(struct MPT3SAS_ADAPTER *ioc,
+ struct fw_event_work *fw_event)
+{
+ Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
+ static struct _raid_device *raid_device;
+ unsigned long flags;
+ u16 handle;
+
+#ifdef CONFIG_SCSI_MPT3SAS_LOGGING
+ if (ioc->logging_level & MPT_DEBUG_EVENT_WORK_TASK)
+ _scsih_sas_ir_operation_status_event_debug(ioc,
+ event_data);
+#endif
+
+ /* code added for raid transport support */
+ if (event_data->RAIDOperation == MPI2_EVENT_IR_RAIDOP_RESYNC) {
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ handle = le16_to_cpu(event_data->VolDevHandle);
+ raid_device = _scsih_raid_device_find_by_handle(ioc, handle);
+ if (raid_device)
+ raid_device->percent_complete =
+ event_data->PercentComplete;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_prep_device_scan - initialize parameters prior to device scan
+ * @ioc: per adapter object
+ *
+ * Set the deleted flag prior to device scan. If the device is found during
+ * the scan, then we clear the deleted flag.
+ */
+static void
+_scsih_prep_device_scan(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct MPT3SAS_DEVICE *sas_device_priv_data;
+ struct scsi_device *sdev;
+
+ shost_for_each_device(sdev, ioc->shost) {
+ sas_device_priv_data = sdev->hostdata;
+ if (sas_device_priv_data && sas_device_priv_data->sas_target)
+ sas_device_priv_data->sas_target->deleted = 1;
+ }
+}
+
+/**
+ * _scsih_mark_responding_sas_device - mark a sas_device as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @slot: enclosure slot id
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_sas_device(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 slot, u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data = NULL;
+ struct scsi_target *starget;
+ struct _sas_device *sas_device;
+ unsigned long flags;
+
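+	/*
+	 * Handles can change across a host reset, so match on the
+	 * persistent sas_address + slot pair and refresh the cached
+	 * handle if it moved.
+	 */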
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_for_each_entry(sas_device, &ioc->sas_device_list, list) {
+ if (sas_device->sas_address == sas_address &&
+ sas_device->slot == slot) {
+ sas_device->responding = 1;
+ starget = sas_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->tm_busy = 0;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ if (starget)
+ starget_printk(KERN_INFO, starget,
+ "handle(0x%04x), sas_addr(0x%016llx), "
+ "enclosure logical id(0x%016llx), "
+ "slot(%d)\n", handle,
+ (unsigned long long)sas_device->sas_address,
+ (unsigned long long)
+ sas_device->enclosure_logical_id,
+ sas_device->slot);
+ if (sas_device->handle == handle)
+ goto out;
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ sas_device->handle);
+ sas_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ goto out;
+ }
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_sas_devices - search for responding sas devices
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, they are removed later.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u32 device_info;
+
+ pr_info(MPT3SAS_FMT "search for end-devices: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_device_list))
+ goto out;
+
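+	/*
+	 * 0xFFFF seeds the GET_NEXT_HANDLE iterator; each config page
+	 * reply advances the handle until the firmware returns
+	 * INVALID_PAGE.
+	 */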
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+ if (!(_scsih_is_end_device(device_info)))
+ continue;
+ _scsih_mark_responding_sas_device(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress),
+ le16_to_cpu(sas_device_pg0.Slot), handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for end-devices: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_raid_device - mark a raid_device as responding
+ * @ioc: per adapter object
+ * @wwid: world wide identifier for raid volume
+ * @handle: device handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_raid_device(struct MPT3SAS_ADAPTER *ioc, u64 wwid,
+ u16 handle)
+{
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct scsi_target *starget;
+ struct _raid_device *raid_device;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ list_for_each_entry(raid_device, &ioc->raid_device_list, list) {
+ if (raid_device->wwid == wwid && raid_device->starget) {
+ starget = raid_device->starget;
+ if (starget && starget->hostdata) {
+ sas_target_priv_data = starget->hostdata;
+ sas_target_priv_data->deleted = 0;
+ } else
+ sas_target_priv_data = NULL;
+ raid_device->responding = 1;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ starget_printk(KERN_INFO, raid_device->starget,
+ "handle(0x%04x), wwid(0x%016llx)\n", handle,
+ (unsigned long long)raid_device->wwid);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ if (raid_device->handle == handle) {
+ spin_unlock_irqrestore(&ioc->raid_device_lock,
+ flags);
+ return;
+ }
+ pr_info("\thandle changed from(0x%04x)!!!\n",
+ raid_device->handle);
+ raid_device->handle = handle;
+ if (sas_target_priv_data)
+ sas_target_priv_data->handle = handle;
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_raid_devices - search for responding raid volumes
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, they are removed later.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_raid_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 handle;
+ u8 phys_disk_num;
+
+ if (!ioc->ir_firmware)
+ return;
+
+ pr_info(MPT3SAS_FMT "search for raid volumes: start\n",
+ ioc->name);
+
+ if (list_empty(&ioc->raid_device_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED)
+ _scsih_mark_responding_raid_device(ioc,
+ le64_to_cpu(volume_pg1.WWID), handle);
+ }
+
+ /* refresh the pd_handles */
+ phys_disk_num = 0xFF;
+ memset(ioc->pd_handles, 0, ioc->pd_handles_sz);
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ set_bit(handle, ioc->pd_handles);
+ }
+ out:
+ pr_info(MPT3SAS_FMT "search for responding raid volumes: complete\n",
+ ioc->name);
+}
+
+/**
+ * _scsih_mark_responding_expander - mark an expander as responding
+ * @ioc: per adapter object
+ * @sas_address: sas address
+ * @handle: expander handle
+ *
+ * After host reset, find out whether devices are still responding.
+ * Used in _scsih_remove_unresponding_sas_devices.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_mark_responding_expander(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u16 handle)
+{
+ struct _sas_node *sas_expander;
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
+ if (sas_expander->sas_address != sas_address)
+ continue;
+ sas_expander->responding = 1;
+ if (sas_expander->handle == handle)
+ goto out;
+ pr_info("\texpander(0x%016llx): handle changed" \
+ " from(0x%04x) to (0x%04x)!!!\n",
+ (unsigned long long)sas_expander->sas_address,
+ sas_expander->handle, handle);
+ sas_expander->handle = handle;
+ for (i = 0 ; i < sas_expander->num_phys ; i++)
+ sas_expander->phy[i].handle = handle;
+ goto out;
+ }
+ out:
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+}
+
+/**
+ * _scsih_search_responding_expanders - search for responding expanders
+ * @ioc: per adapter object
+ *
+ * After host reset, find out whether devices are still responding.
+ * If not, they are removed later.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_search_responding_expanders(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u64 sas_address;
+ u16 handle;
+
+ pr_info(MPT3SAS_FMT "search for expanders: start\n", ioc->name);
+
+ if (list_empty(&ioc->sas_expander_list))
+ goto out;
+
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ sas_address = le64_to_cpu(expander_pg0.SASAddress);
+ pr_info("\texpander present: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle,
+ (unsigned long long)sas_address);
+ _scsih_mark_responding_expander(ioc, sas_address, handle);
+ }
+
+ out:
+ pr_info(MPT3SAS_FMT "search for expanders: complete\n", ioc->name);
+}
+
+/**
+ * _scsih_remove_unresponding_sas_devices - remove unresponsive devices
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_remove_unresponding_sas_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *sas_device_next;
+ struct _sas_node *sas_expander, *sas_expander_next;
+ struct _raid_device *raid_device, *raid_device_next;
+ struct list_head tmp_list;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: start\n",
+ ioc->name);
+
+ /* removing unresponding end devices */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: end-devices\n",
+ ioc->name);
+ list_for_each_entry_safe(sas_device, sas_device_next,
+ &ioc->sas_device_list, list) {
+ if (!sas_device->responding)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ sas_device->sas_address);
+ else
+ sas_device->responding = 0;
+ }
+
+ /* removing unresponding volumes */
+ if (ioc->ir_firmware) {
+ pr_info(MPT3SAS_FMT "removing unresponding devices: volumes\n",
+ ioc->name);
+ list_for_each_entry_safe(raid_device, raid_device_next,
+ &ioc->raid_device_list, list) {
+ if (!raid_device->responding)
+ _scsih_sas_volume_delete(ioc,
+ raid_device->handle);
+ else
+ raid_device->responding = 0;
+ }
+ }
+
+ /* removing unresponding expanders */
+ pr_info(MPT3SAS_FMT "removing unresponding devices: expanders\n",
+ ioc->name);
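+	/*
+	 * Move stale expanders to a private list under the lock, then
+	 * tear them down after dropping it, since removal can sleep.
+	 */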
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ INIT_LIST_HEAD(&tmp_list);
+ list_for_each_entry_safe(sas_expander, sas_expander_next,
+ &ioc->sas_expander_list, list) {
+ if (!sas_expander->responding)
+ list_move_tail(&sas_expander->list, &tmp_list);
+ else
+ sas_expander->responding = 0;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ list_for_each_entry_safe(sas_expander, sas_expander_next, &tmp_list,
+ list) {
+ list_del(&sas_expander->list);
+ _scsih_expander_node_remove(ioc, sas_expander);
+ }
+
+ pr_info(MPT3SAS_FMT "removing unresponding devices: complete\n",
+ ioc->name);
+
+ /* unblock devices */
+ _scsih_ublock_io_all_device(ioc);
+}
+
+static void
+_scsih_refresh_expander_links(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander, u16 handle)
+{
+ Mpi2ExpanderPage1_t expander_pg1;
+ Mpi2ConfigReply_t mpi_reply;
+ int i;
+
+ for (i = 0 ; i < sas_expander->num_phys ; i++) {
+ if ((mpt3sas_config_get_expander_pg1(ioc, &mpi_reply,
+ &expander_pg1, i, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return;
+ }
+
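+		/*
+		 * The upper nibble of NegotiatedLinkRate carries the
+		 * logical link rate.
+		 */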
+ mpt3sas_transport_update_links(ioc, sas_expander->sas_address,
+ le16_to_cpu(expander_pg1.AttachedDevHandle), i,
+ expander_pg1.NegotiatedLinkRate >> 4);
+ }
+}
+
+/**
+ * _scsih_scan_for_devices_after_reset - scan for devices after host reset
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_scsih_scan_for_devices_after_reset(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2ExpanderPage0_t expander_pg0;
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2RaidVolPage1_t volume_pg1;
+ Mpi2RaidVolPage0_t volume_pg0;
+ Mpi2RaidPhysDiskPage0_t pd_pg0;
+ Mpi2EventIrConfigElement_t element;
+ Mpi2ConfigReply_t mpi_reply;
+ u8 phys_disk_num;
+ u16 ioc_status;
+ u16 handle, parent_handle;
+ u64 sas_address;
+ struct _sas_device *sas_device;
+ struct _sas_node *expander_device;
+ static struct _raid_device *raid_device;
+ u8 retry_count;
+ unsigned long flags;
+
+ pr_info(MPT3SAS_FMT "scan devices: start\n", ioc->name);
+
+ _scsih_sas_host_refresh(ioc);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders start\n", ioc->name);
+
+ /* expanders */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
+ MPI2_SAS_EXPAND_PGAD_FORM_GET_NEXT_HNDL, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from expander scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(expander_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ expander_device = mpt3sas_scsih_expander_find_by_sas_address(
+ ioc, le64_to_cpu(expander_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (expander_device)
+ _scsih_refresh_expander_links(ioc, expander_device,
+ handle);
+ else {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ _scsih_expander_add(ioc, handle);
+ pr_info(MPT3SAS_FMT "\tAFTER adding expander: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(expander_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: expanders complete\n",
+ ioc->name);
+
+ if (!ioc->ir_firmware)
+ goto skip_to_sas;
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk start\n", ioc->name);
+
+ /* phys disk */
+ phys_disk_num = 0xFF;
+ while (!(mpt3sas_config_get_phys_disk_pg0(ioc, &mpi_reply,
+ &pd_pg0, MPI2_PHYSDISK_PGAD_FORM_GET_NEXT_PHYSDISKNUM,
+ phys_disk_num))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan: "\
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ phys_disk_num = pd_pg0.PhysDiskNum;
+ handle = le16_to_cpu(pd_pg0.DevHandle);
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ if (mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE,
+ handle) != 0)
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from phys disk scan " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle,
+ &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address,
+ handle, sas_device_pg0.PhyNum,
+ MPI2_SAS_NEG_LINK_RATE_1_5);
+ set_bit(handle, ioc->pd_handles);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 1)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding phys disk: " \
+ " handle (0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name, handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: phys disk complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes start\n", ioc->name);
+
+ /* volumes */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_raid_volume_pg1(ioc, &mpi_reply,
+ &volume_pg1, MPI2_RAID_VOLUME_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(volume_pg1.DevHandle);
+ spin_lock_irqsave(&ioc->raid_device_lock, flags);
+ raid_device = _scsih_raid_device_find_by_wwid(ioc,
+ le64_to_cpu(volume_pg1.WWID));
+ spin_unlock_irqrestore(&ioc->raid_device_lock, flags);
+ if (raid_device)
+ continue;
+ if (mpt3sas_config_get_raid_volume_pg0(ioc, &mpi_reply,
+ &volume_pg0, MPI2_RAID_VOLUME_PGAD_FORM_HANDLE, handle,
+ sizeof(Mpi2RaidVolPage0_t)))
+ continue;
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from volume scan: " \
+ "ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ if (volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_OPTIMAL ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_ONLINE ||
+ volume_pg0.VolumeState == MPI2_RAID_VOL_STATE_DEGRADED) {
+ memset(&element, 0, sizeof(Mpi2EventIrConfigElement_t));
+ element.ReasonCode = MPI2_EVENT_IR_CHANGE_RC_ADDED;
+ element.VolDevHandle = volume_pg1.DevHandle;
+ pr_info(MPT3SAS_FMT
+ "\tBEFORE adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ _scsih_sas_volume_add(ioc, &element);
+ pr_info(MPT3SAS_FMT
+ "\tAFTER adding volume: handle (0x%04x)\n",
+ ioc->name, volume_pg1.DevHandle);
+ }
+ }
+
+ pr_info(MPT3SAS_FMT "\tscan devices: volumes complete\n",
+ ioc->name);
+
+ skip_to_sas:
+
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices start\n",
+ ioc->name);
+
+ /* sas devices */
+ handle = 0xFFFF;
+ while (!(mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply,
+ &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE,
+ handle))) {
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
+ break;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_info(MPT3SAS_FMT "\tbreak from end device scan:"\
+ " ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, ioc_status,
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+ break;
+ }
+ handle = le16_to_cpu(sas_device_pg0.DevHandle);
+ if (!(_scsih_is_end_device(
+ le32_to_cpu(sas_device_pg0.DeviceInfo))))
+ continue;
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ if (sas_device)
+ continue;
+ parent_handle = le16_to_cpu(sas_device_pg0.ParentDevHandle);
+ if (!_scsih_get_sas_address(ioc, parent_handle, &sas_address)) {
+ pr_info(MPT3SAS_FMT "\tBEFORE adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ mpt3sas_transport_update_links(ioc, sas_address, handle,
+ sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
+ retry_count = 0;
+ /* This will retry adding the end device.
+ * _scsih_add_device() will decide on retries and
+ * return "1" when it should be retried
+ */
+ while (_scsih_add_device(ioc, handle, retry_count++,
+ 0)) {
+ ssleep(1);
+ }
+ pr_info(MPT3SAS_FMT "\tAFTER adding end device: " \
+ "handle (0x%04x), sas_addr(0x%016llx)\n", ioc->name,
+ handle, (unsigned long long)
+ le64_to_cpu(sas_device_pg0.SASAddress));
+ }
+ }
+ pr_info(MPT3SAS_FMT "\tscan devices: end devices complete\n",
+ ioc->name);
+
+ pr_info(MPT3SAS_FMT "scan devices: complete\n", ioc->name);
+}
+
+/**
+ * mpt3sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT3_IOC_PRE_RESET, MPT3_IOC_AFTER_RESET,
+ * MPT3_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_scsih_reset_handler(struct MPT3SAS_ADAPTER *ioc, int reset_phase)
+{
+ switch (reset_phase) {
+ case MPT3_IOC_PRE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_PRE_RESET\n", ioc->name, __func__));
+ break;
+ case MPT3_IOC_AFTER_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_AFTER_RESET\n", ioc->name, __func__));
+ if (ioc->scsih_cmds.status & MPT3_CMD_PENDING) {
+ ioc->scsih_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->scsih_cmds.smid);
+ complete(&ioc->scsih_cmds.done);
+ }
+ if (ioc->tm_cmds.status & MPT3_CMD_PENDING) {
+ ioc->tm_cmds.status |= MPT3_CMD_RESET;
+ mpt3sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+ complete(&ioc->tm_cmds.done);
+ }
+
+ _scsih_fw_event_cleanup_queue(ioc);
+ _scsih_flush_running_cmds(ioc);
+ break;
+ case MPT3_IOC_DONE_RESET:
+ dtmprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: MPT3_IOC_DONE_RESET\n", ioc->name, __func__));
+ if ((!ioc->is_driver_loading) && !(disable_discovery > 0 &&
+ !ioc->sas_hba.num_phys)) {
+ _scsih_prep_device_scan(ioc);
+ _scsih_search_responding_sas_devices(ioc);
+ _scsih_search_responding_raid_devices(ioc);
+ _scsih_search_responding_expanders(ioc);
+ _scsih_error_recovery_delete_devices(ioc);
+ }
+ break;
+ }
+}
+
+/**
+ * _mpt3sas_fw_work - delayed task for processing firmware events
+ * @ioc: per adapter object
+ * @fw_event: The fw_event_work object
+ * Context: user.
+ *
+ * Return nothing.
+ */
+static void
+_mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+ /* the queue is being flushed so ignore this event */
+ if (ioc->remove_host || fw_event->cancel_pending_work ||
+ ioc->pci_error_recovery) {
+ _scsih_fw_event_free(ioc, fw_event);
+ return;
+ }
+
+ switch (fw_event->event) {
+ case MPT3SAS_PROCESS_TRIGGER_DIAG:
+ mpt3sas_process_trigger_data(ioc, fw_event->event_data);
+ break;
+ case MPT3SAS_REMOVE_UNRESPONDING_DEVICES:
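+		/*
+		 * Wait for any in-flight host recovery to finish; the
+		 * rescan below relies on a quiesced IOC.
+		 */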
+ while (scsi_host_in_recovery(ioc->shost) || ioc->shost_recovery)
+ ssleep(1);
+ _scsih_remove_unresponding_sas_devices(ioc);
+ _scsih_scan_for_devices_after_reset(ioc);
+ break;
+ case MPT3SAS_PORT_ENABLE_COMPLETE:
+ ioc->start_scan = 0;
+ if (missing_delay[0] != -1 && missing_delay[1] != -1)
+ mpt3sas_base_update_missing_delay(ioc, missing_delay[0],
+ missing_delay[1]);
+ dewtprintk(ioc, pr_info(MPT3SAS_FMT
+ "port enable: complete from worker thread\n",
+ ioc->name));
+ break;
+ case MPT3SAS_TURN_ON_FAULT_LED:
+ _scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+ break;
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_sas_topology_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ _scsih_sas_device_status_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_DISCOVERY:
+ _scsih_sas_discovery_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ _scsih_sas_broadcast_primitive_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ _scsih_sas_enclosure_dev_status_change_event(ioc,
+ fw_event);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_sas_ir_config_change_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_sas_ir_volume_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ _scsih_sas_ir_physical_disk_event(ioc, fw_event);
+ break;
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ _scsih_sas_ir_operation_status_event(ioc, fw_event);
+ break;
+ }
+ _scsih_fw_event_free(ioc, fw_event);
+}
+
+/**
+ * _firmware_event_work - delayed task for processing firmware events
+ * @work: The fw_event_work object
+ * Context: user.
+ *
+ * wrapper for the work thread handling firmware events
+ *
+ * Return nothing.
+ */
+static void
+_firmware_event_work(struct work_struct *work)
+{
+ struct fw_event_work *fw_event = container_of(work,
+ struct fw_event_work, work);
+
+ _mpt3sas_fw_work(fw_event->ioc, fw_event);
+}
+
+/**
+ * mpt3sas_scsih_event_callback - firmware event handler (called at ISR time)
+ * @ioc: per adapter object
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ * Context: interrupt.
+ *
+ * This function merely adds a new work task into ioc->firmware_event_thread.
+ * The tasks are worked from _firmware_event_work in user context.
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_scsih_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index,
+ u32 reply)
+{
+ struct fw_event_work *fw_event;
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 event;
+ u16 sz;
+
+ /* events turned off due to host reset or driver unloading */
+ if (ioc->remove_host || ioc->pci_error_recovery)
+ return 1;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+
+ if (unlikely(!mpi_reply)) {
+ pr_err(MPT3SAS_FMT "mpi_reply not valid at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+
+ event = le16_to_cpu(mpi_reply->Event);
+
+ if (event != MPI2_EVENT_LOG_ENTRY_ADDED)
+ mpt3sas_trigger_event(ioc, event, 0);
+
+ switch (event) {
+ /* handle these */
+ case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
+ {
+ Mpi2EventDataSasBroadcastPrimitive_t *baen_data =
+ (Mpi2EventDataSasBroadcastPrimitive_t *)
+ mpi_reply->EventData;
+
+ if (baen_data->Primitive !=
+ MPI2_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+ return 1;
+
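+		/*
+		 * Coalesce broadcast AENs: while one is being processed,
+		 * just count the new arrival and drop this frame.
+		 */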
+ if (ioc->broadcast_aen_busy) {
+ ioc->broadcast_aen_pending++;
+ return 1;
+ } else
+ ioc->broadcast_aen_busy = 1;
+ break;
+ }
+
+ case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
+ _scsih_check_topo_delete_events(ioc,
+ (Mpi2EventDataSasTopologyChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
+ _scsih_check_ir_config_unhide_events(ioc,
+ (Mpi2EventDataIrConfigChangeList_t *)
+ mpi_reply->EventData);
+ break;
+ case MPI2_EVENT_IR_VOLUME:
+ _scsih_check_volume_delete_events(ioc,
+ (Mpi2EventDataIrVolume_t *)
+ mpi_reply->EventData);
+ break;
+
+ case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_OPERATION_STATUS:
+ case MPI2_EVENT_SAS_DISCOVERY:
+ case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
+ case MPI2_EVENT_IR_PHYSICAL_DISK:
+ break;
+
+ default: /* ignore the rest */
+ return 1;
+ }
+
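+	/* ISR context, so the fw_event allocations must be atomic */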
+ fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
+ if (!fw_event) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return 1;
+ }
+ sz = le16_to_cpu(mpi_reply->EventDataLength) * 4;
+ fw_event->event_data = kzalloc(sz, GFP_ATOMIC);
+ if (!fw_event->event_data) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ kfree(fw_event);
+ return 1;
+ }
+
+ memcpy(fw_event->event_data, mpi_reply->EventData, sz);
+ fw_event->ioc = ioc;
+ fw_event->VF_ID = mpi_reply->VF_ID;
+ fw_event->VP_ID = mpi_reply->VP_ID;
+ fw_event->event = event;
+ _scsih_fw_event_add(ioc, fw_event);
+ return 1;
+}
+
+/* shost template */
+static struct scsi_host_template scsih_driver_template = {
+ .module = THIS_MODULE,
+ .name = "Fusion MPT SAS Host",
+ .proc_name = MPT3SAS_DRIVER_NAME,
+ .queuecommand = _scsih_qcmd,
+ .target_alloc = _scsih_target_alloc,
+ .slave_alloc = _scsih_slave_alloc,
+ .slave_configure = _scsih_slave_configure,
+ .target_destroy = _scsih_target_destroy,
+ .slave_destroy = _scsih_slave_destroy,
+ .scan_finished = _scsih_scan_finished,
+ .scan_start = _scsih_scan_start,
+ .change_queue_depth = _scsih_change_queue_depth,
+ .change_queue_type = _scsih_change_queue_type,
+ .eh_abort_handler = _scsih_abort,
+ .eh_device_reset_handler = _scsih_dev_reset,
+ .eh_target_reset_handler = _scsih_target_reset,
+ .eh_host_reset_handler = _scsih_host_reset,
+ .bios_param = _scsih_bios_param,
+ .can_queue = 1,
+ .this_id = -1,
+ .sg_tablesize = MPT3SAS_SG_DEPTH,
+ .max_sectors = 32767,
+ .cmd_per_lun = 7,
+ .use_clustering = ENABLE_CLUSTERING,
+ .shost_attrs = mpt3sas_host_attrs,
+ .sdev_attrs = mpt3sas_dev_attrs,
+};
+
+/**
+ * _scsih_expander_node_remove - removing expander device from list.
+ * @ioc: per adapter object
+ * @sas_expander: the sas_device object
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_expander_list.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_expander_node_remove(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_expander)
+{
+ struct _sas_port *mpt3sas_port, *next;
+
+ /* remove sibling ports attached to this expander */
+ list_for_each_entry_safe(mpt3sas_port, next,
+ &sas_expander->sas_port_list, port_list) {
+ if (ioc->shost_recovery)
+ return;
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ mpt3sas_transport_port_remove(ioc, sas_expander->sas_address,
+ sas_expander->sas_address_parent);
+
+ pr_info(MPT3SAS_FMT
+ "expander_remove: handle(0x%04x), sas_addr(0x%016llx)\n",
+ ioc->name,
+ sas_expander->handle, (unsigned long long)
+ sas_expander->sas_address);
+
+ kfree(sas_expander->phy);
+ kfree(sas_expander);
+}
+
+/**
+ * _scsih_ir_shutdown - IR shutdown notification
+ * @ioc: per adapter object
+ *
+ * Sending RAID Action to alert the Integrated RAID subsystem of the IOC that
+ * the host system is shutting down.
+ *
+ * Return nothing.
+ */
+static void
+_scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
+{
+ Mpi2RaidActionRequest_t *mpi_request;
+ Mpi2RaidActionReply_t *mpi_reply;
+ u16 smid;
+
+ /* is IR firmware build loaded ? */
+ if (!ioc->ir_firmware)
+ return;
+
+ /* are there any volumes ? */
+ if (list_empty(&ioc->raid_device_list))
+ return;
+
+ mutex_lock(&ioc->scsih_cmds.mutex);
+
+ if (ioc->scsih_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: scsih_cmd in use\n",
+ ioc->name, __func__);
+ goto out;
+ }
+ ioc->scsih_cmds.status = MPT3_CMD_PENDING;
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->scsih_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->scsih_cmds.smid = smid;
+ memset(mpi_request, 0, sizeof(Mpi2RaidActionRequest_t));
+
+ mpi_request->Function = MPI2_FUNCTION_RAID_ACTION;
+ mpi_request->Action = MPI2_RAID_ACTION_SYSTEM_SHUTDOWN_INITIATED;
+
+ pr_info(MPT3SAS_FMT "IR shutdown (sending)\n", ioc->name);
+ init_completion(&ioc->scsih_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ wait_for_completion_timeout(&ioc->scsih_cmds.done, 10*HZ);
+
+ if (!(ioc->scsih_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ goto out;
+ }
+
+ if (ioc->scsih_cmds.status & MPT3_CMD_REPLY_VALID) {
+ mpi_reply = ioc->scsih_cmds.reply;
+ pr_info(MPT3SAS_FMT
+ "IR shutdown (complete): ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, le16_to_cpu(mpi_reply->IOCStatus),
+ le32_to_cpu(mpi_reply->IOCLogInfo));
+ }
+
+ out:
+ ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->scsih_cmds.mutex);
+}
+
+/**
+ * _scsih_remove - detach and remove the scsi host
+ * @pdev: PCI device struct
+ *
+ * Routine called when unloading the driver.
+ * Return nothing.
+ */
+static void _scsih_remove(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct _sas_port *mpt3sas_port, *next_port;
+ struct _raid_device *raid_device, *next;
+ struct MPT3SAS_TARGET *sas_target_priv_data;
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ /* release all the volumes */
+ _scsih_ir_shutdown(ioc);
+ list_for_each_entry_safe(raid_device, next, &ioc->raid_device_list,
+ list) {
+ if (raid_device->starget) {
+ sas_target_priv_data =
+ raid_device->starget->hostdata;
+ sas_target_priv_data->deleted = 1;
+ scsi_remove_target(&raid_device->starget->dev);
+ }
+ pr_info(MPT3SAS_FMT "removing handle(0x%04x), wwid(0x%016llx)\n",
+ ioc->name, raid_device->handle,
+ (unsigned long long) raid_device->wwid);
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+
+ /* free ports attached to the sas_host */
+ list_for_each_entry_safe(mpt3sas_port, next_port,
+ &ioc->sas_hba.sas_port_list, port_list) {
+ if (mpt3sas_port->remote_identify.device_type ==
+ SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ else if (mpt3sas_port->remote_identify.device_type ==
+ SAS_EDGE_EXPANDER_DEVICE ||
+ mpt3sas_port->remote_identify.device_type ==
+ SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc,
+ mpt3sas_port->remote_identify.sas_address);
+ }
+
+ /* free phys attached to the sas_host */
+ if (ioc->sas_hba.num_phys) {
+ kfree(ioc->sas_hba.phy);
+ ioc->sas_hba.phy = NULL;
+ ioc->sas_hba.num_phys = 0;
+ }
+
+ sas_remove_host(shost);
+ mpt3sas_base_detach(ioc);
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ scsi_host_put(shost);
+}
+
+/**
+ * _scsih_shutdown - routine called during system shutdown
+ * @pdev: PCI device struct
+ *
+ * Return nothing.
+ */
+static void
+_scsih_shutdown(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ struct workqueue_struct *wq;
+ unsigned long flags;
+
+ ioc->remove_host = 1;
+ _scsih_fw_event_cleanup_queue(ioc);
+
+ spin_lock_irqsave(&ioc->fw_event_lock, flags);
+ wq = ioc->firmware_event_thread;
+ ioc->firmware_event_thread = NULL;
+ spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+ if (wq)
+ destroy_workqueue(wq);
+
+ _scsih_ir_shutdown(ioc);
+ mpt3sas_base_detach(ioc);
+}
+
+/**
+ * _scsih_probe_boot_devices - reports 1st device
+ * @ioc: per adapter object
+ *
+ * If specified in bios page 2, this routine reports the 1st device
+ * to scsi-ml or the sas transport for persistent boot device
+ * purposes. Please refer to function _scsih_determine_boot_device()
+ */
+static void
+_scsih_probe_boot_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u8 is_raid;
+ void *device;
+ struct _sas_device *sas_device;
+ struct _raid_device *raid_device;
+ u16 handle;
+ u64 sas_address_parent;
+ u64 sas_address;
+ unsigned long flags;
+ int rc;
+
+ /* no Bios, return immediately */
+ if (!ioc->bios_pg3.BiosVersion)
+ return;
+
+ device = NULL;
+ is_raid = 0;
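+	/*
+	 * Boot device priority: requested, then requested alternate,
+	 * then current.
+	 */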
+ if (ioc->req_boot_device.device) {
+ device = ioc->req_boot_device.device;
+ is_raid = ioc->req_boot_device.is_raid;
+ } else if (ioc->req_alt_boot_device.device) {
+ device = ioc->req_alt_boot_device.device;
+ is_raid = ioc->req_alt_boot_device.is_raid;
+ } else if (ioc->current_boot_device.device) {
+ device = ioc->current_boot_device.device;
+ is_raid = ioc->current_boot_device.is_raid;
+ }
+
+ if (!device)
+ return;
+
+ if (is_raid) {
+ raid_device = device;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ } else {
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = device;
+ handle = sas_device->handle;
+ sas_address_parent = sas_device->sas_address_parent;
+ sas_address = sas_device->sas_address;
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+
+ if (!mpt3sas_transport_port_add(ioc, handle,
+ sas_address_parent)) {
+ _scsih_sas_device_remove(ioc, sas_device);
+ } else if (!sas_device->starget) {
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc, sas_address,
+ sas_address_parent);
+ _scsih_sas_device_remove(ioc, sas_device);
+ }
+ }
+}
+
+/**
+ * _scsih_probe_raid - reporting raid volumes to scsi-ml
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_raid(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _raid_device *raid_device, *raid_next;
+ int rc;
+
+ list_for_each_entry_safe(raid_device, raid_next,
+ &ioc->raid_device_list, list) {
+ if (raid_device->starget)
+ continue;
+ rc = scsi_add_device(ioc->shost, RAID_CHANNEL,
+ raid_device->id, 0);
+ if (rc)
+ _scsih_raid_device_remove(ioc, raid_device);
+ }
+}
+
+/**
+ * _scsih_probe_sas - reporting sas devices to sas transport
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_sas(struct MPT3SAS_ADAPTER *ioc)
+{
+ struct _sas_device *sas_device, *next;
+ unsigned long flags;
+
+ /* SAS Device List */
+ list_for_each_entry_safe(sas_device, next, &ioc->sas_device_init_list,
+ list) {
+
+ if (!mpt3sas_transport_port_add(ioc, sas_device->handle,
+ sas_device->sas_address_parent)) {
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ } else if (!sas_device->starget) {
+ /*
+ * When async scanning is enabled, it's not possible to
+ * remove devices while scanning is turned on due to an
+ * oops in scsi_sysfs_add_sdev()->add_device()->
+ * sysfs_addrm_start()
+ */
+ if (!ioc->is_driver_loading)
+ mpt3sas_transport_port_remove(ioc,
+ sas_device->sas_address,
+ sas_device->sas_address_parent);
+ list_del(&sas_device->list);
+ kfree(sas_device);
+ continue;
+ }
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ list_move_tail(&sas_device->list, &ioc->sas_device_list);
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ }
+}
+
+/**
+ * _scsih_probe_devices - probing for devices
+ * @ioc: per adapter object
+ *
+ * Called during initial loading of the driver.
+ */
+static void
+_scsih_probe_devices(struct MPT3SAS_ADAPTER *ioc)
+{
+ u16 volume_mapping_flags;
+
+ if (!(ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR))
+ return; /* return when IOC doesn't support initiator mode */
+
+ _scsih_probe_boot_devices(ioc);
+
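+	/*
+	 * Probe order follows the volume mapping mode: volumes first
+	 * in low-volume mapping, sas devices first otherwise.
+	 */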
+ if (ioc->ir_firmware) {
+ volume_mapping_flags =
+ le16_to_cpu(ioc->ioc_pg8.IRVolumeMappingFlags) &
+ MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
+ if (volume_mapping_flags ==
+ MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) {
+ _scsih_probe_raid(ioc);
+ _scsih_probe_sas(ioc);
+ } else {
+ _scsih_probe_sas(ioc);
+ _scsih_probe_raid(ioc);
+ }
+ } else
+ _scsih_probe_sas(ioc);
+}
+
+/**
+ * _scsih_scan_start - scsi lld callback for .scan_start
+ * @shost: SCSI host pointer
+ *
+ * The shost has the ability to discover targets on its own instead
+ * of scanning the entire bus. In our implementation, we will kick off
+ * firmware discovery.
+ */
+static void
+_scsih_scan_start(struct Scsi_Host *shost)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	int rc;
+
+	if (diag_buffer_enable != -1 && diag_buffer_enable != 0)
+ mpt3sas_enable_diag_buffer(ioc, diag_buffer_enable);
+
+ if (disable_discovery > 0)
+ return;
+
+ ioc->start_scan = 1;
+ rc = mpt3sas_port_enable(ioc);
+
+ if (rc != 0)
+ pr_info(MPT3SAS_FMT "port enable: FAILED\n", ioc->name);
+}
+
+/**
+ * _scsih_scan_finished - scsi lld callback for .scan_finished
+ * @shost: SCSI host pointer
+ * @time: elapsed time of the scan in jiffies
+ *
+ * This function will be called periodically until it returns 1 with the
+ * scsi_host and the elapsed time of the scan in jiffies. In our implementation,
+ * we wait for firmware discovery to complete, then return 1.
+ */
+static int
+_scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ if (disable_discovery > 0) {
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ return 1;
+ }
+
+ if (time >= (300 * HZ)) {
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with timeout (timeout=300s)\n",
+ ioc->name);
+ ioc->is_driver_loading = 0;
+ return 1;
+ }
+
+ if (ioc->start_scan)
+ return 0;
+
+ if (ioc->start_scan_failed) {
+ pr_info(MPT3SAS_FMT
+ "port enable: FAILED with (ioc_status=0x%08x)\n",
+ ioc->name, ioc->start_scan_failed);
+ ioc->is_driver_loading = 0;
+ ioc->wait_for_discovery_to_complete = 0;
+ ioc->remove_host = 1;
+ return 1;
+ }
+
+ pr_info(MPT3SAS_FMT "port enable: SUCCESS\n", ioc->name);
+ ioc->base_cmds.status = MPT3_CMD_NOT_USED;
+
+ if (ioc->wait_for_discovery_to_complete) {
+ ioc->wait_for_discovery_to_complete = 0;
+ _scsih_probe_devices(ioc);
+ }
+ mpt3sas_base_start_watchdog(ioc);
+ ioc->is_driver_loading = 0;
+ return 1;
+}
+
+/**
+ * _scsih_probe - attach and add scsi host
+ * @pdev: PCI device struct
+ * @id: pci device id
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct MPT3SAS_ADAPTER *ioc;
+ struct Scsi_Host *shost;
+
+ shost = scsi_host_alloc(&scsih_driver_template,
+ sizeof(struct MPT3SAS_ADAPTER));
+ if (!shost)
+ return -ENODEV;
+
+ /* init local params */
+ ioc = shost_priv(shost);
+ memset(ioc, 0, sizeof(struct MPT3SAS_ADAPTER));
+ INIT_LIST_HEAD(&ioc->list);
+ list_add_tail(&ioc->list, &mpt3sas_ioc_list);
+ ioc->shost = shost;
+ ioc->id = mpt_ids++;
+ sprintf(ioc->name, "%s%d", MPT3SAS_DRIVER_NAME, ioc->id);
+ ioc->pdev = pdev;
+ ioc->scsi_io_cb_idx = scsi_io_cb_idx;
+ ioc->tm_cb_idx = tm_cb_idx;
+ ioc->ctl_cb_idx = ctl_cb_idx;
+ ioc->base_cb_idx = base_cb_idx;
+ ioc->port_enable_cb_idx = port_enable_cb_idx;
+ ioc->transport_cb_idx = transport_cb_idx;
+ ioc->scsih_cb_idx = scsih_cb_idx;
+ ioc->config_cb_idx = config_cb_idx;
+ ioc->tm_tr_cb_idx = tm_tr_cb_idx;
+ ioc->tm_tr_volume_cb_idx = tm_tr_volume_cb_idx;
+ ioc->tm_sas_control_cb_idx = tm_sas_control_cb_idx;
+ ioc->logging_level = logging_level;
+ ioc->schedule_dead_ioc_flush_running_cmds = &_scsih_flush_running_cmds;
+ /* misc semaphores and spin locks */
+ mutex_init(&ioc->reset_in_progress_mutex);
+ spin_lock_init(&ioc->ioc_reset_in_progress_lock);
+ spin_lock_init(&ioc->scsi_lookup_lock);
+ spin_lock_init(&ioc->sas_device_lock);
+ spin_lock_init(&ioc->sas_node_lock);
+ spin_lock_init(&ioc->fw_event_lock);
+ spin_lock_init(&ioc->raid_device_lock);
+ spin_lock_init(&ioc->diag_trigger_lock);
+
+ INIT_LIST_HEAD(&ioc->sas_device_list);
+ INIT_LIST_HEAD(&ioc->sas_device_init_list);
+ INIT_LIST_HEAD(&ioc->sas_expander_list);
+ INIT_LIST_HEAD(&ioc->fw_event_list);
+ INIT_LIST_HEAD(&ioc->raid_device_list);
+ INIT_LIST_HEAD(&ioc->sas_hba.sas_port_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_list);
+ INIT_LIST_HEAD(&ioc->delayed_tr_volume_list);
+
+ /* init shost parameters */
+ shost->max_cmd_len = 32;
+ shost->max_lun = max_lun;
+ shost->transportt = mpt3sas_transport_template;
+ shost->unique_id = ioc->id;
+
+ if (max_sectors != 0xFFFF) {
+ if (max_sectors < 64) {
+ shost->max_sectors = 64;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "value of 64.\n", ioc->name, max_sectors);
+ } else if (max_sectors > 32767) {
+ shost->max_sectors = 32767;
+ pr_warn(MPT3SAS_FMT "Invalid value %d passed " \
+ "for max_sectors, range is 64 to 32767. Assigning "
+ "default value of 32767.\n", ioc->name,
+ max_sectors);
+ } else {
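+			/* clear the low bit so max_sectors stays even */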
+ shost->max_sectors = max_sectors & 0xFFFE;
+ pr_info(MPT3SAS_FMT
+ "The max_sectors value is set to %d\n",
+ ioc->name, shost->max_sectors);
+ }
+ }
+
+ if ((scsi_add_host(shost, &pdev->dev))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ list_del(&ioc->list);
+ goto out_add_shost_fail;
+ }
+
+ /* register EEDP capabilities with SCSI layer */
+ if (prot_mask > 0)
+ scsi_host_set_prot(shost, prot_mask);
+ else
+ scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+ | SHOST_DIF_TYPE2_PROTECTION
+ | SHOST_DIF_TYPE3_PROTECTION);
+
+ scsi_host_set_guard(shost, SHOST_DIX_GUARD_CRC);
+
+ /* event thread */
+ snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
+ "fw_event%d", ioc->id);
+ ioc->firmware_event_thread = create_singlethread_workqueue(
+ ioc->firmware_event_name);
+ if (!ioc->firmware_event_thread) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_thread_fail;
+ }
+
+ ioc->is_driver_loading = 1;
+ if ((mpt3sas_base_attach(ioc))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_attach_fail;
+ }
+ scsi_scan_host(shost);
+ return 0;
+
+ out_attach_fail:
+ destroy_workqueue(ioc->firmware_event_thread);
+ out_thread_fail:
+ list_del(&ioc->list);
+ scsi_remove_host(shost);
+ out_add_shost_fail:
+ scsi_host_put(shost);
+ return -ENODEV;
+}
+
+#ifdef CONFIG_PM
+/**
+ * _scsih_suspend - power management suspend main entry point
+ * @pdev: PCI device struct
+ * @state: PM state change to (usually PCI_D3)
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state;
+
+ mpt3sas_base_stop_watchdog(ioc);
+ flush_scheduled_work();
+ scsi_block_requests(shost);
+ device_state = pci_choose_state(pdev, state);
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, entering operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_save_state(pdev);
+ mpt3sas_base_free_resources(ioc);
+ pci_set_power_state(pdev, device_state);
+ return 0;
+}
+
+/**
+ * _scsih_resume - power management resume main entry point
+ * @pdev: PCI device struct
+ *
+ * Returns 0 success, anything else error.
+ */
+static int
+_scsih_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ pci_power_t device_state = pdev->current_state;
+ int r;
+
+ pr_info(MPT3SAS_FMT
+ "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
+ ioc->name, pdev, pci_name(pdev), device_state);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_enable_wake(pdev, PCI_D0, 0);
+ pci_restore_state(pdev);
+ ioc->pdev = pdev;
+ r = mpt3sas_base_map_resources(ioc);
+ if (r)
+ return r;
+
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP, SOFT_RESET);
+ scsi_unblock_requests(shost);
+ mpt3sas_base_start_watchdog(ioc);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * _scsih_pci_error_detected - Called when a PCI error is detected.
+ * @pdev: PCI device struct
+ * @state: PCI channel state
+ *
+ * Description: Called when a PCI error is detected.
+ *
+ * Return value:
+ * PCI_ERS_RESULT_CAN_RECOVER, PCI_ERS_RESULT_NEED_RESET or
+ * PCI_ERS_RESULT_DISCONNECT
+ */
+static pci_ers_result_t
+_scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: detected callback, state(%d)!!\n",
+ ioc->name, state);
+
+ switch (state) {
+ case pci_channel_io_normal:
+ return PCI_ERS_RESULT_CAN_RECOVER;
+ case pci_channel_io_frozen:
+ /* Fatal error, prepare for slot reset */
+ ioc->pci_error_recovery = 1;
+ scsi_block_requests(ioc->shost);
+ mpt3sas_base_stop_watchdog(ioc);
+ mpt3sas_base_free_resources(ioc);
+ return PCI_ERS_RESULT_NEED_RESET;
+ case pci_channel_io_perm_failure:
+ /* Permanent error, prepare for device removal */
+ ioc->pci_error_recovery = 1;
+ mpt3sas_base_stop_watchdog(ioc);
+ _scsih_flush_running_cmds(ioc);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * _scsih_pci_slot_reset - Called when PCI slot has been reset.
+ * @pdev: PCI device struct
+ *
+ * Description: This routine is called by the pci error recovery
+ * code after the PCI slot has been reset, just before we
+ * should resume normal operations.
+ */
+static pci_ers_result_t
+_scsih_pci_slot_reset(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ int rc;
+
+ pr_info(MPT3SAS_FMT "PCI error: slot reset callback!!\n",
+ ioc->name);
+
+ ioc->pci_error_recovery = 0;
+ ioc->pdev = pdev;
+ pci_restore_state(pdev);
+ rc = mpt3sas_base_map_resources(ioc);
+ if (rc)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ rc = mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+
+ pr_warn(MPT3SAS_FMT "hard reset: %s\n", ioc->name,
+ (rc == 0) ? "success" : "failed");
+
+ if (!rc)
+ return PCI_ERS_RESULT_RECOVERED;
+ else
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+/**
+ * _scsih_pci_resume() - resume normal ops after PCI reset
+ * @pdev: pointer to PCI device
+ *
+ * Called when the error recovery driver tells us that it's
+ * OK to resume normal operation. Use completion to allow
+ * halted scsi ops to resume.
+ */
+static void
+_scsih_pci_resume(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: resume callback!!\n", ioc->name);
+
+ pci_cleanup_aer_uncorrect_error_status(pdev);
+ mpt3sas_base_start_watchdog(ioc);
+ scsi_unblock_requests(ioc->shost);
+}
+
+/**
+ * _scsih_pci_mmio_enabled - Enable MMIO and dump debug registers
+ * @pdev: pointer to PCI device
+ */
+static pci_ers_result_t
+_scsih_pci_mmio_enabled(struct pci_dev *pdev)
+{
+ struct Scsi_Host *shost = pci_get_drvdata(pdev);
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+
+ pr_info(MPT3SAS_FMT "PCI error: mmio enabled callback!!\n",
+ ioc->name);
+
+ /* TODO - dump whatever for debugging purposes */
+
+ /* Request a slot reset. */
+ return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/* raid transport support */
+static struct raid_function_template mpt3sas_raid_functions = {
+ .cookie = &scsih_driver_template,
+ .is_raid = _scsih_is_raid,
+ .get_resync = _scsih_get_resync,
+ .get_state = _scsih_get_state,
+};
+
+static struct pci_error_handlers _scsih_err_handler = {
+ .error_detected = _scsih_pci_error_detected,
+ .mmio_enabled = _scsih_pci_mmio_enabled,
+ .slot_reset = _scsih_pci_slot_reset,
+ .resume = _scsih_pci_resume,
+};
+
+static struct pci_driver scsih_driver = {
+ .name = MPT3SAS_DRIVER_NAME,
+ .id_table = scsih_pci_table,
+ .probe = _scsih_probe,
+ .remove = _scsih_remove,
+ .shutdown = _scsih_shutdown,
+ .err_handler = &_scsih_err_handler,
+#ifdef CONFIG_PM
+ .suspend = _scsih_suspend,
+ .resume = _scsih_resume,
+#endif
+};
+
+/**
+ * _scsih_init - main entry point for this driver.
+ *
+ * Returns 0 for success, anything else for error.
+ */
+static int __init
+_scsih_init(void)
+{
+ int error;
+
+ mpt_ids = 0;
+
+ pr_info("%s version %s loaded\n", MPT3SAS_DRIVER_NAME,
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_transport_template =
+ sas_attach_transport(&mpt3sas_transport_functions);
+ if (!mpt3sas_transport_template)
+ return -ENODEV;
+
+ /* raid transport support */
+ mpt3sas_raid_template = raid_class_attach(&mpt3sas_raid_functions);
+ if (!mpt3sas_raid_template) {
+ sas_release_transport(mpt3sas_transport_template);
+ return -ENODEV;
+ }
+
+ mpt3sas_base_initialize_callback_handler();
+
+ /* queuecommand callback handler */
+ scsi_io_cb_idx = mpt3sas_base_register_callback_handler(_scsih_io_done);
+
+ /* task management callback handler */
+ tm_cb_idx = mpt3sas_base_register_callback_handler(_scsih_tm_done);
+
+ /* base internal commands callback handler */
+ base_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_base_done);
+ port_enable_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_port_enable_done);
+
+ /* transport internal commands callback handler */
+ transport_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_transport_done);
+
+ /* scsih internal commands callback handler */
+ scsih_cb_idx = mpt3sas_base_register_callback_handler(_scsih_done);
+
+ /* configuration page API internal commands callback handler */
+ config_cb_idx = mpt3sas_base_register_callback_handler(
+ mpt3sas_config_done);
+
+ /* ctl module callback handler */
+ ctl_cb_idx = mpt3sas_base_register_callback_handler(mpt3sas_ctl_done);
+
+ tm_tr_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_tr_complete);
+
+ tm_tr_volume_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_tm_volume_tr_complete);
+
+ tm_sas_control_cb_idx = mpt3sas_base_register_callback_handler(
+ _scsih_sas_control_complete);
+
+ mpt3sas_ctl_init();
+
+ error = pci_register_driver(&scsih_driver);
+ if (error) {
+ /* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+ }
+
+ return error;
+}
+
+/**
+ * _scsih_exit - exit point for this driver (when it is a module).
+ *
+ * Returns nothing.
+ */
+static void __exit
+_scsih_exit(void)
+{
+ pr_info("mpt3sas version %s unloading\n",
+ MPT3SAS_DRIVER_VERSION);
+
+ mpt3sas_ctl_exit();
+
+ pci_unregister_driver(&scsih_driver);
+
+ mpt3sas_base_release_callback_handler(scsi_io_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_cb_idx);
+ mpt3sas_base_release_callback_handler(base_cb_idx);
+ mpt3sas_base_release_callback_handler(port_enable_cb_idx);
+ mpt3sas_base_release_callback_handler(transport_cb_idx);
+ mpt3sas_base_release_callback_handler(scsih_cb_idx);
+ mpt3sas_base_release_callback_handler(config_cb_idx);
+ mpt3sas_base_release_callback_handler(ctl_cb_idx);
+
+ mpt3sas_base_release_callback_handler(tm_tr_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_tr_volume_cb_idx);
+ mpt3sas_base_release_callback_handler(tm_sas_control_cb_idx);
+
+ /* raid transport support */
+ raid_class_release(mpt3sas_raid_template);
+ sas_release_transport(mpt3sas_transport_template);
+}
+
+module_init(_scsih_init);
+module_exit(_scsih_exit);
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
new file mode 100644
index 000000000000..87ca2b7287c3
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -0,0 +1,2128 @@
+/*
+ * SAS Transport Layer for MPT (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_sas.h>
+#include <scsi/scsi_dbg.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _transport_sas_node_find_by_sas_address - sas node search
+ * @ioc: per adapter object
+ * @sas_address: sas address of expander or sas host
+ * Context: Calling function should acquire ioc->sas_node_lock.
+ *
+ * Search for either the hba or an expander device based on sas_address, then
+ * return the sas_node object.
+ */
+static struct _sas_node *
+_transport_sas_node_find_by_sas_address(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address)
+{
+ if (ioc->sas_hba.sas_address == sas_address)
+ return &ioc->sas_hba;
+ else
+ return mpt3sas_scsih_expander_find_by_sas_address(ioc,
+ sas_address);
+}
+
+/**
+ * _transport_convert_phy_link_rate - convert a link rate to transport form
+ * @link_rate: link rate returned from mpt firmware
+ *
+ * Convert link_rate from mpi fusion into sas_transport form.
+ */
+static enum sas_linkrate
+_transport_convert_phy_link_rate(u8 link_rate)
+{
+ enum sas_linkrate rc;
+
+ switch (link_rate) {
+ case MPI2_SAS_NEG_LINK_RATE_1_5:
+ rc = SAS_LINK_RATE_1_5_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_3_0:
+ rc = SAS_LINK_RATE_3_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_6_0:
+ rc = SAS_LINK_RATE_6_0_GBPS;
+ break;
+ case MPI25_SAS_NEG_LINK_RATE_12_0:
+ rc = SAS_LINK_RATE_12_0_GBPS;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PHY_DISABLED:
+ rc = SAS_PHY_DISABLED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_NEGOTIATION_FAILED:
+ rc = SAS_LINK_RATE_FAILED;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_PORT_SELECTOR:
+ rc = SAS_SATA_PORT_SELECTOR;
+ break;
+ case MPI2_SAS_NEG_LINK_RATE_SMP_RESET_IN_PROGRESS:
+ rc = SAS_PHY_RESET_IN_PROGRESS;
+ break;
+
+ default:
+ case MPI2_SAS_NEG_LINK_RATE_SATA_OOB_COMPLETE:
+ case MPI2_SAS_NEG_LINK_RATE_UNKNOWN_LINK_RATE:
+ rc = SAS_LINK_RATE_UNKNOWN;
+ break;
+ }
+ return rc;
+}
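+
+/*
+ * An equivalent table-driven form of the switch above (illustrative sketch
+ * only, not part of the driver; it relies on the MPI2/MPI25 negotiated link
+ * rate codes being small integer values, which they are in mpi2.h):
+ *
+ *	static const enum sas_linkrate _rate_map[] = {
+ *		[MPI2_SAS_NEG_LINK_RATE_1_5]   = SAS_LINK_RATE_1_5_GBPS,
+ *		[MPI2_SAS_NEG_LINK_RATE_3_0]   = SAS_LINK_RATE_3_0_GBPS,
+ *		[MPI2_SAS_NEG_LINK_RATE_6_0]   = SAS_LINK_RATE_6_0_GBPS,
+ *		[MPI25_SAS_NEG_LINK_RATE_12_0] = SAS_LINK_RATE_12_0_GBPS,
+ *	};
+ *
+ * with a bounds/default check falling back to SAS_LINK_RATE_UNKNOWN.  The
+ * switch form also has to cover the non-rate states (disabled, negotiation
+ * failed, port selector, reset in progress), which are sparse.
+ */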
+
+/**
+ * _transport_set_identify - set identify for phys and end devices
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @identify: sas identify info
+ *
+ * Populates sas identify info.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_set_identify(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ struct sas_identify *identify)
+{
+ Mpi2SasDevicePage0_t sas_device_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u32 device_info;
+ u32 ioc_status;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ if ((mpt3sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
+ MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT
+ "handle(0x%04x), ioc_status(0x%04x)\nfailure at %s:%d/%s()!\n",
+ ioc->name, handle, ioc_status,
+ __FILE__, __LINE__, __func__);
+ return -EIO;
+ }
+
+ memset(identify, 0, sizeof(struct sas_identify));
+ device_info = le32_to_cpu(sas_device_pg0.DeviceInfo);
+
+ /* sas_address */
+ identify->sas_address = le64_to_cpu(sas_device_pg0.SASAddress);
+
+ /* phy number of the parent device this device is linked to */
+ identify->phy_identifier = sas_device_pg0.PhyNum;
+
+ /* device_type */
+ switch (device_info & MPI2_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) {
+ case MPI2_SAS_DEVICE_INFO_NO_DEVICE:
+ identify->device_type = SAS_PHY_UNUSED;
+ break;
+ case MPI2_SAS_DEVICE_INFO_END_DEVICE:
+ identify->device_type = SAS_END_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER:
+ identify->device_type = SAS_EDGE_EXPANDER_DEVICE;
+ break;
+ case MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER:
+ identify->device_type = SAS_FANOUT_EXPANDER_DEVICE;
+ break;
+ }
+
+ /* initiator_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_INITIATOR)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_HOST)
+ identify->initiator_port_protocols |= SAS_PROTOCOL_SATA;
+
+ /* target_port_protocols */
+ if (device_info & MPI2_SAS_DEVICE_INFO_SSP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SSP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_STP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_STP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SMP_TARGET)
+ identify->target_port_protocols |= SAS_PROTOCOL_SMP;
+ if (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)
+ identify->target_port_protocols |= SAS_PROTOCOL_SATA;
+
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_done - internal transport layer callback handler.
+ * @ioc: per adapter object
+ * @smid: system request message index
+ * @msix_index: MSIX table index supplied by the OS
+ * @reply: reply message frame(lower 32bit addr)
+ *
+ * Callback handler when sending internally generated transport cmds.
+ * The callback index passed is `ioc->transport_cb_idx`
+ *
+ * Return 1 meaning mf should be freed from _base_interrupt
+ * 0 means the mf is freed from this function.
+ */
+u8
+mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
+ u32 reply)
+{
+ MPI2DefaultReply_t *mpi_reply;
+
+ mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
+ if (ioc->transport_cmds.status == MPT3_CMD_NOT_USED)
+ return 1;
+ if (ioc->transport_cmds.smid != smid)
+ return 1;
+ ioc->transport_cmds.status |= MPT3_CMD_COMPLETE;
+ if (mpi_reply) {
+ memcpy(ioc->transport_cmds.reply, mpi_reply,
+ mpi_reply->MsgLength*4);
+ ioc->transport_cmds.status |= MPT3_CMD_REPLY_VALID;
+ }
+ ioc->transport_cmds.status &= ~MPT3_CMD_PENDING;
+ complete(&ioc->transport_cmds.done);
+ return 1;
+}
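+
+/*
+ * Editorial note: ioc->transport_cmds is a single-slot mailbox.  A caller
+ * takes transport_cmds.mutex, sets the status to MPT3_CMD_PENDING, posts
+ * the request and sleeps on transport_cmds.done; this callback then sets
+ * MPT3_CMD_COMPLETE (plus MPT3_CMD_REPLY_VALID when a reply frame arrived),
+ * clears MPT3_CMD_PENDING and wakes the waiter.
+ */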
+
+/* report manufacture request structure */
+struct rep_manu_request {
+ u8 smp_frame_type;
+ u8 function;
+ u8 reserved;
+ u8 request_length;
+};
+
+/* report manufacture reply structure */
+struct rep_manu_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x01 */
+ u8 function_result;
+ u8 response_length;
+ u16 expander_change_count;
+ u8 reserved0[2];
+ u8 sas_format;
+ u8 reserved2[3];
+ u8 vendor_id[SAS_EXPANDER_VENDOR_ID_LEN];
+ u8 product_id[SAS_EXPANDER_PRODUCT_ID_LEN];
+ u8 product_rev[SAS_EXPANDER_PRODUCT_REV_LEN];
+ u8 component_vendor_id[SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN];
+ u16 component_id;
+ u8 component_revision_id;
+ u8 reserved3;
+ u8 vendor_specific[8];
+};
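+
+/*
+ * Illustrative layout note: _transport_expander_report_manufacture() below
+ * carves both frames out of a single DMA-coherent allocation:
+ *
+ *	data_out_dma                       data_out_dma + sizeof(request)
+ *	|<--- struct rep_manu_request --->|<--- struct rep_manu_reply --->|
+ *
+ * which is why data_in_dma is computed as
+ * data_out_dma + sizeof(struct rep_manu_request).
+ */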
+
+/**
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
+ * @ioc: per adapter object
+ * @sas_address: expander sas address
+ * @edev: the sas_expander_device object
+ *
+ * Fills in the sas_expander_device object when SMP port is created.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_expander_report_manufacture(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, struct sas_expander_device *edev)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct rep_manu_reply *manufacture_reply;
+ struct rep_manu_request *manufacture_request;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ dma_addr_t data_in_dma;
+ size_t data_in_sz;
+ size_t data_out_sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ data_out_sz = sizeof(struct rep_manu_request);
+ data_in_sz = sizeof(struct rep_manu_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ &data_out_dma);
+
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ data_in_dma = data_out_dma + sizeof(struct rep_manu_request);
+
+ manufacture_request = data_out;
+ manufacture_request->smp_frame_type = 0x40;
+ manufacture_request->function = 1;
+ manufacture_request->reserved = 0;
+ manufacture_request->request_length = 0;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = cpu_to_le64(sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(data_out_sz);
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma, data_out_sz, data_in_dma,
+ data_in_sz);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - send to sas_addr(0x%016llx)\n",
+ ioc->name, (unsigned long long)sas_address));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+ u8 *tmp;
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct rep_manu_reply))
+ goto out;
+
+ manufacture_reply = data_out + sizeof(struct rep_manu_request);
+ strncpy(edev->vendor_id, manufacture_reply->vendor_id,
+ SAS_EXPANDER_VENDOR_ID_LEN);
+ strncpy(edev->product_id, manufacture_reply->product_id,
+ SAS_EXPANDER_PRODUCT_ID_LEN);
+ strncpy(edev->product_rev, manufacture_reply->product_rev,
+ SAS_EXPANDER_PRODUCT_REV_LEN);
+ edev->level = manufacture_reply->sas_format & 1;
+ if (edev->level) {
+ strncpy(edev->component_vendor_id,
+ manufacture_reply->component_vendor_id,
+ SAS_EXPANDER_COMPONENT_VENDOR_ID_LEN);
+ tmp = (u8 *)&manufacture_reply->component_id;
+ edev->component_id = tmp[0] << 8 | tmp[1];
+ edev->component_revision_id =
+ manufacture_reply->component_revision_id;
+ }
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "report_manufacture - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, data_out_sz + data_in_sz,
+ data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_delete_port - helper function to remove a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+ enum sas_device_type device_type =
+ mpt3sas_port->remote_identify.device_type;
+
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx)\n",
+ (unsigned long long) sas_address);
+
+ ioc->logging_level |= MPT_DEBUG_TRANSPORT;
+ if (device_type == SAS_END_DEVICE)
+ mpt3sas_device_remove_by_sas_address(ioc, sas_address);
+ else if (device_type == SAS_EDGE_EXPANDER_DEVICE ||
+ device_type == SAS_FANOUT_EXPANDER_DEVICE)
+ mpt3sas_expander_remove(ioc, sas_address);
+ ioc->logging_level &= ~MPT_DEBUG_TRANSPORT;
+}
+
+/**
+ * _transport_delete_phy - helper function to remove a single phy from a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_delete_phy(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_port *mpt3sas_port, struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long) sas_address, mpt3sas_phy->phy_id);
+
+ list_del(&mpt3sas_phy->port_siblings);
+ mpt3sas_port->num_phys--;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+}
+
+/**
+ * _transport_add_phy - helper function to add a single phy to a port
+ * @ioc: per adapter object
+ * @mpt3sas_port: mpt3sas per port object
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_port *mpt3sas_port,
+ struct _sas_phy *mpt3sas_phy)
+{
+ u64 sas_address = mpt3sas_port->remote_identify.sas_address;
+
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "add: sas_addr(0x%016llx), phy(%d)\n", (unsigned long long)
+ sas_address, mpt3sas_phy->phy_id);
+
+ list_add_tail(&mpt3sas_phy->port_siblings, &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ sas_port_add_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+}
+
+/**
+ * _transport_add_phy_to_an_existing_port - adding new phy to existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @sas_address: sas address of the device/expander where the phy needs to be added
+ *
+ * Returns nothing.
+ */
+static void
+_transport_add_phy_to_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy,
+ u64 sas_address)
+{
+ struct _sas_port *mpt3sas_port;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 1)
+ return;
+
+ list_for_each_entry(mpt3sas_port, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address !=
+ sas_address)
+ continue;
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch == mpt3sas_phy)
+ return;
+ }
+ _transport_add_phy(ioc, mpt3sas_port, mpt3sas_phy);
+ return;
+ }
+
+}
+
+/**
+ * _transport_del_phy_from_an_existing_port - delete phy from existing port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @mpt3sas_phy: mpt3sas per phy object
+ *
+ * Returns nothing.
+ */
+static void
+_transport_del_phy_from_an_existing_port(struct MPT3SAS_ADAPTER *ioc,
+ struct _sas_node *sas_node, struct _sas_phy *mpt3sas_phy)
+{
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_phy *phy_srch;
+
+ if (mpt3sas_phy->phy_belongs_to_port == 0)
+ return;
+
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ list_for_each_entry(phy_srch, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if (phy_srch != mpt3sas_phy)
+ continue;
+
+ if (mpt3sas_port->num_phys == 1)
+ _transport_delete_port(ioc, mpt3sas_port);
+ else
+ _transport_delete_phy(ioc, mpt3sas_port,
+ mpt3sas_phy);
+ return;
+ }
+ }
+}
+
+/**
+ * _transport_sanity_check - sanity check when adding a new port
+ * @ioc: per adapter object
+ * @sas_node: sas node object (either expander or sas host)
+ * @sas_address: sas address of device being added
+ *
+ * Before adding the new port, detach any stale phy-to-port binding that
+ * firmware still reports for @sas_address.
+ */
+static void
+_transport_sanity_check(struct MPT3SAS_ADAPTER *ioc, struct _sas_node *sas_node,
+ u64 sas_address)
+{
+ int i;
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address != sas_address)
+ continue;
+ if (sas_node->phy[i].phy_belongs_to_port == 1)
+ _transport_del_phy_from_an_existing_port(ioc, sas_node,
+ &sas_node->phy[i]);
+ }
+}
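+
+/*
+ * Example scenario (editorial): if firmware moves a phy to a device at
+ * @sas_address while that phy is still listed under an old port, the loop
+ * above removes the stale binding first, so the subsequent
+ * mpt3sas_transport_port_add() can claim the phy cleanly.
+ */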
+
+/**
+ * mpt3sas_transport_port_add - insert port to the list
+ * @ioc: per adapter object
+ * @handle: handle of attached device
+ * @sas_address: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Adding new port object to the sas_node->sas_port_list.
+ *
+ * Returns mpt3sas_port.
+ */
+struct _sas_port *
+mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+ u64 sas_address)
+{
+ struct _sas_phy *mpt3sas_phy, *next;
+ struct _sas_port *mpt3sas_port;
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct sas_rphy *rphy;
+ int i;
+ struct sas_port *port;
+
+ mpt3sas_port = kzalloc(sizeof(struct _sas_port),
+ GFP_KERNEL);
+ if (!mpt3sas_port) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return NULL;
+ }
+
+ INIT_LIST_HEAD(&mpt3sas_port->port_list);
+ INIT_LIST_HEAD(&mpt3sas_port->phy_list);
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!sas_node) {
+ pr_err(MPT3SAS_FMT
+ "%s: Could not find parent sas_address(0x%016llx)!\n",
+ ioc->name, __func__, (unsigned long long)sas_address);
+ goto out_fail;
+ }
+
+ if ((_transport_set_identify(ioc, handle,
+ &mpt3sas_port->remote_identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ if (mpt3sas_port->remote_identify.device_type == SAS_PHY_UNUSED) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ _transport_sanity_check(ioc, sas_node,
+ mpt3sas_port->remote_identify.sas_address);
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address !=
+ mpt3sas_port->remote_identify.sas_address)
+ continue;
+ list_add_tail(&sas_node->phy[i].port_siblings,
+ &mpt3sas_port->phy_list);
+ mpt3sas_port->num_phys++;
+ }
+
+ if (!mpt3sas_port->num_phys) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ port = sas_port_alloc_num(sas_node->parent_dev);
+ if ((sas_port_add(port))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ goto out_fail;
+ }
+
+ list_for_each_entry(mpt3sas_phy, &mpt3sas_port->phy_list,
+ port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &port->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx), phy(%d)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ sas_port_add_phy(port, mpt3sas_phy->phy);
+ mpt3sas_phy->phy_belongs_to_port = 1;
+ }
+
+ mpt3sas_port->port = port;
+ if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE)
+ rphy = sas_end_device_alloc(port);
+ else
+ rphy = sas_expander_alloc(port,
+ mpt3sas_port->remote_identify.device_type);
+
+ rphy->identify = mpt3sas_port->remote_identify;
+ if ((sas_rphy_add(rphy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &rphy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n",
+ handle, (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address);
+ mpt3sas_port->rphy = rphy;
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ list_add_tail(&mpt3sas_port->port_list, &sas_node->sas_port_list);
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* fill in report manufacture */
+ if (mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+ mpt3sas_port->remote_identify.device_type ==
+ MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
+ _transport_expander_report_manufacture(ioc,
+ mpt3sas_port->remote_identify.sas_address,
+ rphy_to_expander_device(rphy));
+ return mpt3sas_port;
+
+ out_fail:
+ list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list,
+ port_siblings)
+ list_del(&mpt3sas_phy->port_siblings);
+ kfree(mpt3sas_port);
+ return NULL;
+}
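+
+/*
+ * Usage sketch (illustrative only; "handle" and "sas_address_parent" stand
+ * in for a discovered attached device and its parent expander/hba):
+ *
+ *	struct _sas_port *port;
+ *
+ *	port = mpt3sas_transport_port_add(ioc, handle, sas_address_parent);
+ *	if (!port)
+ *		return -ENODEV;
+ *	...
+ *	mpt3sas_transport_port_remove(ioc,
+ *	    port->remote_identify.sas_address, sas_address_parent);
+ */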
+
+/**
+ * mpt3sas_transport_port_remove - remove port from the list
+ * @ioc: per adapter object
+ * @sas_address: sas address of attached device
+ * @sas_address_parent: sas address of parent expander or sas host
+ * Context: This function will acquire ioc->sas_node_lock.
+ *
+ * Removing object and freeing associated memory from the
+ * ioc->sas_port_list.
+ *
+ * Return nothing.
+ */
+void
+mpt3sas_transport_port_remove(struct MPT3SAS_ADAPTER *ioc, u64 sas_address,
+ u64 sas_address_parent)
+{
+ int i;
+ unsigned long flags;
+ struct _sas_port *mpt3sas_port, *next;
+ struct _sas_node *sas_node;
+ u8 found = 0;
+ struct _sas_phy *mpt3sas_phy, *next_phy;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc,
+ sas_address_parent);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+ list_for_each_entry_safe(mpt3sas_port, next, &sas_node->sas_port_list,
+ port_list) {
+ if (mpt3sas_port->remote_identify.sas_address != sas_address)
+ continue;
+ found = 1;
+ list_del(&mpt3sas_port->port_list);
+ goto out;
+ }
+ out:
+ if (!found) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ for (i = 0; i < sas_node->num_phys; i++) {
+ if (sas_node->phy[i].remote_identify.sas_address == sas_address)
+ memset(&sas_node->phy[i].remote_identify, 0,
+ sizeof(struct sas_identify));
+ }
+
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ list_for_each_entry_safe(mpt3sas_phy, next_phy,
+ &mpt3sas_port->phy_list, port_siblings) {
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_port->port->dev,
+ "remove: sas_addr(0x%016llx), phy(%d)\n",
+ (unsigned long long)
+ mpt3sas_port->remote_identify.sas_address,
+ mpt3sas_phy->phy_id);
+ mpt3sas_phy->phy_belongs_to_port = 0;
+ sas_port_delete_phy(mpt3sas_port->port, mpt3sas_phy->phy);
+ list_del(&mpt3sas_phy->port_siblings);
+ }
+ sas_port_delete(mpt3sas_port->port);
+ kfree(mpt3sas_port);
+}
+
+/**
+ * mpt3sas_transport_add_host_phy - report sas_host phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @phy_pg0: sas phy page 0
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_host_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle = le16_to_cpu(phy_pg0.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate & MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ phy_pg0.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_add_expander_phy - report expander phy to transport
+ * @ioc: per adapter object
+ * @mpt3sas_phy: mpt3sas per phy object
+ * @expander_pg1: expander page 1
+ * @parent_dev: parent device class object
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt3sas_transport_add_expander_phy(struct MPT3SAS_ADAPTER *ioc, struct _sas_phy
+ *mpt3sas_phy, Mpi2ExpanderPage1_t expander_pg1,
+ struct device *parent_dev)
+{
+ struct sas_phy *phy;
+ int phy_index = mpt3sas_phy->phy_id;
+
+ INIT_LIST_HEAD(&mpt3sas_phy->port_siblings);
+ phy = sas_phy_alloc(parent_dev, phy_index);
+ if (!phy) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -1;
+ }
+ if ((_transport_set_identify(ioc, mpt3sas_phy->handle,
+ &mpt3sas_phy->identify))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ phy->identify = mpt3sas_phy->identify;
+ mpt3sas_phy->attached_handle =
+ le16_to_cpu(expander_pg1.AttachedDevHandle);
+ if (mpt3sas_phy->attached_handle)
+ _transport_set_identify(ioc, mpt3sas_phy->attached_handle,
+ &mpt3sas_phy->remote_identify);
+ phy->identify.phy_identifier = mpt3sas_phy->phy_id;
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ phy->minimum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate & MPI2_SAS_HWRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate_hw = _transport_convert_phy_link_rate(
+ expander_pg1.HwLinkRate >> 4);
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ expander_pg1.ProgrammedLinkRate >> 4);
+
+ if ((sas_phy_add(phy))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ sas_phy_free(phy);
+ return -1;
+ }
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &phy->dev,
+ "add: handle(0x%04x), sas_addr(0x%016llx)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ mpt3sas_phy->handle, (unsigned long long)
+ mpt3sas_phy->identify.sas_address,
+ mpt3sas_phy->attached_handle,
+ (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+ mpt3sas_phy->phy = phy;
+ return 0;
+}
+
+/**
+ * mpt3sas_transport_update_links - refreshing phy link changes
+ * @ioc: per adapter object
+ * @sas_address: sas address of parent expander or sas host
+ * @handle: attached device handle
+ * @phy_number: phy number
+ * @link_rate: new link rate
+ *
+ * Returns nothing.
+ */
+void
+mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
+ u64 sas_address, u16 handle, u8 phy_number, u8 link_rate)
+{
+ unsigned long flags;
+ struct _sas_node *sas_node;
+ struct _sas_phy *mpt3sas_phy;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery)
+ return;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ sas_node = _transport_sas_node_find_by_sas_address(ioc, sas_address);
+ if (!sas_node) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return;
+ }
+
+ mpt3sas_phy = &sas_node->phy[phy_number];
+ mpt3sas_phy->attached_handle = handle;
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ if (handle && (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)) {
+ _transport_set_identify(ioc, handle,
+ &mpt3sas_phy->remote_identify);
+ _transport_add_phy_to_an_existing_port(ioc, sas_node,
+ mpt3sas_phy, mpt3sas_phy->remote_identify.sas_address);
+ } else
+ memset(&mpt3sas_phy->remote_identify, 0, sizeof(struct
+ sas_identify));
+
+ if (mpt3sas_phy->phy)
+ mpt3sas_phy->phy->negotiated_linkrate =
+ _transport_convert_phy_link_rate(link_rate);
+
+ if ((ioc->logging_level & MPT_DEBUG_TRANSPORT))
+ dev_printk(KERN_INFO, &mpt3sas_phy->phy->dev,
+ "refresh: parent sas_addr(0x%016llx),\n"
+ "\tlink_rate(0x%02x), phy(%d)\n"
+ "\tattached_handle(0x%04x), sas_addr(0x%016llx)\n",
+ (unsigned long long)sas_address,
+ link_rate, phy_number, handle, (unsigned long long)
+ mpt3sas_phy->remote_identify.sas_address);
+}
+
+static inline void *
+phy_to_ioc(struct sas_phy *phy)
+{
+ struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+ return shost_priv(shost);
+}
+
+static inline void *
+rphy_to_ioc(struct sas_rphy *rphy)
+{
+ struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+ return shost_priv(shost);
+}
+
+/* report phy error log structure */
+struct phy_error_log_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x11 */
+ u8 allocated_response_length;
+ u8 request_length; /* 02 */
+ u8 reserved_1[5];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+};
+
+/* report phy error log reply structure */
+struct phy_error_log_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x11 */
+ u8 function_result;
+ u8 response_length;
+ __be16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 reserved_2[2];
+ __be32 invalid_dword;
+ __be32 running_disparity_error;
+ __be32 loss_of_dword_sync;
+ __be32 phy_reset_problem;
+};
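+
+/*
+ * Editorial note: the error counters in the reply are big-endian on the
+ * wire (SMP frames are defined big-endian), hence the __be32 fields above
+ * and the be32_to_cpu() conversions when they are copied into the sas_phy
+ * in _transport_get_expander_phy_error_log() below.
+ */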
+
+/**
+ * _transport_get_expander_phy_error_log - return expander counters
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_expander_phy_error_log(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_error_log_request *phy_error_log_request;
+ struct phy_error_log_reply *phy_error_log_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_error_log_request) +
+ sizeof(struct phy_error_log_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_error_log_request = data_out;
+ phy_error_log_request->smp_frame_type = 0x40;
+ phy_error_log_request->function = 0x11;
+ phy_error_log_request->request_length = 2;
+ phy_error_log_request->allocated_response_length = 0;
+ phy_error_log_request->phy_identifier = phy->number;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_error_log_request));
+ psge = &mpi_request->SGL;
+
+ ioc->build_sg(ioc, psge, data_out_dma,
+ sizeof(struct phy_error_log_request),
+ data_out_dma + sizeof(struct phy_error_log_request),
+ sizeof(struct phy_error_log_reply));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - send to sas_addr(0x%016llx), phy(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_error_log_reply))
+ goto out;
+
+ phy_error_log_reply = data_out +
+ sizeof(struct phy_error_log_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - function_result(%d)\n",
+ ioc->name, phy_error_log_reply->function_result));
+
+ phy->invalid_dword_count =
+ be32_to_cpu(phy_error_log_reply->invalid_dword);
+ phy->running_disparity_error_count =
+ be32_to_cpu(phy_error_log_reply->running_disparity_error);
+ phy->loss_of_dword_sync_count =
+ be32_to_cpu(phy_error_log_reply->loss_of_dword_sync);
+ phy->phy_reset_problem_count =
+ be32_to_cpu(phy_error_log_reply->phy_reset_problem);
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_error_log - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+/**
+ * _transport_get_linkerrors - return phy counters for both hba and expanders
+ * @phy: The sas phy object
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_get_linkerrors(struct sas_phy *phy)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ unsigned long flags;
+ Mpi2ConfigReply_t mpi_reply;
+ Mpi2SasPhyPage1_t phy_pg1;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_get_expander_phy_error_log(ioc, phy);
+
+ /* get hba phy error logs */
+ if ((mpt3sas_config_get_phy_pg1(ioc, &mpi_reply, &phy_pg1,
+ phy->number))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status (0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number,
+ le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ phy->invalid_dword_count = le32_to_cpu(phy_pg1.InvalidDwordCount);
+ phy->running_disparity_error_count =
+ le32_to_cpu(phy_pg1.RunningDisparityErrorCount);
+ phy->loss_of_dword_sync_count =
+ le32_to_cpu(phy_pg1.LossDwordSynchCount);
+ phy->phy_reset_problem_count =
+ le32_to_cpu(phy_pg1.PhyResetProblemCount);
+ return 0;
+}
+
+/**
+ * _transport_get_enclosure_identifier - obtain the enclosure logical id
+ * @rphy: The sas rphy object
+ * @identifier: returned enclosure logical id
+ *
+ * Obtain the enclosure logical id of the attached device.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device) {
+ *identifier = sas_device->enclosure_logical_id;
+ rc = 0;
+ } else {
+ *identifier = 0;
+ rc = -ENXIO;
+ }
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/**
+ * _transport_get_bay_identifier - obtain the slot (bay) id
+ * @rphy: The sas rphy object
+ *
+ * Returns the slot id for a device that resides inside an enclosure.
+ */
+static int
+_transport_get_bay_identifier(struct sas_rphy *rphy)
+{
+ struct MPT3SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
+ struct _sas_device *sas_device;
+ unsigned long flags;
+ int rc;
+
+ spin_lock_irqsave(&ioc->sas_device_lock, flags);
+ sas_device = mpt3sas_scsih_sas_device_find_by_sas_address(ioc,
+ rphy->identify.sas_address);
+ if (sas_device)
+ rc = sas_device->slot;
+ else
+ rc = -ENXIO;
+ spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+ return rc;
+}
+
+/* phy control request structure */
+struct phy_control_request {
+ u8 smp_frame_type; /* 0x40 */
+ u8 function; /* 0x91 */
+ u8 allocated_response_length;
+ u8 request_length; /* 0x09 */
+ u16 expander_change_count;
+ u8 reserved_1[3];
+ u8 phy_identifier;
+ u8 phy_operation;
+ u8 reserved_2[13];
+ u64 attached_device_name;
+ u8 programmed_min_physical_link_rate;
+ u8 programmed_max_physical_link_rate;
+ u8 reserved_3[6];
+};
+
+/* phy control reply structure */
+struct phy_control_reply {
+ u8 smp_frame_type; /* 0x41 */
+ u8 function; /* 0x91 */
+ u8 function_result;
+ u8 response_length;
+};
+
+#define SMP_PHY_CONTROL_LINK_RESET (0x01)
+#define SMP_PHY_CONTROL_HARD_RESET (0x02)
+#define SMP_PHY_CONTROL_DISABLE (0x03)
+
+/**
+ * _transport_expander_phy_control - expander phy control
+ * @ioc: per adapter object
+ * @phy: The sas phy object
+ * @phy_operation: smp phy control operation (link reset, hard reset or disable)
+ *
+ * Returns 0 for success, non-zero for failure.
+ *
+ */
+static int
+_transport_expander_phy_control(struct MPT3SAS_ADAPTER *ioc,
+ struct sas_phy *phy, u8 phy_operation)
+{
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ struct phy_control_request *phy_control_request;
+ struct phy_control_reply *phy_control_reply;
+ int rc;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u32 sgl_flags;
+ u8 issue_reset = 0;
+ void *data_out = NULL;
+ dma_addr_t data_out_dma;
+ u32 sz;
+ u16 wait_state_count;
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ mutex_lock(&ioc->transport_cmds.mutex);
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto out;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ sz = sizeof(struct phy_control_request) +
+ sizeof(struct phy_control_reply);
+ data_out = pci_alloc_consistent(ioc->pdev, sz, &data_out_dma);
+ if (!data_out) {
+ pr_err("failure at %s:%d/%s()!\n", __FILE__,
+ __LINE__, __func__);
+ rc = -ENOMEM;
+ mpt3sas_base_free_smid(ioc, smid);
+ goto out;
+ }
+
+ rc = -EINVAL;
+ memset(data_out, 0, sz);
+ phy_control_request = data_out;
+ phy_control_request->smp_frame_type = 0x40;
+ phy_control_request->function = 0x91;
+ phy_control_request->request_length = 9;
+ phy_control_request->allocated_response_length = 0;
+ phy_control_request->phy_identifier = phy->number;
+ phy_control_request->phy_operation = phy_operation;
+ phy_control_request->programmed_min_physical_link_rate =
+ phy->minimum_linkrate << 4;
+ phy_control_request->programmed_max_physical_link_rate =
+ phy->maximum_linkrate << 4;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->VF_ID = 0; /* TODO */
+ mpi_request->VP_ID = 0;
+ mpi_request->SASAddress = cpu_to_le64(phy->identify.sas_address);
+ mpi_request->RequestDataLength =
+ cpu_to_le16(sizeof(struct phy_control_request));
+ psge = &mpi_request->SGL;
+
+ /* WRITE sgel first */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_request), data_out_dma);
+
+ /* incr sgel */
+ psge += ioc->sge_size;
+
+ /* READ sgel last */
+ sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
+ MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
+ MPI2_SGE_FLAGS_END_OF_LIST);
+ sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
+ ioc->base_add_sg_single(psge, sgl_flags |
+ sizeof(struct phy_control_reply), data_out_dma +
+ sizeof(struct phy_control_request));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - send to sas_addr(0x%016llx), phy(%d), opcode(%d)\n",
+ ioc->name, (unsigned long long)phy->identify.sas_address,
+ phy->number, phy_operation));
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s: timeout\n",
+ ioc->name, __func__);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - complete\n", ioc->name));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - reply data transfer size(%d)\n",
+ ioc->name, le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ if (le16_to_cpu(mpi_reply->ResponseDataLength) !=
+ sizeof(struct phy_control_reply))
+ goto out;
+
+ phy_control_reply = data_out +
+ sizeof(struct phy_control_request);
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - function_result(%d)\n",
+ ioc->name, phy_control_reply->function_result));
+
+ rc = 0;
+ } else
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "phy_control - no reply\n", ioc->name));
+
+ issue_host_reset:
+ if (issue_reset)
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ if (data_out)
+ pci_free_consistent(ioc->pdev, sz, data_out, data_out_dma);
+
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
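+
+/*
+ * Illustrative note on the hand-built SGL above: unlike the error-log path,
+ * which uses ioc->build_sg(), this function chains two simple elements over
+ * one DMA buffer - a HOST_TO_IOC (write) element covering
+ * struct phy_control_request at data_out_dma, followed by a read element
+ * flagged LAST_ELEMENT | END_OF_BUFFER | END_OF_LIST covering
+ * struct phy_control_reply immediately after it.
+ */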
+
+/**
+ * _transport_phy_reset - reset a phy
+ * @phy: The sas phy object
+ * @hard_reset: when set, issue a hard reset rather than a link reset
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIoUnitControlReply_t mpi_reply;
+ Mpi2SasIoUnitControlRequest_t mpi_request;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (hard_reset == 1) ? SMP_PHY_CONTROL_HARD_RESET :
+ SMP_PHY_CONTROL_LINK_RESET);
+
+ /* handle hba phys */
+ memset(&mpi_request, 0, sizeof(Mpi2SasIoUnitControlRequest_t));
+ mpi_request.Function = MPI2_FUNCTION_SAS_IO_UNIT_CONTROL;
+ mpi_request.Operation = hard_reset ?
+ MPI2_SAS_OP_PHY_HARD_RESET : MPI2_SAS_OP_PHY_LINK_RESET;
+ mpi_request.PhyNum = phy->number;
+
+ if ((mpt3sas_base_sas_iounit_control(ioc, &mpi_reply, &mpi_request))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ return -ENXIO;
+ }
+
+ if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo)
+ pr_info(MPT3SAS_FMT
+ "phy(%d), ioc_status(0x%04x), loginfo(0x%08x)\n",
+ ioc->name, phy->number, le16_to_cpu(mpi_reply.IOCStatus),
+ le32_to_cpu(mpi_reply.IOCLogInfo));
+
+ return 0;
+}
+
+/**
+ * _transport_phy_enable - enable/disable phys
+ * @phy: The sas phy object
+ * @enable: enable phy when true
+ *
+ * Only support sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_enable(struct sas_phy *phy, int enable)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasIOUnitPage0_t *sas_iounit_pg0 = NULL;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int rc = 0;
+ unsigned long flags;
+ int i, discovery_active;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address)
+ return _transport_expander_phy_control(ioc, phy,
+ (enable == 1) ? SMP_PHY_CONTROL_LINK_RESET :
+ SMP_PHY_CONTROL_DISABLE);
+
+ /* handle hba phys */
+
+ /* read sas_iounit page 0 */
+ sz = offsetof(Mpi2SasIOUnitPage0_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit0PhyData_t));
+ sas_iounit_pg0 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg0) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg0(ioc, &mpi_reply,
+ sas_iounit_pg0, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* unable to enable/disable phys when discovery is active */
+ for (i = 0, discovery_active = 0; i < ioc->sas_hba.num_phys ; i++) {
+ if (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_DISCOVERY_IN_PROGRESS) {
+ pr_err(MPT3SAS_FMT "discovery is active on " \
+ "port = %d, phy = %d: unable to enable/disable "
+ "phys, try again later!\n", ioc->name,
+ sas_iounit_pg0->PhyData[i].Port, i);
+ discovery_active = 1;
+ }
+ }
+
+ if (discovery_active) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ /* read sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ /* copy Port/PortFlags/PhyFlags from page 0 */
+ for (i = 0; i < ioc->sas_hba.num_phys ; i++) {
+ sas_iounit_pg1->PhyData[i].Port =
+ sas_iounit_pg0->PhyData[i].Port;
+ sas_iounit_pg1->PhyData[i].PortFlags =
+ (sas_iounit_pg0->PhyData[i].PortFlags &
+ MPI2_SASIOUNIT0_PORTFLAGS_AUTO_PORT_CONFIG);
+ sas_iounit_pg1->PhyData[i].PhyFlags =
+ (sas_iounit_pg0->PhyData[i].PhyFlags &
+ (MPI2_SASIOUNIT0_PHYFLAGS_ZONING_ENABLED |
+ MPI2_SASIOUNIT0_PHYFLAGS_PHY_DISABLED));
+ }
+
+ if (enable)
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ &= ~MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+ else
+ sas_iounit_pg1->PhyData[phy->number].PhyFlags
+ |= MPI2_SASIOUNIT1_PHYFLAGS_PHY_DISABLE;
+
+ mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1, sz);
+
+ /* link reset */
+ if (enable)
+ _transport_phy_reset(phy, 0);
+
+ out:
+ kfree(sas_iounit_pg1);
+ kfree(sas_iounit_pg0);
+ return rc;
+}
+
+/**
+ * _transport_phy_speed - set phy min/max link rates
+ * @phy: The sas phy object
+ * @rates: rates defined in sas_phy_linkrates
+ *
+ * Only supports sas_host direct attached phys.
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_transport_phy_speed(struct sas_phy *phy, struct sas_phy_linkrates *rates)
+{
+ struct MPT3SAS_ADAPTER *ioc = phy_to_ioc(phy);
+ Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
+ Mpi2SasPhyPage0_t phy_pg0;
+ Mpi2ConfigReply_t mpi_reply;
+ u16 ioc_status;
+ u16 sz;
+ int i;
+ int rc = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ioc->sas_node_lock, flags);
+ if (_transport_sas_node_find_by_sas_address(ioc,
+ phy->identify.sas_address) == NULL) {
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+ return -EINVAL;
+ }
+ spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
+
+ if (!rates->minimum_linkrate)
+ rates->minimum_linkrate = phy->minimum_linkrate;
+ else if (rates->minimum_linkrate < phy->minimum_linkrate_hw)
+ rates->minimum_linkrate = phy->minimum_linkrate_hw;
+
+ if (!rates->maximum_linkrate)
+ rates->maximum_linkrate = phy->maximum_linkrate;
+ else if (rates->maximum_linkrate > phy->maximum_linkrate_hw)
+ rates->maximum_linkrate = phy->maximum_linkrate_hw;
+
+ /* handle expander phys */
+ if (phy->identify.sas_address != ioc->sas_hba.sas_address) {
+ phy->minimum_linkrate = rates->minimum_linkrate;
+ phy->maximum_linkrate = rates->maximum_linkrate;
+ return _transport_expander_phy_control(ioc, phy,
+ SMP_PHY_CONTROL_LINK_RESET);
+ }
+
+ /* handle hba phys */
+
+ /* sas_iounit page 1 */
+ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (ioc->sas_hba.num_phys *
+ sizeof(Mpi2SasIOUnit1PhyData_t));
+ sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
+ if (!sas_iounit_pg1) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+ if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
+ sas_iounit_pg1, sz))) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+ ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
+ MPI2_IOCSTATUS_MASK;
+ if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ioc->sas_hba.num_phys; i++) {
+ if (phy->number != i) {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (ioc->sas_hba.phy[i].phy->minimum_linkrate +
+ (ioc->sas_hba.phy[i].phy->maximum_linkrate << 4));
+ } else {
+ sas_iounit_pg1->PhyData[i].MaxMinLinkRate =
+ (rates->minimum_linkrate +
+ (rates->maximum_linkrate << 4));
+ }
+ }
+
+ if (mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
+ sz)) {
+ pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
+ ioc->name, __FILE__, __LINE__, __func__);
+ rc = -ENXIO;
+ goto out;
+ }
+
+ /* link reset */
+ _transport_phy_reset(phy, 0);
+
+ /* read phy page 0, then update the rates in the sas transport phy */
+ if (!mpt3sas_config_get_phy_pg0(ioc, &mpi_reply, &phy_pg0,
+ phy->number)) {
+ phy->minimum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate & MPI2_SAS_PRATE_MIN_RATE_MASK);
+ phy->maximum_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.ProgrammedLinkRate >> 4);
+ phy->negotiated_linkrate = _transport_convert_phy_link_rate(
+ phy_pg0.NegotiatedLinkRate &
+ MPI2_SAS_NEG_LINK_RATE_MASK_PHYSICAL);
+ }
+
+ out:
+ kfree(sas_iounit_pg1);
+ return rc;
+}
+
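+/*
+ * Encoding sketch for MaxMinLinkRate above: the low nibble carries the
+ * programmed minimum link rate and the high nibble the maximum.  The
+ * sas_linkrate codes happen to match the MPI2 rate codes, so a minimum of
+ * 1.5 Gb/s (0x8) with a maximum of 6.0 Gb/s (0xA) packs to
+ * (0x8 | (0xA << 4)) = 0xA8.
+ */
+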
+/**
+ * _transport_smp_handler - transport portal for smp passthru
+ * @shost: shost object
+ * @rphy: sas transport rphy object
+ * @req: the bsg request (the paired smp response is in req->next_rq)
+ *
+ * This is used primarily for smp_utils.
+ * Example:
+ * smp_rep_general /sys/class/bsg/expander-5:0
+ */
+static int
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+ struct request *req)
+{
+ struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+ Mpi2SmpPassthroughRequest_t *mpi_request;
+ Mpi2SmpPassthroughReply_t *mpi_reply;
+ int rc, i;
+ u16 smid;
+ u32 ioc_state;
+ unsigned long timeleft;
+ void *psge;
+ u8 issue_reset = 0;
+ dma_addr_t dma_addr_in = 0;
+ dma_addr_t dma_addr_out = 0;
+ dma_addr_t pci_dma_in = 0;
+ dma_addr_t pci_dma_out = 0;
+ void *pci_addr_in = NULL;
+ void *pci_addr_out = NULL;
+ u16 wait_state_count;
+ struct request *rsp = req->next_rq;
+ struct bio_vec *bvec = NULL;
+
+ if (!rsp) {
+ pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
+ ioc->name, __func__);
+ return -EINVAL;
+ }
+
+ if (ioc->shost_recovery || ioc->pci_error_recovery) {
+ pr_info(MPT3SAS_FMT "%s: host reset in progress!\n",
+ ioc->name, __func__);
+ return -EFAULT;
+ }
+
+ rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
+ if (rc)
+ return rc;
+
+ if (ioc->transport_cmds.status != MPT3_CMD_NOT_USED) {
+ pr_err(MPT3SAS_FMT "%s: transport_cmds in use\n", ioc->name,
+ __func__);
+ rc = -EAGAIN;
+ goto out;
+ }
+ ioc->transport_cmds.status = MPT3_CMD_PENDING;
+
+ /* Check if the request is split across multiple segments */
+ if (req->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+
+ /* Allocate memory and copy the request */
+ pci_addr_out = pci_alloc_consistent(ioc->pdev,
+ blk_rq_bytes(req), &pci_dma_out);
+ if (!pci_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ bio_for_each_segment(bvec, req->bio, i) {
+ memcpy(pci_addr_out + offset,
+ page_address(bvec->bv_page) + bvec->bv_offset,
+ bvec->bv_len);
+ offset += bvec->bv_len;
+ }
+ } else {
+ dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
+ blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_out) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr out = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto free_pci;
+ }
+ }
+
+ /* Check if the response needs to be populated across
+ * multiple segments */
+ if (rsp->bio->bi_vcnt > 1) {
+ pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
+ &pci_dma_in);
+ if (!pci_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): PCI Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ } else {
+ dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio),
+ blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
+ if (!dma_addr_in) {
+ pr_info(MPT3SAS_FMT "%s(): DMA Addr in = NULL\n",
+ ioc->name, __func__);
+ rc = -ENOMEM;
+ goto unmap;
+ }
+ }
+
+ wait_state_count = 0;
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
+ if (wait_state_count++ == 10) {
+ pr_err(MPT3SAS_FMT
+ "%s: failed due to ioc not operational\n",
+ ioc->name, __func__);
+ rc = -EFAULT;
+ goto unmap;
+ }
+ ssleep(1);
+ ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
+ pr_info(MPT3SAS_FMT
+ "%s: waiting for operational state(count=%d)\n",
+ ioc->name, __func__, wait_state_count);
+ }
+ if (wait_state_count)
+ pr_info(MPT3SAS_FMT "%s: ioc is operational\n",
+ ioc->name, __func__);
+
+ smid = mpt3sas_base_get_smid(ioc, ioc->transport_cb_idx);
+ if (!smid) {
+ pr_err(MPT3SAS_FMT "%s: failed obtaining a smid\n",
+ ioc->name, __func__);
+ rc = -EAGAIN;
+ goto unmap;
+ }
+
+ rc = 0;
+ mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
+ ioc->transport_cmds.smid = smid;
+
+ memset(mpi_request, 0, sizeof(Mpi2SmpPassthroughRequest_t));
+ mpi_request->Function = MPI2_FUNCTION_SMP_PASSTHROUGH;
+ mpi_request->PhysicalPort = 0xFF;
+ mpi_request->SASAddress = (rphy) ?
+ cpu_to_le64(rphy->identify.sas_address) :
+ cpu_to_le64(ioc->sas_hba.sas_address);
+ mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
+ psge = &mpi_request->SGL;
+
+ if (req->bio->bi_vcnt > 1)
+ ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
+ pci_dma_in, (blk_rq_bytes(rsp) + 4));
+ else
+ ioc->build_sg(ioc, psge, dma_addr_out, (blk_rq_bytes(req) - 4),
+ dma_addr_in, (blk_rq_bytes(rsp) + 4));
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - sending smp request\n", ioc->name, __func__));
+
+ init_completion(&ioc->transport_cmds.done);
+ mpt3sas_base_put_smid_default(ioc, smid);
+ timeleft = wait_for_completion_timeout(&ioc->transport_cmds.done,
+ 10*HZ);
+
+ if (!(ioc->transport_cmds.status & MPT3_CMD_COMPLETE)) {
+ pr_err(MPT3SAS_FMT "%s : timeout\n",
+ __func__, ioc->name);
+ _debug_dump_mf(mpi_request,
+ sizeof(Mpi2SmpPassthroughRequest_t)/4);
+ if (!(ioc->transport_cmds.status & MPT3_CMD_RESET))
+ issue_reset = 1;
+ goto issue_host_reset;
+ }
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - complete\n", ioc->name, __func__));
+
+ if (ioc->transport_cmds.status & MPT3_CMD_REPLY_VALID) {
+
+ mpi_reply = ioc->transport_cmds.reply;
+
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - reply data transfer size(%d)\n",
+ ioc->name, __func__,
+ le16_to_cpu(mpi_reply->ResponseDataLength)));
+
+ memcpy(req->sense, mpi_reply, sizeof(*mpi_reply));
+ req->sense_len = sizeof(*mpi_reply);
+ req->resid_len = 0;
+ rsp->resid_len -=
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+
+ /* check if the resp needs to be copied from the allocated
+ * pci mem */
+ if (rsp->bio->bi_vcnt > 1) {
+ u32 offset = 0;
+ u32 bytes_to_copy =
+ le16_to_cpu(mpi_reply->ResponseDataLength);
+ bio_for_each_segment(bvec, rsp->bio, i) {
+ if (bytes_to_copy <= bvec->bv_len) {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bytes_to_copy);
+ break;
+ } else {
+ memcpy(page_address(bvec->bv_page) +
+ bvec->bv_offset, pci_addr_in +
+ offset, bvec->bv_len);
+ bytes_to_copy -= bvec->bv_len;
+ }
+ offset += bvec->bv_len;
+ }
+ }
+ } else {
+ dtransportprintk(ioc, pr_info(MPT3SAS_FMT
+ "%s - no reply\n", ioc->name, __func__));
+ rc = -ENXIO;
+ }
+
+ issue_host_reset:
+ if (issue_reset) {
+ mpt3sas_base_hard_reset_handler(ioc, CAN_SLEEP,
+ FORCE_BIG_HAMMER);
+ rc = -ETIMEDOUT;
+ }
+
+ unmap:
+ if (dma_addr_out)
+ pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req),
+ PCI_DMA_BIDIRECTIONAL);
+ if (dma_addr_in)
+ pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp),
+ PCI_DMA_BIDIRECTIONAL);
+
+ free_pci:
+ if (pci_addr_out)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(req), pci_addr_out,
+ pci_dma_out);
+
+ if (pci_addr_in)
+ pci_free_consistent(ioc->pdev, blk_rq_bytes(rsp), pci_addr_in,
+ pci_dma_in);
+
+ out:
+ ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
+ mutex_unlock(&ioc->transport_cmds.mutex);
+ return rc;
+}
+
+struct sas_function_template mpt3sas_transport_functions = {
+ .get_linkerrors = _transport_get_linkerrors,
+ .get_enclosure_identifier = _transport_get_enclosure_identifier,
+ .get_bay_identifier = _transport_get_bay_identifier,
+ .phy_reset = _transport_phy_reset,
+ .phy_enable = _transport_phy_enable,
+ .set_phy_speed = _transport_phy_speed,
+ .smp_handler = _transport_smp_handler,
+};
+
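+/*
+ * Wiring sketch (the call itself lives in the driver's module init, not
+ * in this file):
+ *
+ *	mpt3sas_transport_template =
+ *		sas_attach_transport(&mpt3sas_transport_functions);
+ */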
+struct scsi_transport_template *mpt3sas_transport_template;
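
For context, user space drives _transport_smp_handler() through the bsg node
named in its kernel-doc. The following is a minimal sketch, not part of the
patch: it issues an SMP REPORT GENERAL the way smp_utils does, assuming the
sg_io_v4 bsg interface and an illustrative /dev/bsg/expander-5:0 device node;
both buffers include the trailing CRC dwords, which the controller fills in
(hence the -4/+4 length adjustments in the handler).

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>
#include <linux/bsg.h>

int main(void)
{
	unsigned char smp_req[8] = { 0x40, 0x00 };	/* REPORT GENERAL + CRC pad */
	unsigned char smp_resp[36] = { 0 };		/* response frame + CRC */
	struct sg_io_v4 hdr;
	int fd, i;

	fd = open("/dev/bsg/expander-5:0", O_RDWR);	/* node name illustrative */
	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	hdr.dout_xfer_len = sizeof(smp_req);		/* SMP request frame */
	hdr.dout_xferp = (uintptr_t)smp_req;
	hdr.din_xfer_len = sizeof(smp_resp);		/* SMP response frame */
	hdr.din_xferp = (uintptr_t)smp_resp;
	hdr.timeout = 20000;				/* milliseconds */
	if (ioctl(fd, SG_IO, &hdr) < 0) {
		perror("SG_IO");
		close(fd);
		return 1;
	}
	for (i = 0; i < (int)sizeof(smp_resp); i++)
		printf("%02x%c", smp_resp[i], (i % 16 == 15) ? '\n' : ' ');
	printf("\n");
	close(fd);
	return 0;
}
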
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
new file mode 100644
index 000000000000..6f8d6213040b
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -0,0 +1,433 @@
+/*
+ * This module provides common API to set Diagnostic trigger for MPT
+ * (Message Passing Technology) based controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/compat.h>
+#include <linux/poll.h>
+
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#include "mpt3sas_base.h"
+
+/**
+ * _mpt3sas_raise_sigio - notify app
+ * @ioc: per adapter object
+ * @event_data: the trigger event data to log
+ */
+static void
+_mpt3sas_raise_sigio(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ Mpi2EventNotificationReply_t *mpi_reply;
+ u16 sz, event_data_sz;
+ unsigned long flags;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4;
+ mpi_reply = kzalloc(sz, GFP_KERNEL);
+ if (!mpi_reply)
+ goto out;
+ mpi_reply->Event = cpu_to_le16(MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED);
+ event_data_sz = (sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T) + 4) / 4;
+ mpi_reply->EventDataLength = cpu_to_le16(event_data_sz);
+ memcpy(&mpi_reply->EventData, event_data,
+ sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: add to driver event log\n",
+ ioc->name, __func__));
+ mpt3sas_ctl_add_to_event_log(ioc, mpi_reply);
+ kfree(mpi_reply);
+ out:
+
+ /* clearing the diag_trigger_active flag */
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: clearing diag_trigger_active flag\n",
+ ioc->name, __func__));
+ ioc->diag_trigger_active = 0;
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_process_trigger_data - process the event data for the trigger
+ * @ioc: per adapter object
+ * @event_data: the trigger event data to process
+ */
+void
+mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
+ struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data)
+{
+ u8 issue_reset = 0;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: enter\n",
+ ioc->name, __func__));
+
+ /* release the diag buffer trace */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: release trace diag buffer\n", ioc->name, __func__));
+ mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
+ &issue_reset);
+ }
+
+ _mpt3sas_raise_sigio(ioc, event_data);
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_master - Master trigger handler
+ * @ioc: per adapter object
+ * @trigger_bitmask: mask of MASTER_TRIGGER_XXX bits that fired
+ *
+ */
+void
+mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ unsigned long flags;
+ u8 found_match = 0;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ goto by_pass_checks;
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ by_pass_checks:
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - trigger_bitmask = 0x%08x\n",
+ ioc->name, __func__, trigger_bitmask));
+
+ /* don't send a trigger if one is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ if (ioc->diag_trigger_master.MasterData & trigger_bitmask) {
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MASTER;
+ event_data.u.master.MasterData = trigger_bitmask;
+
+ if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
+ trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
+ _mpt3sas_raise_sigio(ioc, &event_data);
+ else
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_event - Event trigger handler
+ * @ioc: per adapter object
+ * @event: firmware event code
+ * @log_entry_qualifier: log entry qualifier (Log Entry Added events only)
+ *
+ */
+void
+mpt3sas_trigger_event(struct MPT3SAS_ADAPTER *ioc, u16 event,
+ u16 log_entry_qualifier)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_EVENT_TRIGGER_T *event_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - event = 0x%04x, log_entry_qualifier = 0x%04x\n",
+ ioc->name, __func__, event, log_entry_qualifier));
+
+ /* don't send a trigger if one is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ event_trigger = ioc->diag_trigger_event.EventTriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_event.ValidEntries
+ && !found_match; i++, event_trigger++) {
+ if (event_trigger->EventValue != event)
+ continue;
+ if (event == MPI2_EVENT_LOG_ENTRY_ADDED) {
+ if (event_trigger->LogEntryQualifier ==
+ log_entry_qualifier)
+ found_match = 1;
+ continue;
+ }
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_EVENT;
+ event_data.u.event.EventValue = event;
+ event_data.u.event.LogEntryQualifier = log_entry_qualifier;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_scsi - SCSI trigger handler
+ * @ioc: per adapter object
+ * @sense_key: sense key
+ * @asc: additional sense code
+ * @ascq: additional sense code qualifier
+ *
+ */
+void
+mpt3sas_trigger_scsi(struct MPT3SAS_ADAPTER *ioc, u8 sense_key, u8 asc,
+ u8 ascq)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_SCSI_TRIGGER_T *scsi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - sense_key = 0x%02x, asc = 0x%02x, ascq = 0x%02x\n",
+ ioc->name, __func__, sense_key, asc, ascq));
+
+ /* don't send a trigger if one is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ scsi_trigger = ioc->diag_trigger_scsi.SCSITriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_scsi.ValidEntries
+ && !found_match; i++, scsi_trigger++) {
+ if (scsi_trigger->SenseKey != sense_key)
+ continue;
+ if (!(scsi_trigger->ASC == 0xFF || scsi_trigger->ASC == asc))
+ continue;
+ if (!(scsi_trigger->ASCQ == 0xFF || scsi_trigger->ASCQ == ascq))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_SCSI;
+ event_data.u.scsi.SenseKey = sense_key;
+ event_data.u.scsi.ASC = asc;
+ event_data.u.scsi.ASCQ = ascq;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
+
+/**
+ * mpt3sas_trigger_mpi - MPI trigger handler
+ * @ioc: per adapter object
+ * @ioc_status: MPI IOCStatus
+ * @loginfo: MPI IocLogInfo
+ *
+ */
+void
+mpt3sas_trigger_mpi(struct MPT3SAS_ADAPTER *ioc, u16 ioc_status, u32 loginfo)
+{
+ struct SL_WH_TRIGGERS_EVENT_DATA_T event_data;
+ struct SL_WH_MPI_TRIGGER_T *mpi_trigger;
+ int i;
+ unsigned long flags;
+ u8 found_match;
+
+ spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
+
+ /* check to see if trace buffers are currently registered */
+ if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ /* check to see if trace buffers are currently released */
+ if (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
+ MPT3_DIAG_BUFFER_IS_RELEASED) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ return;
+ }
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: enter - ioc_status = 0x%04x, loginfo = 0x%08x\n",
+ ioc->name, __func__, ioc_status, loginfo));
+
+ /* don't send a trigger if one is currently active */
+ if (ioc->diag_trigger_active) {
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+ goto out;
+ }
+
+ /* check for the trigger condition */
+ mpi_trigger = ioc->diag_trigger_mpi.MPITriggerEntry;
+ for (i = 0, found_match = 0; i < ioc->diag_trigger_mpi.ValidEntries
+ && !found_match; i++, mpi_trigger++) {
+ if (mpi_trigger->IOCStatus != ioc_status)
+ continue;
+ if (!(mpi_trigger->IocLogInfo == 0xFFFFFFFF ||
+ mpi_trigger->IocLogInfo == loginfo))
+ continue;
+ found_match = 1;
+ ioc->diag_trigger_active = 1;
+ }
+ spin_unlock_irqrestore(&ioc->diag_trigger_lock, flags);
+
+ if (!found_match)
+ goto out;
+
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT
+ "%s: setting diag_trigger_active flag\n",
+ ioc->name, __func__));
+ memset(&event_data, 0, sizeof(struct SL_WH_TRIGGERS_EVENT_DATA_T));
+ event_data.trigger_type = MPT3SAS_TRIGGER_MPI;
+ event_data.u.mpi.IOCStatus = ioc_status;
+ event_data.u.mpi.IocLogInfo = loginfo;
+ mpt3sas_send_trigger_data_event(ioc, &event_data);
+ out:
+ dTriggerDiagPrintk(ioc, pr_info(MPT3SAS_FMT "%s: exit\n", ioc->name,
+ __func__));
+}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
new file mode 100644
index 000000000000..a10c30907394
--- /dev/null
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -0,0 +1,193 @@
+/*
+ * This is the Fusion MPT base driver providing common API layer interface
+ * to set Diagnostic triggers for MPT (Message Passing Technology) based
+ * controllers
+ *
+ * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
+ * Copyright (C) 2012 LSI Corporation
+ * (mailto:DL-MPTFusionLinux@lsi.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * NO WARRANTY
+ * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+ * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+ * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+ * solely responsible for determining the appropriateness of using and
+ * distributing the Program and assumes all risks associated with its
+ * exercise of rights under this Agreement, including but not limited to
+ * the risks and costs of program errors, damage to or loss of data,
+ * programs or equipment, and unavailability or interruption of operations.
+
+ * DISCLAIMER OF LIABILITY
+ * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+ * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+ * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
+ * USA.
+ */
+ /* Diagnostic Trigger Configuration Data Structures */
+
+#ifndef MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+#define MPT3SAS_TRIGGER_DIAG_H_INCLUDED
+
+/* limitation on number of entries */
+#define NUM_VALID_ENTRIES (20)
+
+/* trigger types */
+#define MPT3SAS_TRIGGER_MASTER (1)
+#define MPT3SAS_TRIGGER_EVENT (2)
+#define MPT3SAS_TRIGGER_SCSI (3)
+#define MPT3SAS_TRIGGER_MPI (4)
+
+/* trigger names */
+#define MASTER_TRIGGER_FILE_NAME "diag_trigger_master"
+#define EVENT_TRIGGERS_FILE_NAME "diag_trigger_event"
+#define SCSI_TRIGGERS_FILE_NAME "diag_trigger_scsi"
+#define MPI_TRIGGER_FILE_NAME "diag_trigger_mpi"
+
+/* master trigger bitmask */
+#define MASTER_TRIGGER_FW_FAULT (0x00000001)
+#define MASTER_TRIGGER_ADAPTER_RESET (0x00000002)
+#define MASTER_TRIGGER_TASK_MANAGMENT (0x00000004)
+#define MASTER_TRIGGER_DEVICE_REMOVAL (0x00000008)
+
+/* fake firmware event for trigger */
+#define MPI3_EVENT_DIAGNOSTIC_TRIGGER_FIRED (0x6E)
+
+/**
+ * MasterTrigger is a single U32 passed to/from sysfs.
+ *
+ * Bit Flags (enables) include:
+ * 1. FW Faults
+ * 2. Adapter Reset issued by driver
+ * 3. TMs
+ * 4. Device Remove Event sent by FW
+ */
+
+struct SL_WH_MASTER_TRIGGER_T {
+ uint32_t MasterData;
+};
+
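+/*
+ * Usage sketch: the struct is written/read as a raw blob, e.g. a write()
+ * of a host-endian u32 to /sys/class/scsi_host/host<n>/diag_trigger_master
+ * (path illustrative; the attribute names are the *_FILE_NAME defines
+ * above).
+ */
+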
+/**
+ * struct SL_WH_EVENT_TRIGGER_T - Definition of an event trigger element
+ * @EventValue: Event Code to trigger on
+ * @LogEntryQualifier: Type of FW event that was logged (Log Entry Added Event only)
+ *
+ * Defines an event that should induce a DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_EVENT_TRIGGER_T {
+ uint16_t EventValue;
+ uint16_t LogEntryQualifier;
+};
+
+/**
+ * struct SL_WH_EVENT_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of Event Triggers to be monitored for.
+ * @ValidEntries: Number of _SL_WH_EVENT_TRIGGER_T structures contained in this
+ * structure.
+ * @EventTriggerEntry: List of Event trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set Event Triggers
+ * in the Linux Driver.
+ */
+
+struct SL_WH_EVENT_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_EVENT_TRIGGER_T EventTriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGER_T - Definition of a SCSI trigger element
+ * @ASCQ: Additional Sense Code Qualifier. Can be specific or 0xFF for
+ * wildcard.
+ * @ASC: Additional Sense Code. Can be specific or 0xFF for wildcard
+ * @SenseKey: SCSI Sense Key
+ *
+ * Defines a sense key (single or many variants) that should induce a
+ * DIAG_TRIGGER driver event if observed.
+ */
+struct SL_WH_SCSI_TRIGGER_T {
+ U8 ASCQ;
+ U8 ASC;
+ U8 SenseKey;
+ U8 Reserved;
+};
+
+/**
+ * struct SL_WH_SCSI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of SCSI sense codes that should trigger a DIAG_SERVICE event when
+ * observed.
+ * @ValidEntries: Number of _SL_WH_SCSI_TRIGGER_T structures contained in this
+ * structure.
+ * @SCSITriggerEntry: List of SCSI Sense Code trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set SCSI Sense Code
+ * Triggers in the Linux Driver.
+ */
+struct SL_WH_SCSI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_SCSI_TRIGGER_T SCSITriggerEntry[NUM_VALID_ENTRIES];
+};
+
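+/*
+ * Example (sketch): a single entry with SenseKey = 0x3 and ASC = ASCQ =
+ * 0xFF triggers on any MEDIUM ERROR regardless of the additional sense
+ * code, per the wildcard handling in mpt3sas_trigger_scsi().
+ */
+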
+/**
+ * struct SL_WH_MPI_TRIGGER_T - Definition of an MPI trigger element
+ * @IOCStatus: MPI IOCStatus
+ * @IocLogInfo: MPI IocLogInfo. Can be specific or 0xFFFFFFFF for wildcard
+ *
+ * Defines an MPI IOCStatus/IocLogInfo pair that should induce a DIAG_TRIGGER
+ * driver event if observed.
+ */
+struct SL_WH_MPI_TRIGGER_T {
+ uint16_t IOCStatus;
+ uint16_t Reserved;
+ uint32_t IocLogInfo;
+};
+
+/**
+ * struct SL_WH_MPI_TRIGGERS_T - Structure passed to/from sysfs containing a
+ * list of MPI IOCStatus/IocLogInfo pairs that should trigger a DIAG_SERVICE
+ * event when observed.
+ * @ValidEntries: Number of _SL_WH_MPI_TRIGGER_T structures contained in this
+ * structure.
+ * @MPITriggerEntry: List of MPI IOCStatus/IocLogInfo trigger elements.
+ *
+ * This binary structure is transferred via sysfs to get/set MPI Error Triggers
+ * in the Linux Driver.
+ */
+struct SL_WH_MPI_TRIGGERS_T {
+ uint32_t ValidEntries;
+ struct SL_WH_MPI_TRIGGER_T MPITriggerEntry[NUM_VALID_ENTRIES];
+};
+
+/**
+ * struct SL_WH_TRIGGERS_EVENT_DATA_T - event data for trigger
+ * @trigger_type: trigger type (see MPT3SAS_TRIGGER_XXXX)
+ * @u: trigger condition that caused trigger to be sent
+ */
+struct SL_WH_TRIGGERS_EVENT_DATA_T {
+ uint32_t trigger_type;
+ union {
+ struct SL_WH_MASTER_TRIGGER_T master;
+ struct SL_WH_EVENT_TRIGGER_T event;
+ struct SL_WH_SCSI_TRIGGER_T scsi;
+ struct SL_WH_MPI_TRIGGER_T mpi;
+ } u;
+};
+#endif /* MPT3SAS_TRIGGER_DIAG_H_INCLUDED */
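
The comments above note that these structures cross sysfs as raw binary
blobs. A minimal user-space sketch of arming the master trigger, assuming
the attributes appear under the SCSI host's sysfs directory (host number
and path illustrative; the struct is mirrored locally with fixed-width
types):

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

/* local mirror of struct SL_WH_MASTER_TRIGGER_T */
struct master_trigger {
	uint32_t MasterData;
};

int main(void)
{
	/* MASTER_TRIGGER_FW_FAULT | MASTER_TRIGGER_ADAPTER_RESET */
	struct master_trigger t = { .MasterData = 0x00000001 | 0x00000002 };
	int fd = open("/sys/class/scsi_host/host0/diag_trigger_master",
		      O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, &t, sizeof(t)) != (ssize_t)sizeof(t))
		perror("write");
	close(fd);
	return 0;
}
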
diff --git a/drivers/scsi/mvme16x_scsi.c b/drivers/scsi/mvme16x_scsi.c
index 39f554f5f261..8fbb97a8bfd3 100644
--- a/drivers/scsi/mvme16x_scsi.c
+++ b/drivers/scsi/mvme16x_scsi.c
@@ -34,8 +34,7 @@ static struct scsi_host_template mvme16x_scsi_driver_template = {
static struct platform_device *mvme16x_scsi_device;
-static __devinit int
-mvme16x_probe(struct platform_device *dev)
+static int mvme16x_probe(struct platform_device *dev)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata;
@@ -103,8 +102,7 @@ mvme16x_probe(struct platform_device *dev)
return -ENODEV;
}
-static __devexit int
-mvme16x_device_remove(struct platform_device *dev)
+static int mvme16x_device_remove(struct platform_device *dev)
{
struct Scsi_Host *host = platform_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -131,7 +129,7 @@ static struct platform_driver mvme16x_scsi_driver = {
.owner = THIS_MODULE,
},
.probe = mvme16x_probe,
- .remove = __devexit_p(mvme16x_device_remove),
+ .remove = mvme16x_device_remove,
};
static int __init mvme16x_scsi_init(void)
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
index 8ba47229049f..8bb06995adfb 100644
--- a/drivers/scsi/mvsas/mv_64xx.c
+++ b/drivers/scsi/mvsas/mv_64xx.c
@@ -41,7 +41,7 @@ static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
phy->phy_type |= PORT_TYPE_SATA;
}
-static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+static void mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -54,7 +54,7 @@ static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
mw32(MVS_PCS, tmp);
}
-static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+static void mvs_64xx_phy_hacks(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
@@ -156,7 +156,7 @@ void mvs_64xx_clear_srs_irq(struct mvs_info *mvi, u8 reg_set, u8 clear_all)
}
}
-static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+static int mvs_64xx_chip_reset(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -250,7 +250,7 @@ static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
}
}
-static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+static int mvs_64xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
index 7e423e5ad5e1..1e4479f3331a 100644
--- a/drivers/scsi/mvsas/mv_94xx.c
+++ b/drivers/scsi/mvsas/mv_94xx.c
@@ -216,8 +216,7 @@ void set_phy_rate(struct mvs_info *mvi, int phy_id, u8 rate)
mvs_write_port_vsr_data(mvi, phy_id, phy_cfg.v);
}
-static void __devinit
-mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
+static void mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
{
u32 temp;
temp = (u32)(*(u32 *)&mvi->hba_info_param.phy_tuning[phy_id]);
@@ -258,7 +257,7 @@ mvs_94xx_config_reg_from_hba(struct mvs_info *mvi, int phy_id)
mvi->hba_info_param.phy_rate[phy_id]);
}
-static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+static void mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
{
void __iomem *regs = mvi->regs;
u32 tmp;
@@ -331,7 +330,7 @@ static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
mvs_write_port_vsr_data(mvi, phy_id, tmp & 0xfd7fffff);
}
-static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+static int mvs_94xx_init(struct mvs_info *mvi)
{
void __iomem *regs = mvi->regs;
int i;
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
index 8f7eb4f21140..487aa6f97412 100644
--- a/drivers/scsi/mvsas/mv_94xx.h
+++ b/drivers/scsi/mvsas/mv_94xx.h
@@ -258,21 +258,11 @@ enum sas_sata_phy_regs {
#define SPI_ADDR_VLD_94XX (1U << 1)
#define SPI_CTRL_SpiStart_94XX (1U << 0)
-#define mv_ffc(x) ffz(x)
-
static inline int
mv_ffc64(u64 v)
{
- int i;
- i = mv_ffc((u32)v);
- if (i >= 0)
- return i;
- i = mv_ffc((u32)(v>>32));
-
- if (i != 0)
- return 32 + i;
-
- return -1;
+ u64 x = ~v;
+ return x ? __ffs64(x) : -1;
}
#define r_reg_set_enable(i) \
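
The mv_ffc64() rewrite above deserves a worked check: the helper returns the
index of the first clear bit in a 64-bit mask, or -1 when every bit is set,
and __ffs64() of the complement computes exactly that. The old open-coded
split across two 32-bit halves mishandled the boundary cases (ffz() is
undefined when the low word is all ones, and a first clear bit at position 32
was reported as -1). A host-side mirror, substituting the GCC builtin for the
kernel's __ffs64():

#include <stdio.h>

/* user-space mirror of the new mv_ffc64(): first clear bit, -1 if none */
static int mv_ffc64_demo(unsigned long long v)
{
	unsigned long long x = ~v;

	return x ? (int)__builtin_ctzll(x) : -1;
}

int main(void)
{
	printf("%d\n", mv_ffc64_demo(0x0ULL));		/* 0: bit 0 clear     */
	printf("%d\n", mv_ffc64_demo(0xffffffffULL));	/* 32: low word full  */
	printf("%d\n", mv_ffc64_demo(~0ULL));		/* -1: no clear bit   */
	return 0;
}
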
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
index bcc408042cee..8c4479ab49e8 100644
--- a/drivers/scsi/mvsas/mv_chips.h
+++ b/drivers/scsi/mvsas/mv_chips.h
@@ -160,7 +160,7 @@ static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
MVS_P4_INT_MASK, port, val);
}
-static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+static inline void mvs_phy_hacks(struct mvs_info *mvi)
{
u32 tmp;
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
index cc59dff3810b..ce90d0546cdd 100644
--- a/drivers/scsi/mvsas/mv_init.c
+++ b/drivers/scsi/mvsas/mv_init.c
@@ -96,7 +96,7 @@ static struct sas_domain_function_template mvs_transport_ops = {
};
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+static void mvs_phy_init(struct mvs_info *mvi, int phy_id)
{
struct mvs_phy *phy = &mvi->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -235,7 +235,7 @@ static irqreturn_t mvs_interrupt(int irq, void *opaque)
return IRQ_HANDLED;
}
-static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
{
int i = 0, slot_nr;
char pool_name[32];
@@ -373,7 +373,7 @@ void mvs_iounmap(void __iomem *regs)
iounmap(regs);
}
-static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+static struct mvs_info *mvs_pci_alloc(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct Scsi_Host *shost, unsigned int id)
{
@@ -444,7 +444,7 @@ static int pci_go_64(struct pci_dev *pdev)
return rc;
}
-static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+static int mvs_prep_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int phy_nr, port_nr; unsigned short core_nr;
@@ -486,7 +486,7 @@ exit_free:
}
-static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+static void mvs_post_sas_ha_init(struct Scsi_Host *shost,
const struct mvs_chip_info *chip_info)
{
int can_queue, i = 0, j = 0;
@@ -537,8 +537,7 @@ static void mvs_init_sas_add(struct mvs_info *mvi)
memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
}
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int mvs_pci_init(struct pci_dev *pdev, const struct pci_device_id *ent)
{
unsigned int rc, nhost = 0;
struct mvs_info *mvi;
@@ -645,7 +644,7 @@ err_out_enable:
return rc;
}
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+static void mvs_pci_remove(struct pci_dev *pdev)
{
unsigned short core_nr, i = 0;
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
@@ -677,7 +676,7 @@ static void __devexit mvs_pci_remove(struct pci_dev *pdev)
return;
}
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
+static struct pci_device_id mvs_pci_table[] = {
{ PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
{ PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
{
@@ -748,7 +747,7 @@ static struct pci_driver mvs_pci_driver = {
.name = DRV_NAME,
.id_table = mvs_pci_table,
.probe = mvs_pci_init,
- .remove = __devexit_p(mvs_pci_remove),
+ .remove = mvs_pci_remove,
};
static ssize_t
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a3776d6ced60..532110f4562a 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -220,8 +220,8 @@ int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
return rc;
}
-void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
- u32 off_lo, u32 off_hi, u64 sas_addr)
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr)
{
u32 lo = (u32)sas_addr;
u32 hi = (u32)(sas_addr>>32);
@@ -316,10 +316,13 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
int elem, rc, i;
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct domain_device *dev = task->dev;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct scatterlist *sg_req, *sg_resp;
u32 req_len, resp_len, tag = tei->tag;
void *buf_tmp;
@@ -392,7 +395,7 @@ static int mvs_task_prep_smp(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
TXQ_MODE_I | tag |
- (sas_port->phy_mask << TXQ_PHY_SHIFT));
+ (MVS_PHY_ID << TXQ_PHY_SHIFT));
hdr->flags |= flags;
hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
@@ -438,11 +441,14 @@ static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
static int mvs_task_prep_ata(struct mvs_info *mvi,
struct mvs_task_exec_info *tei)
{
+ struct sas_ha_struct *sha = mvi->sas;
struct sas_task *task = tei->task;
struct domain_device *dev = task->dev;
struct mvs_device *mvi_dev = dev->lldd_dev;
struct mvs_cmd_hdr *hdr = tei->hdr;
struct asd_sas_port *sas_port = dev->port;
+ struct sas_phy *sphy = dev->phy;
+ struct asd_sas_phy *sas_phy = sha->sas_phy[sphy->number];
struct mvs_slot_info *slot;
void *buf_prd;
u32 tag = tei->tag, hdr_tag;
@@ -462,7 +468,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
slot->tx = mvi->tx_prod;
del_q = TXQ_MODE_I | tag |
(TXQ_CMD_STP << TXQ_CMD_SHIFT) |
- (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+ (MVS_PHY_ID << TXQ_PHY_SHIFT) |
(mvi_dev->taskfileset << TXQ_SRS_SHIFT);
mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
index c04a4f5b5972..9f3cc13a5ce7 100644
--- a/drivers/scsi/mvsas/mv_sas.h
+++ b/drivers/scsi/mvsas/mv_sas.h
@@ -69,13 +69,14 @@ extern struct kmem_cache *mvs_task_list_cache;
#define DEV_IS_EXPANDER(type) \
((type == EDGE_DEV) || (type == FANOUT_DEV))
-#define bit(n) ((u32)1 << n)
+#define bit(n) ((u64)1 << n)
#define for_each_phy(__lseq_mask, __mc, __lseq) \
for ((__mc) = (__lseq_mask), (__lseq) = 0; \
(__mc) != 0 ; \
(++__lseq), (__mc) >>= 1)
+#define MVS_PHY_ID (1U << sas_phy->id)
#define MV_INIT_DELAYED_WORK(w, f, d) INIT_DELAYED_WORK(w, f)
#define UNASSOC_D2H_FIS(id) \
((void *) mvi->rx_fis + 0x100 * id)
@@ -456,8 +457,8 @@ int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
void *funcdata);
-void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
- u32 off_lo, u32 off_hi, u64 sas_addr);
+void mvs_set_sas_addr(struct mvs_info *mvi, int port_id, u32 off_lo,
+ u32 off_hi, u64 sas_addr);
void mvs_scan_start(struct Scsi_Host *shost);
int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
int mvs_queue_command(struct sas_task *task, const int num,
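
Two related type fixes ride along in mv_sas.h: bit(n) is widened to u64 so
masks with indexes past 31 don't silently truncate, and the new MVS_PHY_ID
macro gives the task-prep paths a single-phy mask built from the local
sas_phy variable instead of the whole port's phy_mask. A quick sketch of why
the cast matters:

#include <stdio.h>

int main(void)
{
	/* old macro: (u32)1 << n is undefined for n >= 32; high bits lost */
	unsigned int old_bit8 = (unsigned int)1 << 8;

	/* new macro: (u64)1 << n stays well defined across all 64 bits */
	unsigned long long new_bit40 = (unsigned long long)1 << 40;

	printf("bit(8)  = %#x\n", old_bit8);	/* 0x100 with either type */
	printf("bit(40) = %#llx\n", new_bit40);	/* 0x10000000000, u64 only */
	return 0;
}
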
diff --git a/drivers/scsi/mvumi.c b/drivers/scsi/mvumi.c
index c585a925b3cd..4594ccaaf49b 100644
--- a/drivers/scsi/mvumi.c
+++ b/drivers/scsi/mvumi.c
@@ -2506,8 +2506,7 @@ fail_add_device:
* @pdev: PCI device structure
* @id: PCI ids of supported hotplugged adapter
*/
-static int __devinit mvumi_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct Scsi_Host *host;
struct mvumi_hba *mhba;
@@ -2728,7 +2727,7 @@ static struct pci_driver mvumi_pci_driver = {
.name = MV_DRIVER_NAME,
.id_table = mvumi_pci_table,
.probe = mvumi_probe_one,
- .remove = __devexit_p(mvumi_detach_one),
+ .remove = mvumi_detach_one,
.shutdown = mvumi_shutdown,
#ifdef CONFIG_PM
.suspend = mvumi_suspend,
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 62b616891a33..1cc0c1c69c88 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -76,7 +76,7 @@ static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
-static struct pci_device_id nsp32_pci_table[] __devinitdata = {
+static struct pci_device_id nsp32_pci_table[] = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
@@ -186,10 +186,10 @@ static nsp32_sync_table nsp32_sync_table_pci[] = {
* function declaration
*/
/* module entry point */
-static int __devinit nsp32_probe (struct pci_dev *, const struct pci_device_id *);
-static void __devexit nsp32_remove(struct pci_dev *);
-static int __init init_nsp32 (void);
-static void __exit exit_nsp32 (void);
+static int nsp32_probe (struct pci_dev *, const struct pci_device_id *);
+static void nsp32_remove(struct pci_dev *);
+static int __init init_nsp32 (void);
+static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
@@ -3382,7 +3382,7 @@ static int nsp32_resume(struct pci_dev *pdev)
/************************************************************************
* PCI/Cardbus probe/remove routine
*/
-static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret;
nsp32_hw_data *data = &nsp32_data_base;
@@ -3418,7 +3418,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
return ret;
}
-static void __devexit nsp32_remove(struct pci_dev *pdev)
+static void nsp32_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -3435,7 +3435,7 @@ static struct pci_driver nsp32_driver = {
.name = "nsp32",
.id_table = nsp32_pci_table,
.probe = nsp32_probe,
- .remove = __devexit_p(nsp32_remove),
+ .remove = nsp32_remove,
#ifdef CONFIG_PM
.suspend = nsp32_suspend,
.resume = nsp32_resume,
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index c06b8e5aa2cf..d8293f25ca33 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -144,6 +144,10 @@ static int _osd_get_print_system_info(struct osd_dev *od,
odi->osdname_len = get_attrs[a].len;
/* Avoid NULL for memcmp optimization 0-length is good enough */
odi->osdname = kzalloc(odi->osdname_len + 1, GFP_KERNEL);
+ if (!odi->osdname) {
+ ret = -ENOMEM;
+ goto out;
+ }
if (odi->osdname_len)
memcpy(odi->osdname, get_attrs[a].val_ptr, odi->osdname_len);
OSD_INFO("OSD_NAME [%s]\n", odi->osdname);
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index d4ed9eb52657..43754176a7b7 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -97,9 +97,37 @@ struct osd_dev_handle {
static DEFINE_IDA(osd_minor_ida);
+/*
+ * scsi sysfs attribute operations
+ */
+static ssize_t osdname_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+ return sprintf(buf, "%s\n", ould->odi.osdname);
+}
+
+static ssize_t systemid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct osd_uld_device *ould = container_of(dev, struct osd_uld_device,
+ class_dev);
+
+ memcpy(buf, ould->odi.systemid, ould->odi.systemid_len);
+ return ould->odi.systemid_len;
+}
+
+static struct device_attribute osd_uld_attrs[] = {
+ __ATTR(osdname, S_IRUGO, osdname_show, NULL),
+ __ATTR(systemid, S_IRUGO, systemid_show, NULL),
+ __ATTR_NULL,
+};
+
static struct class osd_uld_class = {
.owner = THIS_MODULE,
.name = "scsi_osd",
+ .dev_attrs = osd_uld_attrs,
};
/*
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index bf54aafc2d71..b8dd05074abb 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -47,7 +47,7 @@
* read_main_config_table - read the configure table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
+static void read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00);
@@ -83,8 +83,7 @@ static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
* read_general_status_table - read the general status table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_general_status_table(struct pm8001_hba_info *pm8001_ha)
+static void read_general_status_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->general_stat_tbl_addr;
pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00);
@@ -118,8 +117,7 @@ read_general_status_table(struct pm8001_hba_info *pm8001_ha)
* read_inbnd_queue_table - read the inbound queue table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+static void read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int inbQ_num = 1;
int i;
@@ -137,8 +135,7 @@ read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
* read_outbnd_queue_table - read the outbound queue table and save it.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
+static void read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
{
int outbQ_num = 1;
int i;
@@ -156,8 +153,7 @@ read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha)
* init_default_table_values - init the default table.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-init_default_table_values(struct pm8001_hba_info *pm8001_ha)
+static void init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
int qn = 1;
int i;
@@ -250,8 +246,7 @@ init_default_table_values(struct pm8001_hba_info *pm8001_ha)
* update_main_config_table - update the main default table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_main_config_table(struct pm8001_hba_info *pm8001_ha)
+static void update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
void __iomem *address = pm8001_ha->main_cfg_tbl_addr;
pm8001_mw32(address, 0x24,
@@ -297,8 +292,8 @@ update_main_config_table(struct pm8001_hba_info *pm8001_ha)
* update_inbnd_queue_table - update the inbound queue table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+static void update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
{
void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
u16 offset = number * 0x20;
@@ -318,8 +313,8 @@ update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
* update_outbnd_queue_table - update the outbound queue table to the HBA.
* @pm8001_ha: our hba card information
*/
-static void __devinit
-update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
+static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha,
+ int number)
{
void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
u16 offset = number * 0x24;
@@ -370,8 +365,8 @@ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
* @pm8001_ha: our hba card information
* @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
*/
-static void __devinit
-mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
+static void mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha,
+ u32 SSCbit)
{
u32 value, offset, i;
unsigned long flags;
@@ -438,9 +433,8 @@ mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
* @pm8001_ha: our hba card information
* @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us.
*/
-static void __devinit
-mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
- u32 interval)
+static void mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
+ u32 interval)
{
u32 offset;
u32 value;
@@ -601,7 +595,7 @@ static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
* pm8001_chip_init - the main init function that initialize whole PM8001 chip.
* @pm8001_ha: our hba card information
*/
-static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
/* check the firmware status */
if (-1 == check_fw_ready(pm8001_ha)) {
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index 0267c22f8741..3d5e522e00fc 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -104,8 +104,7 @@ static struct sas_domain_function_template pm8001_transport_ops = {
*@pm8001_ha: our hba structure.
*@phy_id: phy id.
*/
-static void __devinit pm8001_phy_init(struct pm8001_hba_info *pm8001_ha,
- int phy_id)
+static void pm8001_phy_init(struct pm8001_hba_info *pm8001_ha, int phy_id)
{
struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
struct asd_sas_phy *sas_phy = &phy->sas_phy;
@@ -141,7 +140,8 @@ static void pm8001_free(struct pm8001_hba_info *pm8001_ha)
for (i = 0; i < USI_MAX_MEMCNT; i++) {
if (pm8001_ha->memoryMap.region[i].virt_ptr != NULL) {
pci_free_consistent(pm8001_ha->pdev,
- pm8001_ha->memoryMap.region[i].element_size,
+ (pm8001_ha->memoryMap.region[i].total_len +
+ pm8001_ha->memoryMap.region[i].alignment),
pm8001_ha->memoryMap.region[i].virt_ptr,
pm8001_ha->memoryMap.region[i].phys_addr);
}
@@ -195,7 +195,7 @@ static irqreturn_t pm8001_interrupt(int irq, void *opaque)
* @pm8001_ha:our hba structure.
*
*/
-static int __devinit pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_alloc(struct pm8001_hba_info *pm8001_ha)
{
int i;
spin_lock_init(&pm8001_ha->lock);
@@ -360,8 +360,9 @@ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha)
* @ent: ent
* @shost: scsi host struct which has been initialized before.
*/
-static struct pm8001_hba_info *__devinit
-pm8001_pci_alloc(struct pci_dev *pdev, u32 chip_id, struct Scsi_Host *shost)
+static struct pm8001_hba_info *pm8001_pci_alloc(struct pci_dev *pdev,
+ u32 chip_id,
+ struct Scsi_Host *shost)
{
struct pm8001_hba_info *pm8001_ha;
struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
@@ -433,8 +434,8 @@ static int pci_go_44(struct pci_dev *pdev)
* @shost: scsi host which has been allocated outside.
* @chip_info: our ha struct.
*/
-static int __devinit pm8001_prep_sas_ha_init(struct Scsi_Host * shost,
- const struct pm8001_chip_info *chip_info)
+static int pm8001_prep_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
{
int phy_nr, port_nr;
struct asd_sas_phy **arr_phy;
@@ -479,8 +480,8 @@ exit:
* @shost: scsi host which has been allocated outside
* @chip_info: our ha struct.
*/
-static void __devinit pm8001_post_sas_ha_init(struct Scsi_Host *shost,
- const struct pm8001_chip_info *chip_info)
+static void pm8001_post_sas_ha_init(struct Scsi_Host *shost,
+ const struct pm8001_chip_info *chip_info)
{
int i = 0;
struct pm8001_hba_info *pm8001_ha;
@@ -615,8 +616,8 @@ intx:
* pci driver it is invoked, all struct an hardware initilization should be done
* here, also, register interrupt
*/
-static int __devinit pm8001_pci_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int pm8001_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
unsigned int rc;
u32 pci_reg;
@@ -707,7 +708,7 @@ err_out_enable:
return rc;
}
-static void __devexit pm8001_pci_remove(struct pci_dev *pdev)
+static void pm8001_pci_remove(struct pci_dev *pdev)
{
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
struct pm8001_hba_info *pm8001_ha;
@@ -842,7 +843,7 @@ err_out_enable:
return rc;
}
-static struct pci_device_id __devinitdata pm8001_pci_table[] = {
+static struct pci_device_id pm8001_pci_table[] = {
{
PCI_VDEVICE(PMC_Sierra, 0x8001), chip_8001
},
@@ -857,7 +858,7 @@ static struct pci_driver pm8001_pci_driver = {
.name = DRV_NAME,
.id_table = pm8001_pci_table,
.probe = pm8001_pci_probe,
- .remove = __devexit_p(pm8001_pci_remove),
+ .remove = pm8001_pci_remove,
.suspend = pm8001_pci_suspend,
.resume = pm8001_pci_resume,
};
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index af763eab2039..b46f5e906837 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -125,7 +125,7 @@ static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
/*
* PCI device ids supported by pmcraid driver
*/
-static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
+static struct pci_device_id pmcraid_pci_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
},
@@ -4818,8 +4818,7 @@ pmcraid_release_control_blocks(
* Return Value
* 0 in case of success; -ENOMEM in case of failure
*/
-static int __devinit
-pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
{
int i;
@@ -4855,8 +4854,7 @@ pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
* Return Value
* 0 in case it can allocate all control blocks, otherwise -ENOMEM
*/
-static int __devinit
-pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
{
int i;
@@ -4922,8 +4920,7 @@ pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
* Return value
* 0 hrrq buffers are allocated, -ENOMEM otherwise.
*/
-static int __devinit
-pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
{
int i, buffer_size;
@@ -5062,8 +5059,7 @@ static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 for successful allocation, -ENOMEM for any failure
*/
-static int __devinit
-pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
+static int pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
{
int i;
@@ -5181,7 +5177,7 @@ static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 in case all of the blocks are allocated, -ENOMEM otherwise.
*/
-static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
+static int pmcraid_init_buffers(struct pmcraid_instance *pinstance)
{
int i;
@@ -5281,11 +5277,8 @@ static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
* Return Value
* 0 on success, non-zero in case of any failure
*/
-static int __devinit pmcraid_init_instance(
- struct pci_dev *pdev,
- struct Scsi_Host *host,
- void __iomem *mapped_pci_addr
-)
+static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
+ void __iomem *mapped_pci_addr)
{
struct pmcraid_instance *pinstance =
(struct pmcraid_instance *)host->hostdata;
@@ -5442,7 +5435,7 @@ static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
* Return value
* none
*/
-static void __devexit pmcraid_remove(struct pci_dev *pdev)
+static void pmcraid_remove(struct pci_dev *pdev)
{
struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
@@ -5883,10 +5876,8 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
* returns 0 if the device is claimed and successfully configured.
* returns non-zero error code in case of any failure
*/
-static int __devinit pmcraid_probe(
- struct pci_dev *pdev,
- const struct pci_device_id *dev_id
-)
+static int pmcraid_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
{
struct pmcraid_instance *pinstance;
struct Scsi_Host *host;
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index 959f10055be7..e6e2a30493e6 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -359,7 +359,7 @@ static struct scsi_host_template ps3rom_host_template = {
};
-static int __devinit ps3rom_probe(struct ps3_system_bus_device *_dev)
+static int ps3rom_probe(struct ps3_system_bus_device *_dev)
{
struct ps3_storage_device *dev = to_ps3_storage_device(&_dev->core);
int error;
diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c
index 538230be5cca..5a522c5bbd43 100644
--- a/drivers/scsi/qla1280.c
+++ b/drivers/scsi/qla1280.c
@@ -1438,7 +1438,7 @@ qla1280_return_status(struct response * sts, struct scsi_cmnd *cp)
* Returns:
* 0 = success
*/
-static int __devinit
+static int
qla1280_initialize_adapter(struct scsi_qla_host *ha)
{
struct device_reg __iomem *reg;
@@ -4230,7 +4230,7 @@ static struct scsi_host_template qla1280_driver_template = {
};
-static int __devinit
+static int
qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int devnum = id->driver_data;
@@ -4399,7 +4399,7 @@ qla1280_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
}
-static void __devexit
+static void
qla1280_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -4433,7 +4433,7 @@ static struct pci_driver qla1280_pci_driver = {
.name = "qla1280",
.id_table = qla1280_pci_tbl,
.probe = qla1280_probe_one,
- .remove = __devexit_p(qla1280_remove_one),
+ .remove = qla1280_remove_one,
};
static int __init
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 1c28215f8bed..1d82eef4e1eb 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -1272,22 +1272,29 @@ qla2x00_thermal_temp_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
- int rval = QLA_FUNCTION_FAILED;
- uint16_t temp, frac;
+ uint16_t temp = 0;
- if (!vha->hw->flags.thermal_supported)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (!vha->hw->thermal_support) {
+ ql_log(ql_log_warn, vha, 0x70db,
+ "Thermal not supported by this card.\n");
+ goto done;
+ }
- temp = frac = 0;
- if (qla2x00_reset_active(vha))
- ql_log(ql_log_warn, vha, 0x707b,
- "ISP reset active.\n");
- else if (!vha->hw->flags.eeh_busy)
- rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
- if (rval != QLA_SUCCESS)
- return snprintf(buf, PAGE_SIZE, "\n");
+ if (qla2x00_reset_active(vha)) {
+ ql_log(ql_log_warn, vha, 0x70dc, "ISP reset active.\n");
+ goto done;
+ }
+
+ if (vha->hw->flags.eeh_busy) {
+ ql_log(ql_log_warn, vha, 0x70dd, "PCI EEH busy.\n");
+ goto done;
+ }
+
+ if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
+ return snprintf(buf, PAGE_SIZE, "%d\n", temp);
- return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
+done:
+ return snprintf(buf, PAGE_SIZE, "\n");
}
static ssize_t
@@ -1615,8 +1622,7 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
* At this point all fcport's software-states are cleared. Perform any
* final cleanup of firmware resources (PCBs and XCBs).
*/
- if (fcport->loop_id != FC_NO_LOOP_ID &&
- !test_bit(UNLOADING, &fcport->vha->dpc_flags)) {
+ if (fcport->loop_id != FC_NO_LOOP_ID) {
if (IS_FWI2_CAPABLE(fcport->vha->hw))
fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
fcport->loop_id, fcport->d_id.b.domain,
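
The qla2x00_thermal_temp_show() rework above replaces the thermal_supported flag logic with a single-exit shape: every unsupported/reset/EEH case logs a warning and falls through to a done: label that emits a blank line, and an integer temperature is printed only on success. Reduced to its essentials (a sketch; the hw_* helpers are hypothetical stand-ins, not driver functions):

static ssize_t example_temp_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	uint16_t temp = 0;

	if (!hw_thermal_supported(dev))		/* hypothetical capability check */
		goto done;

	if (hw_read_temp(dev, &temp) == 0)	/* hypothetical read, 0 == success */
		return snprintf(buf, PAGE_SIZE, "%d\n", temp);

done:
	return snprintf(buf, PAGE_SIZE, "\n");	/* blank line on any failure */
}
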
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 2f9bddd3c616..ad54099cb805 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -27,7 +27,7 @@ void
qla2x00_bsg_sp_free(void *data, void *ptr)
{
srb_t *sp = (srb_t *)ptr;
- struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
+ struct scsi_qla_host *vha = sp->fcport->vha;
struct fc_bsg_job *bsg_job = sp->u.bsg_job;
struct qla_hw_data *ha = vha->hw;
@@ -40,7 +40,7 @@ qla2x00_bsg_sp_free(void *data, void *ptr)
if (sp->type == SRB_CT_CMD ||
sp->type == SRB_ELS_CMD_HST)
kfree(sp->fcport);
- mempool_free(sp, vha->hw->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
}
int
@@ -219,7 +219,8 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
break;
}
exit_fcp_prio_cfg:
- bsg_job->job_done(bsg_job);
+ if (!ret)
+ bsg_job->job_done(bsg_job);
return ret;
}
@@ -367,7 +368,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_unmap_sg;
}
@@ -514,7 +515,7 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
rval = -EIO;
goto done_free_fcport;
}
@@ -530,6 +531,75 @@ done_unmap_sg:
done:
return rval;
}
+
+/* Disable loopback mode */
+static inline int
+qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
+ int wait, int wait2)
+{
+ int ret = 0;
+ int rval = 0;
+ uint16_t new_config[4];
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
+ goto done_reset_internal;
+
+ memset(new_config, 0, sizeof(new_config));
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_INTERNAL_LOOPBACK ||
+ (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
+ ENABLE_EXTERNAL_LOOPBACK) {
+ new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
+ ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
+ (new_config[0] & INTERNAL_LOOPBACK_MASK));
+ memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3);
+
+ ha->notify_dcbx_comp = wait;
+ ha->notify_lb_portup_comp = wait2;
+
+ ret = qla81xx_set_port_config(vha, new_config);
+ if (ret != QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x7025,
+ "Set port config failed.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ }
+
+ /* Wait for DCBX complete event */
+ if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x7026,
+ "DCBX completion not received.\n");
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x7027,
+ "DCBX completion received.\n");
+
+ if (wait2 &&
+ !wait_for_completion_timeout(&ha->lb_portup_comp,
+ (LB_PORTUP_COMP_TIMEOUT * HZ))) {
+ ql_dbg(ql_dbg_user, vha, 0x70c5,
+ "Port up completion not received.\n");
+ ha->notify_lb_portup_comp = 0;
+ rval = -EINVAL;
+ goto done_reset_internal;
+ } else
+ ql_dbg(ql_dbg_user, vha, 0x70c6,
+ "Port up completion received.\n");
+
+ ha->notify_dcbx_comp = 0;
+ ha->notify_lb_portup_comp = 0;
+ }
+done_reset_internal:
+ return rval;
+}
+
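
qla81xx_reset_loopback_mode() now takes two wait flags so the teardown path can block both on the DCBX handshake and on the port coming back up. The waits only work if the interrupt/async-event path completes the matching completion when the notify flag is armed; roughly (a sketch of the pairing, assuming the ISR-side placement):

/* producer side, e.g. in the async-event handler (sketch) */
if (ha->notify_dcbx_comp)
	complete(&ha->dcbx_comp);
if (ha->notify_lb_portup_comp)
	complete(&ha->lb_portup_comp);

/* consumer side, as in qla81xx_reset_loopback_mode() above */
ha->notify_lb_portup_comp = 1;
if (!wait_for_completion_timeout(&ha->lb_portup_comp,
    LB_PORTUP_COMP_TIMEOUT * HZ)) {
	/* timed out: clear the notify flag and fail the reset */
}
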
/*
* Set the port configuration to enable the internal or external loopback
* depending on the loopback mode.
@@ -565,9 +635,19 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
}
/* Wait for DCBX complete event */
- if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
+ if (!wait_for_completion_timeout(&ha->dcbx_comp,
+ (DCBX_COMP_TIMEOUT * HZ))) {
ql_dbg(ql_dbg_user, vha, 0x7022,
- "State change notification not received.\n");
+ "DCBX completion not received.\n");
+ ret = qla81xx_reset_loopback_mode(vha, new_config, 0, 0);
+ /*
+ * If the reset of the loopback mode doesn't work take a FCoE
+ * dump and reset the chip.
+ */
+ if (ret) {
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+ }
rval = -EINVAL;
} else {
if (ha->flags.idc_compl_status) {
@@ -577,7 +657,7 @@ qla81xx_set_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
ha->flags.idc_compl_status = 0;
} else
ql_dbg(ql_dbg_user, vha, 0x7023,
- "State change received.\n");
+ "DCBX completion received.\n");
}
ha->notify_dcbx_comp = 0;
@@ -586,57 +666,6 @@ done_set_internal:
return rval;
}
-/* Disable loopback mode */
-static inline int
-qla81xx_reset_loopback_mode(scsi_qla_host_t *vha, uint16_t *config,
- int wait)
-{
- int ret = 0;
- int rval = 0;
- uint16_t new_config[4];
- struct qla_hw_data *ha = vha->hw;
-
- if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
- goto done_reset_internal;
-
- memset(new_config, 0 , sizeof(new_config));
- if ((config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_INTERNAL_LOOPBACK ||
- (config[0] & INTERNAL_LOOPBACK_MASK) >> 1 ==
- ENABLE_EXTERNAL_LOOPBACK) {
- new_config[0] = config[0] & ~INTERNAL_LOOPBACK_MASK;
- ql_dbg(ql_dbg_user, vha, 0x70bf, "new_config[0]=%02x\n",
- (new_config[0] & INTERNAL_LOOPBACK_MASK));
- memcpy(&new_config[1], &config[1], sizeof(uint16_t) * 3) ;
-
- ha->notify_dcbx_comp = wait;
- ret = qla81xx_set_port_config(vha, new_config);
- if (ret != QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x7025,
- "Set port config failed.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- }
-
- /* Wait for DCBX complete event */
- if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
- (20 * HZ))) {
- ql_dbg(ql_dbg_user, vha, 0x7026,
- "State change notification not received.\n");
- ha->notify_dcbx_comp = 0;
- rval = -EINVAL;
- goto done_reset_internal;
- } else
- ql_dbg(ql_dbg_user, vha, 0x7027,
- "State change received.\n");
-
- ha->notify_dcbx_comp = 0;
- }
-done_reset_internal:
- return rval;
-}
-
static int
qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
{
@@ -738,12 +767,20 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (IS_QLA81XX(ha) || IS_QLA8031(ha)) {
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
+
if (qla81xx_get_port_config(vha, config)) {
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
+ }
+
+ if ((config[0] & INTERNAL_LOOPBACK_MASK) != 0) {
+ ql_dbg(ql_dbg_user, vha, 0x70c4,
+ "Loopback operation already in "
+ "progress.\n");
+ rval = -EAGAIN;
+ goto done_free_dma_rsp;
}
ql_dbg(ql_dbg_user, vha, 0x70c0,
@@ -755,15 +792,14 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
config, new_config, elreq.options);
else
rval = qla81xx_reset_loopback_mode(vha,
- config, 1);
+ config, 1, 0);
else
rval = qla81xx_set_loopback_mode(vha, config,
new_config, elreq.options);
if (rval) {
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
@@ -773,14 +809,6 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
- if (new_config[0]) {
- /* Revert back to original port config
- * Also clear internal loopback
- */
- qla81xx_reset_loopback_mode(vha,
- new_config, 0);
- }
-
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
ql_log(ql_log_warn, vha, 0x7029,
@@ -789,16 +817,39 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
- if (qla81xx_restart_mpi_firmware(vha) !=
- QLA_SUCCESS) {
- ql_log(ql_log_warn, vha, 0x702a,
- "MPI reset failed.\n");
+ if (IS_QLA81XX(ha)) {
+ if (qla81xx_restart_mpi_firmware(vha) !=
+ QLA_SUCCESS) {
+ ql_log(ql_log_warn, vha, 0x702a,
+ "MPI reset failed.\n");
+ }
}
- bsg_job->reply->result = (DID_ERROR << 16);
rval = -EIO;
- goto done_free_dma_req;
+ goto done_free_dma_rsp;
+ }
+
+ if (new_config[0]) {
+ int ret;
+
+ /* Revert back to original port config
+ * Also clear internal loopback
+ */
+ ret = qla81xx_reset_loopback_mode(vha,
+ new_config, 0, 1);
+ if (ret) {
+ /*
+ * If the reset of the loopback mode
+ * doesn't work take FCoE dump and then
+ * reset the chip.
+ */
+ ha->isp_ops->fw_dump(vha, 0);
+ set_bit(ISP_ABORT_NEEDED,
+ &vha->dpc_flags);
+ }
+
}
+
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
ql_dbg(ql_dbg_user, vha, 0x702b,
@@ -812,34 +863,27 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x702c,
"Vendor request %s failed.\n", type);
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
-
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
+ bsg_job->reply->reply_payload_rcv_len = 0;
} else {
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
-
- bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
- sizeof(response) + sizeof(uint8_t);
- bsg_job->reply->reply_payload_rcv_len =
- bsg_job->reply_payload.payload_len;
- fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
- sizeof(struct fc_bsg_reply);
- memcpy(fw_sts_ptr, response, sizeof(response));
- fw_sts_ptr += sizeof(response);
- *fw_sts_ptr = command_sent;
- bsg_job->reply->result = DID_OK;
+ bsg_job->reply->result = (DID_OK << 16);
sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, rsp_data,
rsp_data_len);
}
- bsg_job->job_done(bsg_job);
+ bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
+ sizeof(response) + sizeof(uint8_t);
+ fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
+ sizeof(struct fc_bsg_reply);
+ memcpy(fw_sts_ptr, response, sizeof(response));
+ fw_sts_ptr += sizeof(response);
+ *fw_sts_ptr = command_sent;
+
+done_free_dma_rsp:
dma_free_coherent(&ha->pdev->dev, rsp_data_len,
rsp_data, rsp_data_dma);
done_free_dma_req:
@@ -853,6 +897,8 @@ done_unmap_req_sg:
dma_unmap_sg(&ha->pdev->dev,
bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -877,16 +923,15 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (rval) {
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -976,8 +1021,7 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
@@ -986,7 +1030,6 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
bsg_job->reply->result = DID_OK;
}
- bsg_job->job_done(bsg_job);
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
done_free_fw_buf:
@@ -996,6 +1039,8 @@ done_unmap_sg:
dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1163,8 +1208,7 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
+ rval = (DID_ERROR << 16);
} else {
ql_dbg(ql_dbg_user, vha, 0x7044,
@@ -1184,8 +1228,6 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
}
}
- bsg_job->job_done(bsg_job);
-
done_unmap_sg:
if (mgmt_b)
dma_free_coherent(&ha->pdev->dev, data_len, mgmt_b, mgmt_dma);
@@ -1200,6 +1242,8 @@ done_unmap_sg:
exit_mgmt:
dma_pool_free(ha->s_dma_pool, mn, mn_dma);
+ if (!rval)
+ bsg_job->job_done(bsg_job);
return rval;
}
@@ -1276,9 +1320,7 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
fcport->port_name[3], fcport->port_name[4],
fcport->port_name[5], fcport->port_name[6],
fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
- rval = 0;
- bsg_job->reply->result = (DID_ERROR << 16);
-
+ rval = (DID_ERROR << 16);
} else {
if (!port_param->mode) {
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
@@ -1292,9 +1334,9 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
bsg_job->reply->result = DID_OK;
+ bsg_job->job_done(bsg_job);
}
- bsg_job->job_done(bsg_job);
return rval;
}
@@ -1887,8 +1929,6 @@ qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
return qla24xx_process_bidir_cmd(bsg_job);
default:
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -ENOSYS;
}
}
@@ -1919,8 +1959,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
ql_dbg(ql_dbg_user, vha, 0x709f,
"BSG: ISP abort active/needed -- cmd=%d.\n",
bsg_job->request->msgcode);
- bsg_job->reply->result = (DID_ERROR << 16);
- bsg_job->job_done(bsg_job);
return -EBUSY;
}
@@ -1943,7 +1981,6 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_RPT_CT:
default:
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
- bsg_job->reply->result = ret;
break;
}
return ret;
@@ -1966,7 +2003,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (((sp->type == SRB_CT_CMD) ||
@@ -2001,6 +2038,6 @@ done:
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (bsg_job->request->msgcode == FC_BSG_HST_CT)
kfree(sp->fcport);
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(vha, sp);
return 0;
}
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index 37b8b7ba7421..e9f6b9bbf29a 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 44efe3cc79e6..1626de52e32a 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -11,20 +11,21 @@
* ----------------------------------------------------------------------
* | Level | Last Value Used | Holes |
* ----------------------------------------------------------------------
- * | Module Init and Probe | 0x0124 | 0x4b,0xba,0xfa |
- * | Mailbox commands | 0x114f | 0x111a-0x111b |
+ * | Module Init and Probe | 0x0126 | 0x4b,0xba,0xfa |
+ * | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
- * | Queue Command and IO tracing | 0x3030 | 0x3006-0x300b |
+ * | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
* | | | 0x3027-0x3028 |
* | | | 0x302d-0x302e |
* | DPC Thread | 0x401d | 0x4002,0x4013 |
* | Async Events | 0x5071 | 0x502b-0x502f |
* | | | 0x5047,0x5052 |
* | Timer Routines | 0x6011 | |
- * | User Space Interactions | 0x70c3 | 0x7018,0x702e, |
+ * | User Space Interactions | 0x70c4 | 0x7018,0x702e, |
+ * | | | 0x7020,0x7024, |
* | | | 0x7039,0x7045, |
* | | | 0x7073-0x7075, |
* | | | 0x708c, |
@@ -35,11 +36,11 @@
* | | | 0x800b,0x8039 |
* | AER/EEH | 0x9011 | |
* | Virtual Port | 0xa007 | |
- * | ISP82XX Specific | 0xb084 | 0xb002,0xb024 |
+ * | ISP82XX Specific | 0xb086 | 0xb002,0xb024 |
* | MultiQ | 0xc00c | |
* | Misc | 0xd010 | |
- * | Target Mode | 0xe06f | |
- * | Target Mode Management | 0xf071 | |
+ * | Target Mode | 0xe070 | |
+ * | Target Mode Management | 0xf072 | |
* | Target Mode Task Management | 0x1000b | |
* ----------------------------------------------------------------------
*/
@@ -526,8 +527,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
ha->max_req_queues : ha->max_rsp_queues;
mq->count = htonl(que_cnt);
for (cnt = 0; cnt < que_cnt; cnt++) {
- reg = (struct device_reg_25xxmq *) ((void *)
- ha->mqiobase + cnt * QLA_QUE_PAGE);
+ reg = (struct device_reg_25xxmq __iomem *)
+ (ha->mqiobase + cnt * QLA_QUE_PAGE);
que_idx = cnt * 4;
mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
@@ -2268,7 +2269,7 @@ qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
if (!cnt) {
nxt = fw->code_ram;
- nxt += sizeof(fw->code_ram),
+ nxt += sizeof(fw->code_ram);
nxt += (ha->fw_memory_size - 0x100000 + 1);
goto copy_queue;
} else
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 8f911c0b1e74..35e20b4f8b6c 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a9725bf5527b..c6509911772b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -37,6 +37,7 @@
#include "qla_nx.h"
#define QLA2XXX_DRIVER_NAME "qla2xxx"
#define QLA2XXX_APIDEV "ql2xapidev"
+#define QLA2XXX_MANUFACTURER "QLogic Corporation"
/*
* We have MAILBOX_REGISTER_COUNT sized arrays in a few places,
@@ -253,8 +254,8 @@
#define LOOP_DOWN_TIME 255 /* 240 */
#define LOOP_DOWN_RESET (LOOP_DOWN_TIME - 30)
-/* Maximum outstanding commands in ISP queues (1-65535) */
-#define MAX_OUTSTANDING_COMMANDS 1024
+#define DEFAULT_OUTSTANDING_COMMANDS 1024
+#define MIN_OUTSTANDING_COMMANDS 128
/* ISP request and response entry counts (37-65535) */
#define REQUEST_ENTRY_CNT_2100 128 /* Number of request entries. */
@@ -537,6 +538,8 @@ struct device_reg_25xxmq {
uint32_t req_q_out;
uint32_t rsp_q_in;
uint32_t rsp_q_out;
+ uint32_t atio_q_in;
+ uint32_t atio_q_out;
};
typedef union {
@@ -563,6 +566,9 @@ typedef union {
&(reg)->u.isp2100.mailbox5 : \
&(reg)->u.isp2300.rsp_q_out)
+#define ISP_ATIO_Q_IN(vha) (vha->hw->tgt.atio_q_in)
+#define ISP_ATIO_Q_OUT(vha) (vha->hw->tgt.atio_q_out)
+
#define MAILBOX_REG(ha, reg, num) \
(IS_QLA2100(ha) || IS_QLA2200(ha) ? \
(num < 8 ? \
@@ -762,8 +768,8 @@ typedef struct {
#define MBC_PORT_LOGOUT 0x56 /* Port Logout request */
#define MBC_SEND_RNID_ELS 0x57 /* Send RNID ELS request */
#define MBC_SET_RNID_PARAMS 0x59 /* Set RNID parameters */
-#define MBC_GET_RNID_PARAMS 0x5a /* Data Rate */
-#define MBC_DATA_RATE 0x5d /* Get RNID parameters */
+#define MBC_GET_RNID_PARAMS 0x5a /* Get RNID parameters */
+#define MBC_DATA_RATE 0x5d /* Data Rate */
#define MBC_INITIALIZE_FIRMWARE 0x60 /* Initialize firmware */
#define MBC_INITIATE_LIP 0x62 /* Initiate Loop */
/* Initialization Procedure */
@@ -809,6 +815,7 @@ typedef struct {
#define MBC_HOST_MEMORY_COPY 0x53 /* Host Memory Copy. */
#define MBC_SEND_RNFT_ELS 0x5e /* Send RNFT ELS request */
#define MBC_GET_LINK_PRIV_STATS 0x6d /* Get link & private data. */
+#define MBC_LINK_INITIALIZATION 0x72 /* Do link initialization. */
#define MBC_SET_VENDOR_ID 0x76 /* Set Vendor ID. */
#define MBC_PORT_RESET 0x120 /* Port Reset */
#define MBC_SET_PORT_CONFIG 0x122 /* Set port configuration */
@@ -856,6 +863,9 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
+#define RNID_TYPE_ASIC_TEMP 0xC
+
/*
* Firmware state codes from get firmware state mailbox command
*/
@@ -1841,9 +1851,6 @@ typedef struct fc_port {
uint8_t scan_state;
} fc_port_t;
-#define QLA_FCPORT_SCAN_NONE 0
-#define QLA_FCPORT_SCAN_FOUND 1
-
/*
* Fibre channel port/lun states.
*/
@@ -2486,9 +2493,9 @@ struct bidi_statistics {
#define QLA_MAX_QUEUES 256
#define ISP_QUE_REG(ha, id) \
((ha->mqenable || IS_QLA83XX(ha)) ? \
- ((void *)(ha->mqiobase) +\
+ ((device_reg_t __iomem *)(ha->mqiobase) +\
(QLA_QUE_PAGE * id)) :\
- ((void *)(ha->iobase)))
+ ((device_reg_t __iomem *)(ha->iobase)))
#define QLA_REQ_QUE_ID(tag) \
((tag < QLA_MAX_QUEUES && tag > 0) ? tag : 0)
#define QLA_DEFAULT_QUE_QOS 5
@@ -2533,8 +2540,10 @@ struct req_que {
uint16_t qos;
uint16_t vp_idx;
struct rsp_que *rsp;
- srb_t *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
+ srb_t **outstanding_cmds;
uint32_t current_outstanding_cmd;
+ uint16_t num_outstanding_cmds;
+#define MAX_Q_DEPTH 32
int max_q_depth;
};
@@ -2557,11 +2566,13 @@ struct qlt_hw_data {
struct atio *atio_ring_ptr; /* Current address. */
uint16_t atio_ring_index; /* Current index. */
uint16_t atio_q_length;
+ uint32_t __iomem *atio_q_in;
+ uint32_t __iomem *atio_q_out;
void *target_lport_ptr;
struct qla_tgt_func_tmpl *tgt_ops;
struct qla_tgt *qla_tgt;
- struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
+ struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
uint16_t current_handle;
struct qla_tgt_vp_map *tgt_vp_map;
@@ -2618,7 +2629,6 @@ struct qla_hw_data {
uint32_t nic_core_hung:1;
uint32_t quiesce_owner:1;
- uint32_t thermal_supported:1;
uint32_t nic_core_reset_hdlr_active:1;
uint32_t nic_core_reset_owner:1;
uint32_t isp82xx_no_md_cap:1;
@@ -2788,6 +2798,8 @@ struct qla_hw_data {
#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha))
#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
(((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
+#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
+#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
/* HBA serial number */
uint8_t serial0;
@@ -2870,7 +2882,13 @@ struct qla_hw_data {
struct completion mbx_cmd_comp; /* Serialize mbx access */
struct completion mbx_intr_comp; /* Used for completion notification */
struct completion dcbx_comp; /* For set port config notification */
+ struct completion lb_portup_comp; /* Used to wait for link up during
+ * loopback */
+#define DCBX_COMP_TIMEOUT 20
+#define LB_PORTUP_COMP_TIMEOUT 10
+
int notify_dcbx_comp;
+ int notify_lb_portup_comp;
struct mutex selflogin_lock;
/* Basic firmware related information. */
@@ -2887,6 +2905,7 @@ struct qla_hw_data {
#define RISC_START_ADDRESS_2300 0x800
#define RISC_START_ADDRESS_2400 0x100000
uint16_t fw_xcb_count;
+ uint16_t fw_iocb_count;
uint16_t fw_options[16]; /* slots: 1,2,3,10,11 */
uint8_t fw_seriallink_options[4];
@@ -3056,7 +3075,16 @@ struct qla_hw_data {
struct work_struct idc_state_handler;
struct work_struct nic_core_unrecoverable;
+#define HOST_QUEUE_RAMPDOWN_INTERVAL (60 * HZ)
+#define HOST_QUEUE_RAMPUP_INTERVAL (30 * HZ)
+ unsigned long host_last_rampdown_time;
+ unsigned long host_last_rampup_time;
+ int cfg_lun_q_depth;
+
struct qlt_hw_data tgt;
+ uint16_t thermal_support;
+#define THERMAL_SUPPORT_I2C BIT_0
+#define THERMAL_SUPPORT_ISP BIT_1
};
/*
@@ -3115,6 +3143,8 @@ typedef struct scsi_qla_host {
#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
#define SCR_PENDING 21 /* SCR in target mode */
+#define HOST_RAMP_DOWN_QUEUE_DEPTH 22
+#define HOST_RAMP_UP_QUEUE_DEPTH 23
uint32_t device_flags;
#define SWITCH_FOUND BIT_0
@@ -3248,8 +3278,6 @@ struct qla_tgt_vp_map {
#define NVRAM_DELAY() udelay(10)
-#define INVALID_HANDLE (MAX_OUTSTANDING_COMMANDS+1)
-
/*
* Flash support definitions
*/
diff --git a/drivers/scsi/qla2xxx/qla_dfs.c b/drivers/scsi/qla2xxx/qla_dfs.c
index 706c4f7bc7c9..792a29294b62 100644
--- a/drivers/scsi/qla2xxx/qla_dfs.c
+++ b/drivers/scsi/qla2xxx/qla_dfs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index 59524aa0ab32..1ac2b0e3a0e1 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -300,7 +300,8 @@ struct init_cb_24xx {
uint32_t prio_request_q_address[2];
uint16_t msix;
- uint8_t reserved_2[6];
+ uint16_t msix_atio;
+ uint8_t reserved_2[4];
uint16_t atio_q_inpointer;
uint16_t atio_q_length;
@@ -1092,6 +1093,27 @@ struct device_reg_24xx {
uint32_t unused_6[2]; /* Gap. */
uint32_t iobase_sdata;
};
+/* RISC-RISC semaphore register PCI offset */
+#define RISC_REGISTER_BASE_OFFSET 0x7010
+#define RISC_REGISTER_WINDOW_OFFET 0x6
+
+/* RISC-RISC semaphore/flag register (risc address 0x7016) */
+
+#define RISC_SEMAPHORE 0x1UL
+#define RISC_SEMAPHORE_WE (RISC_SEMAPHORE << 16)
+#define RISC_SEMAPHORE_CLR (RISC_SEMAPHORE_WE | 0x0UL)
+#define RISC_SEMAPHORE_SET (RISC_SEMAPHORE_WE | RISC_SEMAPHORE)
+
+#define RISC_SEMAPHORE_FORCE 0x8000UL
+#define RISC_SEMAPHORE_FORCE_WE (RISC_SEMAPHORE_FORCE << 16)
+#define RISC_SEMAPHORE_FORCE_CLR (RISC_SEMAPHORE_FORCE_WE | 0x0UL)
+#define RISC_SEMAPHORE_FORCE_SET \
+ (RISC_SEMAPHORE_FORCE_WE | RISC_SEMAPHORE_FORCE)
+
+/* RISC semaphore timeouts (ms) */
+#define TIMEOUT_SEMAPHORE 2500
+#define TIMEOUT_SEMAPHORE_FORCE 2000
+#define TIMEOUT_TOTAL_ELAPSED 4500
/* Trace Control *************************************************************/
@@ -1366,9 +1388,7 @@ struct qla_flt_header {
#define FLT_REG_FCP_PRIO_0 0x87
#define FLT_REG_FCP_PRIO_1 0x88
#define FLT_REG_FCOE_FW 0xA4
-#define FLT_REG_FCOE_VPD_0 0xA9
#define FLT_REG_FCOE_NVRAM_0 0xAA
-#define FLT_REG_FCOE_VPD_1 0xAB
#define FLT_REG_FCOE_NVRAM_1 0xAC
struct qla_flt_region {
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 6acb39785a46..eb3ca21a7f17 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -55,7 +55,7 @@ extern void qla2x00_update_fcport(scsi_qla_host_t *, fc_port_t *);
extern void qla2x00_alloc_fw_dump(scsi_qla_host_t *);
extern void qla2x00_try_to_stop_firmware(scsi_qla_host_t *);
-extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *, uint16_t *);
+extern int qla2x00_get_thermal_temp(scsi_qla_host_t *, uint16_t *);
extern void qla84xx_put_chip(struct scsi_qla_host *);
@@ -84,6 +84,9 @@ extern int qla83xx_nic_core_reset(scsi_qla_host_t *);
extern void qla83xx_reset_ownership(scsi_qla_host_t *);
extern int qla2xxx_mctp_dump(scsi_qla_host_t *);
+extern int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *, struct req_que *);
+
/*
* Global Data in qla_os.c source file.
*/
@@ -94,6 +97,7 @@ extern int qlport_down_retry;
extern int ql2xplogiabsentdevice;
extern int ql2xloginretrycount;
extern int ql2xfdmienable;
+extern int ql2xmaxqdepth;
extern int ql2xallocfwdump;
extern int ql2xextended_error_logging;
extern int ql2xiidmaenable;
@@ -278,6 +282,9 @@ extern int
qla2x00_get_port_name(scsi_qla_host_t *, uint16_t, uint8_t *, uint8_t);
extern int
+qla24xx_link_initialize(scsi_qla_host_t *);
+
+extern int
qla2x00_lip_reset(scsi_qla_host_t *);
extern int
@@ -351,6 +358,9 @@ extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
+extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
@@ -416,7 +426,7 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
extern void qla2x00_free_irqs(scsi_qla_host_t *);
extern int qla2x00_get_data_rate(scsi_qla_host_t *);
-extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
+extern const char *qla2x00_get_link_speed_str(struct qla_hw_data *, uint16_t);
/*
* Global Function Prototypes in qla_sup.c source file.
@@ -436,6 +446,7 @@ extern uint8_t *qla25xx_read_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
uint32_t);
+extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
extern int qla2x00_beacon_on(struct scsi_qla_host *);
extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -598,7 +609,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *);
/* ISP 8021 hardware related */
extern void qla82xx_set_drv_active(scsi_qla_host_t *);
-extern void qla82xx_crb_win_unlock(struct qla_hw_data *);
extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32);
extern int qla82xx_rd_32(struct qla_hw_data *, ulong);
extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index f4e4bd7c3f4d..9b455250c101 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -218,6 +218,9 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
memcpy(fcport->port_name, ct_rsp->rsp.ga_nxt.port_name,
WWN_SIZE);
+ fcport->fc4_type = (ct_rsp->rsp.ga_nxt.fc4_types[2] & BIT_0) ?
+ FC4_TYPE_FCP_SCSI : FC4_TYPE_OTHER;
+
if (ct_rsp->rsp.ga_nxt.port_type != NS_N_PORT_TYPE &&
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
@@ -1325,8 +1328,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
- strcpy(eiter->a.manufacturer, "QLogic Corporation");
- alen = strlen(eiter->a.manufacturer);
+ alen = strlen(QLA2XXX_MANUFACTURER);
+ strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1646,8 +1649,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
- strcpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME);
- alen = strlen(eiter->a.os_dev_name);
+ alen = strlen(QLA2XXX_DRIVER_NAME);
+ strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
alen += (alen & 3) ? (4 - (alen & 3)) : 4;
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
@@ -1930,6 +1933,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
case BIT_11:
list[i].fp_speed = PORT_SPEED_8GB;
break;
+ case BIT_10:
+ list[i].fp_speed = PORT_SPEED_16GB;
+ break;
}
ql_dbg(ql_dbg_disc, vha, 0x205b,
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 48fca47384b7..edf4d14a1335 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -70,9 +70,7 @@ qla2x00_sp_free(void *data, void *ptr)
struct scsi_qla_host *vha = (scsi_qla_host_t *)data;
del_timer(&iocb->timer);
- mempool_free(sp, vha->hw->srb_mempool);
-
- QLA_VHA_MARK_NOT_BUSY(vha);
+ qla2x00_rel_sp(vha, sp);
}
/* Asynchronous Login/Logout Routines -------------------------------------- */
@@ -429,7 +427,7 @@ qla2x00_async_adisc_done(struct scsi_qla_host *vha, fc_port_t *fcport,
/* QLogic ISP2x00 Hardware Support Functions. */
/****************************************************************************/
-int
+static int
qla83xx_nic_core_fw_load(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -525,7 +523,7 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
vha->flags.reset_active = 0;
ha->flags.pci_channel_io_perm_failure = 0;
ha->flags.eeh_busy = 0;
- ha->flags.thermal_supported = 1;
+ ha->thermal_support = THERMAL_SUPPORT_I2C|THERMAL_SUPPORT_ISP;
atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
atomic_set(&vha->loop_state, LOOP_DOWN);
vha->device_flags = DFLG_NO_CABLE;
@@ -621,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -997,7 +997,7 @@ qla2x00_reset_chip(scsi_qla_host_t *vha)
*
* Returns 0 on success.
*/
-int
+static int
qla81xx_reset_mpi(scsi_qla_host_t *vha)
{
uint16_t mb[4] = {0x1010, 0, 1, 0};
@@ -1095,6 +1095,83 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
ha->isp_ops->enable_intrs(ha);
}
+static void
+qla25xx_read_risc_sema_reg(scsi_qla_host_t *vha, uint32_t *data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ *data = RD_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET);
+}
+
+static void
+qla25xx_write_risc_sema_reg(scsi_qla_host_t *vha, uint32_t data)
+{
+ struct device_reg_24xx __iomem *reg = &vha->hw->iobase->isp24;
+
+ WRT_REG_DWORD(&reg->iobase_addr, RISC_REGISTER_BASE_OFFSET);
+ WRT_REG_DWORD(&reg->iobase_window + RISC_REGISTER_WINDOW_OFFET, data);
+}
+
+static void
+qla25xx_manipulate_risc_semaphore(scsi_qla_host_t *vha)
+{
+ struct qla_hw_data *ha = vha->hw;
+ uint32_t wd32 = 0;
+ uint delta_msec = 100;
+ uint elapsed_msec = 0;
+ uint timeout_msec;
+ ulong n;
+
+ if (!IS_QLA25XX(ha) && !IS_QLA2031(ha))
+ return;
+
+attempt:
+ timeout_msec = TIMEOUT_SEMAPHORE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_SET);
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (wd32 & RISC_SEMAPHORE)
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (!(wd32 & RISC_SEMAPHORE))
+ goto force;
+
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ goto acquired;
+
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_CLR);
+ timeout_msec = TIMEOUT_SEMAPHORE_FORCE;
+ n = timeout_msec / delta_msec;
+ while (n--) {
+ qla25xx_read_risc_sema_reg(vha, &wd32);
+ if (!(wd32 & RISC_SEMAPHORE_FORCE))
+ break;
+ msleep(delta_msec);
+ elapsed_msec += delta_msec;
+ if (elapsed_msec > TIMEOUT_TOTAL_ELAPSED)
+ goto force;
+ }
+
+ if (wd32 & RISC_SEMAPHORE_FORCE)
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_CLR);
+
+ goto attempt;
+
+force:
+ qla25xx_write_risc_sema_reg(vha, RISC_SEMAPHORE_FORCE_SET);
+
+acquired:
+ return;
+}
+
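
The new qla25xx_manipulate_risc_semaphore() above implements a polled acquire with a forced fallback: repeatedly set-and-read the semaphore bit, clear a stale FORCE flag if a previous owner died holding it, and only FORCE-set once TIMEOUT_TOTAL_ELAPSED has passed. Its register helpers use the ISP's windowed access scheme: write the RISC register base to iobase_addr, then read or write at iobase_window plus the word offset. Skeleton of the acquire loop (a sketch; read_sema()/write_sema() are hypothetical wrappers around the helpers above, and the stale-FORCE handling is omitted):

uint32_t wd32 = 0;
uint elapsed = 0, step = 100;		/* milliseconds */

for (;;) {
	write_sema(RISC_SEMAPHORE_SET);
	read_sema(&wd32);
	if (wd32 & RISC_SEMAPHORE)
		break;			/* bit reads back set: proceed (real code also checks FORCE) */
	msleep(step);
	elapsed += step;
	if (elapsed > TIMEOUT_TOTAL_ELAPSED) {
		write_sema(RISC_SEMAPHORE_FORCE_SET);	/* last resort */
		break;
	}
}
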
/**
* qla24xx_reset_chip() - Reset ISP24xx chip.
* @ha: HA context
@@ -1113,6 +1190,8 @@ qla24xx_reset_chip(scsi_qla_host_t *vha)
ha->isp_ops->disable_intrs(ha);
+ qla25xx_manipulate_risc_semaphore(vha);
+
/* Perform RISC reset. */
qla24xx_reset_risc(vha);
}
@@ -1480,6 +1559,47 @@ done:
return rval;
}
+int
+qla2x00_alloc_outstanding_cmds(struct qla_hw_data *ha, struct req_que *req)
+{
+ /* Don't try to reallocate the array */
+ if (req->outstanding_cmds)
+ return QLA_SUCCESS;
+
+ if (!IS_FWI2_CAPABLE(ha) || (ha->mqiobase &&
+ (ql2xmultique_tag || ql2xmaxqueues > 1)))
+ req->num_outstanding_cmds = DEFAULT_OUTSTANDING_COMMANDS;
+ else {
+ if (ha->fw_xcb_count <= ha->fw_iocb_count)
+ req->num_outstanding_cmds = ha->fw_xcb_count;
+ else
+ req->num_outstanding_cmds = ha->fw_iocb_count;
+ }
+
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ /*
+ * Try to allocate a minimal size just so we can get through
+ * initialization.
+ */
+ req->num_outstanding_cmds = MIN_OUTSTANDING_COMMANDS;
+ req->outstanding_cmds = kzalloc(sizeof(srb_t *) *
+ req->num_outstanding_cmds, GFP_KERNEL);
+
+ if (!req->outstanding_cmds) {
+ ql_log(ql_log_fatal, NULL, 0x0126,
+ "Failed to allocate memory for "
+ "outstanding_cmds for req_que %p.\n", req);
+ req->num_outstanding_cmds = 0;
+ return QLA_FUNCTION_FAILED;
+ }
+ }
+
+ return QLA_SUCCESS;
+}
+
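
qla2x00_alloc_outstanding_cmds() sizes the per-queue array from the firmware-reported exchange/IOCB counts, and if the first allocation fails it retries with MIN_OUTSTANDING_COMMANDS so initialization can still limp through under memory pressure. The two-step pattern in isolation (a sketch using kcalloc(), the idiomatic equivalent of the kzalloc(n * size) calls above):

static srb_t **alloc_cmd_array(uint16_t *count, uint16_t want, uint16_t min)
{
	srb_t **arr;

	*count = want;
	arr = kcalloc(*count, sizeof(*arr), GFP_KERNEL);
	if (!arr) {
		*count = min;		/* degrade to the minimum before giving up */
		arr = kcalloc(*count, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			*count = 0;	/* final failure; caller errors out */
	}
	return arr;
}
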
/**
* qla2x00_setup_chip() - Load and start RISC firmware.
* @ha: HA context
@@ -1549,9 +1669,18 @@ enable_82xx_npiv:
MIN_MULTI_ID_FABRIC - 1;
}
qla2x00_get_resource_cnts(vha, NULL,
- &ha->fw_xcb_count, NULL, NULL,
+ &ha->fw_xcb_count, NULL, &ha->fw_iocb_count,
&ha->max_npiv_vports, NULL);
+ /*
+ * Allocate the array of outstanding commands
+ * now that we know the firmware resources.
+ */
+ rval = qla2x00_alloc_outstanding_cmds(ha,
+ vha->req);
+ if (rval != QLA_SUCCESS)
+ goto failed;
+
if (!fw_major_version && ql2xallocfwdump
&& !IS_QLA82XX(ha))
qla2x00_alloc_fw_dump(vha);
@@ -1835,7 +1964,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
}
- qlt_24xx_config_rings(vha, reg);
+ qlt_24xx_config_rings(vha);
/* PCI posting */
RD_REG_DWORD(&ioreg->hccr);
@@ -1869,7 +1998,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@@ -1888,10 +2017,6 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
qla2x00_init_response_q_entries(rsp);
}
- spin_lock(&ha->vport_slock);
-
- spin_unlock(&ha->vport_slock);
-
ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
ha->tgt.atio_ring_index = 0;
/* Initialize ATIO queue entries */
@@ -1971,6 +2096,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
"Waiting for LIP to complete.\n");
do {
+ memset(state, -1, sizeof(state));
rval = qla2x00_get_firmware_state(vha, state);
if (rval == QLA_SUCCESS) {
if (state[0] < FSTATE_LOSS_OF_SYNC) {
@@ -2081,6 +2207,7 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
char connect_type[22];
struct qla_hw_data *ha = vha->hw;
unsigned long flags;
+ scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
/* Get host addresses. */
rval = qla2x00_get_adapter_id(vha,
@@ -2094,6 +2221,13 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
} else {
ql_log(ql_log_warn, vha, 0x2009,
"Unable to get host loop ID.\n");
+ if (IS_FWI2_CAPABLE(ha) && (vha == base_vha) &&
+ (rval == QLA_COMMAND_ERROR && loop_id == 0x1b)) {
+ ql_log(ql_log_warn, vha, 0x1151,
+ "Doing link init.\n");
+ if (qla24xx_link_initialize(vha) == QLA_SUCCESS)
+ return rval;
+ }
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
}
return (rval);
@@ -2614,7 +2748,6 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
fcport->loop_id = FC_NO_LOOP_ID;
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
fcport->supported_classes = FC_COS_UNSPECIFIED;
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
return fcport;
}
@@ -2907,7 +3040,6 @@ cleanup_allocation:
static void
qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
{
- char *link_speed;
int rval;
uint16_t mb[4];
struct qla_hw_data *ha = vha->hw;
@@ -2934,10 +3066,10 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]);
} else {
- link_speed = qla2x00_get_link_speed_str(ha);
ql_dbg(ql_dbg_disc, vha, 0x2005,
"iIDMA adjusted to %s GB/s "
- "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
+ "on %02x%02x%02x%02x%02x%02x%02x%02x.\n",
+ qla2x00_get_link_speed_str(ha, fcport->fp_speed),
fcport->port_name[0], fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
@@ -3007,10 +3139,10 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->login_retry = 0;
fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
+ qla2x00_set_fcport_state(fcport, FCS_ONLINE);
qla2x00_iidma_fcport(vha, fcport);
qla24xx_update_fcport_fcp_prio(vha, fcport);
qla2x00_reg_remote_port(vha, fcport);
- qla2x00_set_fcport_state(fcport, FCS_ONLINE);
}
/*
@@ -3028,7 +3160,7 @@ static int
qla2x00_configure_fabric(scsi_qla_host_t *vha)
{
int rval;
- fc_port_t *fcport;
+ fc_port_t *fcport, *fcptemp;
uint16_t next_loopid;
uint16_t mb[MAILBOX_REGISTER_COUNT];
uint16_t loop_id;
@@ -3066,7 +3198,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
0xfc, mb, BIT_1|BIT_0);
if (rval != QLA_SUCCESS) {
set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
- break;
+ return rval;
}
if (mb[0] != MBS_COMMAND_COMPLETE) {
ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -3098,16 +3230,21 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
}
}
+#define QLA_FCPORT_SCAN 1
+#define QLA_FCPORT_FOUND 2
+
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ fcport->scan_state = QLA_FCPORT_SCAN;
+ }
+
rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
if (rval != QLA_SUCCESS)
break;
- /* Add new ports to existing port list */
- list_splice_tail_init(&new_fcports, &vha->vp_fcports);
-
- /* Starting free loop ID. */
- next_loopid = ha->min_external_loopid;
-
+ /*
+ * Logout all previous fabric devices marked lost, except
+ * FCP2 devices.
+ */
list_for_each_entry(fcport, &vha->vp_fcports, list) {
if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
break;
@@ -3115,8 +3252,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
continue;
- /* Logout lost/gone fabric devices (non-FCP2) */
- if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
+ if (fcport->scan_state == QLA_FCPORT_SCAN &&
atomic_read(&fcport->state) == FCS_ONLINE) {
qla2x00_mark_device_lost(vha, fcport,
ql2xplogiabsentdevice, 0);
@@ -3129,30 +3265,74 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
fcport->d_id.b.domain,
fcport->d_id.b.area,
fcport->d_id.b.al_pa);
+ fcport->loop_id = FC_NO_LOOP_ID;
}
- continue;
}
- fcport->scan_state = QLA_FCPORT_SCAN_NONE;
-
- /* Login fabric devices that need a login */
- if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
- atomic_read(&vha->loop_down_timer) == 0) {
- if (fcport->loop_id == FC_NO_LOOP_ID) {
- fcport->loop_id = next_loopid;
- rval = qla2x00_find_new_loop_id(
- base_vha, fcport);
- if (rval != QLA_SUCCESS) {
- /* Ran out of IDs to use */
- continue;
- }
+ }
+
+ /* Starting free loop ID. */
+ next_loopid = ha->min_external_loopid;
+
+ /*
+ * Scan through our port list and login entries that need to be
+ * logged in.
+ */
+ list_for_each_entry(fcport, &vha->vp_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
+ (fcport->flags & FCF_LOGIN_NEEDED) == 0)
+ continue;
+
+ if (fcport->loop_id == FC_NO_LOOP_ID) {
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(
+ base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
}
}
+ /* Login and update database */
+ qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+ }
+
+ /* Exit if out of loop IDs. */
+ if (rval != QLA_SUCCESS) {
+ break;
+ }
+
+ /*
+ * Login and add the new devices to our port list.
+ */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ if (atomic_read(&vha->loop_down_timer) ||
+ test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
+ break;
+
+ /* Find a new loop ID to use. */
+ fcport->loop_id = next_loopid;
+ rval = qla2x00_find_new_loop_id(base_vha, fcport);
+ if (rval != QLA_SUCCESS) {
+ /* Ran out of IDs to use */
+ break;
+ }
/* Login and update database */
qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
+
+ list_move_tail(&fcport->list, &vha->vp_fcports);
}
} while (0);
+ /* Free all new device structures not processed. */
+ list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
+ list_del(&fcport->list);
+ kfree(fcport);
+ }
+
if (rval) {
ql_dbg(ql_dbg_disc, vha, 0x2068,
"Configure fabric error exit rval=%d.\n", rval);
@@ -3188,8 +3368,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
int first_dev, last_dev;
port_id_t wrap = {}, nxt_d_id;
struct qla_hw_data *ha = vha->hw;
- struct scsi_qla_host *vp, *base_vha = pci_get_drvdata(ha->pdev);
- struct scsi_qla_host *tvp;
+ struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
rval = QLA_SUCCESS;
@@ -3302,22 +3481,8 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
continue;
/* Bypass virtual ports of the same host. */
- found = 0;
- if (ha->num_vhosts) {
- unsigned long flags;
-
- spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
- if (new_fcport->d_id.b24 == vp->d_id.b24) {
- found = 1;
- break;
- }
- }
- spin_unlock_irqrestore(&ha->vport_slock, flags);
-
- if (found)
- continue;
- }
+ if (qla2x00_is_a_vp_did(vha, new_fcport->d_id.b24))
+ continue;
/* Bypass if same domain and area of adapter. */
if (((new_fcport->d_id.b24 & 0xffff00) ==
@@ -3342,7 +3507,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
WWN_SIZE))
continue;
- fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
+ fcport->scan_state = QLA_FCPORT_FOUND;
found++;
@@ -3868,7 +4033,7 @@ qla83xx_reset_ownership(scsi_qla_host_t *vha)
}
}
-int
+static int
__qla83xx_set_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3884,19 +4049,7 @@ __qla83xx_set_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_set_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
__qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -3912,19 +4065,7 @@ __qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
return rval;
}
-int
-qla83xx_clear_drv_ack(scsi_qla_host_t *vha)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_clear_drv_ack(vha);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-const char *
+static const char *
qla83xx_dev_state_to_string(uint32_t dev_state)
{
switch (dev_state) {
@@ -3978,7 +4119,7 @@ qla83xx_idc_audit(scsi_qla_host_t *vha, int audit_type)
}
/* Assumes idc_lock always held on entry */
-int
+static int
qla83xx_initiating_reset(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4026,36 +4167,12 @@ __qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
}
int
-qla83xx_set_idc_control(scsi_qla_host_t *vha, uint32_t idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_set_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
__qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
{
return qla83xx_rd_reg(vha, QLA83XX_IDC_CONTROL, idc_control);
}
-int
-qla83xx_get_idc_control(scsi_qla_host_t *vha, uint32_t *idc_control)
-{
- int rval = QLA_SUCCESS;
-
- qla83xx_idc_lock(vha, 0);
- rval = __qla83xx_get_idc_control(vha, idc_control);
- qla83xx_idc_unlock(vha, 0);
-
- return rval;
-}
-
-int
+static int
qla83xx_check_driver_presence(scsi_qla_host_t *vha)
{
uint32_t drv_presence = 0;
@@ -4977,7 +5094,7 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
return rval;
}
-#define QLA_FW_URL "ftp://ftp.qlogic.com/outgoing/linux/firmware/"
+#define QLA_FW_URL "http://ldriver.qlogic.com/firmware/"
int
qla2x00_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
@@ -5502,6 +5619,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
if (IS_T10_PI_CAPABLE(ha))
nv->frame_payload_size &= ~7;
+ qlt_81xx_config_nvram_stage1(vha, nv);
+
/* Reset Initialization control block */
memset(icb, 0, ha->init_cb_size);
@@ -5542,6 +5661,8 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
"QLE8XXX");
+ qlt_81xx_config_nvram_stage2(vha, icb);
+
/* Use alternate WWN? */
if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index c0462c04c885..68e2c4afc134 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -198,6 +198,13 @@ done:
}
static inline void
+qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
+{
+ mempool_free(sp, vha->hw->srb_mempool);
+ QLA_VHA_MARK_NOT_BUSY(vha);
+}
+
+static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
init_timer(&sp->u.iocb_cmd.timer);
@@ -213,3 +220,22 @@ qla2x00_gid_list_size(struct qla_hw_data *ha)
{
return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}
+
+static inline void
+qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
+{
+ if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
+ if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
+ HOST_QUEUE_RAMPDOWN_INTERVAL)))
+ return;
+
+ /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
+ if (time_before(jiffies, (vha->hw->host_last_rampup_time +
+ HOST_QUEUE_RAMPUP_INTERVAL)))
+ return;
+
+ set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
+}
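
qla2x00_do_host_ramp_up() is a jiffies-based throttle: a ramp-up is requested only after both the ramp-down and ramp-up intervals have elapsed, and time_before() keeps the comparison safe across jiffies wraparound. The core idiom (generic sketch):

static unsigned long last_event;	/* jiffies of the last accepted event */

static bool throttle_ok(unsigned long min_interval)
{
	if (time_before(jiffies, last_event + min_interval))
		return false;		/* too soon; wrap-safe comparison */
	last_event = jiffies;
	return true;
}
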
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 03b752632839..d2630317cce8 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -349,14 +349,14 @@ qla2x00_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -520,7 +520,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
req = ha->req_q_map[0];
- mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
+ mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, NULL);
if (mrk == NULL) {
ql_log(ql_log_warn, base_vha, 0x3026,
"Failed to allocate Marker IOCB.\n");
@@ -1467,16 +1467,15 @@ qla24xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
- }
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@@ -1641,15 +1640,15 @@ qla24xx_dif_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Compute number of required data segments */
@@ -1822,14 +1821,14 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on outstanding cmd array.\n");
goto queuing_error;
@@ -2263,14 +2262,14 @@ qla82xx_start_scsi(srb_t *sp)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS)
+ if (index == req->num_outstanding_cmds)
goto queuing_error;
/* Map the sg table so we have an accurate count of sg entries needed */
@@ -2551,7 +2550,7 @@ sufficient_dsds:
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD(
(unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
@@ -2748,7 +2747,6 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
struct rsp_que *rsp;
struct req_que *req;
int rval = EXT_STATUS_OK;
- device_reg_t __iomem *reg = ISP_QUE_REG(ha, vha->req->id);
rval = QLA_SUCCESS;
@@ -2768,15 +2766,15 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
- for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+ for (index = 1; index < req->num_outstanding_cmds; index++) {
handle++;
- if (handle == MAX_OUTSTANDING_COMMANDS)
+ if (handle == req->num_outstanding_cmds)
handle = 1;
if (!req->outstanding_cmds[handle])
break;
}
- if (index == MAX_OUTSTANDING_COMMANDS) {
+ if (index == req->num_outstanding_cmds) {
rval = EXT_STATUS_BUSY;
goto queuing_error;
}
@@ -2786,15 +2784,7 @@ qla2x00_start_bidir(srb_t *sp, struct scsi_qla_host *vha, uint32_t tot_dsds)
/* Check for room on request queue. */
if (req->cnt < req_cnt + 2) {
- if (ha->mqenable)
- cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
- else if (IS_QLA82XX(ha))
- cnt = RD_REG_DWORD(&reg->isp82.req_q_out);
- else if (IS_FWI2_CAPABLE(ha))
- cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
- else
- cnt = qla2x00_debounce_register(
- ISP_REQ_Q_OUT(ha, &reg->isp));
+ cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
if (req->ring_index < cnt)
req->cnt = cnt - req->ring_index;
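The recurring change across qla_iocb.c swaps the compile-time MAX_OUTSTANDING_COMMANDS bound for the per-queue req->num_outstanding_cmds, so each request queue scans only its own dynamically sized outstanding_cmds[] ring. A minimal sketch of that circular free-slot search, with hypothetical names:

/*
 * Illustrative sketch of the circular free-slot search used by the
 * qla2xxx start_scsi paths; names here are hypothetical. Slot 0 is
 * reserved, matching the driver's convention that handle 0 is invalid.
 */
struct example_queue {
	unsigned int num_slots;		/* runtime-sized, was a constant */
	unsigned int current_slot;	/* where the last search ended */
	void **slots;			/* NULL means free */
};

/* Returns a free handle in [1, num_slots), or 0 if the ring is full. */
static unsigned int example_find_free_handle(struct example_queue *q)
{
	unsigned int index, handle = q->current_slot;

	for (index = 1; index < q->num_slots; index++) {
		handle++;
		if (handle == q->num_slots)
			handle = 1;	/* wrap past the reserved slot 0 */
		if (!q->slots[handle])
			return handle;
	}
	return 0;	/* no room in outstanding command list */
}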
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5733811ce8e7..e9dbd74c20d3 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -13,6 +13,8 @@
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
+#include "qla_target.h"
+
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
struct req_que *, uint32_t);
@@ -251,7 +253,7 @@ qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
/* Read all mbox registers? */
mboxes = (1 << ha->mbx_count) - 1;
if (!ha->mcp)
- ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERRROR.\n");
+ ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
else
mboxes = ha->mcp->in_mb;
@@ -316,28 +318,24 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
}
#define LS_UNKNOWN 2
-char *
-qla2x00_get_link_speed_str(struct qla_hw_data *ha)
+const char *
+qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
- static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
- char *link_speed;
- int fw_speed = ha->link_data_rate;
+ static const char * const link_speeds[] = {
+ "1", "2", "?", "4", "8", "16", "10"
+ };
if (IS_QLA2100(ha) || IS_QLA2200(ha))
- link_speed = link_speeds[0];
- else if (fw_speed == 0x13)
- link_speed = link_speeds[6];
- else {
- link_speed = link_speeds[LS_UNKNOWN];
- if (fw_speed < 6)
- link_speed =
- link_speeds[fw_speed];
- }
-
- return link_speed;
+ return link_speeds[0];
+ else if (speed == 0x13)
+ return link_speeds[6];
+ else if (speed < 6)
+ return link_speeds[speed];
+ else
+ return link_speeds[LS_UNKNOWN];
}
-void
+static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
struct qla_hw_data *ha = vha->hw;
@@ -493,10 +491,37 @@ qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
ql_log(ql_log_info, vha, 0x506a,
"IDC Device-State changed = 0x%x.\n", mb[4]);
+ if (ha->flags.nic_core_reset_owner)
+ return;
qla83xx_schedule_work(vha, MBA_IDC_AEN);
}
}
+int
+qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
+{
+ struct qla_hw_data *ha = vha->hw;
+ scsi_qla_host_t *vp;
+ uint32_t vp_did;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!ha->num_vhosts)
+ return ret;
+
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ vp_did = vp->d_id.b24;
+ if (vp_did == rscn_entry) {
+ ret = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return ret;
+}
+
/**
* qla2x00_async_event() - Process asynchronous events.
* @ha: SCSI driver HA context
@@ -671,7 +696,7 @@ skip_rio:
ql_dbg(ql_dbg_async, vha, 0x500a,
"LOOP UP detected (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
vha->flags.management_server_logged_in = 0;
qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
@@ -860,7 +885,7 @@ skip_rio:
mb[1], mb[2], mb[3]);
ql_log(ql_log_warn, vha, 0x505f,
"Link is operational (%s Gbps).\n",
- qla2x00_get_link_speed_str(ha));
+ qla2x00_get_link_speed_str(ha, ha->link_data_rate));
/*
* Mark all devices as missing so we will login again.
@@ -903,6 +928,10 @@ skip_rio:
/* Ignore reserved bits from RSCN-payload. */
rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
+ /* Skip RSCNs for virtual ports on the same physical port */
+ if (qla2x00_is_a_vp_did(vha, rscn_entry))
+ break;
+
atomic_set(&vha->loop_down_timer, 0);
vha->flags.management_server_logged_in = 0;
@@ -987,14 +1016,25 @@ skip_rio:
mb[1], mb[2], mb[3]);
break;
case MBA_IDC_NOTIFY:
- /* See if we need to quiesce any I/O */
- if (IS_QLA8031(vha->hw))
- if ((mb[2] & 0x7fff) == MBC_PORT_RESET ||
- (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) {
+ if (IS_QLA8031(vha->hw)) {
+ mb[4] = RD_REG_WORD(&reg24->mailbox4);
+ if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
+ (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
+ (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
+ /*
+ * Extend loop down timer since port is active.
+ */
+ if (atomic_read(&vha->loop_state) == LOOP_DOWN)
+ atomic_set(&vha->loop_down_timer,
+ LOOP_DOWN_TIME);
qla2xxx_wake_dpc(vha);
}
+ }
case MBA_IDC_COMPLETE:
+ if (ha->notify_lb_portup_comp)
+ complete(&ha->lb_portup_comp);
+ /* Fallthru */
case MBA_IDC_TIME_EXT:
if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw))
qla81xx_idc_event(vha, mb[0], mb[1]);
@@ -1033,7 +1073,7 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
struct qla_hw_data *ha = vha->hw;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x3014,
"Invalid SCSI command index (%x).\n", index);
@@ -1071,7 +1111,7 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
uint16_t index;
index = LSW(pkt->handle);
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x5031,
"Invalid command index (%x).\n", index);
if (IS_QLA82XX(ha))
@@ -1744,7 +1784,7 @@ qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
sts24 = (struct sts_entry_24xx *) pkt;
/* Validate handle. */
- if (index >= MAX_OUTSTANDING_COMMANDS) {
+ if (index >= req->num_outstanding_cmds) {
ql_log(ql_log_warn, vha, 0x70af,
"Invalid SCSI completion handle 0x%x.\n", index);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -1914,9 +1954,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
req = ha->req_q_map[que];
/* Validate handle. */
- if (handle < MAX_OUTSTANDING_COMMANDS) {
+ if (handle < req->num_outstanding_cmds)
sp = req->outstanding_cmds[handle];
- } else
+ else
sp = NULL;
if (sp == NULL) {
@@ -1938,6 +1978,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
/* Fast path completion. */
if (comp_status == CS_COMPLETE && scsi_status == 0) {
+ qla2x00_do_host_ramp_up(vha);
qla2x00_process_completed_request(vha, req, handle);
return;
@@ -2197,6 +2238,9 @@ out:
cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
resid_len, fw_resid_len);
+ if (!res)
+ qla2x00_do_host_ramp_up(vha);
+
if (rsp->status_srb == NULL)
sp->done(ha, sp, res);
}
@@ -2318,7 +2362,7 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
/* Read all mbox registers? */
mboxes = (1 << ha->mbx_count) - 1;
if (!ha->mcp)
- ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERRROR.\n");
+ ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
else
mboxes = ha->mcp->in_mb;
@@ -2751,6 +2795,12 @@ static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
+static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
+ { "qla2xxx (default)", qla24xx_msix_default },
+ { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+ { "qla2xxx (atio_q)", qla83xx_msix_atio_q },
+};
+
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
@@ -2831,9 +2881,13 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < ha->msix_count; i++) {
qentry = &ha->msix_entries[i];
- if (IS_QLA82XX(ha)) {
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[i].handler,
+ 0, qla83xx_msix_entries[i].name, rsp);
+ } else if (IS_QLA82XX(ha)) {
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
@@ -2944,7 +2998,9 @@ skip_msi:
"Failed to reserve interrupt %d already in use.\n",
ha->pdev->irq);
goto fail;
- }
+ } else if (!ha->flags.msi_enabled)
+ ql_dbg(ql_dbg_init, vha, 0x0125,
+ "INTa mode: Enabled.\n");
clear_risc_ints:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 18c509fae555..186dd59ce4fa 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -900,13 +900,13 @@ qla2x00_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* command not found */
return QLA_FUNCTION_FAILED;
}
@@ -1633,6 +1633,54 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
}
/*
+ * qla24xx_link_initialize
+ * Issue link initialization mailbox command.
+ *
+ * Input:
+ * ha = adapter block pointer.
+ * TARGET_QUEUE_LOCK must be released.
+ * ADAPTER_STATE_LOCK must be released.
+ *
+ * Returns:
+ * qla2x00 local function return status code.
+ *
+ * Context:
+ * Kernel context.
+ */
+int
+qla24xx_link_initialize(scsi_qla_host_t *vha)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1152,
+ "Entered %s.\n", __func__);
+
+ if (!IS_FWI2_CAPABLE(vha->hw) || IS_CNA_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ mcp->mb[0] = MBC_LINK_INITIALIZATION;
+ mcp->mb[1] = BIT_6|BIT_4;
+ mcp->mb[2] = 0;
+ mcp->mb[3] = 0;
+ mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1153, "Failed=%x.\n", rval);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1154,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+/*
* qla2x00_lip_reset
* Issue LIP reset mailbox command.
*
@@ -2535,12 +2583,12 @@ qla24xx_abort_command(srb_t *sp)
"Entered %s.\n", __func__);
spin_lock_irqsave(&ha->hardware_lock, flags);
- for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
+ for (handle = 1; handle < req->num_outstanding_cmds; handle++) {
if (req->outstanding_cmds[handle] == sp)
break;
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
- if (handle == MAX_OUTSTANDING_COMMANDS) {
+ if (handle == req->num_outstanding_cmds) {
/* Command not found. */
return QLA_FUNCTION_FAILED;
}
@@ -3093,6 +3141,7 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
struct qla_hw_data *ha = vha->hw;
scsi_qla_host_t *vp;
unsigned long flags;
+ int found;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
"Entered %s.\n", __func__);
@@ -3122,19 +3171,23 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
if (vp_idx == 0 && (MSB(stat) != 1))
goto reg_needed;
- if (MSB(stat) != 0) {
+ if (MSB(stat) != 0 && MSB(stat) != 2) {
ql_dbg(ql_dbg_mbx, vha, 0x10ba,
"Could not acquire ID for VP[%d].\n", vp_idx);
return;
}
+ found = 0;
spin_lock_irqsave(&ha->vport_slock, flags);
- list_for_each_entry(vp, &ha->vp_list, list)
- if (vp_idx == vp->vp_idx)
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ if (vp_idx == vp->vp_idx) {
+ found = 1;
break;
+ }
+ }
spin_unlock_irqrestore(&ha->vport_slock, flags);
- if (!vp)
+ if (!found)
return;
vp->d_id.b.domain = rptid_entry->port_id[2];
@@ -3536,7 +3589,7 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * req->id);
mcp->mb[4] = req->id;
@@ -3605,7 +3658,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
if (IS_QLA83XX(ha))
mcp->mb[15] = 0;
- reg = (struct device_reg_25xxmq *)((void *)(ha->mqiobase) +
+ reg = (struct device_reg_25xxmq __iomem *)((ha->mqiobase) +
QLA_QUE_PAGE * rsp->id);
mcp->mb[4] = rsp->id;
@@ -3814,6 +3867,97 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
}
int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
+static int
+qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+
+ if (!IS_FWI2_CAPABLE(vha->hw))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1159,
+ "Entered %s.\n", __func__);
+
+ mcp->mb[0] = MBC_GET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_ASIC_TEMP << 8;
+ mcp->out_mb = MBX_1|MBX_0;
+ mcp->in_mb = MBX_1|MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+ *temp = mcp->mb[1];
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x115a,
+ "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x115b,
+ "Done %s.\n", __func__);
+ }
+
+ return rval;
+}
+
+int
qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
uint16_t dev, uint16_t off, uint16_t len, uint16_t opt)
{
@@ -4415,38 +4559,45 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
}
int
-qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
+qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp)
{
- int rval;
- uint8_t byte;
+ int rval = QLA_FUNCTION_FAILED;
struct qla_hw_data *ha = vha->hw;
+ uint8_t byte;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
"Entered %s.\n", __func__);
- /* Integer part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x10c9, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_I2C) {
+ rval = qla2x00_read_sfp(vha, 0, &byte,
+ 0x98, 0x1, 1, BIT_13|BIT_12|BIT_0);
+ *temp = byte;
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x10c9,
+ "Thermal not supported by I2C.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_I2C;
}
- *temp = byte;
- /* Fraction part */
- rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x10, 1,
- BIT_13|BIT_12|BIT_0);
- if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_mbx, vha, 0x1019, "Failed=%x.\n", rval);
- ha->flags.thermal_supported = 0;
- goto fail;
+ if (ha->thermal_support & THERMAL_SUPPORT_ISP) {
+ rval = qla2x00_read_asic_temperature(vha, temp);
+ if (rval == QLA_SUCCESS)
+ goto done;
+
+ ql_log(ql_log_warn, vha, 0x1019,
+ "Thermal not supported by ISP.\n");
+ ha->thermal_support &= ~THERMAL_SUPPORT_ISP;
}
- *frac = (byte >> 6) * 25;
+ ql_log(ql_log_warn, vha, 0x1150,
+ "Thermal not supported by this card "
+ "(ignoring further requests).\n");
+ return rval;
+
+done:
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
"Done %s.\n", __func__);
-fail:
return rval;
}
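The reworked qla2x00_get_thermal_temp() above probes the I2C sideband first, falls back to the ISP mailbox query, and clears the matching THERMAL_SUPPORT_* bit whenever a method fails so later calls skip paths already known not to work. A condensed sketch of that capability-bit fallback idiom, with stand-in names:

/*
 * Hedged sketch of the fallback chain in qla2x00_get_thermal_temp();
 * the SUPPORT_* flags and both read helpers are illustrative
 * stand-ins, not driver symbols.
 */
#include <linux/errno.h>

#define SUPPORT_METHOD_A	0x1
#define SUPPORT_METHOD_B	0x2

static int example_read_method_a(unsigned int *value);	/* primary source */
static int example_read_method_b(unsigned int *value);	/* fallback source */

static int example_read(unsigned int *support, unsigned int *value)
{
	if (*support & SUPPORT_METHOD_A) {
		if (example_read_method_a(value) == 0)
			return 0;
		*support &= ~SUPPORT_METHOD_A;	/* stop retrying method A */
	}

	if (*support & SUPPORT_METHOD_B) {
		if (example_read_method_b(value) == 0)
			return 0;
		*support &= ~SUPPORT_METHOD_B;	/* stop retrying method B */
	}

	return -EOPNOTSUPP;	/* no working method left */
}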
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 20fd974f903a..f868a9f98afe 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -523,6 +523,7 @@ qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req)
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
}
+ kfree(req->outstanding_cmds);
kfree(req);
req = NULL;
}
@@ -649,6 +650,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
goto que_failed;
}
+ ret = qla2x00_alloc_outstanding_cmds(ha, req);
+ if (ret != QLA_SUCCESS)
+ goto que_failed;
+
mutex_lock(&ha->vport_lock);
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
@@ -685,7 +690,7 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 14cd361742fa..10754f518303 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -36,7 +36,7 @@
#define MAX_CRB_XFORM 60
static unsigned long crb_addr_xform[MAX_CRB_XFORM];
-int qla82xx_crb_table_initialized;
+static int qla82xx_crb_table_initialized;
#define qla82xx_crb_addr_transform(name) \
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
@@ -102,7 +102,7 @@ static void qla82xx_crb_addr_transform_setup(void)
qla82xx_crb_table_initialized = 1;
}
-struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
+static struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
{{{0, 0, 0, 0} } },
{{{1, 0x0100000, 0x0102000, 0x120000},
{1, 0x0110000, 0x0120000, 0x130000},
@@ -262,7 +262,7 @@ struct crb_128M_2M_block_map crb_128M_2M_map[64] = {
/*
* top 12 bits of crb internal address (hub, agent)
*/
-unsigned qla82xx_crb_hub_agt[64] = {
+static unsigned qla82xx_crb_hub_agt[64] = {
0,
QLA82XX_HW_CRB_HUB_AGT_ADR_PS,
QLA82XX_HW_CRB_HUB_AGT_ADR_MN,
@@ -330,7 +330,7 @@ unsigned qla82xx_crb_hub_agt[64] = {
};
/* Device states */
-char *q_dev_state[] = {
+static char *q_dev_state[] = {
"Unknown",
"Cold",
"Initializing",
@@ -359,12 +359,13 @@ qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off)
ha->crb_win = CRB_HI(*off);
writel(ha->crb_win,
- (void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
/* Read back value to make sure write has gone through before trying
* to use it.
*/
- win_read = RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ win_read = RD_REG_DWORD((void __iomem *)
+ (CRB_WINDOW_2M + ha->nx_pcibase));
if (win_read != ha->crb_win) {
ql_dbg(ql_dbg_p3p, vha, 0xb000,
"%s: Written crbwin (0x%x) "
@@ -567,7 +568,7 @@ qla82xx_pci_mem_bound_check(struct qla_hw_data *ha,
return 1;
}
-int qla82xx_pci_set_window_warning_count;
+static int qla82xx_pci_set_window_warning_count;
static unsigned long
qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr)
@@ -677,10 +678,10 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -712,7 +713,7 @@ static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL) {
+ if (mem_ptr == NULL) {
*(u8 *)data = 0;
return -1;
}
@@ -749,10 +750,10 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
u64 off, void *data, int size)
{
unsigned long flags;
- void *addr = NULL;
+ void __iomem *addr = NULL;
int ret = 0;
u64 start;
- uint8_t *mem_ptr = NULL;
+ uint8_t __iomem *mem_ptr = NULL;
unsigned long mem_base;
unsigned long mem_page;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -784,7 +785,7 @@ qla82xx_pci_mem_write_direct(struct qla_hw_data *ha,
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2);
else
mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE);
- if (mem_ptr == 0UL)
+ if (mem_ptr == NULL)
return -1;
addr = mem_ptr;
@@ -846,14 +847,21 @@ static int
qla82xx_rom_lock(struct qla_hw_data *ha)
{
int done = 0, timeout = 0;
+ uint32_t lock_owner = 0;
+ scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
while (!done) {
/* acquire semaphore2 from PCI HW block */
done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
if (done == 1)
break;
- if (timeout >= qla82xx_rom_lock_timeout)
+ if (timeout >= qla82xx_rom_lock_timeout) {
+ lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
+ ql_dbg(ql_dbg_p3p, vha, 0xb085,
+ "Failed to acquire rom lock, acquired by %d.\n",
+ lock_owner);
return -1;
+ }
timeout++;
}
qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
@@ -908,24 +916,24 @@ qla82xx_wait_rom_done(struct qla_hw_data *ha)
return 0;
}
-int
+static int
qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
uint32_t off_value, rval = 0;
- WRT_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase),
+ WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
(off & 0xFFFF0000));
/* Read back value to make sure write has gone through */
- RD_REG_DWORD((void *)(CRB_WINDOW_2M + ha->nx_pcibase));
+ RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
off_value = (off & 0x0000FFFF);
if (flag)
- WRT_REG_DWORD((void *)
+ WRT_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
data);
else
- rval = RD_REG_DWORD((void *)
+ rval = RD_REG_DWORD((void __iomem *)
(off_value + CRB_INDIRECT_2M + ha->nx_pcibase));
return rval;
@@ -955,7 +963,7 @@ qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
}
if (loops >= 50000) {
ql_log(ql_log_fatal, vha, 0x00b9,
- "Failed to aquire SEM2 lock.\n");
+ "Failed to acquire SEM2 lock.\n");
return -1;
}
ret = qla82xx_do_rom_fast_read(ha, addr, valp);
@@ -1122,7 +1130,7 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
long data;
};
- /* Halt all the indiviual PEGs and other blocks of the ISP */
+ /* Halt all the individual PEGs and other blocks of the ISP */
qla82xx_rom_lock(ha);
/* disable all I2Q */
@@ -1654,7 +1662,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nx_pcibase) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000e,
"Cannot remap pcibase MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1669,7 +1676,6 @@ qla82xx_iospace_config(struct qla_hw_data *ha)
if (!ha->nxdb_wr_ptr) {
ql_log_pci(ql_log_fatal, ha->pdev, 0x000f,
"Cannot remap MMIO, aborting.\n");
- pci_release_regions(ha->pdev);
goto iospace_error_exit;
}
@@ -1764,14 +1770,6 @@ void qla82xx_config_rings(struct scsi_qla_host *vha)
WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0);
}
-void qla82xx_reset_adapter(struct scsi_qla_host *vha)
-{
- struct qla_hw_data *ha = vha->hw;
- vha->flags.online = 0;
- qla2x00_try_to_stop_firmware(vha);
- ha->isp_ops->disable_intrs(ha);
-}
-
static int
qla82xx_fw_load_from_blob(struct qla_hw_data *ha)
{
@@ -1856,7 +1854,7 @@ qla82xx_set_product_offset(struct qla_hw_data *ha)
return -1;
}
-int
+static int
qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type)
{
__le32 val;
@@ -1961,20 +1959,6 @@ qla82xx_check_rcvpeg_state(struct qla_hw_data *ha)
}
/* ISR related functions */
-uint32_t qla82xx_isr_int_target_mask_enable[8] = {
- ISR_INT_TARGET_MASK, ISR_INT_TARGET_MASK_F1,
- ISR_INT_TARGET_MASK_F2, ISR_INT_TARGET_MASK_F3,
- ISR_INT_TARGET_MASK_F4, ISR_INT_TARGET_MASK_F5,
- ISR_INT_TARGET_MASK_F7, ISR_INT_TARGET_MASK_F7
-};
-
-uint32_t qla82xx_isr_int_target_status[8] = {
- ISR_INT_TARGET_STATUS, ISR_INT_TARGET_STATUS_F1,
- ISR_INT_TARGET_STATUS_F2, ISR_INT_TARGET_STATUS_F3,
- ISR_INT_TARGET_STATUS_F4, ISR_INT_TARGET_STATUS_F5,
- ISR_INT_TARGET_STATUS_F7, ISR_INT_TARGET_STATUS_F7
-};
-
static struct qla82xx_legacy_intr_set legacy_intr[] = \
QLA82XX_LEGACY_INTR_CONFIG;
@@ -2813,7 +2797,7 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
else {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
wmb();
- while (RD_REG_DWORD(ha->nxdb_rd_ptr) != dbval) {
+ while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
dbval);
wmb();
@@ -2821,7 +2805,8 @@ qla82xx_start_iocbs(scsi_qla_host_t *vha)
}
}
-void qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
+static void
+qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
@@ -3177,7 +3162,7 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
}
-int
+static int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
uint32_t fw_heartbeat_counter;
@@ -3651,7 +3636,7 @@ qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
if (!sp->u.scmd.ctx ||
@@ -3817,7 +3802,8 @@ qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha,
loop_cnt = ocm_hdr->op_count;
for (i = 0; i < loop_cnt; i++) {
- r_value = RD_REG_DWORD((void *)(r_addr + ha->nx_pcibase));
+ r_value = RD_REG_DWORD((void __iomem *)
+ (r_addr + ha->nx_pcibase));
*data_ptr++ = cpu_to_le32(r_value);
r_addr += r_stride;
}
@@ -4376,7 +4362,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_tmplt_hdr, ha->md_template_size / 1024);
dma_free_coherent(&ha->pdev->dev, ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
/* Release the template data buffer allocated */
@@ -4386,7 +4372,7 @@ qla82xx_md_free(scsi_qla_host_t *vha)
ha->md_dump, ha->md_dump_size / 1024);
vfree(ha->md_dump);
ha->md_dump_size = 0;
- ha->md_dump = 0;
+ ha->md_dump = NULL;
}
}
@@ -4423,7 +4409,7 @@ qla82xx_md_prep(scsi_qla_host_t *vha)
dma_free_coherent(&ha->pdev->dev,
ha->md_template_size,
ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma);
- ha->md_tmplt_hdr = 0;
+ ha->md_tmplt_hdr = NULL;
}
}
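Many of the qla_nx.c hunks above are sparse hygiene: MMIO pointers gain the __iomem address-space annotation and ioremap() results are compared against NULL rather than 0UL. A minimal sketch of the corrected idiom, with a made-up base address and register offset:

/*
 * Hedged sketch of the sparse-clean MMIO idiom the hunks above
 * converge on; EXAMPLE_REG_OFFSET and the helper are illustrative.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_REG_OFFSET	0x98

static int example_read_reg(resource_size_t base, u32 *out)
{
	void __iomem *regs = ioremap(base, PAGE_SIZE);

	if (regs == NULL)	/* compare against NULL, never 0UL */
		return -ENOMEM;

	*out = readl(regs + EXAMPLE_REG_OFFSET);
	iounmap(regs);
	return 0;
}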
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6c953e8c08f0..d268e8406fdb 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -897,7 +897,7 @@ struct ct6_dsd {
#define FLT_REG_BOOT_CODE_82XX 0x78
#define FLT_REG_FW_82XX 0x74
#define FLT_REG_GOLD_FW_82XX 0x75
-#define FLT_REG_VPD_82XX 0x81
+#define FLT_REG_VPD_8XXX 0x81
#define FA_VPD_SIZE_82XX 0x400
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index d501bf5f806b..2c6dd3dfe0f4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -41,7 +41,7 @@ static struct kmem_cache *ctx_cachep;
*/
int ql_errlev = ql_log_all;
-int ql2xenableclass2;
+static int ql2xenableclass2;
module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
MODULE_PARM_DESC(ql2xenableclass2,
"Specify if Class 2 operations are supported from the very "
@@ -89,6 +89,8 @@ MODULE_PARM_DESC(ql2xextended_error_logging,
"\t\t0x00200000 - AER/EEH. 0x00100000 - Multi Q.\n"
"\t\t0x00080000 - P3P Specific. 0x00040000 - Virtual Port.\n"
"\t\t0x00020000 - Buffer Dump. 0x00010000 - Misc.\n"
+ "\t\t0x00008000 - Verbose. 0x00004000 - Target.\n"
+ "\t\t0x00002000 - Target Mgmt. 0x00001000 - Target TMF.\n"
"\t\t0x7fffffff - For enabling all logs, can be too many logs.\n"
"\t\t0x1e400000 - Preferred value for capturing essential "
"debug information (equivalent to old "
@@ -109,8 +111,7 @@ MODULE_PARM_DESC(ql2xfdmienable,
"Enables FDMI registrations. "
"0 - no FDMI. Default is 1 - perform FDMI.");
-#define MAX_Q_DEPTH 32
-static int ql2xmaxqdepth = MAX_Q_DEPTH;
+int ql2xmaxqdepth = MAX_Q_DEPTH;
module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(ql2xmaxqdepth,
"Maximum queue depth to set for each LUN. "
@@ -358,6 +359,9 @@ static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
(req->length + 1) * sizeof(request_t),
req->ring, req->dma);
+ if (req)
+ kfree(req->outstanding_cmds);
+
kfree(req);
req = NULL;
}
@@ -494,12 +498,20 @@ qla24xx_pci_info_str(struct scsi_qla_host *vha, char *str)
(BIT_4 | BIT_5 | BIT_6 | BIT_7 | BIT_8 | BIT_9)) >> 4;
strcpy(str, "PCIe (");
- if (lspeed == 1)
+ switch (lspeed) {
+ case 1:
strcat(str, "2.5GT/s ");
- else if (lspeed == 2)
+ break;
+ case 2:
strcat(str, "5.0GT/s ");
- else
+ break;
+ case 3:
+ strcat(str, "8.0GT/s ");
+ break;
+ default:
strcat(str, "<unknown> ");
+ break;
+ }
snprintf(lwstr, sizeof(lwstr), "x%d)", lwidth);
strcat(str, lwstr);
@@ -618,7 +630,7 @@ qla2x00_sp_free_dma(void *vha, void *ptr)
}
CMD_SP(cmd) = NULL;
- mempool_free(sp, ha->srb_mempool);
+ qla2x00_rel_sp(sp->fcport->vha, sp);
}
static void
@@ -706,9 +718,11 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
goto qc24_target_busy;
}
- sp = qla2x00_get_sp(base_vha, fcport, GFP_ATOMIC);
- if (!sp)
+ sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
+ if (!sp) {
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy;
+ }
sp->u.scmd.cmd = cmd;
sp->type = SRB_SCSI_CMD;
@@ -719,8 +733,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
rval = ha->isp_ops->start_scsi(sp);
if (rval != QLA_SUCCESS) {
- ql_dbg(ql_dbg_io, vha, 0x3013,
+ ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3013,
"Start scsi failed rval=%d for cmd=%p.\n", rval, cmd);
+ set_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags);
goto qc24_host_busy_free_sp;
}
@@ -1000,7 +1015,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
spin_lock_irqsave(&ha->hardware_lock, flags);
req = vha->req;
for (cnt = 1; status == QLA_SUCCESS &&
- cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (!sp)
continue;
@@ -1290,14 +1305,14 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
}
if (ha->flags.enable_lip_full_login && !IS_CNA_CAPABLE(ha)) {
+ atomic_set(&vha->loop_state, LOOP_DOWN);
+ atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+ qla2x00_mark_all_devices_lost(vha, 0);
ret = qla2x00_full_login_lip(vha);
if (ret != QLA_SUCCESS) {
ql_dbg(ql_dbg_taskm, vha, 0x802d,
"full_login_lip=%d.\n", ret);
}
- atomic_set(&vha->loop_state, LOOP_DOWN);
- atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
- qla2x00_mark_all_devices_lost(vha, 0);
}
if (ha->flags.enable_lip_reset) {
@@ -1327,7 +1342,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
req = ha->req_q_map[que];
if (!req)
continue;
- for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+ if (!req->outstanding_cmds)
+ continue;
+ for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
req->outstanding_cmds[cnt] = NULL;
@@ -1443,6 +1460,81 @@ qla2x00_change_queue_type(struct scsi_device *sdev, int tag_type)
return tag_type;
}
+static void
+qla2x00_host_ramp_down_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampdown_time = jiffies;
+
+ if (ha->cfg_lun_q_depth <= vha->host->cmd_per_lun)
+ return;
+
+ if ((ha->cfg_lun_q_depth / 2) < vha->host->cmd_per_lun)
+ ha->cfg_lun_q_depth = vha->host->cmd_per_lun;
+ else
+ ha->cfg_lun_q_depth = ha->cfg_lun_q_depth / 2;
+
+ /*
+ * Geometrically ramp down the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > shost->cmd_per_lun) {
+ if (sdev->queue_depth < ha->cfg_lun_q_depth)
+ continue;
+ ql_log(ql_log_warn, vp, 0x3031,
+ "%ld:%d:%d: Ramping down queue depth to %d",
+ vp->host_no, sdev->id, sdev->lun,
+ ha->cfg_lun_q_depth);
+ qla2x00_change_queue_depth(sdev,
+ ha->cfg_lun_q_depth, SCSI_QDEPTH_DEFAULT);
+ }
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
+static void
+qla2x00_host_ramp_up_queuedepth(scsi_qla_host_t *vha)
+{
+ scsi_qla_host_t *vp;
+ struct Scsi_Host *shost;
+ struct scsi_device *sdev;
+ struct qla_hw_data *ha = vha->hw;
+ unsigned long flags;
+
+ ha->host_last_rampup_time = jiffies;
+ ha->cfg_lun_q_depth++;
+
+ /*
+ * Linearly ramp up the queue depth for all devices on this
+ * adapter
+ */
+ spin_lock_irqsave(&ha->vport_slock, flags);
+ list_for_each_entry(vp, &ha->vp_list, list) {
+ shost = vp->host;
+ shost_for_each_device(sdev, shost) {
+ if (sdev->queue_depth > ha->cfg_lun_q_depth)
+ continue;
+ qla2x00_change_queue_depth(sdev, ha->cfg_lun_q_depth,
+ SCSI_QDEPTH_RAMP_UP);
+ }
+ }
+ spin_unlock_irqrestore(&ha->vport_slock, flags);
+
+ return;
+}
+
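Together, the two helpers above form a classic additive-increase/multiplicative-decrease (AIMD) control loop: queue depth is halved (clamped to cmd_per_lun) under resource pressure and grows by one step on sustained clean completions. A compressed sketch of the control rule, with illustrative bounds:

/*
 * Hedged sketch of the AIMD rule behind the ramp helpers above;
 * field names and bounds are illustrative, not driver symbols.
 */
struct example_qdepth {
	int depth;	/* current per-LUN queue depth */
	int floor;	/* never below the host's cmd_per_lun */
	int ceiling;	/* never above the ql2xmaxqdepth-style cap */
};

static void example_ramp_down(struct example_qdepth *q)
{
	/* multiplicative decrease: halve, clamped to the floor */
	q->depth = (q->depth / 2 < q->floor) ? q->floor : q->depth / 2;
}

static void example_ramp_up(struct example_qdepth *q)
{
	/* additive increase: one step at a time, clamped to the cap */
	if (q->depth < q->ceiling)
		q->depth++;
}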
/**
* qla2x00_config_dma_addressing() - Configure OS DMA addressing method.
* @ha: HA context
@@ -1720,6 +1812,9 @@ qla83xx_iospace_config(struct qla_hw_data *ha)
mqiobase_exit:
ha->msix_count = ha->max_rsp_queues + 1;
+
+ qlt_83xx_iospace_config(ha);
+
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x011f,
"MSIX Count:%d.\n", ha->msix_count);
return 0;
@@ -2144,7 +2239,7 @@ qla2xxx_scan_finished(struct Scsi_Host *shost, unsigned long time)
/*
* PCI driver interface
*/
-static int __devinit
+static int
qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
int ret = -ENODEV;
@@ -2220,6 +2315,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->init_cb_size = sizeof(init_cb_t);
ha->link_data_rate = PORT_SPEED_UNKNOWN;
ha->optrom_size = OPTROM_SIZE_2300;
+ ha->cfg_lun_q_depth = ql2xmaxqdepth;
/* Assign ISP specific operations. */
if (IS_QLA2100(ha)) {
@@ -2297,6 +2393,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2328,6 +2425,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
ha->mbx_count = MAILBOX_REGISTER_COUNT;
req_length = REQUEST_ENTRY_CNT_24XX;
rsp_length = RESPONSE_ENTRY_CNT_2300;
+ ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
ha->gid_list_info_size = 8;
@@ -2357,7 +2455,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
/* Configure PCI I/O space */
ret = ha->isp_ops->iospace_config(ha);
if (ret)
- goto probe_hw_failed;
+ goto iospace_config_failed;
ql_log_pci(ql_log_info, pdev, 0x001d,
"Found an ISP%04X irq %d iobase 0x%p.\n",
@@ -2367,6 +2465,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
complete(&ha->mbx_cmd_comp);
init_completion(&ha->mbx_intr_comp);
init_completion(&ha->dcbx_comp);
+ init_completion(&ha->lb_portup_comp);
set_bit(0, (unsigned long *) ha->vp_idx_map);
@@ -2668,7 +2767,11 @@ probe_hw_failed:
qla82xx_idc_lock(ha);
qla82xx_clear_drv_active(ha);
qla82xx_idc_unlock(ha);
- iounmap((device_reg_t __iomem *)ha->nx_pcibase);
+ }
+iospace_config_failed:
+ if (IS_QLA82XX(ha)) {
+ if (ha->nx_pcibase)
+ iounmap((device_reg_t __iomem *)ha->nx_pcibase);
if (!ql2xdbwr)
iounmap((device_reg_t __iomem *)ha->nxdb_wr_ptr);
} else {
@@ -2706,6 +2809,9 @@ qla2x00_shutdown(struct pci_dev *pdev)
scsi_qla_host_t *vha;
struct qla_hw_data *ha;
+ if (!atomic_read(&pdev->enable_cnt))
+ return;
+
vha = pci_get_drvdata(pdev);
ha = vha->hw;
@@ -2755,6 +2861,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
ha->flags.host_shutting_down = 1;
+ set_bit(UNLOADING, &base_vha->dpc_flags);
mutex_lock(&ha->vport_lock);
while (ha->cur_vport_count) {
struct Scsi_Host *scsi_host;
@@ -2784,8 +2891,6 @@ qla2x00_remove_one(struct pci_dev *pdev)
"Error while clearing DRV-Presence.\n");
}
- set_bit(UNLOADING, &base_vha->dpc_flags);
-
qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
qla2x00_dfs_remove(base_vha);
@@ -3721,10 +3826,9 @@ void qla2x00_relogin(struct scsi_qla_host *vha)
if (fcport->flags &
FCF_FCP2_DEVICE)
opts |= BIT_1;
- status2 =
- qla2x00_get_port_database(
- vha, fcport,
- opts);
+ status2 =
+ qla2x00_get_port_database(
+ vha, fcport, opts);
if (status2 != QLA_SUCCESS)
status = 1;
}
@@ -3836,7 +3940,7 @@ qla83xx_idc_state_handler_work(struct work_struct *work)
qla83xx_idc_unlock(base_vha, 0);
}
-int
+static int
qla83xx_check_nic_core_fw_alive(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -3954,7 +4058,7 @@ qla83xx_wait_logic(void)
}
}
-int
+static int
qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval;
@@ -3962,6 +4066,8 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
uint32_t idc_lck_rcvry_stage_mask = 0x3;
uint32_t idc_lck_rcvry_owner_mask = 0x3c;
struct qla_hw_data *ha = base_vha->hw;
+ ql_dbg(ql_dbg_p3p, base_vha, 0xb086,
+ "Trying force recovery of the IDC lock.\n");
rval = qla83xx_rd_reg(base_vha, QLA83XX_IDC_LOCK_RECOVERY, &data);
if (rval)
@@ -4013,7 +4119,7 @@ qla83xx_force_lock_recovery(scsi_qla_host_t *base_vha)
return rval;
}
-int
+static int
qla83xx_idc_lock_recovery(scsi_qla_host_t *base_vha)
{
int rval = QLA_SUCCESS;
@@ -4053,6 +4159,7 @@ qla83xx_idc_lock(scsi_qla_host_t *base_vha, uint16_t requester_id)
{
uint16_t options = (requester_id << 15) | BIT_6;
uint32_t data;
+ uint32_t lock_owner;
struct qla_hw_data *ha = base_vha->hw;
/* IDC-lock implementation using driver-lock/lock-id remote registers */
@@ -4064,8 +4171,11 @@ retry_lock:
qla83xx_wr_reg(base_vha, QLA83XX_DRIVER_LOCKID,
ha->portnum);
} else {
+ qla83xx_rd_reg(base_vha, QLA83XX_DRIVER_LOCKID,
+ &lock_owner);
ql_dbg(ql_dbg_p3p, base_vha, 0xb063,
- "Failed to acquire IDC lock. retrying...\n");
+ "Failed to acquire IDC lock, acquired by %d, "
+ "retrying...\n", lock_owner);
/* Retry/Perform IDC-Lock recovery */
if (qla83xx_idc_lock_recovery(base_vha)
@@ -4212,7 +4322,7 @@ qla83xx_clear_drv_presence(scsi_qla_host_t *vha)
return rval;
}
-void
+static void
qla83xx_need_reset_handler(scsi_qla_host_t *vha)
{
struct qla_hw_data *ha = vha->hw;
@@ -4224,7 +4334,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
while (1) {
qla83xx_rd_reg(vha, QLA83XX_IDC_DRIVER_ACK, &drv_ack);
qla83xx_rd_reg(vha, QLA83XX_IDC_DRV_PRESENCE, &drv_presence);
- if (drv_ack == drv_presence)
+ if ((drv_ack & drv_presence) == drv_presence)
break;
if (time_after_eq(jiffies, ack_timeout)) {
@@ -4251,7 +4361,7 @@ qla83xx_need_reset_handler(scsi_qla_host_t *vha)
ql_log(ql_log_info, vha, 0xb068, "HW State: COLD/RE-INIT.\n");
}
-int
+static int
qla83xx_device_bootstrap(scsi_qla_host_t *vha)
{
int rval = QLA_SUCCESS;
@@ -4505,9 +4615,9 @@ qla2x00_do_dpc(void *data)
"ISP abort end.\n");
}
- if (test_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags)) {
+ if (test_and_clear_bit(FCPORT_UPDATE_NEEDED,
+ &base_vha->dpc_flags)) {
qla2x00_update_fcports(base_vha);
- clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
}
if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
@@ -4593,6 +4703,18 @@ qla2x00_do_dpc(void *data)
qla2xxx_flash_npiv_conf(base_vha);
}
+ if (test_and_clear_bit(HOST_RAMP_DOWN_QUEUE_DEPTH,
+ &base_vha->dpc_flags)) {
+ /* Prevents simultaneous ramp up and down */
+ clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags);
+ qla2x00_host_ramp_down_queuedepth(base_vha);
+ }
+
+ if (test_and_clear_bit(HOST_RAMP_UP_QUEUE_DEPTH,
+ &base_vha->dpc_flags))
+ qla2x00_host_ramp_up_queuedepth(base_vha);
+
if (!ha->interrupts_on)
ha->isp_ops->enable_intrs(ha);
@@ -4721,7 +4843,7 @@ qla2x00_timer(scsi_qla_host_t *vha)
cpu_flags);
req = ha->req_q_map[0];
for (index = 1;
- index < MAX_OUTSTANDING_COMMANDS;
+ index < req->num_outstanding_cmds;
index++) {
fc_port_t *sfcp;
@@ -4790,7 +4912,9 @@ qla2x00_timer(scsi_qla_host_t *vha)
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) ||
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) ||
test_bit(VP_DPC_NEEDED, &vha->dpc_flags) ||
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags))) {
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags) ||
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags))) {
ql_dbg(ql_dbg_timer, vha, 0x600b,
"isp_abort_needed=%d loop_resync_needed=%d "
"fcport_update_needed=%d start_dpc=%d "
@@ -4803,12 +4927,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
ql_dbg(ql_dbg_timer, vha, 0x600c,
"beacon_blink_needed=%d isp_unrecoverable=%d "
"fcoe_ctx_reset_needed=%d vp_dpc_needed=%d "
- "relogin_needed=%d.\n",
+ "relogin_needed=%d, host_ramp_down_needed=%d "
+ "host_ramp_up_needed=%d.\n",
test_bit(BEACON_BLINK_NEEDED, &vha->dpc_flags),
test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags),
test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags),
test_bit(VP_DPC_NEEDED, &vha->dpc_flags),
- test_bit(RELOGIN_NEEDED, &vha->dpc_flags));
+ test_bit(RELOGIN_NEEDED, &vha->dpc_flags),
+ test_bit(HOST_RAMP_DOWN_QUEUE_DEPTH, &vha->dpc_flags),
+ test_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags));
qla2xxx_wake_dpc(vha);
}
@@ -4987,7 +5114,8 @@ qla2xxx_pci_mmio_enabled(struct pci_dev *pdev)
return PCI_ERS_RESULT_RECOVERED;
}
-uint32_t qla82xx_error_recovery(scsi_qla_host_t *base_vha)
+static uint32_t
+qla82xx_error_recovery(scsi_qla_host_t *base_vha)
{
uint32_t rval = QLA_FUNCTION_FAILED;
uint32_t drv_active = 0;
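The ramp mechanism is wired through the driver's dpc_flags convention: fast paths set a flag bit and wake the DPC thread, which consumes the bit with test_and_clear_bit() in its main loop. A minimal sketch of that deferral pattern, with hypothetical names:

/*
 * Hedged sketch of the dpc_flags deferral pattern that the
 * HOST_RAMP_{UP,DOWN}_QUEUE_DEPTH bits follow; all names below are
 * illustrative, not driver symbols.
 */
#include <linux/bitops.h>
#include <linux/sched.h>

#define EXAMPLE_WORK_PENDING	0	/* bit number in the flags word */

static unsigned long example_flags;

/* Called from interrupt or fast-path context: cheap and atomic. */
static void example_fast_path_event(struct task_struct *worker)
{
	set_bit(EXAMPLE_WORK_PENDING, &example_flags);
	wake_up_process(worker);
}

/* Called from the worker thread's main loop. */
static void example_worker_pass(void)
{
	/* test_and_clear_bit() makes each posted event fire exactly once */
	if (!test_and_clear_bit(EXAMPLE_WORK_PENDING, &example_flags))
		return;

	/* ... perform the deferred, sleep-capable work here ... */
}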
diff --git a/drivers/scsi/qla2xxx/qla_settings.h b/drivers/scsi/qla2xxx/qla_settings.h
index 892a81e457bc..46ef0ac48f44 100644
--- a/drivers/scsi/qla2xxx/qla_settings.h
+++ b/drivers/scsi/qla2xxx/qla_settings.h
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 32fdc2a66dd1..3bef6736d885 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1,6 +1,6 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
@@ -798,20 +798,8 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
case FLT_REG_BOOTLOAD_82XX:
ha->flt_region_bootload = start;
break;
- case FLT_REG_VPD_82XX:
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_0:
- if (!IS_QLA8031(ha))
- break;
- ha->flt_region_vpd_nvram = start;
- if (ha->flags.port0)
- ha->flt_region_vpd = start;
- break;
- case FLT_REG_FCOE_VPD_1:
- if (!IS_QLA8031(ha))
- break;
- if (!ha->flags.port0)
+ case FLT_REG_VPD_8XXX:
+ if (IS_CNA_CAPABLE(ha))
ha->flt_region_vpd = start;
break;
case FLT_REG_FCOE_NVRAM_0:
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 62aa5584f644..61b5d8c2b5da 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -52,7 +52,7 @@ MODULE_PARM_DESC(qlini_mode,
"\"disabled\" - initiator mode will never be enabled; "
"\"enabled\" (default) - initiator mode will always stay enabled.");
-static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
+int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
/*
* From scsi/fc/fc_fcp.h
@@ -73,7 +73,7 @@ enum fcp_resp_rsp_codes {
#define FCP_PTA_SIMPLE 0 /* simple task attribute */
#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
#define FCP_PTA_ORDERED 2 /* ordered task attribute */
-#define FCP_PTA_ACA 4 /* auto. contigent allegiance */
+#define FCP_PTA_ACA 4 /* auto. contingent allegiance */
#define FCP_PTA_MASK 7 /* mask for task attribute field */
#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
@@ -1029,7 +1029,7 @@ void qlt_stop_phase2(struct qla_tgt *tgt)
EXPORT_SYMBOL(qlt_stop_phase2);
/* Called from qlt_remove_target() -> qla2x00_remove_one() */
-void qlt_release(struct qla_tgt *tgt)
+static void qlt_release(struct qla_tgt *tgt)
{
struct qla_hw_data *ha = tgt->ha;
@@ -1119,6 +1119,7 @@ static void qlt_send_notify_ack(struct scsi_qla_host *vha,
nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
nack->u.isp24.status = ntfy->u.isp24.status;
nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
+ nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
@@ -1264,8 +1265,27 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
{
struct qla_hw_data *ha = vha->hw;
+ struct se_session *se_sess = sess->se_sess;
struct qla_tgt_mgmt_cmd *mcmd;
+ struct se_cmd *se_cmd;
+ u32 lun = 0;
int rc;
+ bool found_lun = false;
+
+ spin_lock(&se_sess->sess_cmd_lock);
+ list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
+ struct qla_tgt_cmd *cmd =
+ container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+ if (cmd->tag == abts->exchange_addr_to_abort) {
+ lun = cmd->unpacked_lun;
+ found_lun = true;
+ break;
+ }
+ }
+ spin_unlock(&se_sess->sess_cmd_lock);
+
+ if (!found_lun)
+ return -ENOENT;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
"qla_target(%d): task abort (tag=%d)\n",
@@ -1283,7 +1303,7 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
mcmd->sess = sess;
memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
- rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
+ rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
abts->exchange_addr_to_abort);
if (rc != 0) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
@@ -1551,7 +1571,7 @@ static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
/* always increment cmd handle */
do {
++h;
- if (h > MAX_OUTSTANDING_COMMANDS)
+ if (h > DEFAULT_OUTSTANDING_COMMANDS)
h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
if (h == ha->tgt.current_handle) {
ql_dbg(ql_dbg_tgt, vha, 0xe04e,
@@ -2422,7 +2442,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
return NULL;
}
/* handle-1 is actually used */
- if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
+ if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
ql_dbg(ql_dbg_tgt, vha, 0xe052,
"qla_target(%d): Wrong handle %x received\n",
vha->vp_idx, handle);
@@ -3980,7 +4000,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
"qla_target(%d): System error async event %#x "
- "occured", vha->vp_idx, code);
+ "occurred", vha->vp_idx, code);
break;
case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@@ -3989,7 +4009,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_LOOP_UP:
{
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
- "qla_target(%d): Async LOOP_UP occured "
+ "qla_target(%d): Async LOOP_UP occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4006,7 +4026,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_LIP_RESET:
case MBA_RSCN_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
- "qla_target(%d): Async event %#x occured "
+ "qla_target(%d): Async event %#x occurred "
"(m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx, code,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4015,7 +4035,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
case MBA_PORT_UPDATE:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
"qla_target(%d): Port update async event %#x "
- "occured: updating the ports database (m[0]=%x, m[1]=%x, "
+ "occurred: updating the ports database (m[0]=%x, m[1]=%x, "
"m[2]=%x, m[3]=%x)", vha->vp_idx, code,
le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4031,7 +4051,7 @@ void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
default:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
- "qla_target(%d): Async event %#x occured: "
+ "qla_target(%d): Async event %#x occurred: "
"ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
@@ -4286,6 +4306,12 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
if (!QLA_TGT_MODE_ENABLED())
return 0;
+ if (!IS_TGT_MODE_CAPABLE(ha)) {
+ ql_log(ql_log_warn, base_vha, 0xe070,
+ "This adapter does not support target mode.\n");
+ return 0;
+ }
+
ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
"Registering target for host %ld(%p)", base_vha->host_no, ha);
@@ -4647,7 +4673,6 @@ void
qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
- struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
struct atio_from_isp *pkt;
int cnt, i;
@@ -4675,26 +4700,28 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
}
/* Adjust ring index */
- WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
}
void
-qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
+qlt_24xx_config_rings(struct scsi_qla_host *vha)
{
struct qla_hw_data *ha = vha->hw;
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
-/* FIXME: atio_q in/out for ha->mqenable=1..? */
- if (ha->mqenable) {
-#if 0
- WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp25mq.atio_q_out);
-#endif
- } else {
- /* Setup APTIO registers for target mode */
- WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
- WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
- RD_REG_DWORD(&reg->isp24.atio_q_out);
+ WRT_REG_DWORD(ISP_ATIO_Q_IN(vha), 0);
+ WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), 0);
+ RD_REG_DWORD(ISP_ATIO_Q_OUT(vha));
+
+ if (IS_ATIO_MSIX_CAPABLE(ha)) {
+ struct qla_msix_entry *msix = &ha->msix_entries[2];
+ struct init_cb_24xx *icb = (struct init_cb_24xx *)ha->init_cb;
+
+ icb->msix_atio = cpu_to_le16(msix->entry);
+ ql_dbg(ql_dbg_init, vha, 0xf072,
+ "Registering ICB vector 0x%x for atio que.\n",
+ msix->entry);
}
}
@@ -4777,6 +4804,101 @@ qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
}
}
+void
+qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (qla_tgt_mode_enabled(vha)) {
+ if (!ha->tgt.saved_set) {
+ /* We save only once */
+ ha->tgt.saved_exchange_count = nv->exchange_count;
+ ha->tgt.saved_firmware_options_1 =
+ nv->firmware_options_1;
+ ha->tgt.saved_firmware_options_2 =
+ nv->firmware_options_2;
+ ha->tgt.saved_firmware_options_3 =
+ nv->firmware_options_3;
+ ha->tgt.saved_set = 1;
+ }
+
+ nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
+
+ /* Enable target mode */
+ nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
+
+ /* Disable ini mode, if requested */
+ if (!qla_ini_mode_enabled(vha))
+ nv->firmware_options_1 |=
+ __constant_cpu_to_le32(BIT_5);
+
+ /* Disable Full Login after LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
+ /* Enable initial LIP */
+ nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
+ /* Enable FC tapes support */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+ /* Disable Full Login after LIP */
+ nv->host_p &= __constant_cpu_to_le32(~BIT_10);
+ /* Enable target PRLI control */
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
+ } else {
+ if (ha->tgt.saved_set) {
+ nv->exchange_count = ha->tgt.saved_exchange_count;
+ nv->firmware_options_1 =
+ ha->tgt.saved_firmware_options_1;
+ nv->firmware_options_2 =
+ ha->tgt.saved_firmware_options_2;
+ nv->firmware_options_3 =
+ ha->tgt.saved_firmware_options_3;
+ }
+ return;
+ }
+
+ /* out-of-order frames reassembly */
+ nv->firmware_options_3 |= BIT_6|BIT_9;
+
+ if (ha->tgt.enable_class_2) {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) =
+ FC_COS_CLASS2 | FC_COS_CLASS3;
+
+ nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
+ } else {
+ if (vha->flags.init_done)
+ fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
+
+ nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
+ }
+}
+
+void
+qlt_81xx_config_nvram_stage2(struct scsi_qla_host *vha,
+ struct init_cb_81xx *icb)
+{
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ if (ha->tgt.node_name_set) {
+ memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
+ icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
+ }
+}
+
+void
+qlt_83xx_iospace_config(struct qla_hw_data *ha)
+{
+ if (!QLA_TGT_MODE_ENABLED())
+ return;
+
+ ha->msix_count += 1; /* For ATIO Q */
+}
+
int
qlt_24xx_process_response_error(struct scsi_qla_host *vha,
struct sts_entry_24xx *pkt)
@@ -4809,11 +4931,41 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
if (!QLA_TGT_MODE_ENABLED())
return;
+ if (ha->mqenable || IS_QLA83XX(ha)) {
+ ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
+ } else {
+ ISP_ATIO_Q_IN(base_vha) = &ha->iobase->isp24.atio_q_in;
+ ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
+ }
+
mutex_init(&ha->tgt.tgt_mutex);
mutex_init(&ha->tgt.tgt_host_action_mutex);
qlt_clear_mode(base_vha);
}
+irqreturn_t
+qla83xx_msix_atio_q(int irq, void *dev_id)
+{
+ struct rsp_que *rsp;
+ scsi_qla_host_t *vha;
+ struct qla_hw_data *ha;
+ unsigned long flags;
+
+ rsp = (struct rsp_que *) dev_id;
+ ha = rsp->hw;
+ vha = pci_get_drvdata(ha->pdev);
+
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+
+ qlt_24xx_process_atio_queue(vha);
+ qla24xx_process_response_queue(vha, rsp);
+
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+ return IRQ_HANDLED;
+}
+
int
qlt_mem_alloc(struct qla_hw_data *ha)
{
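Note: with the qlt_probe_one_stage1() hunk above, the ATIO in/out register addresses are latched once per host, so qlt_24xx_process_atio_queue() and qlt_24xx_config_rings() no longer carry a device_reg_t argument. A minimal sketch of what the ISP_ATIO_Q_IN()/ISP_ATIO_Q_OUT() accessors are assumed to look like (the real definitions live in the qla2xxx headers and may differ):

	/* Sketch, not the driver's verbatim definition: stage1 stores the raw
	 * register addresses in per-host fields so hot paths need not choose
	 * between iobase and mqiobase again. */
	#define ISP_ATIO_Q_IN(vha)	((vha)->hw->tgt.atio_q_in)
	#define ISP_ATIO_Q_OUT(vha)	((vha)->hw->tgt.atio_q_out)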
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index bad749561ec2..ff9ccb9fd036 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -60,8 +60,9 @@
* multi-complete should come to the tgt driver or be handled there by qla2xxx
*/
#define CTIO_COMPLETION_HANDLE_MARK BIT_29
-#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
-#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
+#if (CTIO_COMPLETION_HANDLE_MARK <= DEFAULT_OUTSTANDING_COMMANDS)
+#error "CTIO_COMPLETION_HANDLE_MARK not larger than "
+ "DEFAULT_OUTSTANDING_COMMANDS"
#endif
#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
@@ -161,7 +162,7 @@ struct imm_ntfy_from_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -217,7 +218,7 @@ struct nack_to_isp {
uint16_t srr_rx_id;
uint16_t status;
uint8_t status_subcode;
- uint8_t reserved_3;
+ uint8_t fw_handle;
uint32_t exchange_address;
uint32_t srr_rel_offs;
uint16_t srr_ui;
@@ -948,6 +949,7 @@ extern void qlt_update_vp_map(struct scsi_qla_host *, int);
* is not set. Right now, ha value is ignored.
*/
#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
+extern int ql2x_ini_mode;
static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
{
@@ -985,12 +987,15 @@ extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
-extern void qlt_24xx_config_rings(struct scsi_qla_host *,
- device_reg_t __iomem *);
+extern void qlt_24xx_config_rings(struct scsi_qla_host *);
extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
struct nvram_24xx *);
extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
struct init_cb_24xx *);
+extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *,
+ struct init_cb_81xx *);
+extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *,
+ struct nvram_81xx *);
extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
struct sts_entry_24xx *);
extern void qlt_modify_vp_config(struct scsi_qla_host *,
@@ -1000,5 +1005,7 @@ extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
extern void qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
+extern irqreturn_t qla83xx_msix_atio_q(int, void *);
+extern void qlt_83xx_iospace_config(struct qla_hw_data *);
#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index cfe934e1af42..2b6e478d9e33 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -1,13 +1,13 @@
/*
* QLogic Fibre Channel HBA Driver
- * Copyright (c) 2003-2012 QLogic Corporation
+ * Copyright (c) 2003-2013 QLogic Corporation
*
* See LICENSE.qla2xxx for copyright and licensing details.
*/
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.07-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 3d74f2f39ae1..d182c96e17ea 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -367,7 +367,7 @@ static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
if (!nacl) {
- pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
+ pr_err("Unable to allocate struct tcm_qla2xxx_nacl\n");
return NULL;
}
@@ -620,8 +620,8 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
return;
}
- cmd->se_cmd.scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
- transport_generic_request_failure(&cmd->se_cmd);
+ transport_generic_request_failure(&cmd->se_cmd,
+ TCM_CHECK_CONDITION_ABORT_CMD);
return;
}
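Note: the tcm_qla2xxx.c hunk above tracks a target-core API change: transport_generic_request_failure() now takes the sense reason as an argument instead of reading it from cmd->scsi_sense_reason. The prototype assumed by that hunk (from the target core of the same era, shown for reference):

	void transport_generic_request_failure(struct se_cmd *cmd,
					       sense_reason_t sense_reason);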
diff --git a/drivers/scsi/qla4xxx/ql4_83xx.c b/drivers/scsi/qla4xxx/ql4_83xx.c
index 6e9af20be12f..5d8fe4f75650 100644
--- a/drivers/scsi/qla4xxx/ql4_83xx.c
+++ b/drivers/scsi/qla4xxx/ql4_83xx.c
@@ -538,7 +538,7 @@ struct device_info {
int port_num;
};
-static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
{
uint32_t drv_active;
uint32_t dev_part, dev_part1, dev_part2;
@@ -1351,31 +1351,58 @@ exit_start_fw:
/*----------------------Interrupt Related functions ---------------------*/
-void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
+ qla4_8xxx_intr_disable(ha);
+}
+
+static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int, ret;
- if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ ret = readl(&ha->qla4_83xx_reg->mbox_int);
+ mb_int = ret & ~INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+ }
+}
- ret = readl(&ha->qla4_83xx_reg->mbox_int);
- mb_int = ret & ~INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(1, &ha->qla4_83xx_reg->leg_int_mask);
+void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_disable_mbox_intrs(ha);
+ qla4_83xx_disable_iocb_intrs(ha);
}
-void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
+{
+ if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
+ qla4_8xxx_intr_enable(ha);
+ set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
+ }
+}
+
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
{
uint32_t mb_int;
- qla4_8xxx_mbx_intr_enable(ha);
- mb_int = INT_ENABLE_FW_MB;
- writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
- writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
+ mb_int = INT_ENABLE_FW_MB;
+ writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
+ writel(0, &ha->qla4_83xx_reg->leg_int_mask);
+ set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
+ }
+}
- set_bit(AF_INTERRUPTS_ON, &ha->flags);
+
+void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
+{
+ qla4_83xx_enable_mbox_intrs(ha);
+ qla4_83xx_enable_iocb_intrs(ha);
}
+
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
int incount)
{
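Note: the point of splitting enable/disable into mailbox and IOCB halves is bring-up ordering on ISP83xx: mailbox interrupts can be switched on before the first mailbox command, while IOCB interrupts stay off until the adapter is otherwise up. A hedged sketch of the resulting sequence (the wrapper function is hypothetical; the callees are from this patch):

	static int example_83xx_bringup(struct scsi_qla_host *ha)
	{
		if (is_qla8032(ha))
			qla4_83xx_enable_mbox_intrs(ha); /* before any mailbox cmd */

		if (qla4xxx_about_firmware(ha) == QLA_ERROR) /* mailbox traffic */
			return QLA_ERROR;

		ha->isp_ops->enable_intrs(ha); /* later: IOCB interrupts too */
		return QLA_SUCCESS;
	}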
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 76819b71ada7..19ee55a6226c 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
}
break;
case 2:
- /* Reset HBA */
+ /* Reset HBA and collect FW dump */
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
if (dev_state == QLA8XXX_DEV_READY) {
- ql4_printk(KERN_INFO, ha,
- "%s: Setting Need reset, reset_owner is 0x%x.\n",
- __func__, ha->func_num);
+ ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
+ __func__);
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
QLA8XXX_DEV_NEED_RESET);
- set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ if (is_qla8022(ha) ||
+ (is_qla8032(ha) &&
+ qla4_83xx_can_perform_reset(ha))) {
+ set_bit(AF_8XXX_RST_OWNER, &ha->flags);
+ set_bit(AF_FW_RECOVERY, &ha->flags);
+ ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
+ __func__, ha->func_num);
+ }
} else
ql4_printk(KERN_INFO, ha,
"%s: Reset not performed as device state is 0x%x\n",
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 329d553eae94..129f5dd02822 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -136,6 +136,7 @@
#define RESPONSE_QUEUE_DEPTH 64
#define QUEUE_SIZE 64
#define DMA_BUFFER_SIZE 512
+#define IOCB_HIWAT_CUSHION 4
/*
* Misc
@@ -180,6 +181,7 @@
#define DISABLE_ACB_TOV 30
#define IP_CONFIG_TOV 30
#define LOGIN_TOV 12
+#define BOOT_LOGIN_RESP_TOV 60
#define MAX_RESET_HA_RETRIES 2
#define FW_ALIVE_WAIT_TOV 3
@@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
+#define DF_BOOT_TGT 1 /* Boot target entry */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
@@ -501,6 +504,7 @@ struct scsi_qla_host {
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
#define AF_LINK_UP 8 /* 0x00000100 */
+#define AF_LOOPBACK 9 /* 0x00000200 */
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
#define AF_HA_REMOVAL 12 /* 0x00001000 */
@@ -516,6 +520,8 @@ struct scsi_qla_host {
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
+#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
+#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
unsigned long dpc_flags;
@@ -537,6 +543,7 @@ struct scsi_qla_host {
uint32_t tot_ddbs;
uint16_t iocb_cnt;
+ uint16_t iocb_hiwat;
/* SRB cache. */
#define SRB_MIN_REQ 128
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
- (test_bit(AF_LINK_UP, &ha->flags) != 0);
+ (test_bit(AF_LINK_UP, &ha->flags) != 0) &&
+ (!test_bit(AF_LOOPBACK, &ha->flags));
}
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 1c4795020357..ad9d2e2d370f 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -495,7 +495,7 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_IDC_COMPLETE 0x8100
-#define MBOX_ASTS_IDC_NOTIFY 0x8101
+#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131
@@ -522,6 +522,10 @@ struct qla_flt_region {
#define FLASH_OPT_COMMIT 2
#define FLASH_OPT_RMW_COMMIT 3
+/* Loopback type */
+#define ENABLE_INTERNAL_LOOPBACK 0x04
+#define ENABLE_EXTERNAL_LOOPBACK 0x08
+
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 57a5a3cf5770..982293edf02c 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
+void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
+int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
extern int ql4xextended_error_logging;
extern int ql4xdontresethba;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 1aca1b4f70b8..8fc8548ba4ba 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -195,12 +195,10 @@ exit_get_sys_info_no_free:
* @ha: pointer to host adapter structure.
*
**/
-static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
+static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
{
/* Initialize aen queue */
ha->aen_q_count = MAX_AEN_ENTRIES;
-
- return qla4xxx_get_firmware_status(ha);
}
static uint8_t
@@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
+ /*
+ * For ISP83XX, mailbox and IOCB interrupts are enabled separately.
+ * Mailbox interrupts must be enabled prior to issuing any mailbox
+ * command in order to prevent the possibility of losing interrupts
+ * while switching from polling to interrupt mode. IOCB interrupts are
+ * enabled via isp_ops->enable_intrs.
+ */
+ if (is_qla8032(ha))
+ qla4_83xx_enable_mbox_intrs(ha);
+
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
goto exit_init_hba;
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
goto exit_init_hba;
- if (qla4xxx_init_local_data(ha) == QLA_ERROR)
- goto exit_init_hba;
+ qla4xxx_init_local_data(ha);
status = qla4xxx_init_firmware(ha);
if (status == QLA_ERROR)
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index f48f37a281d1..14fec976f634 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
goto queuing_error;
/* total iocbs active */
- if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
+ if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
goto queuing_error;
/* Build command packet */
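Note: the check above now throttles against ha->iocb_hiwat, which the ql4_mbx.c hunk further down derives from the GET_FW_STATUS mailbox output minus IOCB_HIWAT_CUSHION (4, per the ql4_def.h hunk). A worked example with assumed numbers:

	/* Assume the firmware reports mbox_sts[2] = 1024 available IOCBs:
	 *   ha->iocb_hiwat = 1024 - IOCB_HIWAT_CUSHION = 1020
	 * With 1018 IOCBs already active and a request needing 3 more,
	 * 1018 + 3 >= 1020, so the command is returned as busy instead of
	 * overrunning the firmware IOCB pool. */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;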
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 15ea81465ce4..1b83dc283d2e 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -582,6 +582,33 @@ exit_prq_error:
}
/**
+ * qla4_83xx_loopback_in_progress - Is loopback in progress?
+ * @ha: Pointer to host adapter structure.
+ * Returns: 1 = loopback in progress, 0 = loopback not in progress
+ **/
+static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
+ (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics in progress\n",
+ __func__));
+ rval = 1;
+ } else {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Loopback diagnostics not in progress\n",
+ __func__));
+ rval = 0;
+ }
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_isr_decode_mailbox - decodes mailbox status
* @ha: Pointer to host adapter structure.
* @mailbox_status: Mailbox status.
@@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
case MBOX_ASTS_LINK_DOWN:
clear_bit(AF_LINK_UP, &ha->flags);
- if (test_bit(AF_INIT_DONE, &ha->flags))
+ if (test_bit(AF_INIT_DONE, &ha->flags)) {
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
+ qla4xxx_wake_dpc(ha);
+ }
ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
@@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
" removed\n", ha->host_no, mbox_sts[0]));
break;
- case MBOX_ASTS_IDC_NOTIFY:
+ case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
{
uint32_t opcode;
if (is_qla8032(ha)) {
@@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
DEBUG2(ql4_printk(KERN_INFO, ha,
"scsi:%ld: AEN %04x IDC Complete notification\n",
ha->host_no, mbox_sts[0]));
+
+ if (qla4_83xx_loopback_in_progress(ha))
+ set_bit(AF_LOOPBACK, &ha->flags);
+ else
+ clear_bit(AF_LOOPBACK, &ha->flags);
}
break;
@@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
- ql4_printk(KERN_ERR, ha,
- "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
- __func__);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
+ __func__));
return IRQ_NONE;
}
/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
- ql4_printk(KERN_ERR, ha,
- "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
- __func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
+ DEBUG2(ql4_printk(KERN_ERR, ha,
+ "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
+ __func__, (leg_int_ptr & PF_BITS_MASK),
+ ha->pf_bit));
return IRQ_NONE;
}
@@ -1437,11 +1472,14 @@ irq_not_attached:
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
- if (test_bit(AF_MSIX_ENABLED, &ha->flags))
- qla4_8xxx_disable_msix(ha);
- else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
- free_irq(ha->pdev->irq, ha);
- pci_disable_msi(ha->pdev);
- } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
- free_irq(ha->pdev->irq, ha);
+ if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
+ if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
+ qla4_8xxx_disable_msix(ha);
+ } else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ pci_disable_msi(ha->pdev);
+ } else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
+ free_irq(ha->pdev->irq, ha);
+ }
+ }
}
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 3d41034191f0..160d33697216 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -44,6 +44,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
/**
+ * qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
+ * @ha: Pointer to host adapter structure.
+ * Returns: 1 = polling mode, 0 = non-polling mode
+ **/
+static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
+{
+ int rval = 1;
+
+ if (is_qla8032(ha)) {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
+ rval = 0;
+ } else {
+ if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
+ test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
+ test_bit(AF_ONLINE, &ha->flags) &&
+ !test_bit(AF_HA_REMOVAL, &ha->flags))
+ rval = 0;
+ }
+
+ return rval;
+}
+
+/**
* qla4xxx_mailbox_command - issues mailbox commands
* @ha: Pointer to host adapter structure.
* @inCount: number of mailbox registers to load.
@@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/*
* Wait for completion: Poll or completion queue
*/
- if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
- test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
- test_bit(AF_ONLINE, &ha->flags) &&
- !test_bit(AF_HA_REMOVAL, &ha->flags)) {
- /* Do not poll for completion. Use completion queue */
- set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
- clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
- } else {
+ if (qla4xxx_is_intr_poll_mode(ha)) {
/* Poll for command to complete */
wait_count = jiffies + MBOX_TOV * HZ;
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;
-
/*
* Service the interrupt.
* The ISR will save the mailbox status registers
* to a temporary storage location in the adapter
* structure.
*/
-
spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops->process_mailbox_interrupt(ha, outCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
+ } else {
+ /* Do not poll for completion. Use completion queue */
+ set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
+ wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
+ clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
}
/* Check for mailbox timeout. */
@@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_ERROR;
}
- ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
- ha->host_no, mbox_sts[2]);
+ /* High-water mark of IOCBs */
+ ha->iocb_hiwat = mbox_sts[2];
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: firmware IOCBs available = %d\n", __func__,
+ ha->iocb_hiwat));
+
+ if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
+ ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
+
+ /* Ideally, we should not enter this code, as the number of firmware
+ * IOCBs is hard-coded in the firmware. We set a default
+ * iocb_hiwat here just in case */
+ if (ha->iocb_hiwat == 0) {
+ ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
+ DEBUG2(ql4_printk(KERN_WARNING, ha,
+ "%s: Setting IOCB's to = %d\n", __func__,
+ ha->iocb_hiwat));
+ }
return QLA_SUCCESS;
}
@@ -1385,10 +1420,8 @@ int qla4xxx_get_chap(struct scsi_qla_host *ha, char *username, char *password,
dma_addr_t chap_dma;
chap_table = dma_pool_alloc(ha->chap_dma_pool, GFP_KERNEL, &chap_dma);
- if (chap_table == NULL) {
- ret = -ENOMEM;
- goto exit_get_chap;
- }
+ if (chap_table == NULL)
+ return -ENOMEM;
chap_size = sizeof(struct ql4_chap_table);
memset(chap_table, 0, chap_size);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 499a92db1cf6..71d3d234f526 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);
- if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
+ if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);
return retval;
@@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}
/* Make sure we receive the minimum required data to cache internally */
- if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
+ if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
+ offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82;
-
}
/* Save M.A.C. address & serial_number */
@@ -3463,7 +3463,7 @@ exit_validate_mac82:
/* Interrupt handling helpers. */
-int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}
-int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
+int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];
@@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
- qla4_8xxx_mbx_intr_enable(ha);
+ qla4_8xxx_intr_enable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - reset */
@@ -3522,7 +3522,7 @@ void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
- qla4_8xxx_mbx_intr_disable(ha);
+ qla4_8xxx_intr_disable(ha);
spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index fbc546e893ac..6142729167f4 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
sess->password_in, BIDI_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password, LOCAL_CHAP,
&idx);
if (rval)
- return -EINVAL;
-
- len = sprintf(buf, "%hu\n", idx);
+ len = sprintf(buf, "\n");
+ else
+ len = sprintf(buf, "%hu\n", idx);
break;
default:
return iscsi_session_get_param(cls_sess, param, buf);
@@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
+ test_bit(AF_LOOPBACK, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;
@@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);
+
ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -3479,7 +3481,8 @@ dpc_post_reset_ha:
}
/* ---- link change? --- */
- if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
+ if (!test_bit(AF_LOOPBACK, &ha->flags) &&
+ test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
qla4xxx_mark_all_devices_missing(ha);
@@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);
- if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
- /* Turn-off interrupts on the card. */
- ha->isp_ops->disable_intrs(ha);
- }
+ /* Turn-off interrupts on the card. */
+ ha->isp_ops->disable_intrs(ha);
if (is_qla40XX(ha)) {
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
@@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
}
/* Detach interrupts */
- if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
- qla4xxx_free_irqs(ha);
+ qla4xxx_free_irqs(ha);
/* free extra memory */
qla4xxx_mem_free(ha);
@@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
struct iscsi_endpoint *ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
- struct sockaddr *dst_addr;
+ struct sockaddr *t_addr;
+ struct sockaddr_storage *dst_addr;
char *ip;
/* TODO: need to destroy on unload iscsi_endpoint*/
@@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
return NULL;
if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
- dst_addr->sa_family = AF_INET6;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET6;
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));
} else {
- dst_addr->sa_family = AF_INET;
+ t_addr = (struct sockaddr *)dst_addr;
+ t_addr->sa_family = AF_INET;
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
}
- ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
+ ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
return ep;
}
@@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
}
static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
- struct ddb_entry *ddb_entry)
+ struct ddb_entry *ddb_entry,
+ uint16_t idx)
{
uint16_t def_timeout;
@@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
+
+ if (ql4xdisablesysfsboot &&
+ (idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
+ set_bit(DF_BOOT_TGT, &ddb_entry->flags);
}
static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
@@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
- int is_reset)
+ int is_reset, uint16_t idx)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;
@@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
sizeof(struct dev_db_entry));
- qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+ qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);
cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
@@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
goto continue_next_nt;
}
- ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+ ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
if (ret == QLA_ERROR)
goto exit_nt_list;
@@ -5116,6 +5124,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
}
/**
+ * qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login response
+ * @ha: pointer to adapter structure
+ *
+ * When the boot entry is a normal iSCSI target, the DF_BOOT_TGT flag is
+ * set in its DDB and we wait for the login response of the boot targets
+ * during probe.
+ **/
+static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
+{
+ struct ddb_entry *ddb_entry;
+ struct dev_db_entry *fw_ddb_entry = NULL;
+ dma_addr_t fw_ddb_entry_dma;
+ unsigned long wtime;
+ uint32_t ddb_state;
+ int max_ddbs, idx, ret;
+
+ max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+ MAX_DEV_DB_ENTRIES;
+
+ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ &fw_ddb_entry_dma, GFP_KERNEL);
+ if (!fw_ddb_entry) {
+ ql4_printk(KERN_ERR, ha,
+ "%s: Unable to allocate dma buffer\n", __func__);
+ goto exit_login_resp;
+ }
+
+ wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);
+
+ for (idx = 0; idx < max_ddbs; idx++) {
+ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
+ if (ddb_entry == NULL)
+ continue;
+
+ if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: DDB index [%d]\n", __func__,
+ ddb_entry->fw_ddb_index));
+ do {
+ ret = qla4xxx_get_fwddb_entry(ha,
+ ddb_entry->fw_ddb_index,
+ fw_ddb_entry, fw_ddb_entry_dma,
+ NULL, NULL, &ddb_state, NULL,
+ NULL, NULL);
+ if (ret == QLA_ERROR)
+ goto exit_login_resp;
+
+ if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
+ (ddb_state == DDB_DS_SESSION_FAILED))
+ break;
+
+ schedule_timeout_uninterruptible(HZ);
+
+ } while (time_after(wtime, jiffies));
+
+ if (!time_after(wtime, jiffies)) {
+ DEBUG2(ql4_printk(KERN_INFO, ha,
+ "%s: Login response wait timer expired\n",
+ __func__));
+ goto exit_login_resp;
+ }
+ }
+ }
+
+exit_login_resp:
+ if (fw_ddb_entry)
+ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
+ fw_ddb_entry, fw_ddb_entry_dma);
+}
+
+/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure
* @pci_device_id: pointer to pci_device entry
@@ -5124,8 +5204,8 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
* It returns zero if successful. It also initializes all data necessary for
* the driver.
**/
-static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int qla4xxx_probe_adapter(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
int ret = -ENODEV, status;
struct Scsi_Host *host;
@@ -5270,7 +5350,7 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha)) {
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha,
- QLA82XX_CRB_DEV_STATE);
+ QLA8XXX_CRB_DEV_STATE);
ha->isp_ops->idc_unlock(ha);
if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "
@@ -5368,6 +5448,7 @@ skip_retry_init:
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
+ qla4xxx_wait_login_resp_boot_tgt(ha);
qla4xxx_create_chap_list(ha);
@@ -5464,7 +5545,7 @@ static void qla4xxx_destroy_fw_ddb_session(struct scsi_qla_host *ha)
* qla4xxx_remove_adapter - callback function to remove adapter.
* @pci_dev: PCI device pointer
**/
-static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
+static void qla4xxx_remove_adapter(struct pci_dev *pdev)
{
struct scsi_qla_host *ha;
@@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
goto exit_host_reset;
}
- rval = qla4xxx_wait_for_hba_online(ha);
- if (rval != QLA_SUCCESS) {
- DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
- "adapter\n", __func__));
- rval = -EIO;
- goto exit_host_reset;
- }
-
if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
goto recover_adapter;
@@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
- uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;
@@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to "
- "reserve interrupt %d already in use.\n",
- ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
+ ha->isp_ops->enable_intrs(ha);
}
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "
@@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
QLA8XXX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
- if (rval == QLA_SUCCESS) {
- ret = qla4xxx_request_irqs(ha);
- if (ret) {
- ql4_printk(KERN_WARNING, ha, "Failed to"
- " reserve interrupt %d already in"
- " use.\n", ha->pdev->irq);
- rval = QLA_ERROR;
- } else {
- ha->isp_ops->enable_intrs(ha);
- rval = QLA_SUCCESS;
- }
- }
+ if (rval == QLA_SUCCESS)
+ ha->isp_ops->enable_intrs(ha);
+
ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
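Note: the qla4xxx_get_ep_fwdb() hunk above sizes dst_addr as struct sockaddr_storage because plain struct sockaddr is only 16 bytes and cannot hold a sockaddr_in6; filling the IPv6 view wrote past the allocation. A self-contained sketch of the pattern (fill_dst() is a hypothetical name):

	#include <linux/socket.h>
	#include <linux/in.h>
	#include <linux/in6.h>

	/* Size the object as sockaddr_storage, fill it through the
	 * family-specific view, and cast to (struct sockaddr *) only at
	 * the call site. */
	static void fill_dst(struct sockaddr_storage *dst, bool ipv6, __be16 port)
	{
		if (ipv6) {
			struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)dst;
			a6->sin6_family = AF_INET6;
			a6->sin6_port = port;
		} else {
			struct sockaddr_in *a4 = (struct sockaddr_in *)dst;
			a4->sin_family = AF_INET;
			a4->sin_port = port;
		}
	}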
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index f6df2ea91ab5..6775a45af315 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/
-#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
+#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"
diff --git a/drivers/scsi/qlogicfas.c b/drivers/scsi/qlogicfas.c
index 1e874f1fb5c6..13d628b56ff7 100644
--- a/drivers/scsi/qlogicfas.c
+++ b/drivers/scsi/qlogicfas.c
@@ -142,7 +142,7 @@ module_param_array(irq, int, NULL, 0);
MODULE_PARM_DESC(iobase, "I/O address");
MODULE_PARM_DESC(irq, "IRQ");
-static int __devinit qlogicfas_detect(struct scsi_host_template *sht)
+static int qlogicfas_detect(struct scsi_host_template *sht)
{
struct Scsi_Host *shost;
struct qlogicfas408_priv *priv;
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c
index 71fddbc60f18..6d48d30bed05 100644
--- a/drivers/scsi/qlogicpti.c
+++ b/drivers/scsi/qlogicpti.c
@@ -461,7 +461,7 @@ static int qlogicpti_reset_hardware(struct Scsi_Host *host)
#define PTI_RESET_LIMIT 400
-static int __devinit qlogicpti_load_firmware(struct qlogicpti *qpti)
+static int qlogicpti_load_firmware(struct qlogicpti *qpti)
{
const struct firmware *fw;
const char fwname[] = "qlogic/isp1000.bin";
@@ -670,7 +670,7 @@ static int qlogicpti_verify_tmon(struct qlogicpti *qpti)
static irqreturn_t qpti_intr(int irq, void *dev_id);
-static void __devinit qpti_chain_add(struct qlogicpti *qpti)
+static void qpti_chain_add(struct qlogicpti *qpti)
{
spin_lock_irq(&qptichain_lock);
if (qptichain != NULL) {
@@ -686,7 +686,7 @@ static void __devinit qpti_chain_add(struct qlogicpti *qpti)
spin_unlock_irq(&qptichain_lock);
}
-static void __devexit qpti_chain_del(struct qlogicpti *qpti)
+static void qpti_chain_del(struct qlogicpti *qpti)
{
spin_lock_irq(&qptichain_lock);
if (qptichain == qpti) {
@@ -701,7 +701,7 @@ static void __devexit qpti_chain_del(struct qlogicpti *qpti)
spin_unlock_irq(&qptichain_lock);
}
-static int __devinit qpti_map_regs(struct qlogicpti *qpti)
+static int qpti_map_regs(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -724,7 +724,7 @@ static int __devinit qpti_map_regs(struct qlogicpti *qpti)
return 0;
}
-static int __devinit qpti_register_irq(struct qlogicpti *qpti)
+static int qpti_register_irq(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -749,7 +749,7 @@ fail:
return -1;
}
-static void __devinit qpti_get_scsi_id(struct qlogicpti *qpti)
+static void qpti_get_scsi_id(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
struct device_node *dp;
@@ -803,7 +803,7 @@ static void qpti_get_clock(struct qlogicpti *qpti)
/* The request and response queues must each be aligned
* on a page boundary.
*/
-static int __devinit qpti_map_queues(struct qlogicpti *qpti)
+static int qpti_map_queues(struct qlogicpti *qpti)
{
struct platform_device *op = qpti->op;
@@ -1292,7 +1292,7 @@ static struct scsi_host_template qpti_template = {
};
static const struct of_device_id qpti_match[];
-static int __devinit qpti_sbus_probe(struct platform_device *op)
+static int qpti_sbus_probe(struct platform_device *op)
{
struct device_node *dp = op->dev.of_node;
struct Scsi_Host *host;
@@ -1402,7 +1402,7 @@ fail_unlink:
return -ENODEV;
}
-static int __devexit qpti_sbus_remove(struct platform_device *op)
+static int qpti_sbus_remove(struct platform_device *op)
{
struct qlogicpti *qpti = dev_get_drvdata(&op->dev);
@@ -1459,7 +1459,7 @@ static struct platform_driver qpti_sbus_driver = {
.of_match_table = qpti_match,
},
.probe = qpti_sbus_probe,
- .remove = __devexit_p(qpti_sbus_remove),
+ .remove = qpti_sbus_remove,
};
static int __init qpti_init(void)
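Note: the annotation churn in qlogicfas.c and qlogicpti.c (and in sgiwd93.c, sim710.c, sni_53c710.c and stex.c further down) is the tree-wide removal of the CONFIG_HOTPLUG section markers: __devinit, __devexit and the __devexit_p() wrapper became no-ops and were deleted. The mechanical before/after (foo_* is a placeholder name):

	/* before: probe/remove carried discardable-section annotations */
	static int __devinit foo_probe(struct platform_device *op);
	static int __devexit foo_remove(struct platform_device *op);
	/* ... .remove = __devexit_p(foo_remove), ... */

	/* after: plain functions and a plain function pointer */
	static int foo_probe(struct platform_device *op);
	static int foo_remove(struct platform_device *op);
	/* ... .remove = foo_remove, ... */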
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9032e910bca3..f1bf5aff68ed 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1418,7 +1418,7 @@ static int scsi_lld_busy(struct request_queue *q)
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost;
- if (blk_queue_dead(q))
+ if (blk_queue_dying(q))
return 0;
shost = sdev->host;
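Note: the scsi_lib.c one-liner follows the block layer's split of queue teardown into a dying phase (no new requests admitted) and a final dead phase; scsi_lld_busy() must report not-busy as soon as the queue starts dying. The helpers, as assumed from include/linux/blkdev.h of this period:

	#define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
	#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)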
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index dc0ad85853e2..8f6b12cbd224 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -16,16 +16,14 @@
#include "scsi_priv.h"
-static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
+static int scsi_dev_type_suspend(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err;
err = scsi_device_quiesce(to_scsi_device(dev));
if (err == 0) {
- drv = dev->driver;
- if (drv && drv->suspend) {
- err = drv->suspend(dev, msg);
+ if (cb) {
+ err = cb(dev);
if (err)
scsi_device_resume(to_scsi_device(dev));
}
@@ -34,14 +32,12 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
return err;
}
-static int scsi_dev_type_resume(struct device *dev)
+static int scsi_dev_type_resume(struct device *dev, int (*cb)(struct device *))
{
- struct device_driver *drv;
int err = 0;
- drv = dev->driver;
- if (drv && drv->resume)
- err = drv->resume(dev);
+ if (cb)
+ err = cb(dev);
scsi_device_resume(to_scsi_device(dev));
dev_dbg(dev, "scsi resume: %d\n", err);
return err;
@@ -49,51 +45,39 @@ static int scsi_dev_type_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
-static int scsi_bus_suspend_common(struct device *dev, pm_message_t msg)
+static int
+scsi_bus_suspend_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
if (scsi_is_sdev_device(dev)) {
/*
- * sd is the only high-level SCSI driver to implement runtime
- * PM, and sd treats runtime suspend, system suspend, and
- * system hibernate identically (but not system freeze).
+ * All the high-level SCSI drivers that implement runtime
+ * PM treat runtime suspend, system suspend, and system
+ * hibernate identically.
*/
- if (pm_runtime_suspended(dev)) {
- if (msg.event == PM_EVENT_SUSPEND ||
- msg.event == PM_EVENT_HIBERNATE)
- return 0; /* already suspended */
+ if (pm_runtime_suspended(dev))
+ return 0;
- /* wake up device so that FREEZE will succeed */
- pm_runtime_resume(dev);
- }
- err = scsi_dev_type_suspend(dev, msg);
+ err = scsi_dev_type_suspend(dev, cb);
}
+
return err;
}
-static int scsi_bus_resume_common(struct device *dev)
+static int
+scsi_bus_resume_common(struct device *dev, int (*cb)(struct device *))
{
int err = 0;
- /*
- * Parent device may have runtime suspended as soon as
- * it is woken up during the system resume.
- *
- * Resume it on behalf of child.
- */
- pm_runtime_get_sync(dev->parent);
-
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, cb);
+
if (err == 0) {
pm_runtime_disable(dev);
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
-
- pm_runtime_put_sync(dev->parent);
-
return err;
}
@@ -112,26 +96,49 @@ static int scsi_bus_prepare(struct device *dev)
static int scsi_bus_suspend(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_SUSPEND);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->suspend : NULL);
+}
+
+static int scsi_bus_resume(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->resume : NULL);
}
static int scsi_bus_freeze(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_FREEZE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->freeze : NULL);
+}
+
+static int scsi_bus_thaw(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->thaw : NULL);
}
static int scsi_bus_poweroff(struct device *dev)
{
- return scsi_bus_suspend_common(dev, PMSG_HIBERNATE);
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_suspend_common(dev, pm ? pm->poweroff : NULL);
+}
+
+static int scsi_bus_restore(struct device *dev)
+{
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ return scsi_bus_resume_common(dev, pm ? pm->restore : NULL);
}
#else /* CONFIG_PM_SLEEP */
-#define scsi_bus_resume_common NULL
#define scsi_bus_prepare NULL
#define scsi_bus_suspend NULL
+#define scsi_bus_resume NULL
#define scsi_bus_freeze NULL
+#define scsi_bus_thaw NULL
#define scsi_bus_poweroff NULL
+#define scsi_bus_restore NULL
#endif /* CONFIG_PM_SLEEP */
@@ -140,10 +147,12 @@ static int scsi_bus_poweroff(struct device *dev)
static int scsi_runtime_suspend(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_suspend\n");
if (scsi_is_sdev_device(dev)) {
- err = scsi_dev_type_suspend(dev, PMSG_AUTO_SUSPEND);
+ err = scsi_dev_type_suspend(dev,
+ pm ? pm->runtime_suspend : NULL);
if (err == -EAGAIN)
pm_schedule_suspend(dev, jiffies_to_msecs(
round_jiffies_up_relative(HZ/10)));
@@ -157,10 +166,11 @@ static int scsi_runtime_suspend(struct device *dev)
static int scsi_runtime_resume(struct device *dev)
{
int err = 0;
+ const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
dev_dbg(dev, "scsi_runtime_resume\n");
if (scsi_is_sdev_device(dev))
- err = scsi_dev_type_resume(dev);
+ err = scsi_dev_type_resume(dev, pm ? pm->runtime_resume : NULL);
/* Insert hooks here for targets, hosts, and transport classes */
@@ -239,11 +249,11 @@ void scsi_autopm_put_host(struct Scsi_Host *shost)
const struct dev_pm_ops scsi_bus_pm_ops = {
.prepare = scsi_bus_prepare,
.suspend = scsi_bus_suspend,
- .resume = scsi_bus_resume_common,
+ .resume = scsi_bus_resume,
.freeze = scsi_bus_freeze,
- .thaw = scsi_bus_resume_common,
+ .thaw = scsi_bus_thaw,
.poweroff = scsi_bus_poweroff,
- .restore = scsi_bus_resume_common,
+ .restore = scsi_bus_restore,
.runtime_suspend = scsi_runtime_suspend,
.runtime_resume = scsi_runtime_resume,
.runtime_idle = scsi_runtime_idle,
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index ce5224c92eda..931a7d954203 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -247,11 +247,11 @@ show_shost_active_mode(struct device *dev,
static DEVICE_ATTR(active_mode, S_IRUGO | S_IWUSR, show_shost_active_mode, NULL);
-static int check_reset_type(char *str)
+static int check_reset_type(const char *str)
{
- if (strncmp(str, "adapter", 10) == 0)
+ if (sysfs_streq(str, "adapter"))
return SCSI_ADAPTER_RESET;
- else if (strncmp(str, "firmware", 10) == 0)
+ else if (sysfs_streq(str, "firmware"))
return SCSI_FIRMWARE_RESET;
else
return 0;
@@ -264,12 +264,9 @@ store_host_reset(struct device *dev, struct device_attribute *attr,
struct Scsi_Host *shost = class_to_shost(dev);
struct scsi_host_template *sht = shost->hostt;
int ret = -EINVAL;
- char str[10];
int type;
- sscanf(buf, "%s", str);
- type = check_reset_type(str);
-
+ type = check_reset_type(buf);
if (!type)
goto exit_store_host_reset;
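Note: sysfs_streq() fits here because sysfs store buffers normally arrive with a trailing newline, which strncmp() counts as a mismatch; the change also drops the fixed on-stack buffer that sscanf("%s") could overflow. A minimal illustration:

	static bool example_reset_type_match(const char *buf)
	{
		/* "echo adapter > host_reset" delivers "adapter\n";
		 * sysfs_streq() ignores one trailing newline, strncmp()
		 * does not. */
		return sysfs_streq(buf, "adapter");
	}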
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index 31969f2e13ce..dac7f8d14494 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
+static ssize_t
+show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
+ return sprintf(buf, "%d\n", session->target_id);
+}
+static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
+ show_priv_session_target_id, NULL);
#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \
@@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_priv_sess_creator.attr,
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
+ &dev_attr_priv_sess_target_id.attr,
NULL,
};
@@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_creator.attr)
return S_IRUGO;
+ else if (attr == &dev_attr_priv_sess_target_id.attr)
+ return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index f7565fc4f0e3..1b681427dde0 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -151,6 +151,7 @@ static struct {
{ SAS_LINK_RATE_1_5_GBPS, "1.5 Gbit" },
{ SAS_LINK_RATE_3_0_GBPS, "3.0 Gbit" },
{ SAS_LINK_RATE_6_0_GBPS, "6.0 Gbit" },
+ { SAS_LINK_RATE_12_0_GBPS, "12.0 Gbit" },
};
sas_bitfield_name_search(linkspeed, sas_linkspeed_names)
sas_bitfield_name_set(linkspeed, sas_linkspeed_names)
diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
index 21a045e0559f..f379c7f3034c 100644
--- a/drivers/scsi/scsi_transport_srp.c
+++ b/drivers/scsi/scsi_transport_srp.c
@@ -38,7 +38,7 @@ struct srp_host_attrs {
#define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
#define SRP_HOST_ATTRS 0
-#define SRP_RPORT_ATTRS 2
+#define SRP_RPORT_ATTRS 3
struct srp_internal {
struct scsi_transport_template t;
@@ -47,7 +47,6 @@ struct srp_internal {
struct device_attribute *host_attrs[SRP_HOST_ATTRS + 1];
struct device_attribute *rport_attrs[SRP_RPORT_ATTRS + 1];
- struct device_attribute private_rport_attrs[SRP_RPORT_ATTRS];
struct transport_container rport_attr_cont;
};
@@ -72,24 +71,6 @@ static DECLARE_TRANSPORT_CLASS(srp_host_class, "srp_host", srp_host_setup,
static DECLARE_TRANSPORT_CLASS(srp_rport_class, "srp_remote_ports",
NULL, NULL, NULL);
-#define SETUP_TEMPLATE(attrb, field, perm, test, ro_test, ro_perm) \
- i->private_##attrb[count] = dev_attr_##field; \
- i->private_##attrb[count].attr.mode = perm; \
- if (ro_test) { \
- i->private_##attrb[count].attr.mode = ro_perm; \
- i->private_##attrb[count].store = NULL; \
- } \
- i->attrb[count] = &i->private_##attrb[count]; \
- if (test) \
- count++
-
-#define SETUP_RPORT_ATTRIBUTE_RD(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO, 1, 0, 0)
-
-#define SETUP_RPORT_ATTRIBUTE_RW(field) \
- SETUP_TEMPLATE(rport_attrs, field, S_IRUGO | S_IWUSR, \
- 1, 1, S_IRUGO)
-
#define SRP_PID(p) \
(p)->port_id[0], (p)->port_id[1], (p)->port_id[2], (p)->port_id[3], \
(p)->port_id[4], (p)->port_id[5], (p)->port_id[6], (p)->port_id[7], \
@@ -135,6 +116,24 @@ show_srp_rport_roles(struct device *dev, struct device_attribute *attr,
static DEVICE_ATTR(roles, S_IRUGO, show_srp_rport_roles, NULL);
+static ssize_t store_srp_rport_delete(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct srp_rport *rport = transport_class_to_srp_rport(dev);
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct srp_internal *i = to_srp_internal(shost->transportt);
+
+ if (i->f->rport_delete) {
+ i->f->rport_delete(rport);
+ return count;
+ } else {
+ return -ENOSYS;
+ }
+}
+
+static DEVICE_ATTR(delete, S_IWUSR, NULL, store_srp_rport_delete);
+
static void srp_rport_release(struct device *dev)
{
struct srp_rport *rport = dev_to_rport(dev);
@@ -324,12 +323,16 @@ srp_attach_transport(struct srp_function_template *ft)
i->rport_attr_cont.ac.attrs = &i->rport_attrs[0];
i->rport_attr_cont.ac.class = &srp_rport_class.class;
i->rport_attr_cont.ac.match = srp_rport_match;
- transport_container_register(&i->rport_attr_cont);
count = 0;
- SETUP_RPORT_ATTRIBUTE_RD(port_id);
- SETUP_RPORT_ATTRIBUTE_RD(roles);
- i->rport_attrs[count] = NULL;
+ i->rport_attrs[count++] = &dev_attr_port_id;
+ i->rport_attrs[count++] = &dev_attr_roles;
+ if (ft->rport_delete)
+ i->rport_attrs[count++] = &dev_attr_delete;
+ i->rport_attrs[count++] = NULL;
+ BUG_ON(count > ARRAY_SIZE(i->rport_attrs));
+
+ transport_container_register(&i->rport_attr_cont);
i->f = ft;
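Note: an SRP initiator opts into the new per-rport delete attribute simply by setting the callback; srp_attach_transport() registers the attribute only when it is non-NULL. A hedged sketch (my_* names are hypothetical):

	static void my_rport_delete(struct srp_rport *rport)
	{
		/* driver-specific teardown of the remote port */
	}

	static struct srp_function_template my_srp_ft = {
		.rport_delete = my_rport_delete, /* exposes "delete" in sysfs */
	};

Userspace can then remove a remote port with something like echo 1 > /sys/class/srp_remote_ports/port-<h>:<n>/delete (path inferred from the srp_remote_ports class name above).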
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 352bc77b7c88..7992635d405f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -105,7 +105,7 @@ static void sd_unlock_native_capacity(struct gendisk *disk);
static int sd_probe(struct device *);
static int sd_remove(struct device *);
static void sd_shutdown(struct device *);
-static int sd_suspend(struct device *, pm_message_t state);
+static int sd_suspend(struct device *);
static int sd_resume(struct device *);
static void sd_rescan(struct device *);
static int sd_done(struct scsi_cmnd *);
@@ -465,15 +465,23 @@ static struct class sd_disk_class = {
.dev_attrs = sd_disk_attrs,
};
+static const struct dev_pm_ops sd_pm_ops = {
+ .suspend = sd_suspend,
+ .resume = sd_resume,
+ .poweroff = sd_suspend,
+ .restore = sd_resume,
+ .runtime_suspend = sd_suspend,
+ .runtime_resume = sd_resume,
+};
+
static struct scsi_driver sd_template = {
.owner = THIS_MODULE,
.gendrv = {
.name = "sd",
.probe = sd_probe,
.remove = sd_remove,
- .suspend = sd_suspend,
- .resume = sd_resume,
.shutdown = sd_shutdown,
+ .pm = &sd_pm_ops,
},
.rescan = sd_rescan,
.done = sd_done,
@@ -1011,7 +1019,7 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
SCpnt->cmnd[29] = (unsigned char) (this_count >> 16) & 0xff;
SCpnt->cmnd[30] = (unsigned char) (this_count >> 8) & 0xff;
SCpnt->cmnd[31] = (unsigned char) this_count & 0xff;
- } else if (block > 0xffffffff) {
+ } else if (sdp->use_16_for_rw) {
SCpnt->cmnd[0] += READ_16 - READ_6;
SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0);
SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0;
@@ -2203,6 +2211,8 @@ got_data:
}
}
+ sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);
+
/* Rescale capacity to 512-byte units */
if (sector_size == 4096)
sdkp->capacity <<= 3;
@@ -3052,7 +3062,7 @@ exit:
scsi_disk_put(sdkp);
}
-static int sd_suspend(struct device *dev, pm_message_t mesg)
+static int sd_suspend(struct device *dev)
{
struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev);
int ret = 0;
@@ -3067,7 +3077,7 @@ static int sd_suspend(struct device *dev, pm_message_t mesg)
goto done;
}
- if ((mesg.event & PM_EVENT_SLEEP) && sdkp->device->manage_start_stop) {
+ if (sdkp->device->manage_start_stop) {
sd_printk(KERN_NOTICE, sdkp, "Stopping disk\n");
ret = sd_start_stop_device(sdkp, 0);
}
@@ -3116,10 +3126,6 @@ static int __init init_sd(void)
if (err)
goto err_out;
- err = scsi_register_driver(&sd_template.gendrv);
- if (err)
- goto err_out_class;
-
sd_cdb_cache = kmem_cache_create("sd_ext_cdb", SD_EXT_CDB_SIZE,
0, 0, NULL);
if (!sd_cdb_cache) {
@@ -3133,8 +3139,15 @@ static int __init init_sd(void)
goto err_out_cache;
}
+ err = scsi_register_driver(&sd_template.gendrv);
+ if (err)
+ goto err_out_driver;
+
return 0;
+err_out_driver:
+ mempool_destroy(sd_cdb_pool);
+
err_out_cache:
kmem_cache_destroy(sd_cdb_cache);
@@ -3157,10 +3170,10 @@ static void __exit exit_sd(void)
SCSI_LOG_HLQUEUE(3, printk("exit_sd: exiting sd driver\n"));
+ scsi_unregister_driver(&sd_template.gendrv);
mempool_destroy(sd_cdb_pool);
kmem_cache_destroy(sd_cdb_cache);
- scsi_unregister_driver(&sd_template.gendrv);
class_unregister(&sd_disk_class);
for (i = 0; i < SD_MAJORS; i++)
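Note: the read/write hunk above replaces the per-request test (block > 0xffffffff) with a per-device flag computed once when the capacity is read, so a disk with more than 2^32 - 1 blocks issues READ_16/WRITE_16 for every transfer, not only for LBAs past the 32-bit boundary. Worked example with assumed geometry:

	/* set once at capacity-read time (from the hunk above): */
	sdp->use_16_for_rw = (sdkp->capacity > 0xffffffff);

	/* e.g. a 4 TB disk with 512-byte logical blocks holds about
	 * 7.8e9 blocks > 0xffffffff, so use_16_for_rw is true and even
	 * an I/O at LBA 0 goes out as READ_16. */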
diff --git a/drivers/scsi/sgiwd93.c b/drivers/scsi/sgiwd93.c
index 3a9d85ca6047..a464d959f66e 100644
--- a/drivers/scsi/sgiwd93.c
+++ b/drivers/scsi/sgiwd93.c
@@ -226,7 +226,7 @@ static struct scsi_host_template sgiwd93_template = {
.use_clustering = DISABLE_CLUSTERING,
};
-static int __devinit sgiwd93_probe(struct platform_device *pdev)
+static int sgiwd93_probe(struct platform_device *pdev)
{
struct sgiwd93_platform_data *pd = pdev->dev.platform_data;
unsigned char *wdregs = pd->wdregs;
@@ -312,7 +312,7 @@ static int __exit sgiwd93_remove(struct platform_device *pdev)
static struct platform_driver sgiwd93_driver = {
.probe = sgiwd93_probe,
- .remove = __devexit_p(sgiwd93_remove),
+ .remove = sgiwd93_remove,
.driver = {
.name = "sgiwd93",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/sim710.c b/drivers/scsi/sim710.c
index a318264a4ba1..3b3b56f4a830 100644
--- a/drivers/scsi/sim710.c
+++ b/drivers/scsi/sim710.c
@@ -94,9 +94,9 @@ static struct scsi_host_template sim710_driver_template = {
.module = THIS_MODULE,
};
-static __devinit int
-sim710_probe_common(struct device *dev, unsigned long base_addr,
- int irq, int clock, int differential, int scsi_id)
+static int sim710_probe_common(struct device *dev, unsigned long base_addr,
+ int irq, int clock, int differential,
+ int scsi_id)
{
struct Scsi_Host * host = NULL;
struct NCR_700_Host_Parameters *hostdata =
@@ -153,8 +153,7 @@ sim710_probe_common(struct device *dev, unsigned long base_addr,
return -ENODEV;
}
-static __devexit int
-sim710_device_remove(struct device *dev)
+static int sim710_device_remove(struct device *dev)
{
struct Scsi_Host *host = dev_get_drvdata(dev);
struct NCR_700_Host_Parameters *hostdata =
@@ -221,7 +220,7 @@ static struct eisa_driver sim710_eisa_driver = {
.driver = {
.name = "sim710",
.probe = sim710_eisa_probe,
- .remove = __devexit_p(sim710_device_remove),
+ .remove = sim710_device_remove,
},
};
#endif /* CONFIG_EISA */
diff --git a/drivers/scsi/sni_53c710.c b/drivers/scsi/sni_53c710.c
index cf51432f8e72..52d54e7425db 100644
--- a/drivers/scsi/sni_53c710.c
+++ b/drivers/scsi/sni_53c710.c
@@ -65,7 +65,7 @@ static struct scsi_host_template snirm710_template = {
.module = THIS_MODULE,
};
-static int __devinit snirm710_probe(struct platform_device *dev)
+static int snirm710_probe(struct platform_device *dev)
{
unsigned long base;
struct NCR_700_Host_Parameters *hostdata;
@@ -134,7 +134,7 @@ static int __exit snirm710_driver_remove(struct platform_device *dev)
static struct platform_driver snirm710_driver = {
.probe = snirm710_probe,
- .remove = __devexit_p(snirm710_driver_remove),
+ .remove = snirm710_driver_remove,
.driver = {
.name = "snirm_53c710",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 606215e54b88..325c31caa6e0 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -1540,8 +1540,7 @@ static void stex_free_irq(struct st_hba *hba)
pci_disable_msi(pdev);
}
-static int __devinit
-stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct st_hba *hba;
struct Scsi_Host *host;
@@ -1815,7 +1814,7 @@ static struct pci_driver stex_pci_driver = {
.name = DRV_NAME,
.id_table = stex_pci_tbl,
.probe = stex_probe,
- .remove = __devexit_p(stex_remove),
+ .remove = stex_remove,
.shutdown = stex_shutdown,
};
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 01440782feb2..5dd6c49bfa7e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -201,6 +201,7 @@ enum storvsc_request_type {
#define SRB_STATUS_AUTOSENSE_VALID 0x80
#define SRB_STATUS_INVALID_LUN 0x20
#define SRB_STATUS_SUCCESS 0x01
+#define SRB_STATUS_ABORTED 0x02
#define SRB_STATUS_ERROR 0x04
/*
@@ -295,6 +296,25 @@ struct storvsc_scan_work {
uint lun;
};
+static void storvsc_device_scan(struct work_struct *work)
+{
+ struct storvsc_scan_work *wrk;
+ uint lun;
+ struct scsi_device *sdev;
+
+ wrk = container_of(work, struct storvsc_scan_work, work);
+ lun = wrk->lun;
+
+ sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ if (!sdev)
+ goto done;
+ scsi_rescan_device(&sdev->sdev_gendev);
+ scsi_device_put(sdev);
+
+done:
+ kfree(wrk);
+}
+
static void storvsc_bus_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
@@ -467,6 +487,7 @@ static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
if (!bounce_sgl)
return NULL;
+ sg_init_table(bounce_sgl, num_pages);
for (i = 0; i < num_pages; i++) {
page_buf = alloc_page(GFP_ATOMIC);
if (!page_buf)
@@ -760,6 +781,66 @@ cleanup:
return ret;
}
+static void storvsc_handle_error(struct vmscsi_request *vm_srb,
+ struct scsi_cmnd *scmnd,
+ struct Scsi_Host *host,
+ u8 asc, u8 ascq)
+{
+ struct storvsc_scan_work *wrk;
+ void (*process_err_fn)(struct work_struct *work);
+ bool do_work = false;
+
+ switch (vm_srb->srb_status) {
+ case SRB_STATUS_ERROR:
+ /*
+ * If there is an error, offline the device, since all
+ * error recovery strategies would already have been
+ * deployed on the host side. However, if the command
+ * was a pass-through command, deal with it appropriately.
+ */
+ switch (scmnd->cmnd[0]) {
+ case ATA_16:
+ case ATA_12:
+ set_host_byte(scmnd, DID_PASSTHROUGH);
+ break;
+ default:
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ }
+ break;
+ case SRB_STATUS_INVALID_LUN:
+ do_work = true;
+ process_err_fn = storvsc_remove_lun;
+ break;
+ case (SRB_STATUS_ABORTED | SRB_STATUS_AUTOSENSE_VALID):
+ if ((asc == 0x2a) && (ascq == 0x9)) {
+ do_work = true;
+ process_err_fn = storvsc_device_scan;
+ /*
+ * Retry the I/O that triggered this.
+ */
+ set_host_byte(scmnd, DID_REQUEUE);
+ }
+ break;
+ }
+
+ if (!do_work)
+ return;
+
+ /*
+ * Schedule work to process this error.
+ */
+ wrk = kmalloc(sizeof(struct storvsc_scan_work), GFP_ATOMIC);
+ if (!wrk) {
+ set_host_byte(scmnd, DID_TARGET_FAILURE);
+ return;
+ }
+
+ wrk->host = host;
+ wrk->lun = vm_srb->lun;
+ INIT_WORK(&wrk->work, process_err_fn);
+ schedule_work(&wrk->work);
+}
+
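
Two details of storvsc_handle_error() are worth noting. Additional sense code 0x2a with qualifier 0x09 is the standard "capacity data has changed" indication, which is why that arm rescans the LUN for the new size and requeues the interrupted I/O rather than failing it. And each scheduled handler owns the storvsc_scan_work it is handed and must free it, as storvsc_device_scan() above does; the self-freeing shape, with an illustrative function name:

	static void example_err_work(struct work_struct *work)
	{
		struct storvsc_scan_work *wrk =
			container_of(work, struct storvsc_scan_work, work);

		/* act on wrk->host and wrk->lun, then release the item */
		kfree(wrk);
	}
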
static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
{
@@ -768,8 +849,13 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
void (*scsi_done_fn)(struct scsi_cmnd *);
struct scsi_sense_hdr sense_hdr;
struct vmscsi_request *vm_srb;
- struct storvsc_scan_work *wrk;
struct stor_mem_pools *memp = scmnd->device->hostdata;
+ struct Scsi_Host *host;
+ struct storvsc_device *stor_dev;
+ struct hv_device *dev = host_dev->dev;
+
+ stor_dev = get_in_stor_device(dev);
+ host = stor_dev->host;
vm_srb = &cmd_request->vstor_packet.vm_srb;
if (cmd_request->bounce_sgl_count) {
@@ -782,55 +868,18 @@ static void storvsc_command_completion(struct storvsc_cmd_request *cmd_request)
cmd_request->bounce_sgl_count);
}
- /*
- * If there is an error; offline the device since all
- * error recovery strategies would have already been
- * deployed on the host side. However, if the command
- * were a pass-through command deal with it appropriately.
- */
scmnd->result = vm_srb->scsi_status;
- if (vm_srb->srb_status == SRB_STATUS_ERROR) {
- switch (scmnd->cmnd[0]) {
- case ATA_16:
- case ATA_12:
- set_host_byte(scmnd, DID_PASSTHROUGH);
- break;
- default:
- set_host_byte(scmnd, DID_TARGET_FAILURE);
- }
- }
-
-
- /*
- * If the LUN is invalid; remove the device.
- */
- if (vm_srb->srb_status == SRB_STATUS_INVALID_LUN) {
- struct storvsc_device *stor_dev;
- struct hv_device *dev = host_dev->dev;
- struct Scsi_Host *host;
-
- stor_dev = get_in_stor_device(dev);
- host = stor_dev->host;
-
- wrk = kmalloc(sizeof(struct storvsc_scan_work),
- GFP_ATOMIC);
- if (!wrk) {
- scmnd->result = DID_TARGET_FAILURE << 16;
- } else {
- wrk->host = host;
- wrk->lun = vm_srb->lun;
- INIT_WORK(&wrk->work, storvsc_remove_lun);
- schedule_work(&wrk->work);
- }
- }
-
if (scmnd->result) {
if (scsi_normalize_sense(scmnd->sense_buffer,
SCSI_SENSE_BUFFERSIZE, &sense_hdr))
scsi_print_sense_hdr("storvsc", &sense_hdr);
}
+ if (vm_srb->srb_status != SRB_STATUS_SUCCESS)
+ storvsc_handle_error(vm_srb, scmnd, host, sense_hdr.asc,
+ sense_hdr.ascq);
+
scsi_set_resid(scmnd,
cmd_request->data_buffer.len -
vm_srb->data_transfer_length);
@@ -1155,6 +1204,8 @@ static int storvsc_device_configure(struct scsi_device *sdevice)
blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
+ sdevice->no_write_same = 1;
+
return 0;
}
@@ -1237,6 +1288,8 @@ static bool storvsc_scsi_cmd_ok(struct scsi_cmnd *scmnd)
u8 scsi_op = scmnd->cmnd[0];
switch (scsi_op) {
+ /* the host does not handle WRITE_SAME, log accidental usage */
+ case WRITE_SAME:
/*
* smartd sends this command and the host does not handle
* this. So, don't send it.
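
Taken together, the two storvsc hunks above fence off WRITE SAME at both ends: storvsc_device_configure() stops the block layer from generating the command at all, and storvsc_scsi_cmd_ok() drops any instance that still arrives (smartd being the known source). A minimal sketch of the pattern, with illustrative function names:

	static int example_device_configure(struct scsi_device *sdevice)
	{
		sdevice->no_write_same = 1;	/* block layer: never issue it */
		return 0;
	}

	static bool example_scsi_cmd_ok(struct scsi_cmnd *scmnd)
	{
		switch (scmnd->cmnd[0]) {
		case WRITE_SAME:	/* host cannot handle it: refuse to send */
			return false;
		default:
			return true;
		}
	}
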
diff --git a/drivers/scsi/sun3x_esp.c b/drivers/scsi/sun3x_esp.c
index 0621037f0271..534eb96fc3a7 100644
--- a/drivers/scsi/sun3x_esp.c
+++ b/drivers/scsi/sun3x_esp.c
@@ -194,7 +194,7 @@ static const struct esp_driver_ops sun3x_esp_ops = {
.dma_error = sun3x_esp_dma_error,
};
-static int __devinit esp_sun3x_probe(struct platform_device *dev)
+static int esp_sun3x_probe(struct platform_device *dev)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -268,7 +268,7 @@ fail:
return err;
}
-static int __devexit esp_sun3x_remove(struct platform_device *dev)
+static int esp_sun3x_remove(struct platform_device *dev)
{
struct esp *esp = dev_get_drvdata(&dev->dev);
unsigned int irq = esp->host->irq;
@@ -292,7 +292,7 @@ static int __devexit esp_sun3x_remove(struct platform_device *dev)
static struct platform_driver esp_sun3x_driver = {
.probe = esp_sun3x_probe,
- .remove = __devexit_p(esp_sun3x_remove),
+ .remove = esp_sun3x_remove,
.driver = {
.name = "sun3x_esp",
.owner = THIS_MODULE,
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c
index 676fe9ac7f61..f2e68459f7ea 100644
--- a/drivers/scsi/sun_esp.c
+++ b/drivers/scsi/sun_esp.c
@@ -43,8 +43,7 @@ enum dvma_rev {
dvmahme
};
-static int __devinit esp_sbus_setup_dma(struct esp *esp,
- struct platform_device *dma_of)
+static int esp_sbus_setup_dma(struct esp *esp, struct platform_device *dma_of)
{
esp->dma = dma_of;
@@ -79,7 +78,7 @@ static int __devinit esp_sbus_setup_dma(struct esp *esp,
}
-static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
+static int esp_sbus_map_regs(struct esp *esp, int hme)
{
struct platform_device *op = esp->dev;
struct resource *res;
@@ -99,7 +98,7 @@ static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
return 0;
}
-static int __devinit esp_sbus_map_command_block(struct esp *esp)
+static int esp_sbus_map_command_block(struct esp *esp)
{
struct platform_device *op = esp->dev;
@@ -111,7 +110,7 @@ static int __devinit esp_sbus_map_command_block(struct esp *esp)
return 0;
}
-static int __devinit esp_sbus_register_irq(struct esp *esp)
+static int esp_sbus_register_irq(struct esp *esp)
{
struct Scsi_Host *host = esp->host;
struct platform_device *op = esp->dev;
@@ -120,7 +119,7 @@ static int __devinit esp_sbus_register_irq(struct esp *esp)
return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}
-static void __devinit esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
+static void esp_get_scsi_id(struct esp *esp, struct platform_device *espdma)
{
struct platform_device *op = esp->dev;
struct device_node *dp;
@@ -142,7 +141,7 @@ done:
esp->scsi_id_mask = (1 << esp->scsi_id);
}
-static void __devinit esp_get_differential(struct esp *esp)
+static void esp_get_differential(struct esp *esp)
{
struct platform_device *op = esp->dev;
struct device_node *dp;
@@ -154,7 +153,7 @@ static void __devinit esp_get_differential(struct esp *esp)
esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}
-static void __devinit esp_get_clock_params(struct esp *esp)
+static void esp_get_clock_params(struct esp *esp)
{
struct platform_device *op = esp->dev;
struct device_node *bus_dp, *dp;
@@ -170,7 +169,7 @@ static void __devinit esp_get_clock_params(struct esp *esp)
esp->cfreq = fmhz;
}
-static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
+static void esp_get_bursts(struct esp *esp, struct platform_device *dma_of)
{
struct device_node *dma_dp = dma_of->dev.of_node;
struct platform_device *op = esp->dev;
@@ -195,7 +194,7 @@ static void __devinit esp_get_bursts(struct esp *esp, struct platform_device *dm
esp->bursts = bursts;
}
-static void __devinit esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
+static void esp_sbus_get_props(struct esp *esp, struct platform_device *espdma)
{
esp_get_scsi_id(esp, espdma);
esp_get_differential(esp);
@@ -487,9 +486,8 @@ static const struct esp_driver_ops sbus_esp_ops = {
.dma_error = sbus_esp_dma_error,
};
-static int __devinit esp_sbus_probe_one(struct platform_device *op,
- struct platform_device *espdma,
- int hme)
+static int esp_sbus_probe_one(struct platform_device *op,
+ struct platform_device *espdma, int hme)
{
struct scsi_host_template *tpnt = &scsi_esp_template;
struct Scsi_Host *host;
@@ -562,7 +560,7 @@ fail:
return err;
}
-static int __devinit esp_sbus_probe(struct platform_device *op)
+static int esp_sbus_probe(struct platform_device *op)
{
struct device_node *dma_node = NULL;
struct device_node *dp = op->dev.of_node;
@@ -585,7 +583,7 @@ static int __devinit esp_sbus_probe(struct platform_device *op)
return esp_sbus_probe_one(op, dma_of, hme);
}
-static int __devexit esp_sbus_remove(struct platform_device *op)
+static int esp_sbus_remove(struct platform_device *op)
{
struct esp *esp = dev_get_drvdata(&op->dev);
struct platform_device *dma_of = esp->dma;
@@ -639,7 +637,7 @@ static struct platform_driver esp_sbus_driver = {
.of_match_table = esp_match,
},
.probe = esp_sbus_probe,
- .remove = __devexit_p(esp_sbus_remove),
+ .remove = esp_sbus_remove,
};
static int __init sunesp_init(void)
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index ac4eca6a5328..0b7819f3e09b 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -581,7 +581,7 @@ static int sym53c416_test(int base)
}
-static struct isapnp_device_id id_table[] __devinitdata = {
+static struct isapnp_device_id id_table[] = {
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
ISAPNP_VENDOR('S','L','I'), ISAPNP_FUNCTION(0x4161), 0 },
{ ISAPNP_ANY_ID, ISAPNP_ANY_ID,
diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
index e2b8e68b57e7..599568299fbe 100644
--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
@@ -1284,8 +1284,7 @@ static int sym53c8xx_proc_info(struct Scsi_Host *shost, char *buffer,
* sym_free_resources() should be used instead of this function after calling
* sym_attach().
*/
-static void __devinit
-sym_iounmap_device(struct sym_device *device)
+static void sym_iounmap_device(struct sym_device *device)
{
if (device->s.ioaddr)
pci_iounmap(device->pdev, device->s.ioaddr);
@@ -1325,8 +1324,8 @@ static void sym_free_resources(struct sym_hcb *np, struct pci_dev *pdev,
* If all is OK, install interrupt handling and
* start the timer daemon.
*/
-static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
- int unit, struct sym_device *dev)
+static struct Scsi_Host *sym_attach(struct scsi_host_template *tpnt, int unit,
+ struct sym_device *dev)
{
struct sym_data *sym_data;
struct sym_hcb *np = NULL;
@@ -1481,7 +1480,7 @@ static struct Scsi_Host * __devinit sym_attach(struct scsi_host_template *tpnt,
* Detect and try to read SYMBIOS and TEKRAM NVRAM.
*/
#if SYM_CONF_NVRAM_SUPPORT
-static void __devinit sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
+static void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
{
devp->nvram = nvp;
nvp->type = 0;
@@ -1494,7 +1493,7 @@ static inline void sym_get_nvram(struct sym_device *devp, struct sym_nvram *nvp)
}
#endif /* SYM_CONF_NVRAM_SUPPORT */
-static int __devinit sym_check_supported(struct sym_device *device)
+static int sym_check_supported(struct sym_device *device)
{
struct sym_chip *chip;
struct pci_dev *pdev = device->pdev;
@@ -1531,7 +1530,7 @@ static int __devinit sym_check_supported(struct sym_device *device)
* Ignore Symbios chips controlled by various RAID controllers.
* These controllers set value 0x52414944 at RAM end - 16.
*/
-static int __devinit sym_check_raid(struct sym_device *device)
+static int sym_check_raid(struct sym_device *device)
{
unsigned int ram_size, ram_val;
@@ -1552,7 +1551,7 @@ static int __devinit sym_check_raid(struct sym_device *device)
return -ENODEV;
}
-static int __devinit sym_set_workarounds(struct sym_device *device)
+static int sym_set_workarounds(struct sym_device *device)
{
struct sym_chip *chip = &device->chip;
struct pci_dev *pdev = device->pdev;
@@ -1602,8 +1601,7 @@ static int __devinit sym_set_workarounds(struct sym_device *device)
/*
* Map HBA registers and on-chip SRAM (if present).
*/
-static int __devinit
-sym_iomap_device(struct sym_device *device)
+static int sym_iomap_device(struct sym_device *device)
{
struct pci_dev *pdev = device->pdev;
struct pci_bus_region bus_addr;
@@ -1751,8 +1749,7 @@ static struct scsi_host_template sym2_template = {
static int attach_count;
-static int __devinit sym2_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
+static int sym2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct sym_device sym_dev;
struct sym_nvram nvram;
@@ -2077,7 +2074,7 @@ static struct spi_function_template sym2_transport_functions = {
.get_signalling = sym2_get_signalling,
};
-static struct pci_device_id sym2_id_table[] __devinitdata = {
+static struct pci_device_id sym2_id_table[] = {
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C810,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_NCR_53C820,
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index a1baccce05f0..9327f5fcec4e 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -2219,7 +2219,7 @@ static struct scsi_host_template driver_template = {
*
**********************************************************************/
-static void __devinit dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
+static void dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
{
u8 carryFlag = 1, j = 0x80, bval;
int i;
@@ -2242,7 +2242,7 @@ static void __devinit dc390_eeprom_prepare_read(struct pci_dev *pdev, u8 cmd)
}
}
-static u16 __devinit dc390_eeprom_get_data(struct pci_dev *pdev)
+static u16 dc390_eeprom_get_data(struct pci_dev *pdev)
{
int i;
u16 wval = 0;
@@ -2264,7 +2264,7 @@ static u16 __devinit dc390_eeprom_get_data(struct pci_dev *pdev)
return wval;
}
-static void __devinit dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
+static void dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
{
u8 cmd = EEPROM_READ, i;
@@ -2282,7 +2282,7 @@ static void __devinit dc390_read_eeprom(struct pci_dev *pdev, u16 *ptr)
}
/* Override EEprom values with explicitly set values */
-static void __devinit dc390_eeprom_override(u8 index)
+static void dc390_eeprom_override(u8 index)
{
u8 *ptr = (u8 *) dc390_eepromBuf[index], id;
@@ -2305,7 +2305,7 @@ static void __devinit dc390_eeprom_override(u8 index)
}
}
-static int __devinitdata tmscsim_def[] = {
+static int tmscsim_def[] = {
7,
0 /* 10MHz */,
PARITY_CHK_ | SEND_START_ | EN_DISCONNECT_ | SYNC_NEGO_ | TAG_QUEUEING_,
@@ -2315,7 +2315,7 @@ static int __devinitdata tmscsim_def[] = {
};
/* Copy defaults over set values where missing */
-static void __devinit dc390_fill_with_defaults (void)
+static void dc390_fill_with_defaults (void)
{
int i;
@@ -2335,7 +2335,7 @@ static void __devinit dc390_fill_with_defaults (void)
tmscsim[5] = 180;
}
-static void __devinit dc390_check_eeprom(struct pci_dev *pdev, u8 index)
+static void dc390_check_eeprom(struct pci_dev *pdev, u8 index)
{
u8 interpd[] = {1, 3, 5, 10, 16, 30, 60, 120};
u8 EEbuf[128];
@@ -2372,7 +2372,7 @@ static void __devinit dc390_check_eeprom(struct pci_dev *pdev, u8 index)
}
}
-static void __devinit dc390_init_hw(struct dc390_acb *pACB, u8 index)
+static void dc390_init_hw(struct dc390_acb *pACB, u8 index)
{
struct Scsi_Host *shost = pACB->pScsiHost;
u8 dstate;
@@ -2422,8 +2422,7 @@ static void __devinit dc390_init_hw(struct dc390_acb *pACB, u8 index)
DC390_write8(DMA_Status, dstate);
}
-static int __devinit dc390_probe_one(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int dc390_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct dc390_acb *pACB;
struct Scsi_Host *shost;
@@ -2532,7 +2531,7 @@ static int __devinit dc390_probe_one(struct pci_dev *pdev,
*
* @dev: The PCI device to remove.
*/
-static void __devexit dc390_remove_one(struct pci_dev *dev)
+static void dc390_remove_one(struct pci_dev *dev)
{
struct Scsi_Host *scsi_host = pci_get_drvdata(dev);
unsigned long iflags;
@@ -2568,7 +2567,7 @@ static struct pci_driver dc390_driver = {
.name = "tmscsim",
.id_table = tmscsim_pci_tbl,
.probe = dc390_probe_one,
- .remove = __devexit_p(dc390_remove_one),
+ .remove = dc390_remove_one,
};
static int __init dc390_module_init(void)
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index 8f27f9d6f91d..0371047c5922 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -2,48 +2,58 @@
# Kernel configuration file for the UFS Host Controller
#
# This code is based on drivers/scsi/ufs/Kconfig
-# Copyright (C) 2011 Samsung Samsung India Software Operations
+# Copyright (C) 2011-2013 Samsung India Software Operations
+#
+# Authors:
+# Santosh Yaraganavi <santosh.sy@samsung.com>
+# Vinayak Holikatti <h.vinayak@samsung.com>
#
-# Santosh Yaraganavi <santosh.sy@samsung.com>
-# Vinayak Holikatti <h.vinayak@samsung.com>
-
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
-
+# See the COPYING file in the top-level directory or visit
+# <http://www.gnu.org/licenses/gpl-2.0.html>
+#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
+#
+# This program is provided "AS IS" and "WITH ALL FAULTS" and
+# without warranty of any kind. You are solely responsible for
+# determining the appropriateness of using and distributing
+# the program and assume all risks associated with your exercise
+# of rights with respect to the program, including but not limited
+# to infringement of third party rights, the risks and costs of
+# program errors, damage to or loss of data, programs or equipment,
+# and unavailability or interruption of operations. Under no
+# circumstances will the contributor of this Program be liable for
+# any damages of any kind arising from your use or distribution of
+# this program.
-# NO WARRANTY
-# THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
-# CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
-# LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
-# MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
-# solely responsible for determining the appropriateness of using and
-# distributing the Program and assumes all risks associated with its
-# exercise of rights under this Agreement, including but not limited to
-# the risks and costs of program errors, damage to or loss of data,
-# programs or equipment, and unavailability or interruption of operations.
-
-# DISCLAIMER OF LIABILITY
-# NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-# DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
-# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
-# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
-# HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+config SCSI_UFSHCD
+ tristate "Universal Flash Storage Controller Driver Core"
+ depends on SCSI
+ ---help---
+ This selects support for UFS devices in Linux. Say Y and make
+ sure that you know the name of your UFS host adapter (the card
+ inside your computer that "speaks" the UFS protocol, also
+ called a UFS Host Controller), because you will be asked for it.
+ The module will be called ufshcd.
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
-# USA.
+ To compile this driver as a module, choose M here and read
+ <file:Documentation/scsi/ufs.txt>.
+ However, do not compile this as a module if your root file system
+ (the one containing the directory /) is located on a UFS device.
-config SCSI_UFSHCD
- tristate "Universal Flash Storage host controller driver"
- depends on PCI && SCSI
+config SCSI_UFSHCD_PCI
+ tristate "PCI bus based UFS Controller support"
+ depends on SCSI_UFSHCD && PCI
---help---
- This is a generic driver which supports PCIe UFS Host controllers.
+ This selects the PCI UFS Host Controller Interface. Select this if
+ you have a UFS Host Controller with a PCI interface.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
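
Since SCSI_UFSHCD_PCI depends on SCSI_UFSHCD, a PCI-attached controller needs both symbols enabled; assuming a typical modular build, the corresponding .config fragment would be:

	CONFIG_SCSI_UFSHCD=m
	CONFIG_SCSI_UFSHCD_PCI=m
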
diff --git a/drivers/scsi/ufs/Makefile b/drivers/scsi/ufs/Makefile
index adf7895a6a91..9eda0dfbd6df 100644
--- a/drivers/scsi/ufs/Makefile
+++ b/drivers/scsi/ufs/Makefile
@@ -1,2 +1,3 @@
# UFSHCD makefile
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd.o
+obj-$(CONFIG_SCSI_UFSHCD_PCI) += ufshcd-pci.o
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index b207529f8d54..139bc0647b41 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufs.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFS_H
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
new file mode 100644
index 000000000000..5cb1d75f5868
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -0,0 +1,211 @@
+/*
+ * Universal Flash Storage Host controller PCI glue driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd-pci.c
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#include "ufshcd.h"
+#include <linux/pci.h>
+
+#ifdef CONFIG_PM
+/**
+ * ufshcd_pci_suspend - suspend power management function
+ * @pdev: pointer to PCI device handle
+ * @state: power state
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_suspend
+ * 2. Do bus specific power management
+ */
+
+ return -ENOSYS;
+}
+
+/**
+ * ufshcd_pci_resume - resume power management function
+ * @pdev: pointer to PCI device handle
+ *
+ * Returns -ENOSYS
+ */
+static int ufshcd_pci_resume(struct pci_dev *pdev)
+{
+ /*
+ * TODO:
+ * 1. Call ufshcd_resume.
+ * 2. Do bus specific wake up
+ */
+
+ return -ENOSYS;
+}
+#endif /* CONFIG_PM */
+
+/**
+ * ufshcd_pci_shutdown - main function to put the controller in reset state
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_shutdown(struct pci_dev *pdev)
+{
+ ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+}
+
+/**
+ * ufshcd_pci_remove - de-allocate the PCI/SCSI host and its
+ * host memory space data structures
+ * @pdev: pointer to PCI device handle
+ */
+static void ufshcd_pci_remove(struct pci_dev *pdev)
+{
+ struct ufs_hba *hba = pci_get_drvdata(pdev);
+
+ disable_irq(pdev->irq);
+ free_irq(pdev->irq, hba);
+ ufshcd_remove(hba);
+ pci_release_regions(pdev);
+ pci_set_drvdata(pdev, NULL);
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+}
+
+/**
+ * ufshcd_set_dma_mask - Set dma mask based on the controller
+ * addressing capability
+ * @pdev: PCI device structure
+ *
+ * Returns 0 for success, non-zero for failure
+ */
+static int ufshcd_set_dma_mask(struct pci_dev *pdev)
+{
+ int err;
+
+ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+ && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
+ return 0;
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+ return err;
+}
+
+/**
+ * ufshcd_pci_probe - probe routine of the driver
+ * @pdev: pointer to PCI device handle
+ * @id: PCI device id
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int
+ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct ufs_hba *hba;
+ void __iomem *mmio_base;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "pci_enable_device failed\n");
+ goto out_error;
+ }
+
+ pci_set_master(pdev);
+
+ err = pci_request_regions(pdev, UFSHCD);
+ if (err < 0) {
+ dev_err(&pdev->dev, "request regions failed\n");
+ goto out_disable;
+ }
+
+ mmio_base = pci_ioremap_bar(pdev, 0);
+ if (!mmio_base) {
+ dev_err(&pdev->dev, "memory map failed\n");
+ err = -ENOMEM;
+ goto out_release_regions;
+ }
+
+ err = ufshcd_set_dma_mask(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "set dma mask failed\n");
+ goto out_iounmap;
+ }
+
+ err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+ if (err) {
+ dev_err(&pdev->dev, "Initialization failed\n");
+ goto out_iounmap;
+ }
+
+ pci_set_drvdata(pdev, hba);
+
+ return 0;
+
+out_iounmap:
+ iounmap(mmio_base);
+out_release_regions:
+ pci_release_regions(pdev);
+out_disable:
+ pci_clear_master(pdev);
+ pci_disable_device(pdev);
+out_error:
+ return err;
+}
+
+static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
+ { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { } /* terminate list */
+};
+
+MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
+
+static struct pci_driver ufshcd_pci_driver = {
+ .name = UFSHCD,
+ .id_table = ufshcd_pci_tbl,
+ .probe = ufshcd_pci_probe,
+ .remove = ufshcd_pci_remove,
+ .shutdown = ufshcd_pci_shutdown,
+#ifdef CONFIG_PM
+ .suspend = ufshcd_pci_suspend,
+ .resume = ufshcd_pci_resume,
+#endif
+};
+
+module_pci_driver(ufshcd_pci_driver);
+
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("UFS host controller PCI glue driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 58f4ba6fe412..60fd40c4e4c2 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1,77 +1,39 @@
/*
- * Universal Flash Storage Host controller driver
+ * Universal Flash Storage Host controller driver Core
*
* This code is based on drivers/scsi/ufs/ufshcd.c
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/io.h>
-#include <linux/delay.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/workqueue.h>
-#include <linux/errno.h>
-#include <linux/types.h>
-#include <linux/wait.h>
-#include <linux/bitops.h>
-
-#include <asm/irq.h>
-#include <asm/byteorder.h>
-#include <scsi/scsi.h>
-#include <scsi/scsi_cmnd.h>
-#include <scsi/scsi_host.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/scsi_dbg.h>
-#include <scsi/scsi_eh.h>
-
-#include "ufs.h"
-#include "ufshci.h"
-
-#define UFSHCD "ufshcd"
-#define UFSHCD_DRIVER_VERSION "0.1"
+#include "ufshcd.h"
enum {
UFSHCD_MAX_CHANNEL = 0,
@@ -102,121 +64,6 @@ enum {
};
/**
- * struct uic_command - UIC command structure
- * @command: UIC command
- * @argument1: UIC command argument 1
- * @argument2: UIC command argument 2
- * @argument3: UIC command argument 3
- * @cmd_active: Indicate if UIC command is outstanding
- * @result: UIC command result
- */
-struct uic_command {
- u32 command;
- u32 argument1;
- u32 argument2;
- u32 argument3;
- int cmd_active;
- int result;
-};
-
-/**
- * struct ufs_hba - per adapter private structure
- * @mmio_base: UFSHCI base register address
- * @ucdl_base_addr: UFS Command Descriptor base address
- * @utrdl_base_addr: UTP Transfer Request Descriptor base address
- * @utmrdl_base_addr: UTP Task Management Descriptor base address
- * @ucdl_dma_addr: UFS Command Descriptor DMA address
- * @utrdl_dma_addr: UTRDL DMA address
- * @utmrdl_dma_addr: UTMRDL DMA address
- * @host: Scsi_Host instance of the driver
- * @pdev: PCI device handle
- * @lrb: local reference block
- * @outstanding_tasks: Bits representing outstanding task requests
- * @outstanding_reqs: Bits representing outstanding transfer requests
- * @capabilities: UFS Controller Capabilities
- * @nutrs: Transfer Request Queue depth supported by controller
- * @nutmrs: Task Management Queue depth supported by controller
- * @active_uic_cmd: handle of active UIC command
- * @ufshcd_tm_wait_queue: wait queue for task management
- * @tm_condition: condition variable for task management
- * @ufshcd_state: UFSHCD states
- * @int_enable_mask: Interrupt Mask Bits
- * @uic_workq: Work queue for UIC completion handling
- * @feh_workq: Work queue for fatal controller error handling
- * @errors: HBA errors
- */
-struct ufs_hba {
- void __iomem *mmio_base;
-
- /* Virtual memory reference */
- struct utp_transfer_cmd_desc *ucdl_base_addr;
- struct utp_transfer_req_desc *utrdl_base_addr;
- struct utp_task_req_desc *utmrdl_base_addr;
-
- /* DMA memory reference */
- dma_addr_t ucdl_dma_addr;
- dma_addr_t utrdl_dma_addr;
- dma_addr_t utmrdl_dma_addr;
-
- struct Scsi_Host *host;
- struct pci_dev *pdev;
-
- struct ufshcd_lrb *lrb;
-
- unsigned long outstanding_tasks;
- unsigned long outstanding_reqs;
-
- u32 capabilities;
- int nutrs;
- int nutmrs;
- u32 ufs_version;
-
- struct uic_command active_uic_cmd;
- wait_queue_head_t ufshcd_tm_wait_queue;
- unsigned long tm_condition;
-
- u32 ufshcd_state;
- u32 int_enable_mask;
-
- /* Work Queues */
- struct work_struct uic_workq;
- struct work_struct feh_workq;
-
- /* HBA Errors */
- u32 errors;
-};
-
-/**
- * struct ufshcd_lrb - local reference block
- * @utr_descriptor_ptr: UTRD address of the command
- * @ucd_cmd_ptr: UCD address of the command
- * @ucd_rsp_ptr: Response UPIU address for this command
- * @ucd_prdt_ptr: PRDT address of the command
- * @cmd: pointer to SCSI command
- * @sense_buffer: pointer to sense buffer address of the SCSI command
- * @sense_bufflen: Length of the sense buffer
- * @scsi_status: SCSI status of the command
- * @command_type: SCSI, UFS, Query.
- * @task_tag: Task tag of the command
- * @lun: LUN of the command
- */
-struct ufshcd_lrb {
- struct utp_transfer_req_desc *utr_descriptor_ptr;
- struct utp_upiu_cmd *ucd_cmd_ptr;
- struct utp_upiu_rsp *ucd_rsp_ptr;
- struct ufshcd_sg_entry *ucd_prdt_ptr;
-
- struct scsi_cmnd *cmd;
- u8 *sense_buffer;
- unsigned int sense_bufflen;
- int scsi_status;
-
- int command_type;
- int task_tag;
- unsigned int lun;
-};
-
-/**
* ufshcd_get_ufs_version - Get the UFS version supported by the HBA
* @hba - Pointer to adapter instance
*
@@ -335,21 +182,21 @@ static inline void ufshcd_free_hba_memory(struct ufs_hba *hba)
if (hba->utmrdl_base_addr) {
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- dma_free_coherent(&hba->pdev->dev, utmrdl_size,
+ dma_free_coherent(hba->dev, utmrdl_size,
hba->utmrdl_base_addr, hba->utmrdl_dma_addr);
}
if (hba->utrdl_base_addr) {
utrdl_size =
(sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, utrdl_size,
+ dma_free_coherent(hba->dev, utrdl_size,
hba->utrdl_base_addr, hba->utrdl_dma_addr);
}
if (hba->ucdl_base_addr) {
ucdl_size =
(sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- dma_free_coherent(&hba->pdev->dev, ucdl_size,
+ dma_free_coherent(hba->dev, ucdl_size,
hba->ucdl_base_addr, hba->ucdl_dma_addr);
}
}
@@ -429,15 +276,6 @@ static void ufshcd_enable_run_stop_reg(struct ufs_hba *hba)
}
/**
- * ufshcd_hba_stop - Send controller to reset state
- * @hba: per adapter instance
- */
-static inline void ufshcd_hba_stop(struct ufs_hba *hba)
-{
- writel(CONTROLLER_DISABLE, (hba->mmio_base + REG_CONTROLLER_ENABLE));
-}
-
-/**
* ufshcd_hba_start - Start controller initialization sequence
* @hba: per adapter instance
*/
@@ -724,7 +562,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for UTP command descriptors */
ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs);
- hba->ucdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->ucdl_base_addr = dma_alloc_coherent(hba->dev,
ucdl_size,
&hba->ucdl_dma_addr,
GFP_KERNEL);
@@ -737,7 +575,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
*/
if (!hba->ucdl_base_addr ||
WARN_ON(hba->ucdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Command Descriptor Memory allocation failed\n");
goto out;
}
@@ -747,13 +585,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTRD
*/
utrdl_size = (sizeof(struct utp_transfer_req_desc) * hba->nutrs);
- hba->utrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utrdl_base_addr = dma_alloc_coherent(hba->dev,
utrdl_size,
&hba->utrdl_dma_addr,
GFP_KERNEL);
if (!hba->utrdl_base_addr ||
WARN_ON(hba->utrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Transfer Descriptor Memory allocation failed\n");
goto out;
}
@@ -763,13 +601,13 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
* UFSHCI requires 1024 byte alignment of UTMRD
*/
utmrdl_size = sizeof(struct utp_task_req_desc) * hba->nutmrs;
- hba->utmrdl_base_addr = dma_alloc_coherent(&hba->pdev->dev,
+ hba->utmrdl_base_addr = dma_alloc_coherent(hba->dev,
utmrdl_size,
&hba->utmrdl_dma_addr,
GFP_KERNEL);
if (!hba->utmrdl_base_addr ||
WARN_ON(hba->utmrdl_dma_addr & (PAGE_SIZE - 1))) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task Management Descriptor Memory allocation failed\n");
goto out;
}
@@ -777,7 +615,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba)
/* Allocate memory for local reference block */
hba->lrb = kcalloc(hba->nutrs, sizeof(struct ufshcd_lrb), GFP_KERNEL);
if (!hba->lrb) {
- dev_err(&hba->pdev->dev, "LRB Memory allocation failed\n");
+ dev_err(hba->dev, "LRB Memory allocation failed\n");
goto out;
}
return 0;
@@ -867,7 +705,7 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba)
/* check if controller is ready to accept UIC commands */
if (((readl(hba->mmio_base + REG_CONTROLLER_STATUS)) &
UIC_COMMAND_READY) == 0x0) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller not ready"
" to accept UIC commands\n");
return -EIO;
@@ -912,7 +750,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
/* check if device present */
reg = readl((hba->mmio_base + REG_CONTROLLER_STATUS));
if (!ufshcd_is_device_present(reg)) {
- dev_err(&hba->pdev->dev, "cc: Device not present\n");
+ dev_err(hba->dev, "cc: Device not present\n");
err = -ENXIO;
goto out;
}
@@ -924,7 +762,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba)
if (!(ufshcd_get_lists_status(reg))) {
ufshcd_enable_run_stop_reg(hba);
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Host controller not ready to process requests");
err = -EIO;
goto out;
@@ -1005,7 +843,7 @@ static int ufshcd_hba_enable(struct ufs_hba *hba)
if (retry) {
retry--;
} else {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Controller enable failed\n");
return -EIO;
}
@@ -1084,7 +922,7 @@ static int ufshcd_do_reset(struct ufs_hba *hba)
/* start the initialization process */
if (ufshcd_initialize_hba(hba)) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Reset: Controller initialization failed\n");
return FAILED;
}
@@ -1167,7 +1005,7 @@ static int ufshcd_task_req_compl(struct ufs_hba *hba, u32 index)
task_result = SUCCESS;
} else {
task_result = FAILED;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"trc: Invalid ocs = %x\n", ocs_value);
}
spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -1281,7 +1119,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
/* check if the returned transfer response is valid */
result = ufshcd_is_valid_req_rsp(lrbp->ucd_rsp_ptr);
if (result) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Invalid response = %x\n", result);
break;
}
@@ -1310,7 +1148,7 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
case OCS_FATAL_ERROR:
default:
result |= DID_ERROR << 16;
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"OCS error from controller = %x\n", ocs);
break;
} /* end of switch */
@@ -1374,7 +1212,7 @@ static void ufshcd_uic_cc_handler (struct work_struct *work)
!(ufshcd_get_uic_cmd_result(hba))) {
if (ufshcd_make_hba_operational(hba))
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"cc: hba not operational state\n");
return;
}
@@ -1509,7 +1347,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
free_slot = ufshcd_get_tm_free_slot(hba);
if (free_slot >= hba->nutmrs) {
spin_unlock_irqrestore(host->host_lock, flags);
- dev_err(&hba->pdev->dev, "Task management queue full\n");
+ dev_err(hba->dev, "Task management queue full\n");
err = FAILED;
goto out;
}
@@ -1552,7 +1390,7 @@ ufshcd_issue_tm_cmd(struct ufs_hba *hba,
&hba->tm_condition) != 0),
60 * HZ);
if (!err) {
- dev_err(&hba->pdev->dev,
+ dev_err(hba->dev,
"Task management command timed-out\n");
err = FAILED;
goto out;
@@ -1688,23 +1526,13 @@ static struct scsi_host_template ufshcd_driver_template = {
};
/**
- * ufshcd_shutdown - main function to put the controller in reset state
- * @pdev: pointer to PCI device handle
- */
-static void ufshcd_shutdown(struct pci_dev *pdev)
-{
- ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
-}
-
-#ifdef CONFIG_PM
-/**
* ufshcd_suspend - suspend power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
* @state: power state
*
* Returns -ENOSYS
*/
-static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
+int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
{
/*
* TODO:
@@ -1717,14 +1545,15 @@ static int ufshcd_suspend(struct pci_dev *pdev, pm_message_t state)
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(ufshcd_suspend);
/**
* ufshcd_resume - resume power management function
- * @pdev: pointer to PCI device handle
+ * @hba: per adapter instance
*
* Returns -ENOSYS
*/
-static int ufshcd_resume(struct pci_dev *pdev)
+int ufshcd_resume(struct ufs_hba *hba)
{
/*
* TODO:
@@ -1737,7 +1566,7 @@ static int ufshcd_resume(struct pci_dev *pdev)
return -ENOSYS;
}
-#endif /* CONFIG_PM */
+EXPORT_SYMBOL_GPL(ufshcd_resume);
/**
* ufshcd_hba_free - free allocated memory for
@@ -1748,108 +1577,67 @@ static void ufshcd_hba_free(struct ufs_hba *hba)
{
iounmap(hba->mmio_base);
ufshcd_free_hba_memory(hba);
- pci_release_regions(hba->pdev);
}
/**
- * ufshcd_remove - de-allocate PCI/SCSI host and host memory space
+ * ufshcd_remove - de-allocate SCSI host and host memory space
* data structure memory
- * @pdev - pointer to PCI handle
+ * @hba - per adapter instance
*/
-static void ufshcd_remove(struct pci_dev *pdev)
+void ufshcd_remove(struct ufs_hba *hba)
{
- struct ufs_hba *hba = pci_get_drvdata(pdev);
-
/* disable interrupts */
ufshcd_int_config(hba, UFSHCD_INT_DISABLE);
- free_irq(pdev->irq, hba);
ufshcd_hba_stop(hba);
ufshcd_hba_free(hba);
scsi_remove_host(hba->host);
scsi_host_put(hba->host);
- pci_set_drvdata(pdev, NULL);
- pci_clear_master(pdev);
- pci_disable_device(pdev);
-}
-
-/**
- * ufshcd_set_dma_mask - Set dma mask based on the controller
- * addressing capability
- * @pdev: PCI device structure
- *
- * Returns 0 for success, non-zero for failure
- */
-static int ufshcd_set_dma_mask(struct ufs_hba *hba)
-{
- int err;
- u64 dma_mask;
-
- /*
- * If controller supports 64 bit addressing mode, then set the DMA
- * mask to 64-bit, else set the DMA mask to 32-bit
- */
- if (hba->capabilities & MASK_64_ADDRESSING_SUPPORT)
- dma_mask = DMA_BIT_MASK(64);
- else
- dma_mask = DMA_BIT_MASK(32);
-
- err = pci_set_dma_mask(hba->pdev, dma_mask);
- if (err)
- return err;
-
- err = pci_set_consistent_dma_mask(hba->pdev, dma_mask);
-
- return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_remove);
/**
- * ufshcd_probe - probe routine of the driver
- * @pdev: pointer to PCI device handle
- * @id: PCI device id
- *
+ * ufshcd_init - Driver initialization routine
+ * @dev: pointer to device handle
+ * @hba_handle: driver private handle
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
* Returns 0 on success, non-zero value on failure
*/
-static int __devinit
-ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
+ void __iomem *mmio_base, unsigned int irq)
{
struct Scsi_Host *host;
struct ufs_hba *hba;
int err;
- err = pci_enable_device(pdev);
- if (err) {
- dev_err(&pdev->dev, "pci_enable_device failed\n");
+ if (!dev) {
+ dev_err(dev,
+ "Invalid memory reference for dev is NULL\n");
+ err = -ENODEV;
goto out_error;
}
- pci_set_master(pdev);
+ if (!mmio_base) {
+ dev_err(dev,
+ "Invalid memory reference for mmio_base is NULL\n");
+ err = -ENODEV;
+ goto out_error;
+ }
host = scsi_host_alloc(&ufshcd_driver_template,
sizeof(struct ufs_hba));
if (!host) {
- dev_err(&pdev->dev, "scsi_host_alloc failed\n");
+ dev_err(dev, "scsi_host_alloc failed\n");
err = -ENOMEM;
- goto out_disable;
+ goto out_error;
}
hba = shost_priv(host);
-
- err = pci_request_regions(pdev, UFSHCD);
- if (err < 0) {
- dev_err(&pdev->dev, "request regions failed\n");
- goto out_host_put;
- }
-
- hba->mmio_base = pci_ioremap_bar(pdev, 0);
- if (!hba->mmio_base) {
- dev_err(&pdev->dev, "memory map failed\n");
- err = -ENOMEM;
- goto out_release_regions;
- }
-
hba->host = host;
- hba->pdev = pdev;
+ hba->dev = dev;
+ hba->mmio_base = mmio_base;
+ hba->irq = irq;
/* Read capabilities registers */
ufshcd_hba_capabilities(hba);
@@ -1857,17 +1645,11 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
/* Get UFS version supported by the controller */
hba->ufs_version = ufshcd_get_ufs_version(hba);
- err = ufshcd_set_dma_mask(hba);
- if (err) {
- dev_err(&pdev->dev, "set dma mask failed\n");
- goto out_iounmap;
- }
-
/* Allocate memory for host memory space */
err = ufshcd_memory_alloc(hba);
if (err) {
- dev_err(&pdev->dev, "Memory allocation failed\n");
- goto out_iounmap;
+ dev_err(hba->dev, "Memory allocation failed\n");
+ goto out_disable;
}
/* Configure LRB */
@@ -1889,76 +1671,50 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
INIT_WORK(&hba->feh_workq, ufshcd_fatal_err_handler);
/* IRQ registration */
- err = request_irq(pdev->irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
+ err = request_irq(irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
if (err) {
- dev_err(&pdev->dev, "request irq failed\n");
+ dev_err(hba->dev, "request irq failed\n");
goto out_lrb_free;
}
/* Enable SCSI tag mapping */
err = scsi_init_shared_tag_map(host, host->can_queue);
if (err) {
- dev_err(&pdev->dev, "init shared queue failed\n");
+ dev_err(hba->dev, "init shared queue failed\n");
goto out_free_irq;
}
- pci_set_drvdata(pdev, hba);
-
- err = scsi_add_host(host, &pdev->dev);
+ err = scsi_add_host(host, hba->dev);
if (err) {
- dev_err(&pdev->dev, "scsi_add_host failed\n");
+ dev_err(hba->dev, "scsi_add_host failed\n");
goto out_free_irq;
}
/* Initialization routine */
err = ufshcd_initialize_hba(hba);
if (err) {
- dev_err(&pdev->dev, "Initialization failed\n");
- goto out_free_irq;
+ dev_err(hba->dev, "Initialization failed\n");
+ goto out_remove_scsi_host;
}
+ *hba_handle = hba;
return 0;
+out_remove_scsi_host:
+ scsi_remove_host(hba->host);
out_free_irq:
- free_irq(pdev->irq, hba);
+ free_irq(irq, hba);
out_lrb_free:
ufshcd_free_hba_memory(hba);
-out_iounmap:
- iounmap(hba->mmio_base);
-out_release_regions:
- pci_release_regions(pdev);
-out_host_put:
- scsi_host_put(host);
out_disable:
- pci_clear_master(pdev);
- pci_disable_device(pdev);
+ scsi_host_put(host);
out_error:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_init);
-static DEFINE_PCI_DEVICE_TABLE(ufshcd_pci_tbl) = {
- { PCI_VENDOR_ID_SAMSUNG, 0xC00C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
- { } /* terminate list */
-};
-
-MODULE_DEVICE_TABLE(pci, ufshcd_pci_tbl);
-
-static struct pci_driver ufshcd_pci_driver = {
- .name = UFSHCD,
- .id_table = ufshcd_pci_tbl,
- .probe = ufshcd_probe,
- .remove = __devexit_p(ufshcd_remove),
- .shutdown = ufshcd_shutdown,
-#ifdef CONFIG_PM
- .suspend = ufshcd_suspend,
- .resume = ufshcd_resume,
-#endif
-};
-
-module_pci_driver(ufshcd_pci_driver);
-
-MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>, "
- "Vinayak Holikatti <h.vinayak@samsung.com>");
-MODULE_DESCRIPTION("Generic UFS host controller driver");
+MODULE_AUTHOR("Santosh Yaragnavi <santosh.sy@samsung.com>");
+MODULE_AUTHOR("Vinayak Holikatti <h.vinayak@samsung.com>");
+MODULE_DESCRIPTION("Generic UFS host controller driver Core");
MODULE_LICENSE("GPL");
MODULE_VERSION(UFSHCD_DRIVER_VERSION);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
new file mode 100644
index 000000000000..6b99a42f5819
--- /dev/null
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -0,0 +1,202 @@
+/*
+ * Universal Flash Storage Host controller driver
+ *
+ * This code is based on drivers/scsi/ufs/ufshcd.h
+ * Copyright (C) 2011-2013 Samsung India Software Operations
+ *
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
+ */
+
+#ifndef _UFSHCD_H
+#define _UFSHCD_H
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+#include <linux/bitops.h>
+#include <linux/pm_runtime.h>
+#include <linux/clk.h>
+
+#include <asm/irq.h>
+#include <asm/byteorder.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
+
+#include "ufs.h"
+#include "ufshci.h"
+
+#define UFSHCD "ufshcd"
+#define UFSHCD_DRIVER_VERSION "0.2"
+
+/**
+ * struct uic_command - UIC command structure
+ * @command: UIC command
+ * @argument1: UIC command argument 1
+ * @argument2: UIC command argument 2
+ * @argument3: UIC command argument 3
+ * @cmd_active: Indicate if UIC command is outstanding
+ * @result: UIC command result
+ */
+struct uic_command {
+ u32 command;
+ u32 argument1;
+ u32 argument2;
+ u32 argument3;
+ int cmd_active;
+ int result;
+};
+
+/**
+ * struct ufshcd_lrb - local reference block
+ * @utr_descriptor_ptr: UTRD address of the command
+ * @ucd_cmd_ptr: UCD address of the command
+ * @ucd_rsp_ptr: Response UPIU address for this command
+ * @ucd_prdt_ptr: PRDT address of the command
+ * @cmd: pointer to SCSI command
+ * @sense_buffer: pointer to sense buffer address of the SCSI command
+ * @sense_bufflen: Length of the sense buffer
+ * @scsi_status: SCSI status of the command
+ * @command_type: type of the command (SCSI, UFS or query)
+ * @task_tag: Task tag of the command
+ * @lun: LUN of the command
+ */
+struct ufshcd_lrb {
+ struct utp_transfer_req_desc *utr_descriptor_ptr;
+ struct utp_upiu_cmd *ucd_cmd_ptr;
+ struct utp_upiu_rsp *ucd_rsp_ptr;
+ struct ufshcd_sg_entry *ucd_prdt_ptr;
+
+ struct scsi_cmnd *cmd;
+ u8 *sense_buffer;
+ unsigned int sense_bufflen;
+ int scsi_status;
+
+ int command_type;
+ int task_tag;
+ unsigned int lun;
+};
+
+
+/**
+ * struct ufs_hba - per adapter private structure
+ * @mmio_base: UFSHCI base register address
+ * @ucdl_base_addr: UFS Command Descriptor base address
+ * @utrdl_base_addr: UTP Transfer Request Descriptor base address
+ * @utmrdl_base_addr: UTP Task Management Descriptor base address
+ * @ucdl_dma_addr: UFS Command Descriptor DMA address
+ * @utrdl_dma_addr: UTRDL DMA address
+ * @utmrdl_dma_addr: UTMRDL DMA address
+ * @host: Scsi_Host instance of the driver
+ * @dev: device handle
+ * @lrb: local reference block
+ * @outstanding_tasks: Bits representing outstanding task requests
+ * @outstanding_reqs: Bits representing outstanding transfer requests
+ * @capabilities: UFS Controller Capabilities
+ * @nutrs: Transfer Request Queue depth supported by controller
+ * @nutmrs: Task Management Queue depth supported by controller
+ * @ufs_version: UFS version the controller complies with
+ * @irq: IRQ number of the controller
+ * @active_uic_cmd: handle of active UIC command
+ * @ufshcd_tm_wait_queue: wait queue for task management
+ * @tm_condition: condition variable for task management
+ * @ufshcd_state: UFSHCD states
+ * @int_enable_mask: Interrupt Mask Bits
+ * @uic_workq: Work queue for UIC completion handling
+ * @feh_workq: Work queue for fatal controller error handling
+ * @errors: HBA errors
+ */
+struct ufs_hba {
+ void __iomem *mmio_base;
+
+ /* Virtual memory reference */
+ struct utp_transfer_cmd_desc *ucdl_base_addr;
+ struct utp_transfer_req_desc *utrdl_base_addr;
+ struct utp_task_req_desc *utmrdl_base_addr;
+
+ /* DMA memory reference */
+ dma_addr_t ucdl_dma_addr;
+ dma_addr_t utrdl_dma_addr;
+ dma_addr_t utmrdl_dma_addr;
+
+ struct Scsi_Host *host;
+ struct device *dev;
+
+ struct ufshcd_lrb *lrb;
+
+ unsigned long outstanding_tasks;
+ unsigned long outstanding_reqs;
+
+ u32 capabilities;
+ int nutrs;
+ int nutmrs;
+ u32 ufs_version;
+ unsigned int irq;
+
+ struct uic_command active_uic_cmd;
+ wait_queue_head_t ufshcd_tm_wait_queue;
+ unsigned long tm_condition;
+
+ u32 ufshcd_state;
+ u32 int_enable_mask;
+
+ /* Work Queues */
+ struct work_struct uic_workq;
+ struct work_struct feh_workq;
+
+ /* HBA Errors */
+ u32 errors;
+};
+
+int ufshcd_init(struct device *, struct ufs_hba **, void __iomem *,
+		unsigned int);
+void ufshcd_remove(struct ufs_hba *);
+
+/**
+ * ufshcd_hba_stop - send the controller to the reset state
+ * @hba: per adapter instance
+ */
+static inline void ufshcd_hba_stop(struct ufs_hba *hba)
+{
+	writel(CONTROLLER_DISABLE, hba->mmio_base + REG_CONTROLLER_ENABLE);
+}
+
+#endif /* _UFSHCD_H */
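
With this header in place the split is visible: the core exports ufshcd_init(),
ufshcd_remove() and the ufshcd_hba_stop() helper, so a bus glue layer only has
to hand the core a struct device, an ioremapped register window and an IRQ.
Below is a minimal sketch of a platform-bus glue built on those two entry
points; it is illustrative only (it assumes <linux/platform_device.h> plus
"ufshcd.h", and the ufs_pltfrm_* names are hypothetical, not part of this
patch):

static int ufs_pltfrm_probe(struct platform_device *pdev)
{
	struct ufs_hba *hba;
	struct resource *res;
	void __iomem *mmio;
	int irq, err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	mmio = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mmio)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* the core allocates the Scsi_Host and registers the adapter */
	err = ufshcd_init(&pdev->dev, &hba, mmio, irq);
	if (err)
		return err;

	platform_set_drvdata(pdev, hba);
	return 0;
}

static int ufs_pltfrm_remove(struct platform_device *pdev)
{
	ufshcd_remove(platform_get_drvdata(pdev));
	return 0;
}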
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index 6e3510f71167..0c164847a3ef 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -2,45 +2,35 @@
* Universal Flash Storage Host controller driver
*
* This code is based on drivers/scsi/ufs/ufshci.h
- * Copyright (C) 2011-2012 Samsung India Software Operations
+ * Copyright (C) 2011-2013 Samsung India Software Operations
*
- * Santosh Yaraganavi <santosh.sy@samsung.com>
- * Vinayak Holikatti <h.vinayak@samsung.com>
+ * Authors:
+ * Santosh Yaraganavi <santosh.sy@samsung.com>
+ * Vinayak Holikatti <h.vinayak@samsung.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
+ * See the COPYING file in the top-level directory or visit
+ * <http://www.gnu.org/licenses/gpl-2.0.html>
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * NO WARRANTY
- * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
- * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
- * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
- * solely responsible for determining the appropriateness of using and
- * distributing the Program and assumes all risks associated with its
- * exercise of rights under this Agreement, including but not limited to
- * the risks and costs of program errors, damage to or loss of data,
- * programs or equipment, and unavailability or interruption of operations.
-
- * DISCLAIMER OF LIABILITY
- * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
- * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
- * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
- * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
-
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
- * USA.
+ * This program is provided "AS IS" and "WITH ALL FAULTS" and
+ * without warranty of any kind. You are solely responsible for
+ * determining the appropriateness of using and distributing
+ * the program and assume all risks associated with your exercise
+ * of rights with respect to the program, including but not limited
+ * to infringement of third party rights, the risks and costs of
+ * program errors, damage to or loss of data, programs or equipment,
+ * and unavailability or interruption of operations. Under no
+ * circumstances will the contributor of this Program be liable for
+ * any damages of any kind arising from your use or distribution of
+ * this program.
*/
#ifndef _UFSHCI_H
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 595af1ae4421..3449a1f8c656 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -215,7 +215,7 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
static int virtscsi_kick_event(struct virtio_scsi *vscsi,
struct virtio_scsi_event_node *event_node)
{
- int ret;
+ int err;
struct scatterlist sg;
unsigned long flags;
@@ -223,13 +223,14 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
- ret = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node, GFP_ATOMIC);
- if (ret >= 0)
+ err = virtqueue_add_buf(vscsi->event_vq.vq, &sg, 0, 1, event_node,
+ GFP_ATOMIC);
+ if (!err)
virtqueue_kick(vscsi->event_vq.vq);
spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);
- return ret;
+ return err;
}
static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
@@ -410,22 +411,23 @@ static int virtscsi_kick_cmd(struct virtio_scsi_target_state *tgt,
{
unsigned int out_num, in_num;
unsigned long flags;
- int ret;
+ int err;
+ bool needs_kick = false;
spin_lock_irqsave(&tgt->tgt_lock, flags);
virtscsi_map_cmd(tgt, cmd, &out_num, &in_num, req_size, resp_size);
spin_lock(&vq->vq_lock);
- ret = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
+ err = virtqueue_add_buf(vq->vq, tgt->sg, out_num, in_num, cmd, gfp);
spin_unlock(&tgt->tgt_lock);
- if (ret >= 0)
- ret = virtqueue_kick_prepare(vq->vq);
+ if (!err)
+ needs_kick = virtqueue_kick_prepare(vq->vq);
spin_unlock_irqrestore(&vq->vq_lock, flags);
- if (ret > 0)
+ if (needs_kick)
virtqueue_notify(vq->vq);
- return ret;
+ return err;
}
static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
@@ -467,8 +469,10 @@ static int virtscsi_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
if (virtscsi_kick_cmd(tgt, &vscsi->req_vq, cmd,
sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
- GFP_ATOMIC) >= 0)
+ GFP_ATOMIC) == 0)
ret = 0;
+ else
+ mempool_free(cmd, virtscsi_cmd_pool);
out:
return ret;
@@ -675,7 +679,7 @@ out:
return err;
}
-static int __devinit virtscsi_probe(struct virtio_device *vdev)
+static int virtscsi_probe(struct virtio_device *vdev)
{
struct Scsi_Host *shost;
struct virtio_scsi *vscsi;
@@ -729,7 +733,7 @@ virtscsi_init_failed:
return err;
}
-static void __devexit virtscsi_remove(struct virtio_device *vdev)
+static void virtscsi_remove(struct virtio_device *vdev)
{
struct Scsi_Host *shost = virtio_scsi_host(vdev);
struct virtio_scsi *vscsi = shost_priv(shost);
@@ -781,7 +785,7 @@ static struct virtio_driver virtio_scsi_driver = {
.freeze = virtscsi_freeze,
.restore = virtscsi_restore,
#endif
- .remove = __devexit_p(virtscsi_remove),
+ .remove = virtscsi_remove,
};
static int __init init(void)
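
The pattern behind the virtio_scsi hunks above is worth spelling out:
virtqueue_add_buf() now returns 0 on success and a negative errno on failure
(previously a non-negative count), so success tests change from `ret >= 0` to
`!err`; and because virtqueue_kick_prepare() only records whether a
notification is needed, the actual virtqueue_notify() can run after the
spinlock is dropped. Condensed into one generic helper (names and lock
parameter are illustrative, not this driver's code):

static int kick_one_buf(struct virtqueue *vq, spinlock_t *lock,
			struct scatterlist *sg, unsigned int out_num,
			unsigned int in_num, void *data)
{
	unsigned long flags;
	bool needs_kick = false;
	int err;

	spin_lock_irqsave(lock, flags);
	/* 0 now means success; a negative errno means failure */
	err = virtqueue_add_buf(vq, sg, out_num, in_num, data, GFP_ATOMIC);
	if (!err)
		needs_kick = virtqueue_kick_prepare(vq);
	spin_unlock_irqrestore(lock, flags);

	/* the (potentially expensive) host notification runs unlocked */
	if (needs_kick)
		virtqueue_notify(vq);
	return err;
}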
diff --git a/drivers/scsi/vmw_pvscsi.c b/drivers/scsi/vmw_pvscsi.c
index 20b3a483c2cc..3bfaa66fa0d1 100644
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -397,7 +397,7 @@ static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}
-static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
+static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
&adapter->ringStatePA);
@@ -1152,7 +1152,7 @@ static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
* just use a statically allocated scatter list.
*
*/
-static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
+static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
struct pvscsi_ctx *ctx;
int i;
@@ -1233,8 +1233,7 @@ exit:
return numPhys;
}
-static int __devinit pvscsi_probe(struct pci_dev *pdev,
- const struct pci_device_id *id)
+static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct pvscsi_adapter *adapter;
struct Scsi_Host *host;
@@ -1454,7 +1453,7 @@ static struct pci_driver pvscsi_pci_driver = {
.name = "vmw_pvscsi",
.id_table = pvscsi_pci_tbl,
.probe = pvscsi_probe,
- .remove = __devexit_p(pvscsi_remove),
+ .remove = pvscsi_remove,
.shutdown = pvscsi_shutdown,
};
diff --git a/drivers/scsi/zalon.c b/drivers/scsi/zalon.c
index 27e84e4b1fa9..97ccb0383539 100644
--- a/drivers/scsi/zalon.c
+++ b/drivers/scsi/zalon.c
@@ -182,7 +182,7 @@ static struct parisc_driver zalon_driver = {
.name = "zalon",
.id_table = zalon_tbl,
.probe = zalon_probe,
- .remove = __devexit_p(zalon_remove),
+ .remove = zalon_remove,
};
static int __init zalon7xx_init(void)
diff --git a/drivers/scsi/zorro7xx.c b/drivers/scsi/zorro7xx.c
index e17764d71476..cbf3476c68cd 100644
--- a/drivers/scsi/zorro7xx.c
+++ b/drivers/scsi/zorro7xx.c
@@ -38,7 +38,7 @@ static struct zorro_driver_data {
const char *name;
unsigned long offset;
int absolute; /* offset is absolute address */
-} zorro7xx_driver_data[] __devinitdata = {
+} zorro7xx_driver_data[] = {
{ .name = "PowerUP 603e+", .offset = 0xf40000, .absolute = 1 },
{ .name = "WarpEngine 40xx", .offset = 0x40000 },
{ .name = "A4091", .offset = 0x800000 },
@@ -46,7 +46,7 @@ static struct zorro_driver_data {
{ 0 }
};
-static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
+static struct zorro_device_id zorro7xx_zorro_tbl[] = {
{
.id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS,
.driver_data = (unsigned long)&zorro7xx_driver_data[0],
@@ -71,8 +71,8 @@ static struct zorro_device_id zorro7xx_zorro_tbl[] __devinitdata = {
};
MODULE_DEVICE_TABLE(zorro, zorro7xx_zorro_tbl);
-static int __devinit zorro7xx_init_one(struct zorro_dev *z,
- const struct zorro_device_id *ent)
+static int zorro7xx_init_one(struct zorro_dev *z,
+ const struct zorro_device_id *ent)
{
struct Scsi_Host *host;
struct NCR_700_Host_Parameters *hostdata;
@@ -150,7 +150,7 @@ static int __devinit zorro7xx_init_one(struct zorro_dev *z,
return -ENODEV;
}
-static __devexit void zorro7xx_remove_one(struct zorro_dev *z)
+static void zorro7xx_remove_one(struct zorro_dev *z)
{
struct Scsi_Host *host = zorro_get_drvdata(z);
struct NCR_700_Host_Parameters *hostdata = shost_priv(host);
@@ -167,7 +167,7 @@ static struct zorro_driver zorro7xx_driver = {
.name = "zorro7xx-scsi",
.id_table = zorro7xx_zorro_tbl,
.probe = zorro7xx_init_one,
- .remove = __devexit_p(zorro7xx_remove_one),
+ .remove = zorro7xx_remove_one,
};
static int __init zorro7xx_scsi_init(void)
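
The zalon and zorro7xx hunks, like the vmw_pvscsi and virtio_scsi ones before
them, are the mechanical part of the CONFIG_HOTPLUG removal: with the
__devinit/__devexit section annotations gone, probe/remove callbacks and ID
tables live in ordinary sections and the __devexit_p() wrapper is no longer
needed. The conversion always has the same shape; a stub sketch follows
(assumes <linux/pci.h>, and the foo_* names are hypothetical):

/*
 * Before: probe/remove were tagged __devinit/__devexit so the kernel
 * could discard them when CONFIG_HOTPLUG was off, and .remove had to
 * go through __devexit_p(), which compiled to NULL in that case:
 *
 *	static int __devinit foo_probe(struct pci_dev *pdev,
 *				       const struct pci_device_id *id);
 *	static void __devexit foo_remove(struct pci_dev *pdev);
 *	.remove = __devexit_p(foo_remove),
 */

/* After: ordinary functions, referenced directly. */
static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return 0;	/* stub */
}

static void foo_remove(struct pci_dev *pdev)
{
}

static struct pci_driver foo_driver = {
	.name	= "foo",
	.probe	= foo_probe,
	.remove	= foo_remove,
};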