Diffstat (limited to 'drivers/scsi/lpfc/lpfc_init.c')
-rw-r--r--	drivers/scsi/lpfc/lpfc_init.c	512
1 file changed, 397 insertions, 115 deletions
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index eaaef682de25..6d6b14295734 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -72,7 +72,7 @@ unsigned long _dump_buf_dif_order;
spinlock_t _dump_buf_lock;
/* Used when mapping IRQ vectors in a driver centric manner */
-uint32_t lpfc_present_cpu;
+static uint32_t lpfc_present_cpu;
static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
static int lpfc_post_rcv_buf(struct lpfc_hba *);
@@ -93,8 +93,8 @@ static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
static void lpfc_sli4_disable_intr(struct lpfc_hba *);
static uint32_t lpfc_sli4_enable_intr(struct lpfc_hba *, uint32_t);
static void lpfc_sli4_oas_verify(struct lpfc_hba *phba);
-static uint16_t lpfc_find_eq_handle(struct lpfc_hba *, uint16_t);
static uint16_t lpfc_find_cpu_handle(struct lpfc_hba *, uint16_t, int);
+static void lpfc_setup_bg(struct lpfc_hba *, struct Scsi_Host *);
static struct scsi_transport_template *lpfc_transport_template = NULL;
static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -1274,8 +1274,10 @@ lpfc_hb_eq_delay_work(struct work_struct *work)
if (!eqcnt)
goto requeue;
+ /* Loop thru all IRQ vectors */
for (i = 0; i < phba->cfg_irq_chann; i++) {
- eq = phba->sli4_hba.hdwq[i].hba_eq;
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[i].eq;
if (eq && eqcnt[eq->last_cpu] < 2)
eqcnt[eq->last_cpu]++;
continue;
@@ -4114,14 +4116,13 @@ lpfc_new_io_buf(struct lpfc_hba *phba, int num_to_alloc)
* pci bus space for an I/O. The DMA buffer includes the
* number of SGE's necessary to support the sg_tablesize.
*/
- lpfc_ncmd->data = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
- GFP_KERNEL,
- &lpfc_ncmd->dma_handle);
+ lpfc_ncmd->data = dma_pool_zalloc(phba->lpfc_sg_dma_buf_pool,
+ GFP_KERNEL,
+ &lpfc_ncmd->dma_handle);
if (!lpfc_ncmd->data) {
kfree(lpfc_ncmd);
break;
}
- memset(lpfc_ncmd->data, 0, phba->cfg_sg_dma_buf_size);
/*
* 4K Page alignment is CRITICAL to BlockGuard, double check
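The hunk above swaps a dma_pool_alloc() + memset() pair for dma_pool_zalloc(), which hands back memory that is already cleared. A minimal kernel-context sketch of that pattern (not a standalone module; buf, pool, dma_handle and buf_size are illustrative names, not the driver's):

	/* before: allocate, then zero by hand */
	buf = dma_pool_alloc(pool, GFP_KERNEL, &dma_handle);
	if (buf)
		memset(buf, 0, buf_size);

	/* after: a single call returns zeroed memory */
	buf = dma_pool_zalloc(pool, GFP_KERNEL, &dma_handle);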
@@ -4347,6 +4348,9 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0);
+ if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+ lpfc_setup_bg(phba, shost);
+
error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
if (error)
goto out_put_shost;
@@ -5055,7 +5059,7 @@ lpfc_update_trunk_link_status(struct lpfc_hba *phba,
bf_get(lpfc_acqe_fc_la_speed, acqe_fc));
phba->sli4_hba.link_state.logical_speed =
- bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc);
+ bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
/* We got FC link speed, convert to fc_linkspeed (READ_TOPOLOGY) */
phba->fc_linkspeed =
lpfc_async_link_speed_to_read_top(
@@ -5158,8 +5162,14 @@ lpfc_sli4_async_fc_evt(struct lpfc_hba *phba, struct lpfc_acqe_fc_la *acqe_fc)
bf_get(lpfc_acqe_fc_la_port_number, acqe_fc);
phba->sli4_hba.link_state.fault =
bf_get(lpfc_acqe_link_fault, acqe_fc);
- phba->sli4_hba.link_state.logical_speed =
+
+ if (bf_get(lpfc_acqe_fc_la_att_type, acqe_fc) ==
+ LPFC_FC_LA_TYPE_LINK_DOWN)
+ phba->sli4_hba.link_state.logical_speed = 0;
+ else if (!phba->sli4_hba.conf_trunk)
+ phba->sli4_hba.link_state.logical_speed =
bf_get(lpfc_acqe_fc_la_llink_spd, acqe_fc) * 10;
+
lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
"2896 Async FC event - Speed:%dGBaud Topology:x%x "
"LA Type:x%x Port Type:%d Port Number:%d Logical speed:"
@@ -6551,6 +6561,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
spin_lock_init(&phba->sli4_hba.abts_nvmet_buf_list_lock);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvmet_ctx_list);
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_nvmet_io_wait_list);
+ spin_lock_init(&phba->sli4_hba.t_active_list_lock);
+ INIT_LIST_HEAD(&phba->sli4_hba.t_active_ctx_list);
}
/* This abort list used by worker thread */
@@ -7660,8 +7672,6 @@ lpfc_post_init_setup(struct lpfc_hba *phba)
*/
shost = pci_get_drvdata(phba->pcidev);
shost->can_queue = phba->cfg_hba_queue_depth - 10;
- if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
- lpfc_setup_bg(phba, shost);
lpfc_host_attrib_init(shost);
@@ -8740,8 +8750,10 @@ int
lpfc_sli4_queue_create(struct lpfc_hba *phba)
{
struct lpfc_queue *qdesc;
- int idx, eqidx, cpu;
+ int idx, cpu, eqcpu;
struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_vector_map_info *cpup;
+ struct lpfc_vector_map_info *eqcpup;
struct lpfc_eq_intr_info *eqi;
/*
@@ -8826,40 +8838,60 @@ lpfc_sli4_queue_create(struct lpfc_hba *phba)
INIT_LIST_HEAD(&phba->sli4_hba.lpfc_wq_list);
/* Create HBA Event Queues (EQs) */
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- /* determine EQ affinity */
- eqidx = lpfc_find_eq_handle(phba, idx);
- cpu = lpfc_find_cpu_handle(phba, eqidx, LPFC_FIND_BY_EQ);
- /*
- * If there are more Hardware Queues than available
- * EQs, multiple Hardware Queues may share a common EQ.
+ for_each_present_cpu(cpu) {
+ /* We only want to create 1 EQ per vector, even though
* multiple CPUs might be using that vector, so only
* select the CPUs that are LPFC_CPU_FIRST_IRQ.
*/
- if (idx >= phba->cfg_irq_chann) {
- /* Share an existing EQ */
- phba->sli4_hba.hdwq[idx].hba_eq =
- phba->sli4_hba.hdwq[eqidx].hba_eq;
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
continue;
- }
- /* Create an EQ */
+
+ /* Get a ptr to the Hardware Queue associated with this CPU */
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+
+ /* Allocate an EQ */
qdesc = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
phba->sli4_hba.eq_esize,
phba->sli4_hba.eq_ecount, cpu);
if (!qdesc) {
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0497 Failed allocate EQ (%d)\n", idx);
+ "0497 Failed allocate EQ (%d)\n",
+ cpup->hdwq);
goto out_error;
}
qdesc->qe_valid = 1;
- qdesc->hdwq = idx;
-
- /* Save the CPU this EQ is affinitised to */
- qdesc->chann = cpu;
- phba->sli4_hba.hdwq[idx].hba_eq = qdesc;
+ qdesc->hdwq = cpup->hdwq;
+ qdesc->chann = cpu; /* First CPU this EQ is affinitised to */
qdesc->last_cpu = qdesc->chann;
+
+ /* Save the allocated EQ in the Hardware Queue */
+ qp->hba_eq = qdesc;
+
eqi = per_cpu_ptr(phba->sli4_hba.eq_info, qdesc->last_cpu);
list_add(&qdesc->cpu_list, &eqi->list);
}
+ /* Now we need to populate the other Hardware Queues that share
* an IRQ vector with the associated EQ ptr.
+ */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Check for EQ already allocated in previous loop */
+ if (cpup->flag & LPFC_CPU_FIRST_IRQ)
+ continue;
+
+ /* Check for multiple CPUs per hdwq */
+ qp = &phba->sli4_hba.hdwq[cpup->hdwq];
+ if (qp->hba_eq)
+ continue;
+
+ /* We need to share an EQ for this hdwq */
+ eqcpu = lpfc_find_cpu_handle(phba, cpup->eq, LPFC_FIND_BY_EQ);
+ eqcpup = &phba->sli4_hba.cpu_map[eqcpu];
+ qp->hba_eq = phba->sli4_hba.hdwq[eqcpup->hdwq].hba_eq;
+ }
/* Allocate SCSI SLI4 CQ/WQs */
for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
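The queue-creation rework above runs two passes over the present CPUs: the first allocates one EQ per IRQ vector (on the CPU flagged LPFC_CPU_FIRST_IRQ) and parks it in that CPU's hardware queue; the second points every remaining hardware queue at the EQ already owned by the vector its CPU shares. A toy userspace model of that two-pass sharing, with a made-up 8-CPU / 4-vector topology and struct names that only loosely mirror the driver's:

	#include <stdio.h>

	#define NCPU      8
	#define NHDWQ     8
	#define FIRST_IRQ 0x1

	struct cpu_map { int eq; int hdwq; int flag; };
	struct hw_queue { int eq_id; };	/* stand-in for qp->hba_eq */

	int main(void)
	{
		/* 4 IRQ vectors across 8 CPUs: CPUs 0-3 are FIRST_IRQ for
		 * vectors 0-3, CPUs 4-7 share those vectors but own
		 * hardware queues 4-7.
		 */
		struct cpu_map map[NCPU] = {
			{0, 0, FIRST_IRQ}, {1, 1, FIRST_IRQ},
			{2, 2, FIRST_IRQ}, {3, 3, FIRST_IRQ},
			{0, 4, 0}, {1, 5, 0}, {2, 6, 0}, {3, 7, 0},
		};
		struct hw_queue hdwq[NHDWQ];
		int cpu, i;

		for (i = 0; i < NHDWQ; i++)
			hdwq[i].eq_id = -1;

		/* Pass 1: one EQ per vector, owned by the FIRST_IRQ CPU's hdwq */
		for (cpu = 0; cpu < NCPU; cpu++)
			if (map[cpu].flag & FIRST_IRQ)
				hdwq[map[cpu].hdwq].eq_id = map[cpu].eq;

		/* Pass 2: any hdwq still without an EQ shares the EQ of the
		 * vector its CPU is affinitized to.
		 */
		for (cpu = 0; cpu < NCPU; cpu++) {
			if (map[cpu].flag & FIRST_IRQ)
				continue;
			if (hdwq[map[cpu].hdwq].eq_id == -1)
				hdwq[map[cpu].hdwq].eq_id = map[cpu].eq;
		}

		for (i = 0; i < NHDWQ; i++)
			printf("hdwq %d -> eq %d\n", i, hdwq[i].eq_id);
		return 0;
	}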
@@ -9122,23 +9154,31 @@ static inline void
lpfc_sli4_release_hdwq(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *hdwq;
+ struct lpfc_queue *eq;
uint32_t idx;
hdwq = phba->sli4_hba.hdwq;
- for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
- if (idx < phba->cfg_irq_chann)
- lpfc_sli4_queue_free(hdwq[idx].hba_eq);
- hdwq[idx].hba_eq = NULL;
+ /* Loop thru all Hardware Queues */
+ for (idx = 0; idx < phba->cfg_hdw_queue; idx++) {
+ /* Free the CQ/WQ corresponding to the Hardware Queue */
lpfc_sli4_queue_free(hdwq[idx].fcp_cq);
lpfc_sli4_queue_free(hdwq[idx].nvme_cq);
lpfc_sli4_queue_free(hdwq[idx].fcp_wq);
lpfc_sli4_queue_free(hdwq[idx].nvme_wq);
+ hdwq[idx].hba_eq = NULL;
hdwq[idx].fcp_cq = NULL;
hdwq[idx].nvme_cq = NULL;
hdwq[idx].fcp_wq = NULL;
hdwq[idx].nvme_wq = NULL;
}
+ /* Loop thru all IRQ vectors */
+ for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ /* Free the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[idx].eq;
+ lpfc_sli4_queue_free(eq);
+ phba->sli4_hba.hba_eq_hdl[idx].eq = NULL;
+ }
}
/**
@@ -9316,16 +9356,17 @@ static void
lpfc_setup_cq_lookup(struct lpfc_hba *phba)
{
struct lpfc_queue *eq, *childq;
- struct lpfc_sli4_hdw_queue *qp;
int qidx;
- qp = phba->sli4_hba.hdwq;
memset(phba->sli4_hba.cq_lookup, 0,
(sizeof(struct lpfc_queue *) * (phba->sli4_hba.cq_max + 1)));
+ /* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
- eq = qp[qidx].hba_eq;
+ /* Get the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
if (!eq)
continue;
+ /* Loop through all CQs associated with that EQ */
list_for_each_entry(childq, &eq->child_list, list) {
if (childq->queue_id > phba->sli4_hba.cq_max)
continue;
@@ -9354,9 +9395,10 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
{
uint32_t shdr_status, shdr_add_status;
union lpfc_sli4_cfg_shdr *shdr;
+ struct lpfc_vector_map_info *cpup;
struct lpfc_sli4_hdw_queue *qp;
LPFC_MBOXQ_t *mboxq;
- int qidx;
+ int qidx, cpu;
uint32_t length, usdelay;
int rc = -ENOMEM;
@@ -9417,32 +9459,55 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
rc = -ENOMEM;
goto out_error;
}
+
+ /* Loop thru all IRQ vectors */
for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
- if (!qp[qidx].hba_eq) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0522 Fast-path EQ (%d) not "
- "allocated\n", qidx);
- rc = -ENOMEM;
- goto out_destroy;
- }
- rc = lpfc_eq_create(phba, qp[qidx].hba_eq,
- phba->cfg_fcp_imax);
- if (rc) {
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
- "0523 Failed setup of fast-path EQ "
- "(%d), rc = 0x%x\n", qidx,
- (uint32_t)rc);
- goto out_destroy;
+ /* Create HBA Event Queues (EQs) in order */
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Look for the CPU that's using that vector with
+ * LPFC_CPU_FIRST_IRQ set.
+ */
+ if (!(cpup->flag & LPFC_CPU_FIRST_IRQ))
+ continue;
+ if (qidx != cpup->eq)
+ continue;
+
+ /* Create an EQ for that vector */
+ rc = lpfc_eq_create(phba, qp[cpup->hdwq].hba_eq,
+ phba->cfg_fcp_imax);
+ if (rc) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "0523 Failed setup of fast-path"
+ " EQ (%d), rc = 0x%x\n",
+ cpup->eq, (uint32_t)rc);
+ goto out_destroy;
+ }
+
+ /* Save the EQ for that vector in the hba_eq_hdl */
+ phba->sli4_hba.hba_eq_hdl[cpup->eq].eq =
+ qp[cpup->hdwq].hba_eq;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "2584 HBA EQ setup: queue[%d]-id=%d\n",
+ cpup->eq,
+ qp[cpup->hdwq].hba_eq->queue_id);
}
- lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
- "2584 HBA EQ setup: queue[%d]-id=%d\n", qidx,
- qp[qidx].hba_eq->queue_id);
}
+ /* Loop thru all Hardware Queues */
if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ cpu = lpfc_find_cpu_handle(phba, qidx,
+ LPFC_FIND_BY_HDWQ);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Create the CQ/WQ corresponding to the
+ * Hardware Queue
+ */
rc = lpfc_create_wq_cq(phba,
- qp[qidx].hba_eq,
+ phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
qp[qidx].nvme_cq,
qp[qidx].nvme_wq,
&phba->sli4_hba.hdwq[qidx].nvme_cq_map,
@@ -9458,8 +9523,12 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
}
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ cpu = lpfc_find_cpu_handle(phba, qidx, LPFC_FIND_BY_HDWQ);
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Create the CQ/WQ corresponding to the Hardware Queue */
rc = lpfc_create_wq_cq(phba,
- qp[qidx].hba_eq,
+ phba->sli4_hba.hdwq[cpup->hdwq].hba_eq,
qp[qidx].fcp_cq,
qp[qidx].fcp_wq,
&phba->sli4_hba.hdwq[qidx].fcp_cq_map,
@@ -9711,6 +9780,7 @@ void
lpfc_sli4_queue_unset(struct lpfc_hba *phba)
{
struct lpfc_sli4_hdw_queue *qp;
+ struct lpfc_queue *eq;
int qidx;
/* Unset mailbox command work queue */
@@ -9762,14 +9832,20 @@ lpfc_sli4_queue_unset(struct lpfc_hba *phba)
/* Unset fast-path SLI4 queues */
if (phba->sli4_hba.hdwq) {
+ /* Loop thru all Hardware Queues */
for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
+ /* Destroy the CQ/WQ corresponding to Hardware Queue */
qp = &phba->sli4_hba.hdwq[qidx];
lpfc_wq_destroy(phba, qp->fcp_wq);
lpfc_wq_destroy(phba, qp->nvme_wq);
lpfc_cq_destroy(phba, qp->fcp_cq);
lpfc_cq_destroy(phba, qp->nvme_cq);
- if (qidx < phba->cfg_irq_chann)
- lpfc_eq_destroy(phba, qp->hba_eq);
+ }
+ /* Loop thru all IRQ vectors */
+ for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
+ /* Destroy the EQ corresponding to the IRQ vector */
+ eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
+ lpfc_eq_destroy(phba, eq);
}
}
@@ -10559,11 +10635,12 @@ lpfc_sli_disable_intr(struct lpfc_hba *phba)
}
/**
- * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified EQ
+ * lpfc_find_cpu_handle - Find the CPU that corresponds to the specified Queue
* @phba: pointer to lpfc hba data structure.
* @id: EQ vector index or Hardware Queue index
* @match: LPFC_FIND_BY_EQ = match by EQ
* LPFC_FIND_BY_HDWQ = match by Hardware Queue
+ * Return the CPU that matches the selection criteria
*/
static uint16_t
lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
@@ -10571,40 +10648,27 @@ lpfc_find_cpu_handle(struct lpfc_hba *phba, uint16_t id, int match)
struct lpfc_vector_map_info *cpup;
int cpu;
- /* Find the desired phys_id for the specified EQ */
+ /* Loop through all CPUs */
for_each_present_cpu(cpu) {
cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* If we are matching by EQ, there may be multiple CPUs
* using the same vector, so select the one with
+ * LPFC_CPU_FIRST_IRQ set.
+ */
if ((match == LPFC_FIND_BY_EQ) &&
+ (cpup->flag & LPFC_CPU_FIRST_IRQ) &&
(cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
(cpup->eq == id))
return cpu;
+
+ /* If matching by HDWQ, select the first CPU that matches */
if ((match == LPFC_FIND_BY_HDWQ) && (cpup->hdwq == id))
return cpu;
}
return 0;
}
-/**
- * lpfc_find_eq_handle - Find the EQ that corresponds to the specified
- * Hardware Queue
- * @phba: pointer to lpfc hba data structure.
- * @hdwq: Hardware Queue index
- */
-static uint16_t
-lpfc_find_eq_handle(struct lpfc_hba *phba, uint16_t hdwq)
-{
- struct lpfc_vector_map_info *cpup;
- int cpu;
-
- /* Find the desired phys_id for the specified EQ */
- for_each_present_cpu(cpu) {
- cpup = &phba->sli4_hba.cpu_map[cpu];
- if (cpup->hdwq == hdwq)
- return cpup->eq;
- }
- return 0;
-}
-
#ifdef CONFIG_X86
/**
* lpfc_find_hyper - Determine if the CPU map entry is hyper-threaded
@@ -10645,24 +10709,31 @@ lpfc_find_hyper(struct lpfc_hba *phba, int cpu,
static void
lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
{
- int i, cpu, idx;
+ int i, cpu, idx, new_cpu, start_cpu, first_cpu;
int max_phys_id, min_phys_id;
int max_core_id, min_core_id;
struct lpfc_vector_map_info *cpup;
+ struct lpfc_vector_map_info *new_cpup;
const struct cpumask *maskp;
#ifdef CONFIG_X86
struct cpuinfo_x86 *cpuinfo;
#endif
/* Init cpu_map array */
- memset(phba->sli4_hba.cpu_map, 0xff,
- (sizeof(struct lpfc_vector_map_info) *
- phba->sli4_hba.num_possible_cpu));
+ for_each_possible_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ cpup->phys_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->core_id = LPFC_VECTOR_MAP_EMPTY;
+ cpup->hdwq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->eq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->irq = LPFC_VECTOR_MAP_EMPTY;
+ cpup->flag = 0;
+ }
max_phys_id = 0;
- min_phys_id = 0xffff;
+ min_phys_id = LPFC_VECTOR_MAP_EMPTY;
max_core_id = 0;
- min_core_id = 0xffff;
+ min_core_id = LPFC_VECTOR_MAP_EMPTY;
/* Update CPU map with physical id and core id of each CPU */
for_each_present_cpu(cpu) {
@@ -10671,13 +10742,12 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
cpuinfo = &cpu_data(cpu);
cpup->phys_id = cpuinfo->phys_proc_id;
cpup->core_id = cpuinfo->cpu_core_id;
- cpup->hyper = lpfc_find_hyper(phba, cpu,
- cpup->phys_id, cpup->core_id);
+ if (lpfc_find_hyper(phba, cpu, cpup->phys_id, cpup->core_id))
+ cpup->flag |= LPFC_CPU_MAP_HYPER;
#else
/* No distinction between CPUs for other platforms */
cpup->phys_id = 0;
cpup->core_id = cpu;
- cpup->hyper = 0;
#endif
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -10703,23 +10773,216 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
eqi->icnt = 0;
}
+ /* This loop sets up all CPUs that are affinitized with an
* IRQ vector assigned to the driver. All affinitized CPUs
* will get a link to that vector's IRQ and EQ.
+ */
for (idx = 0; idx < phba->cfg_irq_chann; idx++) {
+ /* Get a CPU mask for all CPUs affinitized to this vector */
maskp = pci_irq_get_affinity(phba->pcidev, idx);
if (!maskp)
continue;
+ i = 0;
+ /* Loop through all CPUs associated with vector idx */
for_each_cpu_and(cpu, maskp, cpu_present_mask) {
+ /* Set the EQ index and IRQ for that vector */
cpup = &phba->sli4_hba.cpu_map[cpu];
cpup->eq = idx;
- cpup->hdwq = idx;
cpup->irq = pci_irq_vector(phba->pcidev, idx);
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"3336 Set Affinity: CPU %d "
- "hdwq %d irq %d\n",
- cpu, cpup->hdwq, cpup->irq);
+ "irq %d eq %d\n",
+ cpu, cpup->irq, cpup->eq);
+
+ /* If this is the first CPU that's assigned to this
+ * vector, set LPFC_CPU_FIRST_IRQ.
+ */
+ if (!i)
+ cpup->flag |= LPFC_CPU_FIRST_IRQ;
+ i++;
}
}
+
+ /* After looking at each irq vector assigned to this pcidev, it's
* possible to see that not ALL CPUs have been accounted for.
* Next we will set any unassigned (unaffinitized) cpu map
* entries to an IRQ on the same phys_id.
+ */
+ first_cpu = cpumask_first(cpu_present_mask);
+ start_cpu = first_cpu;
+
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Is this CPU entry unassigned */
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+ /* Mark CPU as IRQ not assigned by the kernel */
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+ /* If so, find a new_cpup that's on the SAME
* phys_id as cpup. start_cpu will start where we
* left off so all unassigned entries don't get assigned
+ * the IRQ of the first entry.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+ (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id))
+ goto found_same;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+ /* At this point, we leave the CPU as unassigned */
+ continue;
+found_same:
+ /* We found a matching phys_id, so copy the IRQ info */
+ cpup->eq = new_cpup->eq;
+ cpup->irq = new_cpup->irq;
+
+ /* Bump start_cpu to the next slot to minimize the
+ * chance of having multiple unassigned CPU entries
+ * selecting the same IRQ.
+ */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3337 Set Affinity: CPU %d "
+ "irq %d from id %d same "
+ "phys_id (%d)\n",
+ cpu, cpup->irq, new_cpu, cpup->phys_id);
+ }
+ }
+
+ /* Set any unassigned cpu map entries to an IRQ on any phys_id */
+ start_cpu = first_cpu;
+
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+
+ /* Is this entry unassigned */
+ if (cpup->eq == LPFC_VECTOR_MAP_EMPTY) {
+ /* Mark it as IRQ not assigned by the kernel */
+ cpup->flag |= LPFC_CPU_MAP_UNASSIGN;
+
+ /* If so, find a new_cpup that's on ANY phys_id
+ * as the cpup. start_cpu will start where we
+ * left off so all unassigned entries don't get
+ * assigned the IRQ of the first entry.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
+ (new_cpup->irq != LPFC_VECTOR_MAP_EMPTY))
+ goto found_any;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+ /* We should never leave an entry unassigned */
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3339 Set Affinity: CPU %d "
+ "irq %d UNASSIGNED\n",
+ cpup->hdwq, cpup->irq);
+ continue;
+found_any:
+ /* We found an available entry, copy the IRQ info */
+ cpup->eq = new_cpup->eq;
+ cpup->irq = new_cpup->irq;
+
+ /* Bump start_cpu to the next slot to minimize the
+ * chance of having multiple unassigned CPU entries
+ * selecting the same IRQ.
+ */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+ "3338 Set Affinity: CPU %d "
+ "irq %d from id %d (%d/%d)\n",
+ cpu, cpup->irq, new_cpu,
+ new_cpup->phys_id, new_cpup->core_id);
+ }
+ }
+
+ /* Finally we need to associate a hdwq with each cpu_map entry.
* This will be 1 to 1 - hdwq to cpu, unless there are fewer
* hardware queues than CPUs. For that case we will just round-robin
+ * the available hardware queues as they get assigned to CPUs.
+ */
+ idx = 0;
+ start_cpu = 0;
+ for_each_present_cpu(cpu) {
+ cpup = &phba->sli4_hba.cpu_map[cpu];
+ if (idx >= phba->cfg_hdw_queue) {
+ /* We need to reuse a Hardware Queue for another CPU,
+ * so be smart about it and pick one that has its
* IRQ/EQ mapped to the same phys_id (CPU package)
* and core_id.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id) &&
+ (new_cpup->core_id == cpup->core_id))
+ goto found_hdwq;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+
+ /* If we can't match both phys_id and core_id,
+ * settle for just a phys_id match.
+ */
+ new_cpu = start_cpu;
+ for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) {
+ new_cpup = &phba->sli4_hba.cpu_map[new_cpu];
+ if ((new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY) &&
+ (new_cpup->phys_id == cpup->phys_id))
+ goto found_hdwq;
+ new_cpu = cpumask_next(
+ new_cpu, cpu_present_mask);
+ if (new_cpu == nr_cpumask_bits)
+ new_cpu = first_cpu;
+ }
+
+ /* Otherwise just round robin on cfg_hdw_queue */
+ cpup->hdwq = idx % phba->cfg_hdw_queue;
+ goto logit;
+found_hdwq:
+ /* We found an available entry, copy its hdwq */
+ start_cpu = cpumask_next(new_cpu, cpu_present_mask);
+ if (start_cpu == nr_cpumask_bits)
+ start_cpu = first_cpu;
+ cpup->hdwq = new_cpup->hdwq;
+ } else {
+ /* 1 to 1, CPU to hdwq */
+ cpup->hdwq = idx;
+ }
+logit:
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "3335 Set Affinity: CPU %d (phys %d core %d): "
+ "hdwq %d eq %d irq %d flg x%x\n",
+ cpu, cpup->phys_id, cpup->core_id,
+ cpup->hdwq, cpup->eq, cpup->irq, cpup->flag);
+ idx++;
+ }
+
+ /* The cpu_map array will be used later during initialization
+ * when EQ / CQ / WQs are allocated and configured.
+ */
return;
}
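The rewritten lpfc_cpu_affinity_check() above fills in CPUs that the kernel left without IRQ affinity by borrowing the IRQ/EQ of an already-assigned CPU, preferring a donor on the same phys_id before accepting any assigned CPU (the driver also rotates a start_cpu cursor so donors are spread around, which this sketch omits). A small userspace model of that fallback, using an invented two-package topology and field names that only loosely mirror the driver's cpu_map:

	#include <stdio.h>

	#define NCPU  6
	#define EMPTY (-1)

	struct cpu_map { int phys_id; int eq; int irq; };

	static int find_donor(struct cpu_map *map, int ncpu, int phys_id)
	{
		int i;

		/* Prefer a donor on the same package (phys_id) */
		for (i = 0; i < ncpu; i++)
			if (map[i].irq != EMPTY && map[i].phys_id == phys_id)
				return i;
		/* Otherwise accept any CPU that already has an IRQ/EQ */
		for (i = 0; i < ncpu; i++)
			if (map[i].irq != EMPTY)
				return i;
		return -1;
	}

	int main(void)
	{
		/* Two packages; only CPUs 0 and 3 got IRQ affinity from the kernel */
		struct cpu_map map[NCPU] = {
			{0, 0, 40}, {0, EMPTY, EMPTY}, {0, EMPTY, EMPTY},
			{1, 1, 41}, {1, EMPTY, EMPTY}, {1, EMPTY, EMPTY},
		};
		int cpu, donor;

		for (cpu = 0; cpu < NCPU; cpu++) {
			if (map[cpu].eq != EMPTY)
				continue;
			donor = find_donor(map, NCPU, map[cpu].phys_id);
			if (donor < 0)
				continue;	/* left unassigned, as the driver warns */
			map[cpu].eq  = map[donor].eq;
			map[cpu].irq = map[donor].irq;
			printf("cpu %d borrows irq %d eq %d from cpu %d\n",
			       cpu, map[cpu].irq, map[cpu].eq, donor);
		}
		return 0;
	}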
@@ -11331,24 +11594,43 @@ lpfc_get_sli4_parameters(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
mbx_sli4_parameters);
phba->sli4_hba.extents_in_use = bf_get(cfg_ext, mbx_sli4_parameters);
phba->sli4_hba.rpi_hdrs_in_use = bf_get(cfg_hdrr, mbx_sli4_parameters);
- phba->nvme_support = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
- bf_get(cfg_xib, mbx_sli4_parameters));
-
- if ((phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) ||
- !phba->nvme_support) {
- phba->nvme_support = 0;
- phba->nvmet_support = 0;
- phba->cfg_nvmet_mrq = 0;
- lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
- "6101 Disabling NVME support: "
- "Not supported by firmware: %d %d\n",
- bf_get(cfg_nvme, mbx_sli4_parameters),
- bf_get(cfg_xib, mbx_sli4_parameters));
-
- /* If firmware doesn't support NVME, just use SCSI support */
- if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
- return -ENODEV;
- phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+
+ /* Check for firmware nvme support */
+ rc = (bf_get(cfg_nvme, mbx_sli4_parameters) &&
+ bf_get(cfg_xib, mbx_sli4_parameters));
+
+ if (rc) {
+ /* Save this to indicate the Firmware supports NVME */
+ sli4_params->nvme = 1;
+
+ /* Firmware NVME support, check driver FC4 NVME support */
+ if (phba->cfg_enable_fc4_type == LPFC_ENABLE_FCP) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_NVME,
+ "6133 Disabling NVME support: "
+ "FC4 type not supported: x%x\n",
+ phba->cfg_enable_fc4_type);
+ goto fcponly;
+ }
+ } else {
+ /* No firmware NVME support, check driver FC4 NVME support */
+ sli4_params->nvme = 0;
+ if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_NVME,
+ "6101 Disabling NVME support: Not "
+ "supported by firmware (%d %d) x%x\n",
+ bf_get(cfg_nvme, mbx_sli4_parameters),
+ bf_get(cfg_xib, mbx_sli4_parameters),
+ phba->cfg_enable_fc4_type);
+fcponly:
+ phba->nvme_support = 0;
+ phba->nvmet_support = 0;
+ phba->cfg_nvmet_mrq = 0;
+
+ /* If no FC4 type support, move to just SCSI support */
+ if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP))
+ return -ENODEV;
+ phba->cfg_enable_fc4_type = LPFC_ENABLE_FCP;
+ }
}
/* Only embed PBDE for if_type 6, PBDE support requires xib be set */
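The reworked check above derives firmware NVME support from cfg_nvme && cfg_xib, drops NVME when the driver is configured FCP-only, and falls back to SCSI-only (or fails with -ENODEV if FCP isn't enabled either) when firmware lacks NVME. A standalone sketch of just that decision, using made-up flag values rather than the driver's defines:

	#include <stdio.h>
	#include <stdbool.h>

	#define ENABLE_FCP  0x1
	#define ENABLE_NVME 0x2

	/* Returns the resulting FC4 type mask, or -1 for the -ENODEV case */
	static int resolve_fc4(bool cfg_nvme, bool cfg_xib, int fc4_type)
	{
		bool fw_nvme = cfg_nvme && cfg_xib;

		if (fw_nvme && fc4_type != ENABLE_FCP)
			return fc4_type;	/* keep NVME (and FCP if set) */
		if (!(fc4_type & ENABLE_FCP))
			return -1;		/* nothing left to enable */
		return ENABLE_FCP;		/* fall back to SCSI only */
	}

	int main(void)
	{
		printf("%d\n", resolve_fc4(true,  true,  ENABLE_FCP | ENABLE_NVME));
		printf("%d\n", resolve_fc4(true,  false, ENABLE_FCP | ENABLE_NVME));
		printf("%d\n", resolve_fc4(false, true,  ENABLE_NVME));
		return 0;
	}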