
[SCSI] qla2xxx: Code changes to support new dynamic logging infrastructure.

The code is changed to support the new dynamic logging infrastructure.
The following levels are added:
Default is 0 - no logging.  0x40000000 - Module Init & Probe.
0x20000000 - Mailbox Cmnds. 0x10000000 - Device Discovery.
0x08000000 - IO tracing.    0x04000000 - DPC Thread.
0x02000000 - Async events.  0x01000000 - Timer routines.
0x00800000 - User space.    0x00400000 - Task Management.
0x00200000 - AER/EEH.       0x00100000 - Multi Q.
0x00080000 - P3P Specific.  0x00040000 - Virtual Port.
0x00020000 - Buffer Dump.   0x00010000 - Misc.
0x7fffffff - Enable all logs (can produce a very large volume of messages).

Setting the ql2xextended_error_logging module parameter to any of the above
values will enable debugging at that particular level.
Logically OR the values to enable more than one level, as illustrated below.
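
As a rough illustration of how such a bitmask-gated helper can work, here is a
minimal sketch. It is not the driver's actual ql_dbg()/ql_log() implementation
(the real helpers, as seen in the diff below, also take the scsi_qla_host
pointer and a per-message id), and the pairing of the ql_dbg_* names with the
bit values from the level list above is an assumption made for illustration.

/*
 * Minimal sketch of a mask-gated debug helper. The bit values come from the
 * level list above; mapping them to the ql_dbg_* names used in the diff is
 * assumed for illustration only.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int ql2xextended_error_logging;	/* default 0: no logging */
module_param(ql2xextended_error_logging, int, S_IRUGO | S_IWUSR);

#define ql_dbg_init	0x40000000	/* Module Init & Probe (assumed mapping) */
#define ql_dbg_mbx	0x20000000	/* Mailbox Cmnds       (assumed mapping) */
#define ql_dbg_disc	0x10000000	/* Device Discovery    (assumed mapping) */
#define ql_dbg_user	0x00800000	/* User space          (assumed mapping) */

/* Emit the message only when the caller's level bit is set in the mask. */
#define sketch_ql_dbg(level, fmt, ...)					\
	do {								\
		if (ql2xextended_error_logging & (level))		\
			printk(KERN_DEBUG "qla2xxx: " fmt, ##__VA_ARGS__); \
	} while (0)

static void sketch_example(void)
{
	/* Logged only if 0x00800000 (User space) is enabled in the mask. */
	sketch_ql_dbg(ql_dbg_user, "User-space request received.\n");
}

For example, enabling Module Init & Probe, Device Discovery, and User space
together means loading the driver with ql2xextended_error_logging=0x50800000
(0x40000000 | 0x10000000 | 0x00800000); if the parameter is declared writable,
it could also be changed at runtime via
/sys/module/qla2xxx/parameters/ql2xextended_error_logging.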

Signed-off-by: Saurav Kashyap <saurav.kashyap@qlogic.com>
Signed-off-by: Giridhar Malavali <giridhar.malavali@qlogic.com>
Signed-off-by: Andrew Vasquez <andrew.vasquez@qlogic.com>
Signed-off-by: Joe Carnuccio <joe.carnuccio@qlogic.com>
Signed-off-by: Chad Dupuis <chad.dupuis@qlogic.com>
Signed-off-by: Madhuranath Iyengar <Madhu.Iyengar@qlogic.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
Authored by Saurav Kashyap on 2011-07-14 12:00:13 -07:00; committed by James Bottomley
parent 3ce8866cea
commit 7c3df1320e
15 changed files with 2944 additions and 2609 deletions

View File

@ -42,8 +42,8 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
int reading;
if (IS_QLA82XX(ha)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Firmware dump not supported for ISP82xx\n"));
ql_dbg(ql_dbg_user, vha, 0x705b,
"Firmware dump not supported for ISP82xx\n");
return count;
}
@ -56,7 +56,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (!ha->fw_dump_reading)
break;
qla_printk(KERN_INFO, ha,
ql_log(ql_log_info, vha, 0x705d,
"Firmware dump cleared on (%ld).\n", vha->host_no);
ha->fw_dump_reading = 0;
@ -66,7 +66,7 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
if (ha->fw_dumped && !ha->fw_dump_reading) {
ha->fw_dump_reading = 1;
qla_printk(KERN_INFO, ha,
ql_log(ql_log_info, vha, 0x705e,
"Raw firmware dump ready for read on (%ld).\n",
vha->host_no);
}
@ -148,7 +148,7 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x705f,
"HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
@ -158,6 +158,8 @@ qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
count);
ql_dbg(ql_dbg_user, vha, 0x7060,
"Setting ISP_ABORT_NEEDED\n");
/* NVRAM settings take effect immediately. */
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
@ -255,9 +257,9 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWAITING;
DEBUG2(qla_printk(KERN_INFO, ha,
ql_dbg(ql_dbg_user, vha, 0x7061,
"Freeing flash region allocation -- 0x%x bytes.\n",
ha->optrom_region_size));
ha->optrom_region_size);
vfree(ha->optrom_buffer);
ha->optrom_buffer = NULL;
@ -273,7 +275,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SREADING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7062,
"Unable to allocate memory for optrom retrieval "
"(%x).\n", ha->optrom_region_size);
@ -282,14 +284,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
}
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"HBA not online, failing NVRAM update.\n");
ql_log(ql_log_warn, vha, 0x7063,
"HBA not online, failing NVRAM update.\n");
return -EAGAIN;
}
DEBUG2(qla_printk(KERN_INFO, ha,
ql_dbg(ql_dbg_user, vha, 0x7064,
"Reading flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
@ -328,7 +330,7 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7065,
"Invalid start region 0x%x/0x%x.\n", start, size);
return -EINVAL;
}
@ -340,17 +342,17 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
ha->optrom_state = QLA_SWRITING;
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (ha->optrom_buffer == NULL) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7066,
"Unable to allocate memory for optrom update "
"(%x).\n", ha->optrom_region_size);
"(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return count;
}
DEBUG2(qla_printk(KERN_INFO, ha,
ql_dbg(ql_dbg_user, vha, 0x7067,
"Staging flash region write -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
ha->optrom_region_start, ha->optrom_region_size);
memset(ha->optrom_buffer, 0, ha->optrom_region_size);
break;
@ -359,14 +361,14 @@ qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
break;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7068,
"HBA not online, failing flash update.\n");
return -EAGAIN;
}
DEBUG2(qla_printk(KERN_INFO, ha,
ql_dbg(ql_dbg_user, vha, 0x7069,
"Writing flash region -- 0x%x/0x%x.\n",
ha->optrom_region_start, ha->optrom_region_size));
ha->optrom_region_start, ha->optrom_region_size);
ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
ha->optrom_region_start, ha->optrom_region_size);
@ -425,7 +427,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
return 0;
if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x706a,
"HBA not online, failing VPD update.\n");
return -EAGAIN;
}
@ -440,7 +442,7 @@ qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
tmp_data = vmalloc(256);
if (!tmp_data) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x706b,
"Unable to allocate memory for VPD information update.\n");
goto done;
}
@ -480,7 +482,7 @@ qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->sfp_data_dma);
if (!ha->sfp_data) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x706c,
"Unable to allocate memory for SFP read-data.\n");
return 0;
}
@ -499,9 +501,10 @@ do_read:
rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
addr, offset, SFP_BLOCK_SIZE, 0);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x706d,
"Unable to read SFP data (%x/%x/%x).\n", rval,
addr, offset);
count = 0;
break;
}
@ -538,8 +541,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
type = simple_strtol(buf, NULL, 10);
switch (type) {
case 0x2025c:
qla_printk(KERN_INFO, ha,
"Issuing ISP reset on (%ld).\n", vha->host_no);
ql_log(ql_log_info, vha, 0x706e,
"Issuing ISP reset.\n");
scsi_block_requests(vha->host);
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
@ -551,8 +554,8 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
if (!IS_QLA81XX(ha))
break;
qla_printk(KERN_INFO, ha,
"Issuing MPI reset on (%ld).\n", vha->host_no);
ql_log(ql_log_info, vha, 0x706f,
"Issuing MPI reset.\n");
/* Make sure FC side is not in reset */
qla2x00_wait_for_hba_online(vha);
@ -560,20 +563,19 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
/* Issue MPI reset */
scsi_block_requests(vha->host);
if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"MPI reset failed on (%ld).\n", vha->host_no);
ql_log(ql_log_warn, vha, 0x7070,
"MPI reset failed.\n");
scsi_unblock_requests(vha->host);
break;
case 0x2025e:
if (!IS_QLA82XX(ha) || vha != base_vha) {
qla_printk(KERN_INFO, ha,
"FCoE ctx reset not supported for host%ld.\n",
vha->host_no);
ql_log(ql_log_info, vha, 0x7071,
"FCoE ctx reset no supported.\n");
return count;
}
qla_printk(KERN_INFO, ha,
"Issuing FCoE CTX reset on host%ld.\n", vha->host_no);
ql_log(ql_log_info, vha, 0x7072,
"Issuing FCoE ctx reset.\n");
set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_fcoe_ctx_reset(vha);
@ -611,8 +613,8 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Unable to allocate memory for EDC write.\n"));
ql_log(ql_log_warn, vha, 0x7073,
"Unable to allocate memory for EDC write.\n");
return 0;
}
}
@ -631,9 +633,9 @@ qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Unable to write EDC (%x) %02x:%02x:%04x:%02x:%02x.\n",
rval, dev, adr, opt, len, buf[8]));
ql_log(ql_log_warn, vha, 0x7074,
"Unable to write EDC (%x) %02x:%04x:%02x:%02x\n",
rval, dev, adr, opt, len, buf[8]);
return 0;
}
@ -669,8 +671,8 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
&ha->edc_data_dma);
if (!ha->edc_data) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Unable to allocate memory for EDC status.\n"));
ql_log(ql_log_warn, vha, 0x708c,
"Unable to allocate memory for EDC status.\n");
return 0;
}
}
@ -688,9 +690,9 @@ qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
dev, adr, len, opt);
if (rval != QLA_SUCCESS) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Unable to write EDC status (%x) %02x:%02x:%04x:%02x.\n",
rval, dev, adr, opt, len));
ql_log(ql_log_info, vha, 0x7075,
"Unable to write EDC status (%x) %02x:%04x:%02x.\n",
rval, dev, adr, opt, len);
return 0;
}
@ -749,7 +751,7 @@ qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
&ha->xgmac_data_dma, GFP_KERNEL);
if (!ha->xgmac_data) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7076,
"Unable to allocate memory for XGMAC read-data.\n");
return 0;
}
@ -761,7 +763,7 @@ do_read:
rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
XGMAC_DATA_SIZE, &actual_size);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7077,
"Unable to read XGMAC data (%x).\n", rval);
count = 0;
}
@ -801,7 +803,7 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
&ha->dcbx_tlv_dma, GFP_KERNEL);
if (!ha->dcbx_tlv) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7078,
"Unable to allocate memory for DCBX TLV read-data.\n");
return 0;
}
@ -813,8 +815,8 @@ do_read:
rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
DCBX_TLV_DATA_SIZE);
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to read DCBX TLV data (%x).\n", rval);
ql_log(ql_log_warn, vha, 0x7079,
"Unable to read DCBX TLV (%x).\n", rval);
count = 0;
}
@ -869,9 +871,13 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
iter->attr);
if (ret)
qla_printk(KERN_INFO, vha->hw,
"Unable to create sysfs %s binary attribute "
"(%d).\n", iter->name, ret);
ql_log(ql_log_warn, vha, 0x00f3,
"Unable to create sysfs %s binary attribute (%d).\n",
iter->name, ret);
else
ql_dbg(ql_dbg_init, vha, 0x00f4,
"Successfully created sysfs %s binary attribure.\n",
iter->name);
}
}
@ -1126,7 +1132,7 @@ qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
return -EPERM;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x707a,
"Abort ISP active -- ignoring beacon request.\n");
return -EBUSY;
}
@ -1322,9 +1328,8 @@ qla2x00_thermal_temp_show(struct device *dev,
temp = frac = 0;
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
DEBUG2_3_11(printk(KERN_WARNING
"%s(%ld): isp reset in progress.\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x707b,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
if (rval != QLA_SUCCESS)
@ -1343,8 +1348,8 @@ qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
DEBUG2_3_11(printk("%s(%ld): isp reset in progress.\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x707c,
"ISP reset active.\n");
else if (!vha->hw->flags.eeh_busy)
rval = qla2x00_get_firmware_state(vha, state);
if (rval != QLA_SUCCESS)
@ -1645,8 +1650,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
if (stats == NULL) {
DEBUG2_3_11(printk("%s(%ld): Failed to allocate memory.\n",
__func__, base_vha->host_no));
ql_log(ql_log_warn, vha, 0x707d,
"Failed to allocate memory for stats.\n");
goto done;
}
memset(stats, 0, DMA_POOL_SIZE);
@ -1746,15 +1751,14 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla24xx_vport_create_req_sanity_check(fc_vport);
if (ret) {
DEBUG15(printk("qla24xx_vport_create_req_sanity_check failed, "
"status %x\n", ret));
ql_log(ql_log_warn, vha, 0x707e,
"Vport sanity check failed, status %x\n", ret);
return (ret);
}
vha = qla24xx_create_vhost(fc_vport);
if (vha == NULL) {
DEBUG15(printk ("qla24xx_create_vhost failed, vha = %p\n",
vha));
ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
return FC_VPORT_FAILED;
}
if (disable) {
@ -1764,8 +1768,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
atomic_set(&vha->vp_state, VP_FAILED);
/* ready to create vport */
qla_printk(KERN_INFO, vha->hw, "VP entry id %d assigned.\n",
vha->vp_idx);
ql_log(ql_log_info, vha, 0x7080,
"VP entry id %d assigned.\n", vha->vp_idx);
/* initialized vport states */
atomic_set(&vha->loop_state, LOOP_DOWN);
@ -1775,8 +1779,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
/* Don't retry or attempt login of this virtual port */
DEBUG15(printk ("scsi(%ld): pport loop_state is not UP.\n",
base_vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7081,
"Vport loop state is not UP.\n");
atomic_set(&vha->loop_state, LOOP_DEAD);
if (!disable)
fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
@ -1785,9 +1789,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if ((IS_QLA25XX(ha) || IS_QLA81XX(ha)) && ql2xenabledif) {
if (ha->fw_attributes & BIT_4) {
vha->flags.difdix_supported = 1;
DEBUG18(qla_printk(KERN_INFO, ha,
"Registering for DIF/DIX type 1 and 3"
" protection.\n"));
ql_dbg(ql_dbg_user, vha, 0x7082,
"Registered for DIF/DIX type 1 and 3 protection.\n");
scsi_host_set_prot(vha->host,
SHOST_DIF_TYPE1_PROTECTION
| SHOST_DIF_TYPE2_PROTECTION
@ -1802,8 +1805,8 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
&ha->pdev->dev)) {
DEBUG15(printk("scsi(%ld): scsi_add_host failure for VP[%d].\n",
vha->host_no, vha->vp_idx));
ql_dbg(ql_dbg_user, vha, 0x7083,
"scsi_add_host failure for VP[%d].\n", vha->vp_idx);
goto vport_create_failed_2;
}
@ -1820,6 +1823,10 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
if (ha->flags.cpu_affinity_enabled) {
req = ha->req_q_map[1];
ql_dbg(ql_dbg_multiq, vha, 0xc000,
"Request queue %p attached with "
"VP[%d], cpu affinity =%d\n",
req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
goto vport_queue;
} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
goto vport_queue;
@ -1836,13 +1843,16 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
qos);
if (!ret)
qla_printk(KERN_WARNING, ha,
"Can't create request queue for vp_idx:%d\n",
vha->vp_idx);
ql_log(ql_log_warn, vha, 0x7084,
"Can't create request queue for VP[%d]\n",
vha->vp_idx);
else {
DEBUG2(qla_printk(KERN_INFO, ha,
"Request Que:%d (QoS: %d) created for vp_idx:%d\n",
ret, qos, vha->vp_idx));
ql_dbg(ql_dbg_multiq, vha, 0xc001,
"Request Que:%d Q0s: %d) created for VP[%d]\n",
ret, qos, vha->vp_idx);
ql_dbg(ql_dbg_user, vha, 0x7085,
"Request Que:%d Q0s: %d) created for VP[%d]\n",
ret, qos, vha->vp_idx);
req = ha->req_q_map[ret];
}
}
@ -1882,12 +1892,13 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->timer_active) {
qla2x00_vp_stop_timer(vha);
DEBUG15(printk(KERN_INFO "scsi(%ld): timer for the vport[%d]"
" = %p has stopped\n", vha->host_no, vha->vp_idx, vha));
ql_dbg(ql_dbg_user, vha, 0x7086,
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
/* No pending activities shall be there on the vha now */
DEBUG(msleep(random32()%10)); /* Just to see if something falls on
if (ql2xextended_error_logging & ql_dbg_user)
msleep(random32()%10); /* Just to see if something falls on
* the net we have placed below */
BUG_ON(atomic_read(&vha->vref_count));
@ -1901,12 +1912,12 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
"Queue delete failed.\n");
ql_log(ql_log_warn, vha, 0x7087,
"Queue delete failed.\n");
}
scsi_host_put(vha->host);
qla_printk(KERN_INFO, ha, "vport %d deleted\n", id);
ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
return 0;
}

View File

@ -36,7 +36,8 @@ done:
}
int
qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *vha,
struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
{
int i, ret, num_valid;
uint8_t *bcode;
@ -51,18 +52,17 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (bcode_val == 0xFFFFFFFF) {
/* No FCP Priority config data in flash */
DEBUG2(printk(KERN_INFO
"%s: No FCP priority config data.\n",
__func__));
ql_dbg(ql_dbg_user, vha, 0x7051,
"No FCP Priority config data.\n");
return 0;
}
if (bcode[0] != 'H' || bcode[1] != 'Q' || bcode[2] != 'O' ||
bcode[3] != 'S') {
/* Invalid FCP priority data header*/
DEBUG2(printk(KERN_ERR
"%s: Invalid FCP Priority data header. bcode=0x%x\n",
__func__, bcode_val));
ql_dbg(ql_dbg_user, vha, 0x7052,
"Invalid FCP Priority data header. bcode=0x%x.\n",
bcode_val);
return 0;
}
if (flag != 1)
@ -77,15 +77,14 @@ qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *pri_cfg, uint8_t flag)
if (num_valid == 0) {
/* No valid FCP priority data entries */
DEBUG2(printk(KERN_ERR
"%s: No valid FCP Priority data entries.\n",
__func__));
ql_dbg(ql_dbg_user, vha, 0x7053,
"No valid FCP Priority data entries.\n");
ret = 0;
} else {
/* FCP priority data is valid */
DEBUG2(printk(KERN_INFO
"%s: Valid FCP priority data. num entries = %d\n",
__func__, num_valid));
ql_dbg(ql_dbg_user, vha, 0x7054,
"Valid FCP priority data. num entries = %d.\n",
num_valid);
}
return ret;
@ -182,10 +181,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate memory "
"for fcp prio config data (%x).\n",
FCP_PRIO_CFG_SIZE);
ql_log(ql_log_warn, vha, 0x7050,
"Unable to allocate memory for fcp prio "
"config data (%x).\n", FCP_PRIO_CFG_SIZE);
bsg_job->reply->result = (DID_ERROR << 16);
ret = -ENOMEM;
goto exit_fcp_prio_cfg;
@ -198,9 +196,9 @@ qla24xx_proc_fcp_prio_cfg_cmd(struct fc_bsg_job *bsg_job)
FCP_PRIO_CFG_SIZE);
/* validate fcp priority data */
if (!qla24xx_fcp_prio_cfg_valid(
(struct qla_fcp_prio_cfg *)
ha->fcp_prio_cfg, 1)) {
if (!qla24xx_fcp_prio_cfg_valid(vha,
(struct qla_fcp_prio_cfg *) ha->fcp_prio_cfg, 1)) {
bsg_job->reply->result = (DID_ERROR << 16);
ret = -EINVAL;
/* If buffer was invalidatic int
@ -256,9 +254,8 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* pass through is supported only for ISP 4Gb or higher */
if (!IS_FWI2_CAPABLE(ha)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld):ELS passthru not supported for ISP23xx based "
"adapters\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7001,
"ELS passthru not supported for ISP23xx based adapters.\n");
rval = -EPERM;
goto done;
}
@ -266,11 +263,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
/* Multiple SG's are not supported for ELS requests */
if (bsg_job->request_payload.sg_cnt > 1 ||
bsg_job->reply_payload.sg_cnt > 1) {
DEBUG2(printk(KERN_INFO
"multiple SG's are not supported for ELS requests"
" [request_sg_cnt: %x reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt));
ql_dbg(ql_dbg_user, vha, 0x7002,
"Multiple SG's are not suppored for ELS requests, "
"request_sg_cnt=%x reply_sg_cnt=%x.\n",
bsg_job->request_payload.sg_cnt,
bsg_job->reply_payload.sg_cnt);
rval = -EPERM;
goto done;
}
@ -281,9 +278,9 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
* if not perform fabric login
*/
if (qla2x00_fabric_login(vha, fcport, &nextlid)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"failed to login port %06X for ELS passthru\n",
fcport->d_id.b24));
ql_dbg(ql_dbg_user, vha, 0x7003,
"Failed to login port %06X for ELS passthru.\n",
fcport->d_id.b24);
rval = -EIO;
goto done;
}
@ -314,8 +311,7 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"host not online\n"));
ql_log(ql_log_warn, vha, 0x7005, "Host not online.\n");
rval = -EIO;
goto done;
}
@ -337,12 +333,11 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts \
[request_sg_cnt: %x dma_request_sg_cnt: %x\
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, req_sg_cnt,
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
ql_log(ql_log_warn, vha, 0x7008,
"dma mapping resulted in different sg counts, "
"request_sg_cnt: %x dma_request_sg_cnt:%x reply_sg_cnt:%x "
"dma_reply_sg_cnt:%x.\n", bsg_job->request_payload.sg_cnt,
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@ -363,15 +358,16 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
"bsg_els_rpt" : "bsg_els_hst");
els->u.bsg_job = bsg_job;
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
bsg_job->request->rqst_data.h_els.command_code,
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
ql_dbg(ql_dbg_user, vha, 0x700a,
"bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%-2x%02x%02x.\n", type,
bsg_job->request->rqst_data.h_els.command_code, fcport->loop_id,
fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x700e,
"qla2x00_start_sp failed = %d\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@ -411,6 +407,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!req_sg_cnt) {
ql_log(ql_log_warn, vha, 0x700f,
"dma_map_sg return %d for request\n", req_sg_cnt);
rval = -ENOMEM;
goto done;
}
@ -418,24 +416,25 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
rsp_sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!rsp_sg_cnt) {
ql_log(ql_log_warn, vha, 0x7010,
"dma_map_sg return %d for reply\n", rsp_sg_cnt);
rval = -ENOMEM;
goto done;
}
if ((req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"[request_sg_cnt: %x dma_request_sg_cnt: %x\
reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, req_sg_cnt,
bsg_job->reply_payload.sg_cnt, rsp_sg_cnt));
ql_log(ql_log_warn, vha, 0x7011,
"request_sg_cnt: %x dma_request_sg_cnt: %x reply_sg_cnt:%x "
"dma_reply_sg_cnt: %x\n", bsg_job->request_payload.sg_cnt,
req_sg_cnt, bsg_job->reply_payload.sg_cnt, rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"host not online\n"));
ql_log(ql_log_warn, vha, 0x7012,
"Host is not online.\n");
rval = -EIO;
goto done_unmap_sg;
}
@ -451,8 +450,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
loop_id = vha->mgmt_svr_loop_id;
break;
default:
DEBUG2(qla_printk(KERN_INFO, ha,
"Unknown loop id: %x\n", loop_id));
ql_dbg(ql_dbg_user, vha, 0x7013,
"Unknown loop id: %x.\n", loop_id);
rval = -EINVAL;
goto done_unmap_sg;
}
@ -464,6 +463,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
*/
fcport = qla2x00_alloc_fcport(vha, GFP_KERNEL);
if (!fcport) {
ql_log(ql_log_warn, vha, 0x7014,
"Failed to allocate fcport.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@ -479,6 +480,8 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
/* Alloc SRB structure */
sp = qla2x00_get_ctx_bsg_sp(vha, fcport, sizeof(struct srb_ctx));
if (!sp) {
ql_log(ql_log_warn, vha, 0x7015,
"qla2x00_get_ctx_bsg_sp failed.\n");
rval = -ENOMEM;
goto done_free_fcport;
}
@ -488,15 +491,17 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
ct->name = "bsg_ct";
ct->u.bsg_job = bsg_job;
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld:%x): bsg rqst type: %s els type: %x - loop-id=%x "
"portid=%02x%02x%02x.\n", vha->host_no, sp->handle, type,
(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
ql_dbg(ql_dbg_user, vha, 0x7016,
"bsg rqst type: %s else type: %x - "
"loop-id=%x portid=%02x%02x%02x.\n", type,
(bsg_job->request->rqst_data.h_ct.preamble_word2 >> 16),
fcport->loop_id, fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa);
rval = qla2x00_start_sp(sp);
if (rval != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x7017,
"qla2x00_start_sp failed=%d.\n", rval);
kfree(sp->ctx);
mempool_free(sp, ha->srb_mempool);
rval = -EIO;
@ -535,9 +540,8 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = 1;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
DEBUG2(printk(KERN_ERR
"%s(%lu): Set port config failed\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7021,
"set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_set_internal;
@ -545,11 +549,11 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (!wait_for_completion_timeout(&ha->dcbx_comp, (20 * HZ))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"State change notificaition not received.\n"));
ql_dbg(ql_dbg_user, vha, 0x7022,
"State change notification not received.\n");
} else
DEBUG2(qla_printk(KERN_INFO, ha,
"State change RECEIVED\n"));
ql_dbg(ql_dbg_user, vha, 0x7023,
"State change received.\n");
ha->notify_dcbx_comp = 0;
@ -581,9 +585,8 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
ha->notify_dcbx_comp = wait;
ret = qla81xx_set_port_config(vha, new_config);
if (ret != QLA_SUCCESS) {
DEBUG2(printk(KERN_ERR
"%s(%lu): Set port config failed\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7025,
"Set port config failed.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
@ -592,14 +595,14 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
/* Wait for DCBX complete event */
if (wait && !wait_for_completion_timeout(&ha->dcbx_comp,
(20 * HZ))) {
DEBUG2(qla_printk(KERN_WARNING, ha,
"State change notificaition not received.\n"));
ql_dbg(ql_dbg_user, vha, 0x7026,
"State change notification not received.\n");
ha->notify_dcbx_comp = 0;
rval = -EINVAL;
goto done_reset_internal;
} else
DEBUG2(qla_printk(KERN_INFO, ha,
"State change RECEIVED\n"));
ql_dbg(ql_dbg_user, vha, 0x7027,
"State change received.\n");
ha->notify_dcbx_comp = 0;
}
@ -629,11 +632,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x7018, "Abort active or needed.\n");
return -EBUSY;
}
if (!vha->flags.online) {
DEBUG2(qla_printk(KERN_WARNING, ha, "host not online\n"));
ql_log(ql_log_warn, vha, 0x7019, "Host is not online.\n");
return -EIO;
}
@ -641,26 +646,31 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_list, bsg_job->request_payload.sg_cnt,
DMA_TO_DEVICE);
if (!elreq.req_sg_cnt)
if (!elreq.req_sg_cnt) {
ql_log(ql_log_warn, vha, 0x701a,
"dma_map_sg returned %d for request.\n", elreq.req_sg_cnt);
return -ENOMEM;
}
elreq.rsp_sg_cnt = dma_map_sg(&ha->pdev->dev,
bsg_job->reply_payload.sg_list, bsg_job->reply_payload.sg_cnt,
DMA_FROM_DEVICE);
if (!elreq.rsp_sg_cnt) {
ql_log(ql_log_warn, vha, 0x701b,
"dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt);
rval = -ENOMEM;
goto done_unmap_req_sg;
}
if ((elreq.req_sg_cnt != bsg_job->request_payload.sg_cnt) ||
(elreq.rsp_sg_cnt != bsg_job->reply_payload.sg_cnt)) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts "
"[request_sg_cnt: %x dma_request_sg_cnt: %x "
"reply_sg_cnt: %x dma_reply_sg_cnt: %x]\n",
bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt));
ql_log(ql_log_warn, vha, 0x701c,
"dma mapping resulted in different sg counts, "
"request_sg_cnt: %x dma_request_sg_cnt: %x "
"reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
bsg_job->request_payload.sg_cnt, elreq.req_sg_cnt,
bsg_job->reply_payload.sg_cnt, elreq.rsp_sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@ -668,8 +678,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
req_data = dma_alloc_coherent(&ha->pdev->dev, req_data_len,
&req_data_dma, GFP_KERNEL);
if (!req_data) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for req_data "
"failed for host=%lu\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x701d,
"dma alloc failed for req_data.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@ -677,8 +687,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
rsp_data = dma_alloc_coherent(&ha->pdev->dev, rsp_data_len,
&rsp_data_dma, GFP_KERNEL);
if (!rsp_data) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for rsp_data "
"failed for host=%lu\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7004,
"dma alloc failed for rsp_data.\n");
rval = -ENOMEM;
goto done_free_dma_req;
}
@ -699,8 +709,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
&& req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
elreq.options == EXTERNAL_LOOPBACK) {
type = "FC_BSG_HST_VENDOR_ECHO_DIAG";
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) bsg rqst type: %s\n", vha->host_no, type));
ql_dbg(ql_dbg_user, vha, 0x701e,
"BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_ECHO_CMD;
rval = qla2x00_echo_test(vha, &elreq, response);
} else {
@ -708,9 +718,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
memset(config, 0, sizeof(config));
memset(new_config, 0, sizeof(new_config));
if (qla81xx_get_port_config(vha, config)) {
DEBUG2(printk(KERN_ERR
"%s(%lu): Get port config failed\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x701f,
"Get port config failed.\n");
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
rval = -EPERM;
@ -718,11 +727,13 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
if (elreq.options != EXTERNAL_LOOPBACK) {
DEBUG2(qla_printk(KERN_INFO, ha,
"Internal: current port config = %x\n",
config[0]));
ql_dbg(ql_dbg_user, vha, 0x7020,
"Internal: curent port config = %x\n",
config[0]);
if (qla81xx_set_internal_loopback(vha, config,
new_config)) {
ql_log(ql_log_warn, vha, 0x7024,
"Internal loopback failed.\n");
bsg_job->reply->reply_payload_rcv_len =
0;
bsg_job->reply->result =
@ -746,9 +757,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
type = "FC_BSG_HST_VENDOR_LOOPBACK";
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) bsg rqst type: %s\n",
vha->host_no, type));
ql_dbg(ql_dbg_user, vha, 0x7028,
"BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
@ -763,17 +773,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
if (response[0] == MBS_COMMAND_ERROR &&
response[1] == MBS_LB_RESET) {
DEBUG2(printk(KERN_ERR "%s(%ld): ABORTing "
"ISP\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7029,
"MBX command error, Aborting ISP.\n");
set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
qla2xxx_wake_dpc(vha);
qla2x00_wait_for_chip_reset(vha);
/* Also reset the MPI */
if (qla81xx_restart_mpi_firmware(vha) !=
QLA_SUCCESS) {
qla_printk(KERN_INFO, ha,
"MPI reset failed for host%ld.\n",
vha->host_no);
ql_log(ql_log_warn, vha, 0x702a,
"MPI reset failed.\n");
}
bsg_job->reply->reply_payload_rcv_len = 0;
@ -783,17 +792,16 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
}
} else {
type = "FC_BSG_HST_VENDOR_LOOPBACK";
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) bsg rqst type: %s\n",
vha->host_no, type));
ql_dbg(ql_dbg_user, vha, 0x702b,
"BSG request type: %s.\n", type);
command_sent = INT_DEF_LB_LOOPBACK_CMD;
rval = qla2x00_loopback_test(vha, &elreq, response);
}
}
if (rval) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request %s failed\n", vha->host_no, type));
ql_log(ql_log_warn, vha, 0x702c,
"Vendor request %s failed.\n", type);
fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) +
sizeof(struct fc_bsg_reply);
@ -805,8 +813,8 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request %s completed\n", vha->host_no, type));
ql_dbg(ql_dbg_user, vha, 0x702d,
"Vendor request %s completed.\n", type);
bsg_job->reply_len = sizeof(struct fc_bsg_reply) +
sizeof(response) + sizeof(uint8_t);
@ -851,12 +859,13 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x702e, "Abort active or needed.\n");
return -EBUSY;
}
if (!IS_QLA84XX(ha)) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
"exiting.\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x702f, "Not 84xx, exiting.\n");
return -EINVAL;
}
@ -865,14 +874,14 @@ qla84xx_reset(struct fc_bsg_job *bsg_job)
rval = qla84xx_reset_chip(vha, flag == A84_ISSUE_RESET_DIAG_FW);
if (rval) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx reset failed\n", vha->host_no));
ql_log(ql_log_warn, vha, 0x7030,
"Vendor request 84xx reset failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx reset completed\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7031,
"Vendor request 84xx reset completed.\n");
bsg_job->reply->result = DID_OK;
}
@ -902,21 +911,24 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
return -EBUSY;
if (!IS_QLA84XX(ha)) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
"exiting.\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7032,
"Not 84xx, exiting.\n");
return -EINVAL;
}
sg_cnt = dma_map_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!sg_cnt)
if (!sg_cnt) {
ql_log(ql_log_warn, vha, 0x7033,
"dma_map_sg returned %d for request.\n", sg_cnt);
return -ENOMEM;
}
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts "
"request_sg_cnt: %x dma_request_sg_cnt: %x ",
bsg_job->request_payload.sg_cnt, sg_cnt));
ql_log(ql_log_warn, vha, 0x7034,
"DMA mapping resulted in different sg counts, "
"request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@ -925,8 +937,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
fw_buf = dma_alloc_coherent(&ha->pdev->dev, data_len,
&fw_dma, GFP_KERNEL);
if (!fw_buf) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw_buf "
"failed for host=%lu\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7035,
"DMA alloc failed for fw_buf.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@ -936,8 +948,8 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
"failed for host=%lu\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7036,
"DMA alloc failed for fw buffer.\n");
rval = -ENOMEM;
goto done_free_fw_buf;
}
@ -965,15 +977,15 @@ qla84xx_updatefw(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb_timeout(vha, mn, mn_dma, 0, 120);
if (rval) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx updatefw failed\n", vha->host_no));
ql_log(ql_log_warn, vha, 0x7037,
"Vendor request 84xx updatefw failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx updatefw completed\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7038,
"Vendor request 84xx updatefw completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@ -1009,27 +1021,30 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x7039,
"Abort active or needed.\n");
return -EBUSY;
}
if (!IS_QLA84XX(ha)) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld): Not 84xx, "
"exiting.\n", vha->host_no));
ql_log(ql_log_warn, vha, 0x703a,
"Not 84xx, exiting.\n");
return -EINVAL;
}
ql84_mgmt = (struct qla_bsg_a84_mgmt *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!ql84_mgmt) {
DEBUG2(printk("%s(%ld): mgmt header not provided, exiting.\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x703b,
"MGMT header not provided, exiting.\n");
return -EINVAL;
}
mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
if (!mn) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for fw buffer "
"failed for host=%lu\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x703c,
"DMA alloc failed for fw buffer.\n");
return -ENOMEM;
}
@ -1044,6 +1059,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->reply_payload.sg_list,
bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
if (!sg_cnt) {
ql_log(ql_log_warn, vha, 0x703d,
"dma_map_sg returned %d for reply.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@ -1051,10 +1068,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_FROM_DEVICE;
if (sg_cnt != bsg_job->reply_payload.sg_cnt) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts "
"reply_sg_cnt: %x dma_reply_sg_cnt: %x\n",
bsg_job->reply_payload.sg_cnt, sg_cnt));
ql_log(ql_log_warn, vha, 0x703e,
"DMA mapping resulted in different sg counts, "
"reply_sg_cnt: %x dma_reply_sg_cnt: %x.\n",
bsg_job->reply_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@ -1064,9 +1081,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
"failed for host=%lu\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x703f,
"DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@ -1094,6 +1110,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
if (!sg_cnt) {
ql_log(ql_log_warn, vha, 0x7040,
"dma_map_sg returned %d.\n", sg_cnt);
rval = -ENOMEM;
goto exit_mgmt;
}
@ -1101,10 +1119,10 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
dma_direction = DMA_TO_DEVICE;
if (sg_cnt != bsg_job->request_payload.sg_cnt) {
DEBUG2(printk(KERN_INFO
"dma mapping resulted in different sg counts "
"request_sg_cnt: %x dma_request_sg_cnt: %x ",
bsg_job->request_payload.sg_cnt, sg_cnt));
ql_log(ql_log_warn, vha, 0x7041,
"DMA mapping resulted in different sg counts, "
"request_sg_cnt: %x dma_request_sg_cnt: %x.\n",
bsg_job->request_payload.sg_cnt, sg_cnt);
rval = -EAGAIN;
goto done_unmap_sg;
}
@ -1113,9 +1131,8 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
mgmt_b = dma_alloc_coherent(&ha->pdev->dev, data_len,
&mgmt_dma, GFP_KERNEL);
if (!mgmt_b) {
DEBUG2(printk(KERN_ERR "%s: dma alloc for mgmt_b "
"failed for host=%lu\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7042,
"DMA alloc failed for mgmt_b.\n");
rval = -ENOMEM;
goto done_unmap_sg;
}
@ -1156,15 +1173,15 @@ qla84xx_mgmt_cmd(struct fc_bsg_job *bsg_job)
rval = qla2x00_issue_iocb(vha, mn, mn_dma, 0);
if (rval) {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx mgmt failed\n", vha->host_no));
ql_log(ql_log_warn, vha, 0x7043,
"Vendor request 84xx mgmt failed.\n");
rval = bsg_job->reply->reply_payload_rcv_len = 0;
bsg_job->reply->result = (DID_ERROR << 16);
} else {
DEBUG2(qla_printk(KERN_WARNING, ha, "scsi(%ld) Vendor "
"request 84xx mgmt completed\n", vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x7044,
"Vendor request 84xx mgmt completed.\n");
bsg_job->reply_len = sizeof(struct fc_bsg_reply);
bsg_job->reply->result = DID_OK;
@ -1204,7 +1221,6 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
{
struct Scsi_Host *host = bsg_job->shost;
scsi_qla_host_t *vha = shost_priv(host);
struct qla_hw_data *ha = vha->hw;
int rval = 0;
struct qla_port_param *port_param = NULL;
fc_port_t *fcport = NULL;
@ -1215,26 +1231,27 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
if (test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags))
test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x7045, "abort active or needed.\n");
return -EBUSY;
}
if (!IS_IIDMA_CAPABLE(vha->hw)) {
DEBUG2(qla_printk(KERN_WARNING, ha, "%s(%lu): iiDMA not "
"supported\n", __func__, vha->host_no));
ql_log(ql_log_info, vha, 0x7046, "iiDMA not supported.\n");
return -EINVAL;
}
port_param = (struct qla_port_param *)((char *)bsg_job->request +
sizeof(struct fc_bsg_request));
if (!port_param) {
DEBUG2(printk("%s(%ld): port_param header not provided, "
"exiting.\n", __func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7047,
"port_param header not provided.\n");
return -EINVAL;
}
if (port_param->fc_scsi_addr.dest_type != EXT_DEF_TYPE_WWPN) {
DEBUG2(printk(KERN_ERR "%s(%ld): Invalid destination type\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7048,
"Invalid destination type.\n");
return -EINVAL;
}
@ -1249,21 +1266,20 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
if (!fcport) {
DEBUG2(printk(KERN_ERR "%s(%ld): Failed to find port\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x7049,
"Failed to find port.\n");
return -EINVAL;
}
if (atomic_read(&fcport->state) != FCS_ONLINE) {
DEBUG2(printk(KERN_ERR "%s(%ld): Port not online\n",
__func__, vha->host_no));
ql_log(ql_log_warn, vha, 0x704a,
"Port is not online.\n");
return -EINVAL;
}
if (fcport->flags & FCF_LOGIN_NEEDED) {
DEBUG2(printk(KERN_ERR "%s(%ld): Remote port not logged in, "
"flags = 0x%x\n",
__func__, vha->host_no, fcport->flags));
ql_log(ql_log_warn, vha, 0x704b,
"Remote port not logged in flags = 0x%x.\n", fcport->flags);
return -EINVAL;
}
@ -1275,15 +1291,13 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
&port_param->speed, mb);
if (rval) {
DEBUG16(printk(KERN_ERR "scsi(%ld): iIDMA cmd failed for "
"%02x%02x%02x%02x%02x%02x%02x%02x -- "
"%04x %x %04x %04x.\n",
vha->host_no, fcport->port_name[0],
fcport->port_name[1],
fcport->port_name[2], fcport->port_name[3],
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7], rval,
fcport->fp_speed, mb[0], mb[1]));
ql_log(ql_log_warn, vha, 0x704c,
"iIDMA cmd failed for %02x%02x%02x%02x%02x%02x%02x%02x -- "
"%04x %x %04x %04x.\n", fcport->port_name[0],
fcport->port_name[1], fcport->port_name[2],
fcport->port_name[3], fcport->port_name[4],
fcport->port_name[5], fcport->port_name[6],
fcport->port_name[7], rval, fcport->fp_speed, mb[0], mb[1]);
rval = 0;
bsg_job->reply->result = (DID_ERROR << 16);
@ -1307,11 +1321,12 @@ qla24xx_iidma(struct fc_bsg_job *bsg_job)
}
static int
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, scsi_qla_host_t *vha,
uint8_t is_update)
{
uint32_t start = 0;
int valid = 0;
struct qla_hw_data *ha = vha->hw;
bsg_job->reply->reply_payload_rcv_len = 0;
@ -1319,14 +1334,20 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
return -EINVAL;
start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
if (start > ha->optrom_size)
if (start > ha->optrom_size) {
ql_log(ql_log_warn, vha, 0x7055,
"start %d > optrom_size %d.\n", start, ha->optrom_size);
return -EINVAL;
}
if (ha->optrom_state != QLA_SWAITING)
if (ha->optrom_state != QLA_SWAITING) {
ql_log(ql_log_info, vha, 0x7056,
"optrom_state %d.\n", ha->optrom_state);
return -EBUSY;
}
ha->optrom_region_start = start;
ql_dbg(ql_dbg_user, vha, 0x7057, "is_update=%d.\n", is_update);
if (is_update) {
if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
valid = 1;
@ -1337,9 +1358,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
IS_QLA8XXX_TYPE(ha))
valid = 1;
if (!valid) {
qla_printk(KERN_WARNING, ha,
"Invalid start region 0x%x/0x%x.\n",
start, bsg_job->request_payload.payload_len);
ql_log(ql_log_warn, vha, 0x7058,
"Invalid start region 0x%x/0x%x.\n", start,
bsg_job->request_payload.payload_len);
return -EINVAL;
}
@ -1358,9 +1379,9 @@ qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
ha->optrom_buffer = vmalloc(ha->optrom_region_size);
if (!ha->optrom_buffer) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7059,
"Read: Unable to allocate memory for optrom retrieval "
"(%x).\n", ha->optrom_region_size);
"(%x)\n", ha->optrom_region_size);
ha->optrom_state = QLA_SWAITING;
return -ENOMEM;
@ -1378,7 +1399,7 @@ qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
rval = qla2x00_optrom_setup(bsg_job, ha, 0);
rval = qla2x00_optrom_setup(bsg_job, vha, 0);
if (rval)
return rval;
@ -1406,7 +1427,7 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
struct qla_hw_data *ha = vha->hw;
int rval = 0;
rval = qla2x00_optrom_setup(bsg_job, ha, 1);
rval = qla2x00_optrom_setup(bsg_job, vha, 1);
if (rval)
return rval;
@ -1464,6 +1485,23 @@ int
qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
{
int ret = -EINVAL;
struct fc_rport *rport;
fc_port_t *fcport = NULL;
struct Scsi_Host *host;
scsi_qla_host_t *vha;
if (bsg_job->request->msgcode == FC_BSG_RPT_ELS) {
rport = bsg_job->rport;
fcport = *(fc_port_t **) rport->dd_data;
host = rport_to_shost(rport);
vha = shost_priv(host);
} else {
host = bsg_job->shost;
vha = shost_priv(host);
}
ql_dbg(ql_dbg_user, vha, 0x7000,
"Entered %s msgcode=%d.\n", __func__, bsg_job->request->msgcode);
switch (bsg_job->request->msgcode) {
case FC_BSG_RPT_ELS:
@ -1480,7 +1518,7 @@ qla24xx_bsg_request(struct fc_bsg_job *bsg_job)
case FC_BSG_HST_DEL_RPORT:
case FC_BSG_RPT_CT:
default:
DEBUG2(printk("qla2xxx: unsupported BSG request\n"));
ql_log(ql_log_warn, vha, 0x705a, "Unsupported BSG request.\n");
break;
}
return ret;
@ -1514,17 +1552,15 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
&& (sp_bsg->u.bsg_job == bsg_job)) {
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (ha->isp_ops->abort_command(sp)) {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld): mbx "
"abort_command failed\n",
vha->host_no));
ql_log(ql_log_warn, vha, 0x7089,
"mbx abort_command "
"failed.\n");
bsg_job->req->errors =
bsg_job->reply->result = -EIO;
} else {
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld): mbx "
"abort_command success\n",
vha->host_no));
ql_dbg(ql_dbg_user, vha, 0x708a,
"mbx abort_command "
"success.\n");
bsg_job->req->errors =
bsg_job->reply->result = 0;
}
@ -1535,8 +1571,7 @@ qla24xx_bsg_timeout(struct fc_bsg_job *bsg_job)
}
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
DEBUG2(qla_printk(KERN_INFO, ha,
"scsi(%ld) SRB not found to abort\n", vha->host_no));
ql_log(ql_log_info, vha, 0x708b, "SRB not found to abort.\n");
bsg_job->req->errors = bsg_job->reply->result = -ENXIO;
return 0;

View File

@ -11,18 +11,18 @@
* -----------------------------------------------------
* | Level | Last Value Used |
* -----------------------------------------------------
* | Module Init and Probe | 0x0109 |
* | Mailbox commands | 0x1120 |
* | Device Discovery | 0x207d |
* | Queue Command and IO tracing | 0x304f |
* | Module Init and Probe | 0x0116 |
* | Mailbox commands | 0x111e |
* | Device Discovery | 0x2083 |
* | Queue Command and IO tracing | 0x302e |
* | DPC Thread | 0x401c |
* | Async Events | 0x5058 |
* | Async Events | 0x5059 |
* | Timer Routines | 0x600d |
* | User Space Interactions | 0x70a1 |
* | Task Management | 0x8032 |
* | AER/EEH | 0x9010 |
* | User Space Interactions | 0x709c |
* | Task Management | 0x8043 |
* | AER/EEH | 0x900f |
* | Virtual Port | 0xa007 |
* | ISP82XX Specific | 0xb028 |
* | ISP82XX Specific | 0xb027 |
* | MultiQ | 0xc00b |
* | Misc | 0xd00b |
* -----------------------------------------------------
@ -409,11 +409,11 @@ qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
struct qla_hw_data *ha = vha->hw;
if (rval != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Failed to dump firmware (%x)!!!\n", rval);
ql_log(ql_log_warn, vha, 0xd000,
"Failed to dump firmware (%x).\n", rval);
ha->fw_dumped = 0;
} else {
qla_printk(KERN_INFO, ha,
ql_log(ql_log_info, vha, 0xd001,
"Firmware dump saved to temp buffer (%ld/%p).\n",
vha->host_no, ha->fw_dump);
ha->fw_dumped = 1;
@ -445,15 +445,16 @@ qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
"No buffer available for dump!!!\n");
ql_log(ql_log_warn, vha, 0xd002,
"No buffer available for dump.\n");
goto qla2300_fw_dump_failed;
}
if (ha->fw_dumped) {
qla_printk(KERN_WARNING, ha,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
ql_log(ql_log_warn, vha, 0xd003,
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
goto qla2300_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp23;
@ -608,15 +609,16 @@ qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
"No buffer available for dump!!!\n");
ql_log(ql_log_warn, vha, 0xd004,
"No buffer available for dump.\n");
goto qla2100_fw_dump_failed;
}
if (ha->fw_dumped) {
qla_printk(KERN_WARNING, ha,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
ql_log(ql_log_warn, vha, 0xd005,
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
goto qla2100_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp21;
@ -805,15 +807,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
"No buffer available for dump!!!\n");
ql_log(ql_log_warn, vha, 0xd006,
"No buffer available for dump.\n");
goto qla24xx_fw_dump_failed;
}
if (ha->fw_dumped) {
qla_printk(KERN_WARNING, ha,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
ql_log(ql_log_warn, vha, 0xd007,
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
goto qla24xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp24;
@ -1043,15 +1046,16 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
"No buffer available for dump!!!\n");
ql_log(ql_log_warn, vha, 0xd008,
"No buffer available for dump.\n");
goto qla25xx_fw_dump_failed;
}
if (ha->fw_dumped) {
qla_printk(KERN_WARNING, ha,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
ql_log(ql_log_warn, vha, 0xd009,
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
goto qla25xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp25;
@ -1354,15 +1358,16 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
spin_lock_irqsave(&ha->hardware_lock, flags);
if (!ha->fw_dump) {
qla_printk(KERN_WARNING, ha,
"No buffer available for dump!!!\n");
ql_log(ql_log_warn, vha, 0xd00a,
"No buffer available for dump.\n");
goto qla81xx_fw_dump_failed;
}
if (ha->fw_dumped) {
qla_printk(KERN_WARNING, ha,
"Firmware has been previously dumped (%p) -- ignoring "
"request...\n", ha->fw_dump);
ql_log(ql_log_warn, vha, 0xd00b,
"Firmware has been previously dumped (%p) "
"-- ignoring request.\n",
ha->fw_dump);
goto qla81xx_fw_dump_failed;
}
fw = &ha->fw_dump->isp.isp81;

View File

@ -64,7 +64,7 @@ qla2x00_dfs_fce_open(struct inode *inode, struct file *file)
/* Pause tracing to flush FCE buffers. */
rval = qla2x00_disable_fce_trace(vha, &ha->fce_wr, &ha->fce_rd);
if (rval)
qla_printk(KERN_WARNING, ha,
ql_dbg(ql_dbg_user, vha, 0x705c,
"DebugFS: Unable to disable FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
@ -92,7 +92,7 @@ qla2x00_dfs_fce_release(struct inode *inode, struct file *file)
rval = qla2x00_enable_fce_trace(vha, ha->fce_dma, ha->fce_bufs,
ha->fce_mb, &ha->fce_bufs);
if (rval) {
qla_printk(KERN_WARNING, ha,
ql_dbg(ql_dbg_user, vha, 0x700d,
"DebugFS: Unable to reinitialize FCE (%d).\n", rval);
ha->flags.fce_enabled = 0;
}
@ -125,8 +125,8 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
atomic_set(&qla2x00_dfs_root_count, 0);
qla2x00_dfs_root = debugfs_create_dir(QLA2XXX_DRIVER_NAME, NULL);
if (!qla2x00_dfs_root) {
qla_printk(KERN_NOTICE, ha,
"DebugFS: Unable to create root directory.\n");
ql_log(ql_log_warn, vha, 0x00f7,
"Unable to create debugfs root directory.\n");
goto out;
}
@ -137,8 +137,8 @@ create_dir:
mutex_init(&ha->fce_mutex);
ha->dfs_dir = debugfs_create_dir(vha->host_str, qla2x00_dfs_root);
if (!ha->dfs_dir) {
qla_printk(KERN_NOTICE, ha,
"DebugFS: Unable to create ha directory.\n");
ql_log(ql_log_warn, vha, 0x00f8,
"Unable to create debugfs ha directory.\n");
goto out;
}
@ -148,8 +148,8 @@ create_nodes:
ha->dfs_fce = debugfs_create_file("fce", S_IRUSR, ha->dfs_dir, vha,
&dfs_fce_ops);
if (!ha->dfs_fce) {
qla_printk(KERN_NOTICE, ha,
"DebugFS: Unable to fce node.\n");
ql_log(ql_log_warn, vha, 0x00f9,
"Unable to create debugfs fce node.\n");
goto out;
}
out:

View File

@ -185,7 +185,7 @@ extern int qla24xx_start_scsi(srb_t *sp);
int qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
uint16_t, uint16_t, uint8_t);
extern int qla2x00_start_sp(srb_t *);
extern uint16_t qla24xx_calc_iocbs(uint16_t);
extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
extern int qla24xx_dif_start_scsi(srb_t *);
@ -481,7 +481,8 @@ extern int qla2x00_loopback_test(scsi_qla_host_t *, struct msg_echo_lb *, uint16
extern int qla2x00_echo_test(scsi_qla_host_t *,
struct msg_echo_lb *, uint16_t *);
extern int qla24xx_update_all_fcp_prio(scsi_qla_host_t *);
extern int qla24xx_fcp_prio_cfg_valid(struct qla_fcp_prio_cfg *, uint8_t);
extern int qla24xx_fcp_prio_cfg_valid(scsi_qla_host_t *,
struct qla_fcp_prio_cfg *, uint8_t);
/*
* Global Function Prototypes in qla_dfs.c source file.

View File

@ -121,11 +121,10 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
rval = QLA_FUNCTION_FAILED;
if (ms_pkt->entry_status != 0) {
DEBUG2_3(printk(KERN_WARNING "scsi(%ld): %s failed, error status "
"(%x) on port_id: %02x%02x%02x.\n",
vha->host_no, routine, ms_pkt->entry_status,
vha->d_id.b.domain, vha->d_id.b.area,
vha->d_id.b.al_pa));
ql_dbg(ql_dbg_disc, vha, 0x2031,
"%s failed, error status (%x) on port_id: %02x%02x%02x.\n",
routine, ms_pkt->entry_status, vha->d_id.b.domain,
vha->d_id.b.area, vha->d_id.b.al_pa);
} else {
if (IS_FWI2_CAPABLE(ha))
comp_status = le16_to_cpu(
@ -138,24 +137,24 @@ qla2x00_chk_ms_status(scsi_qla_host_t *vha, ms_iocb_entry_t *ms_pkt,
case CS_DATA_OVERRUN: /* Overrun? */
if (ct_rsp->header.response !=
__constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
DEBUG2_3(printk("scsi(%ld): %s failed, "
"rejected request on port_id: %02x%02x%02x\n",
vha->host_no, routine,
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
"%s failed rejected request on port_id: "
"%02x%02x%02x.\n", routine,
vha->d_id.b.domain, vha->d_id.b.area,
vha->d_id.b.al_pa));
DEBUG2_3(qla2x00_dump_buffer(
(uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr)));
vha->d_id.b.al_pa);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
0x2078, (uint8_t *)&ct_rsp->header,
sizeof(struct ct_rsp_hdr));
rval = QLA_INVALID_COMMAND;
} else
rval = QLA_SUCCESS;
break;
default:
DEBUG2_3(printk("scsi(%ld): %s failed, completion "
"status (%x) on port_id: %02x%02x%02x.\n",
vha->host_no, routine, comp_status,
ql_dbg(ql_dbg_disc, vha, 0x2033,
"%s failed, completion status (%x) on port_id: "
"%02x%02x%02x.\n", routine, comp_status,
vha->d_id.b.domain, vha->d_id.b.area,
vha->d_id.b.al_pa));
vha->d_id.b.al_pa);
break;
}
}
@ -202,8 +201,8 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GA_NXT issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2062,
"GA_NXT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GA_NXT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@ -222,11 +221,10 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
ct_rsp->rsp.ga_nxt.port_type != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
ql_dbg(ql_dbg_disc, vha, 0x2063,
"GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
vha->host_no,
"port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@ -236,7 +234,7 @@ qla2x00_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
fcport->d_id.b.al_pa);
}
return (rval);
@ -287,8 +285,8 @@ qla2x00_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GID_PT issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2055,
"GID_PT issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "GID_PT") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@ -364,8 +362,8 @@ qla2x00_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPN_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2056,
"GPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@ -424,8 +422,8 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GNN_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2057,
"GNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GNN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@ -434,11 +432,10 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
memcpy(list[i].node_name,
ct_rsp->rsp.gnn_id.node_name, WWN_SIZE);
DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
ql_dbg(ql_dbg_disc, vha, 0x2058,
"GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02X%02x "
"pn %02x%02x%02x%02x%02x%02x%02X%02x "
"portid=%02x%02x%02x.\n",
vha->host_no,
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@ -448,7 +445,7 @@ qla2x00_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa));
list[i].d_id.b.al_pa);
}
/* Last device exit. */
@ -499,14 +496,14 @@ qla2x00_rft_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFT_ID issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2043,
"RFT_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFT_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2044,
"RFT_ID exiting normally.\n");
}
return (rval);
@ -528,8 +525,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
DEBUG2(printk("scsi(%ld): RFF_ID call unsupported on "
"ISP2100/ISP2200.\n", vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2046,
"RFF_ID call not supported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@ -556,14 +553,14 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFF_ID issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2047,
"RFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RFF_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFF_ID exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2048,
"RFF_ID exiting normally.\n");
}
return (rval);
@ -609,14 +606,14 @@ qla2x00_rnn_id(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RNN_ID issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x204d,
"RNN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RNN_ID") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x204e,
"RNN_ID exiting normally.\n");
}
return (rval);
@ -647,8 +644,8 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
struct ct_sns_rsp *ct_rsp;
if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
DEBUG2(printk("scsi(%ld): RSNN_ID call unsupported on "
"ISP2100/ISP2200.\n", vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2050,
"RSNN_ID call unsupported on ISP2100/ISP2200.\n");
return (QLA_SUCCESS);
}
@ -682,14 +679,14 @@ qla2x00_rsnn_nn(scsi_qla_host_t *vha)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RSNN_NN issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2051,
"RSNN_NN issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RSNN_NN") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RSNN_NN exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2052,
"RSNN_NN exiting normally.\n");
}
return (rval);
@ -757,13 +754,14 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GA_NXT Send SNS failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x205f,
"GA_NXT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gan_data[8] != 0x80 ||
sns_cmd->p.gan_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GA_NXT failed, rejected request, "
"ga_nxt_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gan_data, 16));
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207d,
"GA_NXT failed, rejected request ga_nxt_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2074,
sns_cmd->p.gan_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Populate fc_port_t entry. */
@ -778,11 +776,10 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
sns_cmd->p.gan_data[16] != NS_NL_PORT_TYPE)
fcport->d_id.b.domain = 0xf0;
DEBUG2_3(printk("scsi(%ld): GA_NXT entry - "
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
ql_dbg(ql_dbg_disc, vha, 0x2061,
"GA_NXT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
vha->host_no,
"port_id=%02x%02x%02x.\n",
fcport->node_name[0], fcport->node_name[1],
fcport->node_name[2], fcport->node_name[3],
fcport->node_name[4], fcport->node_name[5],
@ -792,7 +789,7 @@ qla2x00_sns_ga_nxt(scsi_qla_host_t *vha, fc_port_t *fcport)
fcport->port_name[4], fcport->port_name[5],
fcport->port_name[6], fcport->port_name[7],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
fcport->d_id.b.al_pa);
}
return (rval);
@ -831,13 +828,14 @@ qla2x00_sns_gid_pt(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GID_PT Send SNS failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x206d,
"GID_PT Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gid_data[8] != 0x80 ||
sns_cmd->p.gid_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GID_PT failed, rejected request, "
"gid_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gid_data, 16));
ql_dbg(ql_dbg_disc, vha, 0x202f,
"GID_PT failed, rejected request, gid_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2081,
sns_cmd->p.gid_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Set port IDs in switch info list. */
@ -900,13 +898,14 @@ qla2x00_sns_gpn_id(scsi_qla_host_t *vha, sw_info_t *list)
GPN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPN_ID Send SNS failed "
"(%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2032,
"GPN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gpn_data[8] != 0x80 ||
sns_cmd->p.gpn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GPN_ID failed, rejected "
"request, gpn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gpn_data, 16));
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207e,
"GPN_ID failed, rejected request, gpn_rsp:\n");
ql_dump_buffer(ql_dbg_disc, vha, 0x207f,
sns_cmd->p.gpn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save portname */
@ -955,24 +954,24 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
GNN_ID_SNS_CMD_SIZE / 2, sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GNN_ID Send SNS failed "
"(%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x203f,
"GNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.gnn_data[8] != 0x80 ||
sns_cmd->p.gnn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): GNN_ID failed, rejected "
"request, gnn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.gnn_data, 16));
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2082,
"GNN_ID failed, rejected request, gnn_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207a,
sns_cmd->p.gnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
/* Save nodename */
memcpy(list[i].node_name, &sns_cmd->p.gnn_data[16],
WWN_SIZE);
DEBUG2_3(printk("scsi(%ld): GID_PT entry - "
"nn %02x%02x%02x%02x%02x%02x%02x%02x "
ql_dbg(ql_dbg_disc, vha, 0x206e,
"GID_PT entry - nn %02x%02x%02x%02x%02x%02x%02x%02x "
"pn %02x%02x%02x%02x%02x%02x%02x%02x "
"portid=%02x%02x%02x.\n",
vha->host_no,
"port_id=%02x%02x%02x.\n",
list[i].node_name[0], list[i].node_name[1],
list[i].node_name[2], list[i].node_name[3],
list[i].node_name[4], list[i].node_name[5],
@ -982,7 +981,7 @@ qla2x00_sns_gnn_id(scsi_qla_host_t *vha, sw_info_t *list)
list[i].port_name[4], list[i].port_name[5],
list[i].port_name[6], list[i].port_name[7],
list[i].d_id.b.domain, list[i].d_id.b.area,
list[i].d_id.b.al_pa));
list[i].d_id.b.al_pa);
}
/* Last device exit. */
@ -1025,17 +1024,18 @@ qla2x00_sns_rft_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RFT_ID Send SNS failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2060,
"RFT_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rft_data[8] != 0x80 ||
sns_cmd->p.rft_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): RFT_ID failed, rejected request, "
"rft_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rft_data, 16));
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2083,
"RFT_ID failed, rejected request rft_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2080,
sns_cmd->p.rft_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RFT_ID exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2073,
"RFT_ID exiting normally.\n");
}
return (rval);
@ -1081,17 +1081,18 @@ qla2x00_sns_rnn_id(scsi_qla_host_t *vha)
sizeof(struct sns_cmd_pkt));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RNN_ID Send SNS failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x204a,
"RNN_ID Send SNS failed (%d).\n", rval);
} else if (sns_cmd->p.rnn_data[8] != 0x80 ||
sns_cmd->p.rnn_data[9] != 0x02) {
DEBUG2_3(printk("scsi(%ld): RNN_ID failed, rejected request, "
"rnn_rsp:\n", vha->host_no));
DEBUG2_3(qla2x00_dump_buffer(sns_cmd->p.rnn_data, 16));
ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x207b,
"RNN_ID failed, rejected request, rnn_rsp:\n");
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x207c,
sns_cmd->p.rnn_data, 16);
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RNN_ID exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x204c,
"RNN_ID exiting normally.\n");
}
return (rval);
@ -1116,10 +1117,10 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
mb, BIT_1|BIT_0);
if (mb[0] != MBS_COMMAND_COMPLETE) {
DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
"loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
__func__, vha->host_no, vha->mgmt_svr_loop_id, mb[0], mb[1],
mb[2], mb[6], mb[7]));
ql_dbg(ql_dbg_disc, vha, 0x2024,
"Failed management_server login: loopid=%x mb[0]=%x "
"mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x.\n",
vha->mgmt_svr_loop_id, mb[0], mb[1], mb[2], mb[6], mb[7]);
ret = QLA_FUNCTION_FAILED;
} else
vha->flags.management_server_logged_in = 1;
@ -1292,11 +1293,12 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
size += 4 + WWN_SIZE;
DEBUG13(printk("%s(%ld): NODENAME=%02x%02x%02x%02x%02x%02x%02x%02x.\n",
__func__, vha->host_no,
eiter->a.node_name[0], eiter->a.node_name[1], eiter->a.node_name[2],
eiter->a.node_name[3], eiter->a.node_name[4], eiter->a.node_name[5],
eiter->a.node_name[6], eiter->a.node_name[7]));
ql_dbg(ql_dbg_disc, vha, 0x2025,
"NodeName = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
eiter->a.node_name[0], eiter->a.node_name[1],
eiter->a.node_name[2], eiter->a.node_name[3],
eiter->a.node_name[4], eiter->a.node_name[5],
eiter->a.node_name[6], eiter->a.node_name[7]);
/* Manufacturer. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1307,8 +1309,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MANUFACTURER=%s.\n", __func__, vha->host_no,
eiter->a.manufacturer));
ql_dbg(ql_dbg_disc, vha, 0x2026,
"Manufacturer = %s.\n", eiter->a.manufacturer);
/* Serial number. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1320,8 +1322,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): SERIALNO=%s.\n", __func__, vha->host_no,
eiter->a.serial_num));
ql_dbg(ql_dbg_disc, vha, 0x2027,
"Serial no. = %s.\n", eiter->a.serial_num);
/* Model name. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1332,8 +1334,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_NAME=%s.\n", __func__, vha->host_no,
eiter->a.model));
ql_dbg(ql_dbg_disc, vha, 0x2028,
"Model Name = %s.\n", eiter->a.model);
/* Model description. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1345,8 +1347,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): MODEL_DESC=%s.\n", __func__, vha->host_no,
eiter->a.model_desc));
ql_dbg(ql_dbg_disc, vha, 0x2029,
"Model Desc = %s.\n", eiter->a.model_desc);
/* Hardware version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1357,8 +1359,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): HARDWAREVER=%s.\n", __func__, vha->host_no,
eiter->a.hw_version));
ql_dbg(ql_dbg_disc, vha, 0x202a,
"Hardware ver = %s.\n", eiter->a.hw_version);
/* Driver version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1369,8 +1371,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): DRIVERVER=%s.\n", __func__, vha->host_no,
eiter->a.driver_version));
ql_dbg(ql_dbg_disc, vha, 0x202b,
"Driver ver = %s.\n", eiter->a.driver_version);
/* Option ROM version. */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1381,8 +1383,8 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OPTROMVER=%s.\n", __func__, vha->host_no,
eiter->a.orom_version));
ql_dbg(ql_dbg_disc, vha, 0x202c,
"Optrom vers = %s.\n", eiter->a.orom_version);
/* Firmware version */
eiter = (struct ct_fdmi_hba_attr *) (entries + size);
@ -1393,44 +1395,46 @@ qla2x00_fdmi_rhba(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): FIRMWAREVER=%s.\n", __func__, vha->host_no,
eiter->a.fw_version));
ql_dbg(ql_dbg_disc, vha, 0x202d,
"Firmware vers = %s.\n", eiter->a.fw_version);
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
DEBUG13(printk("%s(%ld): RHBA identifier="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
vha->host_no, ct_req->req.rhba.hba_identifier[0],
ql_dbg(ql_dbg_disc, vha, 0x202e,
"RHBA identifier = "
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n",
ct_req->req.rhba.hba_identifier[0],
ct_req->req.rhba.hba_identifier[1],
ct_req->req.rhba.hba_identifier[2],
ct_req->req.rhba.hba_identifier[3],
ct_req->req.rhba.hba_identifier[4],
ct_req->req.rhba.hba_identifier[5],
ct_req->req.rhba.hba_identifier[6],
ct_req->req.rhba.hba_identifier[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
ct_req->req.rhba.hba_identifier[7], size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2076,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RHBA issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2030,
"RHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
ct_rsp->header.explanation_code ==
CT_EXPL_ALREADY_REGISTERED) {
DEBUG2_13(printk("%s(%ld): HBA already registered.\n",
__func__, vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2034,
"HBA already registered.\n");
rval = QLA_ALREADY_REGISTERED;
}
} else {
DEBUG2(printk("scsi(%ld): RHBA exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2035,
"RHBA exiting normally.\n");
}
return rval;
@ -1464,26 +1468,26 @@ qla2x00_fdmi_dhba(scsi_qla_host_t *vha)
/* Prepare FDMI command arguments -- portname. */
memcpy(ct_req->req.dhba.port_name, vha->port_name, WWN_SIZE);
DEBUG13(printk("%s(%ld): DHBA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x.\n", __func__, vha->host_no,
ql_dbg(ql_dbg_disc, vha, 0x2036,
"DHBA portname = %02x%02x%02x%02x%02x%02x%02x%02x.\n",
ct_req->req.dhba.port_name[0], ct_req->req.dhba.port_name[1],
ct_req->req.dhba.port_name[2], ct_req->req.dhba.port_name[3],
ct_req->req.dhba.port_name[4], ct_req->req.dhba.port_name[5],
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]));
ct_req->req.dhba.port_name[6], ct_req->req.dhba.port_name[7]);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): DHBA issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2037,
"DHBA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "DHBA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): DHBA exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2038,
"DHBA exiting normally.\n");
}
return rval;
@ -1534,9 +1538,10 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.fc4_types[2] = 0x01;
size += 4 + 32;
DEBUG13(printk("%s(%ld): FC4_TYPES=%02x %02x.\n", __func__,
vha->host_no, eiter->a.fc4_types[2],
eiter->a.fc4_types[1]));
ql_dbg(ql_dbg_disc, vha, 0x2039,
"FC4_TYPES=%02x %02x.\n",
eiter->a.fc4_types[2],
eiter->a.fc4_types[1]);
/* Supported speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@ -1561,8 +1566,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
FDMI_PORT_SPEED_1GB);
size += 4 + 4;
DEBUG13(printk("%s(%ld): SUPPORTED_SPEED=%x.\n", __func__, vha->host_no,
eiter->a.sup_speed));
ql_dbg(ql_dbg_disc, vha, 0x203a,
"Supported_Speed=%x.\n", eiter->a.sup_speed);
/* Current speed. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@ -1596,8 +1601,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
}
size += 4 + 4;
DEBUG13(printk("%s(%ld): CURRENT_SPEED=%x.\n", __func__, vha->host_no,
eiter->a.cur_speed));
ql_dbg(ql_dbg_disc, vha, 0x203b,
"Current_Speed=%x.\n", eiter->a.cur_speed);
/* Max frame size. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@ -1609,8 +1614,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
size += 4 + 4;
DEBUG13(printk("%s(%ld): MAX_FRAME_SIZE=%x.\n", __func__, vha->host_no,
eiter->a.max_frame_size));
ql_dbg(ql_dbg_disc, vha, 0x203c,
"Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
/* OS device name. */
eiter = (struct ct_fdmi_port_attr *) (entries + size);
@ -1621,8 +1626,8 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): OS_DEVICE_NAME=%s.\n", __func__, vha->host_no,
eiter->a.os_dev_name));
ql_dbg(ql_dbg_disc, vha, 0x204b,
"OS_Device_Name=%s.\n", eiter->a.os_dev_name);
/* Hostname. */
if (strlen(fc_host_system_hostname(vha->host))) {
@ -1637,35 +1642,36 @@ qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
eiter->len = cpu_to_be16(4 + alen);
size += 4 + alen;
DEBUG13(printk("%s(%ld): HOSTNAME=%s.\n", __func__,
vha->host_no, eiter->a.host_name));
ql_dbg(ql_dbg_disc, vha, 0x203d,
"HostName=%s.\n", eiter->a.host_name);
}
/* Update MS request size. */
qla2x00_update_ms_fdmi_iocb(vha, size + 16);
DEBUG13(printk("%s(%ld): RPA portname="
"%02x%02x%02x%02x%02x%02x%02x%02x size=%d.\n", __func__,
vha->host_no, ct_req->req.rpa.port_name[0],
ct_req->req.rpa.port_name[1], ct_req->req.rpa.port_name[2],
ct_req->req.rpa.port_name[3], ct_req->req.rpa.port_name[4],
ct_req->req.rpa.port_name[5], ct_req->req.rpa.port_name[6],
ct_req->req.rpa.port_name[7], size));
DEBUG13(qla2x00_dump_buffer(entries, size));
ql_dbg(ql_dbg_disc, vha, 0x203e,
"RPA portname= %02x%02x%02x%02x%02X%02x%02x%02x size=%d.\n",
ct_req->req.rpa.port_name[0], ct_req->req.rpa.port_name[1],
ct_req->req.rpa.port_name[2], ct_req->req.rpa.port_name[3],
ct_req->req.rpa.port_name[4], ct_req->req.rpa.port_name[5],
ct_req->req.rpa.port_name[6], ct_req->req.rpa.port_name[7],
size);
ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
entries, size);
/* Execute MS IOCB */
rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): RPA issue IOCB failed (%d).\n",
vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2040,
"RPA issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
} else {
DEBUG2(printk("scsi(%ld): RPA exiting normally.\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x2041,
"RPA exiting nornally.\n");
}
return rval;
@ -1749,8 +1755,8 @@ qla2x00_gfpn_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GFPN_ID issue IOCB "
"failed (%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2023,
"GFPN_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFPN_ID") != QLA_SUCCESS) {
rval = QLA_FUNCTION_FAILED;
@ -1860,8 +1866,8 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
/*EMPTY*/
DEBUG2_3(printk("scsi(%ld): GPSC issue IOCB "
"failed (%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x2059,
"GPSC issue IOCB failed (%d).\n", rval);
} else if ((rval = qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GPSC")) != QLA_SUCCESS) {
/* FM command unsupported? */
@ -1870,9 +1876,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
CT_REASON_INVALID_COMMAND_CODE ||
ct_rsp->header.reason_code ==
CT_REASON_COMMAND_UNSUPPORTED)) {
DEBUG2(printk("scsi(%ld): GPSC command "
"unsupported, disabling query...\n",
vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x205a,
"GPSC command unsupported, disabling "
"query.\n");
ha->flags.gpsc_supported = 0;
rval = QLA_FUNCTION_FAILED;
break;
@ -1898,9 +1904,10 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
break;
}
DEBUG2_3(printk("scsi(%ld): GPSC ext entry - "
"fpn %02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
"speed=%04x.\n", vha->host_no,
ql_dbg(ql_dbg_disc, vha, 0x205b,
"GPSC ext entry - fpn "
"%02x%02x%02x%02x%02x%02x%02x%02x speeds=%04x "
"speed=%04x.\n",
list[i].fabric_port_name[0],
list[i].fabric_port_name[1],
list[i].fabric_port_name[2],
@ -1910,7 +1917,7 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
list[i].fabric_port_name[6],
list[i].fabric_port_name[7],
be16_to_cpu(ct_rsp->rsp.gpsc.speeds),
be16_to_cpu(ct_rsp->rsp.gpsc.speed)));
be16_to_cpu(ct_rsp->rsp.gpsc.speed));
}
/* Last device exit. */
@ -1968,14 +1975,12 @@ qla2x00_gff_id(scsi_qla_host_t *vha, sw_info_t *list)
sizeof(ms_iocb_entry_t));
if (rval != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID issue IOCB failed "
"(%d).\n", vha->host_no, rval));
ql_dbg(ql_dbg_disc, vha, 0x205c,
"GFF_ID issue IOCB failed (%d).\n", rval);
} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp,
"GFF_ID") != QLA_SUCCESS) {
DEBUG2_3(printk(KERN_INFO
"scsi(%ld): GFF_ID IOCB status had a "
"failure status code\n", vha->host_no));
ql_dbg(ql_dbg_disc, vha, 0x205d,
"GFF_ID IOCB status had a failure status code.\n");
} else {
fcp_scsi_features =
ct_rsp->rsp.gff_id.fc4_features[GFF_FCP_SCSI_OFFSET];

File diff suppressed because it is too large.


@ -94,11 +94,11 @@ qla2x00_set_fcport_state(fc_port_t *fcport, int state)
/* Don't print state transitions during initial allocation of fcport */
if (old_state && old_state != state) {
DEBUG(qla_printk(KERN_WARNING, fcport->vha->hw,
"scsi(%ld): FCPort state transitioned from %s to %s - "
"portid=%02x%02x%02x.\n", fcport->vha->host_no,
ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
"FCPort state transitioned from %s to %s - "
"portid=%02x%02x%02x.\n",
port_state_str[old_state], port_state_str[state],
fcport->d_id.b.domain, fcport->d_id.b.area,
fcport->d_id.b.al_pa));
fcport->d_id.b.al_pa);
}
}


@ -150,7 +150,8 @@ qla24xx_configure_prot_mode(srb_t *sp, uint16_t *fw_prot_opts)
/* We only support T10 DIF right now */
if (guard != SHOST_DIX_GUARD_CRC) {
DEBUG2(printk(KERN_ERR "Unsupported guard: %d\n", guard));
ql_dbg(ql_dbg_io, sp->fcport->vha, 0x3007,
"Unsupported guard: %d for cmd=%p.\n", guard, sp->cmd);
return 0;
}
@ -343,9 +344,10 @@ qla2x00_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
!= QLA_SUCCESS)
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS) {
return (QLA_FUNCTION_FAILED);
}
vha->marker_needed = 0;
}
@ -490,8 +492,8 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
mrk24 = NULL;
mrk = (mrk_entry_t *)qla2x00_alloc_iocbs(vha, 0);
if (mrk == NULL) {
DEBUG2_3(printk("%s(%ld): failed to allocate Marker IOCB.\n",
__func__, base_vha->host_no));
ql_log(ql_log_warn, base_vha, 0x3026,
"Failed to allocate Marker IOCB.\n");
return (QLA_FUNCTION_FAILED);
}
@ -547,9 +549,10 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
DEBUG5(printk("%s(): IOCB data:\n", __func__));
DEBUG5(qla2x00_dump_buffer(
(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE));
ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x302d,
"IOCB data:\n");
ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302e,
(uint8_t *)req->ring_ptr, REQUEST_ENTRY_SIZE);
/* Adjust ring index. */
req->ring_index++;
@ -604,7 +607,7 @@ qla2x00_isp_cmd(struct scsi_qla_host *vha, struct req_que *req)
* Returns the number of IOCB entries needed to store @dsds.
*/
inline uint16_t
qla24xx_calc_iocbs(uint16_t dsds)
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
uint16_t iocbs;
@ -614,8 +617,6 @@ qla24xx_calc_iocbs(uint16_t dsds)
if ((dsds - 1) % 5)
iocbs++;
}
DEBUG3(printk(KERN_DEBUG "%s(): Required PKT(s) = %d\n",
__func__, iocbs));
return iocbs;
}
@ -712,6 +713,7 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
unsigned int protcnt)
{
struct sd_dif_tuple *spt;
scsi_qla_host_t *vha = shost_priv(cmd->device->host);
unsigned char op = scsi_get_prot_op(cmd);
switch (scsi_get_prot_type(cmd)) {
@ -768,9 +770,9 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
op == SCSI_PROT_WRITE_PASS)) {
spt = page_address(sg_page(scsi_prot_sglist(cmd))) +
scsi_prot_sglist(cmd)[0].offset;
DEBUG18(printk(KERN_DEBUG
"%s(): LBA from user %p, lba = 0x%x\n",
__func__, spt, (int)spt->ref_tag));
ql_dbg(ql_dbg_io, vha, 0x3008,
"LBA from user %p, lba = 0x%x for cmd=%p.\n",
spt, (int)spt->ref_tag, cmd);
pkt->ref_tag = swab32(spt->ref_tag);
pkt->app_tag_mask[0] = 0x0;
pkt->app_tag_mask[1] = 0x0;
@ -789,11 +791,11 @@ qla24xx_set_t10dif_tags(struct scsi_cmnd *cmd, struct fw_dif_context *pkt,
break;
}
DEBUG18(printk(KERN_DEBUG
"%s(): Setting protection Tags: (BIG) ref tag = 0x%x,"
" app tag = 0x%x, prot SG count %d , cmd lba 0x%x,"
" prot_type=%u\n", __func__, pkt->ref_tag, pkt->app_tag, protcnt,
(int)scsi_get_lba(cmd), scsi_get_prot_type(cmd)));
ql_dbg(ql_dbg_io, vha, 0x3009,
"Setting protection Tags: (BIG) ref tag = 0x%x, app tag = 0x%x, "
"prot SG count %d, cmd lba 0x%x, prot_type=%u cmd=%p.\n",
pkt->ref_tag, pkt->app_tag, protcnt, (int)scsi_get_lba(cmd),
scsi_get_prot_type(cmd), cmd);
}
@ -809,6 +811,7 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
uint32_t *cur_dsd = dsd;
int i;
uint16_t used_dsds = tot_dsds;
scsi_qla_host_t *vha = shost_priv(sp->cmd->device->host);
uint8_t *cp;
@ -853,9 +856,10 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
cur_dsd = (uint32_t *)next_dsd;
}
sle_dma = sg_dma_address(sg);
DEBUG18(printk("%s(): %p, sg entry %d - addr =0x%x 0x%x,"
" len =%d\n", __func__ , cur_dsd, i, LSD(sle_dma),
MSD(sle_dma), sg_dma_len(sg)));
ql_dbg(ql_dbg_io, vha, 0x300a,
"sg entry %d - addr=0x%x 0x%x, " "len=%d for cmd=%p.\n",
cur_dsd, i, LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg),
sp->cmd);
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
*cur_dsd++ = cpu_to_le32(sg_dma_len(sg));
@ -863,8 +867,8 @@ qla24xx_walk_and_build_sglist(struct qla_hw_data *ha, srb_t *sp, uint32_t *dsd,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
DEBUG18(printk("%s(): User Data buffer= %p:\n",
__func__ , cp));
ql_dbg(ql_dbg_io, vha, 0x300b,
"User data buffer=%p for cmd=%p.\n", cp, sp->cmd);
}
}
/* Null termination */
@ -888,7 +892,7 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
struct scsi_cmnd *cmd;
uint32_t *cur_dsd = dsd;
uint16_t used_dsds = tot_dsds;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
uint8_t *cp;
@ -935,10 +939,11 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
}
sle_dma = sg_dma_address(sg);
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
DEBUG18(printk(KERN_DEBUG
"%s(): %p, sg entry %d - addr =0x%x"
"0x%x, len =%d\n", __func__ , cur_dsd, i,
LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg)));
ql_dbg(ql_dbg_io, vha, 0x3027,
"%s(): %p, sg_entry %d - "
"addr=0x%x0x%x, len=%d.\n",
__func__, cur_dsd, i,
LSD(sle_dma), MSD(sle_dma), sg_dma_len(sg));
}
*cur_dsd++ = cpu_to_le32(LSD(sle_dma));
*cur_dsd++ = cpu_to_le32(MSD(sle_dma));
@ -946,8 +951,9 @@ qla24xx_walk_and_build_prot_sglist(struct qla_hw_data *ha, srb_t *sp,
if (scsi_get_prot_op(sp->cmd) == SCSI_PROT_WRITE_PASS) {
cp = page_address(sg_page(sg)) + sg->offset;
DEBUG18(printk("%s(): Protection Data buffer = %p:\n",
__func__ , cp));
ql_dbg(ql_dbg_io, vha, 0x3028,
"%s(): Protection Data buffer = %p.\n", __func__,
cp);
}
avail_dsds--;
}
@ -996,21 +1002,15 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
*((uint32_t *)(&cmd_pkt->entry_type)) =
__constant_cpu_to_le32(COMMAND_TYPE_CRC_2);
/* No data transfer */
data_bytes = scsi_bufflen(cmd);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
__func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
vha = sp->fcport->vha;
ha = vha->hw;
DEBUG18(printk(KERN_DEBUG
"%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
/* No data transfer */
data_bytes = scsi_bufflen(cmd);
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
cmd_pkt->vp_index = sp->fcport->vp_idx;
@ -1056,8 +1056,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
/* Determine SCSI command length -- align to 4 byte boundary */
if (cmd->cmd_len > 16) {
DEBUG18(printk(KERN_INFO "%s(): **** SCSI CMD > 16\n",
__func__));
additional_fcpcdb_len = cmd->cmd_len - 16;
if ((cmd->cmd_len % 4) != 0) {
/* SCSI cmd > 16 bytes must be multiple of 4 */
@ -1108,11 +1106,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */
DEBUG18(printk(KERN_INFO "%s(%ld): Total SG(s) Entries %d, Data"
"entries %d, data bytes %d, Protection entries %d\n",
__func__, vha->host_no, tot_dsds, (tot_dsds-tot_prot_dsds),
data_bytes, tot_prot_dsds));
/* Compute dif len and adjust data len to incude protection */
total_bytes = data_bytes;
dif_bytes = 0;
@ -1150,14 +1143,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
additional_fcpcdb_len);
*fcp_dl = htonl(total_bytes);
DEBUG18(printk(KERN_INFO "%s(%ld): dif bytes = 0x%x (%d), total bytes"
" = 0x%x (%d), dat block size =0x%x (%d)\n", __func__,
vha->host_no, dif_bytes, dif_bytes, total_bytes, total_bytes,
crc_ctx_pkt->blk_size, crc_ctx_pkt->blk_size));
if (!data_bytes || cmd->sc_data_direction == DMA_NONE) {
DEBUG18(printk(KERN_INFO "%s: Zero data bytes or DMA-NONE %d\n",
__func__, data_bytes));
cmd_pkt->byte_count = __constant_cpu_to_le32(0);
return QLA_SUCCESS;
}
@ -1182,8 +1168,6 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
return QLA_SUCCESS;
crc_queuing_error:
DEBUG18(qla_printk(KERN_INFO, ha,
"CMD sent FAILED crc_q error:sp = %p\n", sp));
/* Cleanup will be performed by the caller */
return QLA_FUNCTION_FAILED;
@ -1225,8 +1209,8 @@ qla24xx_start_scsi(srb_t *sp)
/* Send marker if required */
if (vha->marker_needed != 0) {
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL)
!= QLA_SUCCESS)
if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) !=
QLA_SUCCESS)
return QLA_FUNCTION_FAILED;
vha->marker_needed = 0;
}
@ -1243,8 +1227,9 @@ qla24xx_start_scsi(srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
if (index == MAX_OUTSTANDING_COMMANDS) {
goto queuing_error;
}
/* Map the sg table so we have an accurate count of sg entries needed */
if (scsi_sg_count(cmd)) {
@ -1256,8 +1241,7 @@ qla24xx_start_scsi(srb_t *sp)
nseg = 0;
tot_dsds = nseg;
req_cnt = qla24xx_calc_iocbs(tot_dsds);
req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
if (req->cnt < (req_cnt + 2)) {
cnt = RD_REG_DWORD_RELAXED(req->req_q_out);
@ -1322,7 +1306,6 @@ qla24xx_start_scsi(srb_t *sp)
/* Specify response queue number where completion should happen */
cmd_pkt->entry_status = (uint8_t) rsp->id;
wmb();
/* Adjust ring index. */
req->ring_index++;
if (req->ring_index == req->length) {
@ -1534,9 +1517,6 @@ queuing_error:
/* Cleanup will be performed by the caller (queuecommand) */
spin_unlock_irqrestore(&ha->hardware_lock, flags);
DEBUG18(qla_printk(KERN_INFO, ha,
"CMD sent FAILED SCSI prot_op:%02x\n", scsi_get_prot_op(cmd)));
return QLA_FUNCTION_FAILED;
}
@ -1581,8 +1561,11 @@ qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
if (!req->outstanding_cmds[handle])
break;
}
if (index == MAX_OUTSTANDING_COMMANDS)
if (index == MAX_OUTSTANDING_COMMANDS) {
ql_log(ql_log_warn, vha, 0x700b,
"No room on oustanding cmd array.\n");
goto queuing_error;
}
/* Prep command array. */
req->current_outstanding_cmd = handle;
@ -1999,8 +1982,11 @@ qla2x00_start_sp(srb_t *sp)
rval = QLA_FUNCTION_FAILED;
spin_lock_irqsave(&ha->hardware_lock, flags);
pkt = qla2x00_alloc_iocbs(sp->fcport->vha, sp);
if (!pkt)
if (!pkt) {
ql_log(ql_log_warn, sp->fcport->vha, 0x700c,
"qla2x00_alloc_iocbs failed.\n");
goto done;
}
rval = QLA_SUCCESS;
switch (ctx->type) {

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -36,8 +36,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
mutex_lock(&ha->vport_lock);
vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1);
if (vp_id > ha->max_npiv_vports) {
DEBUG15(printk ("vp_id %d is bigger than max-supported %d.\n",
vp_id, ha->max_npiv_vports));
ql_dbg(ql_dbg_vport, vha, 0xa000,
"vp_id %d is bigger than max-supported %d.\n",
vp_id, ha->max_npiv_vports);
mutex_unlock(&ha->vport_lock);
return vp_id;
}
@ -131,9 +132,9 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
fc_port_t *fcport;
list_for_each_entry(fcport, &vha->vp_fcports, list) {
DEBUG15(printk("scsi(%ld): Marking port dead, "
"loop_id=0x%04x :%x\n",
vha->host_no, fcport->loop_id, fcport->vp_idx));
ql_dbg(ql_dbg_vport, vha, 0xa001,
"Marking port dead, loop_id=0x%04x : %x.\n",
fcport->loop_id, fcport->vp_idx);
qla2x00_mark_device_lost(vha, fcport, 0, 0);
qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@ -187,13 +188,13 @@ qla24xx_enable_vp(scsi_qla_host_t *vha)
goto enable_failed;
}
DEBUG15(qla_printk(KERN_INFO, ha,
"Virtual port with id: %d - Enabled\n", vha->vp_idx));
ql_dbg(ql_dbg_taskm, vha, 0x801a,
"Virtual port with id: %d - Enabled.\n", vha->vp_idx);
return 0;
enable_failed:
DEBUG15(qla_printk(KERN_INFO, ha,
"Virtual port with id: %d - Disabled\n", vha->vp_idx));
ql_dbg(ql_dbg_taskm, vha, 0x801b,
"Virtual port with id: %d - Disabled.\n", vha->vp_idx);
return 1;
}
@ -205,12 +206,12 @@ qla24xx_configure_vp(scsi_qla_host_t *vha)
fc_vport = vha->fc_vport;
DEBUG15(printk("scsi(%ld): %s: change request #3 for this host.\n",
vha->host_no, __func__));
ql_dbg(ql_dbg_vport, vha, 0xa002,
"%s: change request #3.\n", __func__);
ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx);
if (ret != QLA_SUCCESS) {
DEBUG15(qla_printk(KERN_ERR, vha->hw, "Failed to enable "
"receiving of RSCN requests: 0x%x\n", ret));
ql_dbg(ql_dbg_vport, vha, 0xa003, "Failed to enable "
"receiving of RSCN requests: 0x%x.\n", ret);
return;
} else {
/* Corresponds to SCR enabled */
@ -248,9 +249,9 @@ qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb)
case MBA_CHG_IN_CONNECTION:
case MBA_PORT_UPDATE:
case MBA_RSCN_UPDATE:
DEBUG15(printk("scsi(%ld)%s: Async_event for"
" VP[%d], mb = 0x%x, vha=%p\n",
vha->host_no, __func__, i, *mb, vha));
ql_dbg(ql_dbg_async, vha, 0x5024,
"Async_event for VP[%d], mb=0x%x vha=%p.\n",
i, *mb, vha);
qla2x00_async_event(vha, rsp, mb);
break;
}
@ -286,37 +287,49 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
DEBUG15(printk("scsi(%ld): Scheduling enable of Vport %d...\n",
vha->host_no, vha->vp_idx));
ql_dbg(ql_dbg_taskm, vha, 0x801d,
"Scheduling enable of Vport %d.\n", vha->vp_idx);
return qla24xx_enable_vp(vha);
}
static int
qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
{
ql_dbg(ql_dbg_dpc, vha, 0x4012,
"Entering %s.\n", __func__);
ql_dbg(ql_dbg_dpc, vha, 0x4013,
"vp_flags: 0x%lx.\n", vha->vp_flags);
qla2x00_do_work(vha);
if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
/* VP acquired. complete port configuration */
ql_dbg(ql_dbg_dpc, vha, 0x4014,
"Configure VP scheduled.\n");
qla24xx_configure_vp(vha);
ql_dbg(ql_dbg_dpc, vha, 0x4015,
"Configure VP end.\n");
return 0;
}
if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) {
ql_dbg(ql_dbg_dpc, vha, 0x4016,
"FCPort update scheduled.\n");
qla2x00_update_fcports(vha);
clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x4017,
"FCPort update end.\n");
}
if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) &&
!test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) &&
atomic_read(&vha->loop_state) != LOOP_DOWN) {
DEBUG(printk("scsi(%ld): qla2x00_port_login()\n",
vha->host_no));
ql_dbg(ql_dbg_dpc, vha, 0x4018,
"Relogin needed scheduled.\n");
qla2x00_relogin(vha);
DEBUG(printk("scsi(%ld): qla2x00_port_login - end\n",
vha->host_no));
ql_dbg(ql_dbg_dpc, vha, 0x4019,
"Relogin needed end.\n");
}
if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) &&
@ -326,11 +339,17 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) {
ql_dbg(ql_dbg_dpc, vha, 0x401a,
"Loop resync scheduled.\n");
qla2x00_loop_resync(vha);
clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags);
ql_dbg(ql_dbg_dpc, vha, 0x401b,
"Loop resync end.\n");
}
}
ql_dbg(ql_dbg_dpc, vha, 0x401c,
"Exiting %s.\n", __func__);
return 0;
}
@ -396,9 +415,10 @@ qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport)
/* Check up max-npiv-supports */
if (ha->num_vhosts > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): num_vhosts %ud is bigger than "
"max_npv_vports %ud.\n", base_vha->host_no,
ha->num_vhosts, ha->max_npiv_vports));
ql_dbg(ql_dbg_vport, vha, 0xa004,
"num_vhosts %ud is bigger "
"than max_npiv_vports %ud.\n",
ha->num_vhosts, ha->max_npiv_vports);
return VPCERR_UNSUPPORTED;
}
return 0;
@ -415,7 +435,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha = qla2x00_create_host(sht, ha);
if (!vha) {
DEBUG(printk("qla2xxx: scsi_host_alloc() failed for vport\n"));
ql_log(ql_log_warn, vha, 0xa005,
"scsi_host_alloc() failed for vport.\n");
return(NULL);
}
@ -429,8 +450,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
vha->device_flags = 0;
vha->vp_idx = qla24xx_allocate_vp_id(vha);
if (vha->vp_idx > ha->max_npiv_vports) {
DEBUG15(printk("scsi(%ld): Couldn't allocate vp_id.\n",
vha->host_no));
ql_dbg(ql_dbg_vport, vha, 0xa006,
"Couldn't allocate vp_id.\n");
goto create_vhost_failed;
}
vha->mgmt_svr_loop_id = 10 + vha->vp_idx;
@ -461,8 +482,9 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
host->max_id = MAX_TARGETS_2200;
host->transportt = qla2xxx_transport_vport_template;
DEBUG15(printk("DEBUG: detect vport hba %ld at address = %p\n",
vha->host_no, vha));
ql_dbg(ql_dbg_vport, vha, 0xa007,
"Detect vport hba %ld at address = %p.\n",
vha->host_no, vha);
vha->flags.init_done = 1;
@ -567,9 +589,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (req) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Couldn't delete req que %d\n",
req->id);
ql_log(ql_log_warn, vha, 0x00ea,
"Couldn't delete req que %d.\n",
req->id);
return ret;
}
}
@ -581,9 +603,9 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
if (rsp) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Couldn't delete rsp que %d\n",
rsp->id);
ql_log(ql_log_warn, vha, 0x00eb,
"Couldn't delete rsp que %d.\n",
rsp->id);
return ret;
}
}
@ -604,8 +626,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
if (req == NULL) {
qla_printk(KERN_WARNING, ha, "could not allocate memory"
"for request que\n");
ql_log(ql_log_fatal, base_vha, 0x00d9,
"Failed to allocate memory for request queue.\n");
goto failed;
}
@ -614,8 +636,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
(req->length + 1) * sizeof(request_t),
&req->dma, GFP_KERNEL);
if (req->ring == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - request_ring\n");
ql_log(ql_log_fatal, base_vha, 0x00da,
"Failed to allocte memory for request_ring.\n");
goto que_failed;
}
@ -623,8 +645,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
if (que_id >= ha->max_req_queues) {
mutex_unlock(&ha->vport_lock);
qla_printk(KERN_INFO, ha, "No resources to create "
"additional request queue\n");
ql_log(ql_log_warn, base_vha, 0x00db,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->req_qid_map);
@ -633,6 +655,12 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
req->vp_idx = vp_idx;
req->qos = qos;
ql_dbg(ql_dbg_multiq, base_vha, 0xc002,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
ql_dbg(ql_dbg_init, base_vha, 0x00dc,
"queue_id=%d rid=%d vp_idx=%d qos=%d.\n",
que_id, req->rid, req->vp_idx, req->qos);
if (rsp_que < 0)
req->rsp = NULL;
else
@ -645,6 +673,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
options |= BIT_5;
req->options = options;
ql_dbg(ql_dbg_multiq, base_vha, 0xc003,
"options=0x%x.\n", req->options);
ql_dbg(ql_dbg_init, base_vha, 0x00dd,
"options=0x%x.\n", req->options);
for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
req->outstanding_cmds[cnt] = NULL;
req->current_outstanding_cmd = 1;
@ -656,10 +688,21 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
reg = ISP_QUE_REG(ha, que_id);
req->max_q_depth = ha->req_q_map[0]->max_q_depth;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index,
req->cnt, req->id, req->max_q_depth);
ql_dbg(ql_dbg_init, base_vha, 0x00de,
"ring_ptr=%p ring_index=%d, "
"cnt=%d id=%d max_q_depth=%d.\n",
req->ring_ptr, req->ring_index, req->cnt,
req->id, req->max_q_depth);
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
ql_log(ql_log_fatal, base_vha, 0x00df,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->req_qid_map);
mutex_unlock(&ha->vport_lock);
@ -700,8 +743,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL);
if (rsp == NULL) {
qla_printk(KERN_WARNING, ha, "could not allocate memory for"
" response que\n");
ql_log(ql_log_warn, base_vha, 0x0066,
"Failed to allocate memory for response queue.\n");
goto failed;
}
@ -710,8 +753,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
(rsp->length + 1) * sizeof(response_t),
&rsp->dma, GFP_KERNEL);
if (rsp->ring == NULL) {
qla_printk(KERN_WARNING, ha,
"Memory Allocation failed - response_ring\n");
ql_log(ql_log_warn, base_vha, 0x00e1,
"Failed to allocate memory for response ring.\n");
goto que_failed;
}
@ -719,8 +762,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
if (que_id >= ha->max_rsp_queues) {
mutex_unlock(&ha->vport_lock);
qla_printk(KERN_INFO, ha, "No resources to create "
"additional response queue\n");
ql_log(ql_log_warn, base_vha, 0x00e2,
"No resources to create additional request queue.\n");
goto que_failed;
}
set_bit(que_id, ha->rsp_qid_map);
@ -728,12 +771,16 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
if (ha->flags.msix_enabled)
rsp->msix = &ha->msix_entries[que_id + 1];
else
qla_printk(KERN_WARNING, ha, "msix not enabled\n");
ql_log(ql_log_warn, base_vha, 0x00e3,
"MSIX not enalbled.\n");
ha->rsp_q_map[que_id] = rsp;
rsp->rid = rid;
rsp->vp_idx = vp_idx;
rsp->hw = ha;
ql_dbg(ql_dbg_init, base_vha, 0x00e4,
"queue_id=%d rid=%d vp_idx=%d hw=%p.\n",
que_id, rsp->rid, rsp->vp_idx, rsp->hw);
/* Use alternate PCI bus number */
if (MSB(rsp->rid))
options |= BIT_4;
@ -750,6 +797,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
mutex_unlock(&ha->vport_lock);
ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ql_dbg(ql_dbg_init, base_vha, 0x00e5,
"options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
rsp->options, rsp->id, rsp->rsp_q_in,
rsp->rsp_q_out);
ret = qla25xx_request_irq(rsp);
if (ret)
@ -757,7 +812,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha, "%s failed\n", __func__);
ql_log(ql_log_fatal, base_vha, 0x00e7,
"%s failed.\n", __func__);
mutex_lock(&ha->vport_lock);
clear_bit(que_id, ha->rsp_qid_map);
mutex_unlock(&ha->vport_lock);

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@ -189,6 +189,7 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
uint16_t word;
uint32_t nv_cmd, wait_cnt;
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
qla2x00_nv_write(ha, NVR_DATA_OUT);
qla2x00_nv_write(ha, 0);
@ -220,8 +221,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
DEBUG9_10(qla_printk(KERN_WARNING, ha,
"NVRAM didn't go ready...\n"));
ql_dbg(ql_dbg_user, vha, 0x708d,
"NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@ -308,6 +309,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
uint16_t wprot, wprot_old;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
/* Clear NVRAM write protection. */
ret = QLA_FUNCTION_FAILED;
@ -350,8 +352,8 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
DEBUG9_10(qla_printk(KERN_WARNING, ha,
"NVRAM didn't go ready...\n"));
ql_dbg(ql_dbg_user, vha, 0x708e,
"NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@ -371,6 +373,7 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
{
struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
uint32_t word, wait_cnt;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
if (stat != QLA_SUCCESS)
return;
@ -409,8 +412,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
wait_cnt = NVR_WAIT_CNT;
do {
if (!--wait_cnt) {
DEBUG9_10(qla_printk(KERN_WARNING, ha,
"NVRAM didn't go ready...\n"));
ql_dbg(ql_dbg_user, vha, 0x708f,
"NVRAM didn't go ready...\n");
break;
}
NVRAM_DELAY();
@ -607,9 +610,10 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
qla_printk(KERN_ERR, ha,
ql_log(ql_log_fatal, vha, 0x0045,
"Inconsistent FLTL detected: checksum=0x%x.\n", chksum);
qla2x00_dump_buffer(buf, sizeof(struct qla_flt_location));
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010e,
buf, sizeof(struct qla_flt_location));
return QLA_FUNCTION_FAILED;
}
@ -618,7 +622,9 @@ qla2xxx_find_flt_start(scsi_qla_host_t *vha, uint32_t *start)
*start = (le16_to_cpu(fltl->start_hi) << 16 |
le16_to_cpu(fltl->start_lo)) >> 2;
end:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLTL[%s] = 0x%x.\n", loc, *start));
ql_dbg(ql_dbg_init, vha, 0x0046,
"FLTL[%s] = 0x%x.\n",
loc, *start);
return QLA_SUCCESS;
}
@ -685,10 +691,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
if (*wptr == __constant_cpu_to_le16(0xffff))
goto no_flash_data;
if (flt->version != __constant_cpu_to_le16(1)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
ql_log(ql_log_warn, vha, 0x0047,
"Unsupported FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
le16_to_cpu(flt->checksum)));
le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@ -696,10 +702,10 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for (chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FLT detected: "
"version=0x%x length=0x%x checksum=0x%x.\n",
ql_log(ql_log_fatal, vha, 0x0048,
"Inconsistent FLT detected: version=0x%x length=0x%x checksum=0x%x.\n",
le16_to_cpu(flt->version), le16_to_cpu(flt->length),
chksum));
le16_to_cpu(flt->checksum));
goto no_flash_data;
}
@ -708,10 +714,11 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
for ( ; cnt; cnt--, region++) {
/* Store addresses as DWORD offsets. */
start = le32_to_cpu(region->start) >> 2;
DEBUG3(qla_printk(KERN_DEBUG, ha, "FLT[%02x]: start=0x%x "
"end=0x%x size=0x%x.\n", le32_to_cpu(region->code), start,
le32_to_cpu(region->end) >> 2, le32_to_cpu(region->size)));
ql_dbg(ql_dbg_init, vha, 0x0049,
"FLT[%02x]: start=0x%x "
"end=0x%x size=0x%x.\n", le32_to_cpu(region->code),
start, le32_to_cpu(region->end) >> 2,
le32_to_cpu(region->size));
switch (le32_to_cpu(region->code) & 0xff) {
case FLT_REG_FW:
@ -796,12 +803,16 @@ no_flash_data:
ha->flt_region_npiv_conf = ha->flags.port0 ?
def_npiv_conf0[def] : def_npiv_conf1[def];
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
"vpd_nvram=0x%x vpd=0x%x nvram=0x%x fdt=0x%x flt=0x%x "
"npiv=0x%x. fcp_prio_cfg=0x%x\n", loc, ha->flt_region_boot,
ha->flt_region_fw, ha->flt_region_vpd_nvram, ha->flt_region_vpd,
ha->flt_region_nvram, ha->flt_region_fdt, ha->flt_region_flt,
ha->flt_region_npiv_conf, ha->flt_region_fcp_prio));
ql_dbg(ql_dbg_init, vha, 0x004a,
"FLT[%s]: boot=0x%x fw=0x%x vpd_nvram=0x%x vpd=0x%x.\n",
loc, ha->flt_region_boot,
ha->flt_region_fw, ha->flt_region_vpd_nvram,
ha->flt_region_vpd);
ql_dbg(ql_dbg_init, vha, 0x004b,
"nvram=0x%x fdt=0x%x flt=0x%x npiv=0x%x fcp_prif_cfg=0x%x.\n",
ha->flt_region_nvram,
ha->flt_region_fdt, ha->flt_region_flt,
ha->flt_region_npiv_conf, ha->flt_region_fcp_prio);
}
static void
@ -833,10 +844,12 @@ qla2xxx_get_fdt_info(scsi_qla_host_t *vha)
cnt++)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent FDT detected: "
"checksum=0x%x id=%c version=0x%x.\n", chksum, fdt->sig[0],
le16_to_cpu(fdt->version)));
DEBUG9(qla2x00_dump_buffer((uint8_t *)fdt, sizeof(*fdt)));
ql_dbg(ql_dbg_init, vha, 0x004c,
"Inconsistent FDT detected:"
" checksum=0x%x id=%c version0x%x.\n", chksum,
fdt->sig[0], le16_to_cpu(fdt->version));
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x0113,
(uint8_t *)fdt, sizeof(*fdt));
goto no_flash_data;
}
@ -890,11 +903,12 @@ no_flash_data:
break;
}
done:
DEBUG2(qla_printk(KERN_DEBUG, ha, "FDT[%s]: (0x%x/0x%x) erase=0x%x "
"pro=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ql_dbg(ql_dbg_init, vha, 0x004d,
"FDT[%x]: (0x%x/0x%x) erase=0x%x "
"pr=%x upro=%x wrtd=0x%x blk=0x%x.\n", loc, mid, fid,
ha->fdt_erase_cmd, ha->fdt_protect_sec_cmd,
ha->fdt_unprotect_sec_cmd, ha->fdt_wrt_disable,
ha->fdt_block_size));
ha->fdt_wrt_disable, ha->fdt_block_size);
}
static void
@ -919,6 +933,10 @@ qla2xxx_get_idc_param(scsi_qla_host_t *vha)
ha->nx_dev_init_timeout = le32_to_cpu(*wptr++);
ha->nx_reset_timeout = le32_to_cpu(*wptr);
}
ql_dbg(ql_dbg_init, vha, 0x004e,
"nx_dev_init_timeout=%d "
"nx_reset_timeout=%d.\n", ha->nx_dev_init_timeout,
ha->nx_reset_timeout);
return;
}
@ -963,17 +981,18 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
if (hdr.version == __constant_cpu_to_le16(0xffff))
return;
if (hdr.version != __constant_cpu_to_le16(1)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unsupported NPIV-Config "
ql_dbg(ql_dbg_user, vha, 0x7090,
"Unsupported NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
le16_to_cpu(hdr.checksum)));
le16_to_cpu(hdr.checksum));
return;
}
data = kmalloc(NPIV_CONFIG_SIZE, GFP_KERNEL);
if (!data) {
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV-Config: Unable to "
"allocate memory.\n"));
ql_log(ql_log_warn, vha, 0x7091,
"Unable to allocate memory for data.\n");
return;
}
@ -985,10 +1004,11 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
for (wptr = data, chksum = 0; cnt; cnt--)
chksum += le16_to_cpu(*wptr++);
if (chksum) {
DEBUG2(qla_printk(KERN_INFO, ha, "Inconsistent NPIV-Config "
ql_dbg(ql_dbg_user, vha, 0x7092,
"Inconsistent NPIV-Config "
"detected: version=0x%x entries=0x%x checksum=0x%x.\n",
le16_to_cpu(hdr.version), le16_to_cpu(hdr.entries),
chksum));
le16_to_cpu(hdr.checksum));
goto done;
}
@ -1014,21 +1034,22 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
vid.port_name = wwn_to_u64(entry->port_name);
vid.node_name = wwn_to_u64(entry->node_name);
DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name,
le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos));
ql_dbg(ql_dbg_user, vha, 0x7093,
"NPIV[%02x]: wwpn=%llx "
"wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name,
le16_to_cpu(entry->vf_id),
entry->q_qos, entry->f_qos);
if (i < QLA_PRECONFIG_VPORTS) {
vport = fc_vport_create(vha->host, 0, &vid);
if (!vport)
qla_printk(KERN_INFO, ha,
"NPIV-Config: Failed to create vport [%02x]: "
"wwpn=%llx wwnn=%llx.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name);
ql_log(ql_log_warn, vha, 0x7094,
"NPIV-Config Failed to create vport [%02x]: "
"wwpn=%llx wwnn=%llx.\n", cnt,
(unsigned long long)vid.port_name,
(unsigned long long)vid.node_name);
}
}
done:
@ -1127,9 +1148,10 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
qla_printk(KERN_DEBUG, ha,
"Unable to allocate memory for optrom burst write "
"(%x KB).\n", OPTROM_BURST_SIZE / 1024);
ql_log(ql_log_warn, vha, 0x7095,
"Unable to allocate "
"memory for optrom burst write (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
}
}
@ -1138,7 +1160,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_unprotect_flash(vha);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7096,
"Unable to unprotect flash for update.\n");
goto done;
}
@ -1156,9 +1178,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
0xff0000) | ((fdata >> 16) & 0xff));
ret = qla24xx_erase_sector(vha, fdata);
if (ret != QLA_SUCCESS) {
DEBUG9(qla_printk(KERN_WARNING, ha,
"Unable to erase sector: address=%x.\n",
faddr));
ql_dbg(ql_dbg_user, vha, 0x7007,
"Unable to erase erase sector: address=%x.\n",
faddr);
break;
}
}
@ -1172,12 +1194,12 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
flash_data_addr(ha, faddr),
OPTROM_BURST_DWORDS);
if (ret != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7097,
"Unable to burst-write optrom segment "
"(%x/%x/%llx).\n", ret,
flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7098,
"Reverting to slow-write.\n");
dma_free_coherent(&ha->pdev->dev,
@ -1194,9 +1216,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_write_flash_dword(ha,
flash_data_addr(ha, faddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
DEBUG9(printk("%s(%ld) Unable to program flash "
"address=%x data=%x.\n", __func__,
vha->host_no, faddr, *dwptr));
ql_dbg(ql_dbg_user, vha, 0x7006,
"Unable to program flash address=%x data=%x.\n",
faddr, *dwptr);
break;
}
@ -1211,7 +1233,7 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
ret = qla24xx_protect_flash(vha);
if (ret != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7099,
"Unable to protect flash after update.\n");
done:
if (optrom)
@ -1324,9 +1346,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
ret = qla24xx_write_flash_dword(ha,
nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
if (ret != QLA_SUCCESS) {
DEBUG9(qla_printk(KERN_WARNING, ha,
ql_dbg(ql_dbg_user, vha, 0x709a,
"Unable to program nvram address=%x data=%x.\n",
naddr, *dwptr));
naddr, *dwptr);
break;
}
}
@ -1476,7 +1498,7 @@ qla2x00_beacon_on(struct scsi_qla_host *vha)
ha->fw_options[1] |= FO1_DISABLE_GPIO6_7;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x709b,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@ -1541,7 +1563,7 @@ qla2x00_beacon_off(struct scsi_qla_host *vha)
rval = qla2x00_set_fw_options(vha, ha->fw_options);
if (rval != QLA_SUCCESS)
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x709c,
"Unable to update fw options (beacon off).\n");
return rval;
}
@ -1616,7 +1638,7 @@ qla24xx_beacon_on(struct scsi_qla_host *vha)
if (qla2x00_get_fw_options(vha, ha->fw_options) !=
QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x7009,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@ -1670,14 +1692,14 @@ qla24xx_beacon_off(struct scsi_qla_host *vha)
ha->fw_options[1] &= ~ADD_FO1_DISABLE_GPIO_LED_CTRL;
if (qla2x00_set_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to update fw options (beacon off).\n");
ql_log(ql_log_warn, vha, 0x704d,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
if (qla2x00_get_fw_options(vha, ha->fw_options) != QLA_SUCCESS) {
qla_printk(KERN_WARNING, ha,
"Unable to get fw options (beacon off).\n");
ql_log(ql_log_warn, vha, 0x704e,
"Unable to update fw options (beacon on).\n");
return QLA_FUNCTION_FAILED;
}
@ -2389,10 +2411,9 @@ try_fast:
optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
&optrom_dma, GFP_KERNEL);
if (!optrom) {
qla_printk(KERN_DEBUG, ha,
"Unable to allocate memory for optrom burst read "
"(%x KB).\n", OPTROM_BURST_SIZE / 1024);
ql_log(ql_log_warn, vha, 0x00cc,
"Unable to allocate memory for optrom burst read (%x KB).\n",
OPTROM_BURST_SIZE / 1024);
goto slow_read;
}
@ -2407,12 +2428,11 @@ try_fast:
rval = qla2x00_dump_ram(vha, optrom_dma,
flash_data_addr(ha, faddr), burst);
if (rval) {
qla_printk(KERN_WARNING, ha,
"Unable to burst-read optrom segment "
"(%x/%x/%llx).\n", rval,
flash_data_addr(ha, faddr),
ql_log(ql_log_warn, vha, 0x00f5,
"Unable to burst-read optrom segment (%x/%x/%llx).\n",
rval, flash_data_addr(ha, faddr),
(unsigned long long)optrom_dma);
qla_printk(KERN_WARNING, ha,
ql_log(ql_log_warn, vha, 0x00f6,
"Reverting to slow-read.\n");
dma_free_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
@ -2556,8 +2576,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (qla2x00_read_flash_byte(ha, pcihdr) != 0x55 ||
qla2x00_read_flash_byte(ha, pcihdr + 0x01) != 0xaa) {
/* No signature */
DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
"signature.\n"));
ql_log(ql_log_fatal, vha, 0x0050,
"No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@ -2573,8 +2593,8 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x2) != 'I' ||
qla2x00_read_flash_byte(ha, pcids + 0x3) != 'R') {
/* Incorrect header. */
DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
"found pcir_adr=%x.\n", pcids));
ql_log(ql_log_fatal, vha, 0x0051,
"PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@ -2588,8 +2608,9 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->bios_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]));
ql_dbg(ql_dbg_init, vha, 0x0052,
"Read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
@ -2602,12 +2623,14 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_byte(ha, pcids + 0x12);
ha->efi_revision[1] =
qla2x00_read_flash_byte(ha, pcids + 0x13);
DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]));
ql_dbg(ql_dbg_init, vha, 0x0053,
"Read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
"type %x at pcids %x.\n", code_type, pcids));
ql_log(ql_log_warn, vha, 0x0054,
"Unrecognized code type %x at pcids %x.\n",
code_type, pcids);
break;
}
@ -2627,21 +2650,28 @@ qla2x00_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
qla2x00_read_flash_data(ha, dbyte, ha->flt_region_fw * 4 + 10,
8);
DEBUG3(qla_printk(KERN_DEBUG, ha, "dumping fw ver from "
"flash:\n"));
DEBUG3(qla2x00_dump_buffer((uint8_t *)dbyte, 8));
ql_dbg(ql_dbg_init + ql_dbg_buffer, vha, 0x010a,
"Dumping fw "
"ver from flash:.\n");
ql_dump_buffer(ql_dbg_init + ql_dbg_buffer, vha, 0x010b,
(uint8_t *)dbyte, 8);
if ((dcode[0] == 0xffff && dcode[1] == 0xffff &&
dcode[2] == 0xffff && dcode[3] == 0xffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
"revision at %x.\n", ha->flt_region_fw * 4));
ql_log(ql_log_warn, vha, 0x0057,
"Unrecognized fw revision at %x.\n",
ha->flt_region_fw * 4);
} else {
/* values are in big endian */
ha->fw_revision[0] = dbyte[0] << 16 | dbyte[1];
ha->fw_revision[1] = dbyte[2] << 16 | dbyte[3];
ha->fw_revision[2] = dbyte[4] << 16 | dbyte[5];
ql_dbg(ql_dbg_init, vha, 0x0058,
"FW Version: "
"%d.%d.%d.\n", ha->fw_revision[0],
ha->fw_revision[1], ha->fw_revision[2]);
}
}
@ -2683,8 +2713,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
bcode = mbuf + (pcihdr % 4);
if (bcode[0x0] != 0x55 || bcode[0x1] != 0xaa) {
/* No signature */
DEBUG2(qla_printk(KERN_DEBUG, ha, "No matching ROM "
"signature.\n"));
ql_log(ql_log_fatal, vha, 0x0059,
"No matching ROM signature.\n");
ret = QLA_FUNCTION_FAILED;
break;
}
@ -2699,8 +2729,8 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (bcode[0x0] != 'P' || bcode[0x1] != 'C' ||
bcode[0x2] != 'I' || bcode[0x3] != 'R') {
/* Incorrect header. */
DEBUG2(qla_printk(KERN_INFO, ha, "PCI data struct not "
"found pcir_adr=%x.\n", pcids));
ql_log(ql_log_fatal, vha, 0x005a,
"PCI data struct not found pcir_adr=%x.\n", pcids);
ret = QLA_FUNCTION_FAILED;
break;
}
@ -2712,26 +2742,30 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
/* Intel x86, PC-AT compatible. */
ha->bios_revision[0] = bcode[0x12];
ha->bios_revision[1] = bcode[0x13];
DEBUG3(qla_printk(KERN_DEBUG, ha, "read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]));
ql_dbg(ql_dbg_init, vha, 0x005b,
"Read BIOS %d.%d.\n",
ha->bios_revision[1], ha->bios_revision[0]);
break;
case ROM_CODE_TYPE_FCODE:
/* Open Firmware standard for PCI (FCode). */
ha->fcode_revision[0] = bcode[0x12];
ha->fcode_revision[1] = bcode[0x13];
DEBUG3(qla_printk(KERN_DEBUG, ha, "read FCODE %d.%d.\n",
ha->fcode_revision[1], ha->fcode_revision[0]));
ql_dbg(ql_dbg_init, vha, 0x005c,
"Read FCODE %d.%d.\n",
ha->fcode_revision[1], ha->fcode_revision[0]);
break;
case ROM_CODE_TYPE_EFI:
/* Extensible Firmware Interface (EFI). */
ha->efi_revision[0] = bcode[0x12];
ha->efi_revision[1] = bcode[0x13];
DEBUG3(qla_printk(KERN_DEBUG, ha, "read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]));
ql_dbg(ql_dbg_init, vha, 0x005d,
"Read EFI %d.%d.\n",
ha->efi_revision[1], ha->efi_revision[0]);
break;
default:
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized code "
"type %x at pcids %x.\n", code_type, pcids));
ql_log(ql_log_warn, vha, 0x005e,
"Unrecognized code type %x at pcids %x.\n",
code_type, pcids);
break;
}
@ -2753,13 +2787,18 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
dcode[2] == 0xffffffff && dcode[3] == 0xffffffff) ||
(dcode[0] == 0 && dcode[1] == 0 && dcode[2] == 0 &&
dcode[3] == 0)) {
DEBUG2(qla_printk(KERN_INFO, ha, "Unrecognized fw "
"revision at %x.\n", ha->flt_region_fw * 4));
ql_log(ql_log_warn, vha, 0x005f,
"Unrecognized fw revision at %x.\n",
ha->flt_region_fw * 4);
} else {
ha->fw_revision[0] = dcode[0];
ha->fw_revision[1] = dcode[1];
ha->fw_revision[2] = dcode[2];
ha->fw_revision[3] = dcode[3];
ql_dbg(ql_dbg_init, vha, 0x0060,
"Firmware revision %d.%d.%d.%d.\n",
ha->fw_revision[0], ha->fw_revision[1],
ha->fw_revision[2], ha->fw_revision[3]);
}
/* Check for golden firmware and get version if available */
@ -2775,9 +2814,9 @@ qla24xx_get_flash_version(scsi_qla_host_t *vha, void *mbuf)
if (dcode[4] == 0xFFFFFFFF && dcode[5] == 0xFFFFFFFF &&
dcode[6] == 0xFFFFFFFF && dcode[7] == 0xFFFFFFFF) {
DEBUG2(qla_printk(KERN_INFO, ha,
"%s(%ld): Unrecognized golden fw at 0x%x.\n",
__func__, vha->host_no, ha->flt_region_gold_fw * 4));
ql_log(ql_log_warn, vha, 0x0056,
"Unrecognized golden fw at 0x%x.\n",
ha->flt_region_gold_fw * 4);
return ret;
}
@ -2843,9 +2882,9 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
if (!ha->fcp_prio_cfg) {
ha->fcp_prio_cfg = vmalloc(FCP_PRIO_CFG_SIZE);
if (!ha->fcp_prio_cfg) {
qla_printk(KERN_WARNING, ha,
"Unable to allocate memory for fcp priority data "
"(%x).\n", FCP_PRIO_CFG_SIZE);
ql_log(ql_log_warn, vha, 0x00d5,
"Unable to allocate memory for fcp priorty data (%x).\n",
FCP_PRIO_CFG_SIZE);
return QLA_FUNCTION_FAILED;
}
}
@ -2857,7 +2896,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
ha->isp_ops->read_optrom(vha, (uint8_t *)ha->fcp_prio_cfg,
fcp_prio_addr << 2, FCP_PRIO_CFG_HDR_SIZE);
if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 0))
if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 0))
goto fail;
/* read remaining FCP CMD config data from flash */
@ -2869,7 +2908,7 @@ qla24xx_read_fcp_prio_cfg(scsi_qla_host_t *vha)
fcp_prio_addr << 2, (len < max_len ? len : max_len));
/* revalidate the entire FCP priority config data, including entries */
if (!qla24xx_fcp_prio_cfg_valid(ha->fcp_prio_cfg, 1))
if (!qla24xx_fcp_prio_cfg_valid(vha, ha->fcp_prio_cfg, 1))
goto fail;
ha->flags.fcp_prio_enabled = 1;