[SCSI] Initial Commit of qla4xxx

open-iSCSI driver for Qlogic Corporation's iSCSI HBAs

Signed-off-by: Ravi Anand <ravi.anand@qlogic.com>
Signed-off-by: David Somayajulu <david.somayajulu@qlogic.com>
Signed-off-by: Doug Maxey <dwm@bubba.enoyolf.org>
Signed-off-by: Mike Christie <michaelc@cs.wisc.edu>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
This commit is contained in:
David Somayajulu 2006-09-19 10:28:00 -07:00 committed by James Bottomley
parent ed542bed12
commit afaf5a2d34
18 changed files with 7540 additions and 0 deletions

View file

@ -1244,6 +1244,7 @@ config SCSI_QLOGICPTI
module will be called qlogicpti.
source "drivers/scsi/qla2xxx/Kconfig"
source "drivers/scsi/qla4xxx/Kconfig"
config SCSI_LPFC
tristate "Emulex LightPulse Fibre Channel Support"

View file

@ -84,6 +84,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o
obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o
obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_PAS16) += pas16.o
obj-$(CONFIG_SCSI_SEAGATE) += seagate.o

View file

@ -0,0 +1,7 @@
config SCSI_QLA_ISCSI
tristate "QLogic ISP4XXX host adapter family support"
depends on PCI && SCSI
select SCSI_ISCSI_ATTRS
---help---
This driver supports the QLogic 40xx (ISP4XXX) iSCSI host
adapter family.

View file

@ -0,0 +1,5 @@
qla4xxx-y := ql4_os.o ql4_init.o ql4_mbx.o ql4_iocb.o ql4_isr.o \
ql4_nvram.o ql4_dbg.o
obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx.o

View file

@ -0,0 +1,197 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include <scsi/scsi_dbg.h>
/*
 * qla4xxx_print_srb_info - dump the interesting fields of an srb
 * @srb: driver request block to print
 *
 * Debug aid only.  Emits via printk() with no explicit log level.
 * Dereferences srb->cmd, so the srb must still have a command attached.
 */
static void qla4xxx_print_srb_info(struct srb * srb)
{
	printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
	printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
	       __func__, srb->cmd, (unsigned long) srb->dma_handle);
	printk("%s: fw_ddb_index = %d, lun = %d\n",
	       __func__, srb->fw_ddb_index, srb->cmd->device->lun);
	printk("%s: iocb_tov = %d\n",
	       __func__, srb->iocb_tov);
	printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
	       __func__, srb->cc_stat, srb->r_start, srb->u_start);
}
/*
 * qla4xxx_print_scsi_cmd - dump a SCSI command plus its driver context
 * @cmd: SCSI mid-layer command to print
 *
 * Debug aid only.  Prints the nexus (channel/target/lun), the CDB via
 * scsi_print_command(), the data buffer / scatter-gather description,
 * and finally the srb stashed in cmd->SCp.ptr (qla4xxx_print_srb_info).
 *
 * NOTE(review): uses the legacy scsi_cmnd fields (use_sg,
 * request_buffer, pid, eh_timeout) present when this driver was
 * written -- confirm against the target kernel version.
 */
void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
{
	printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
	printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
	       cmd->device->channel, cmd->device->id, cmd->device->lun,
	       cmd->cmd_len);
	scsi_print_command(cmd);
	printk(" seg_cnt = %d\n", cmd->use_sg);
	printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
	       cmd->request_buffer, cmd->request_bufflen);
	if (cmd->use_sg) {
		/* request_buffer holds a scatterlist when use_sg != 0 */
		struct scatterlist *sg;
		sg = (struct scatterlist *)cmd->request_buffer;
		printk(" SG buffer: \n");
		qla4xxx_dump_buffer((caddr_t) sg,
				    (cmd->use_sg * sizeof(*sg)));
	}
	printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
	       cmd->transfersize);
	printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
	printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
	       cmd->sc_data_direction);
	printk(" Current time (jiffies) = 0x%lx, "
	       "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
	qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
}
/*
 * __dump_registers - print the ISP register file to the console
 * @ha: adapter whose registers are dumped
 *
 * Lock-free worker; callers must hold ha->hardware_lock (see
 * qla4xxx_dump_registers()).  Each line is printed as
 * "<byte offset> <name> = <value>".  Registers whose location differs
 * between ISP4010 and ISP4022 are dispatched on the chip type.
 */
void __dump_registers(struct scsi_qla_host *ha)
{
	uint8_t i;

	for (i = 0; i < MBOX_REG_COUNT; i++) {
		printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
		       readw(&ha->reg->mailbox[i]));
	}
	printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_address),
	       readw(&ha->reg->flash_address));
	printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, flash_data),
	       readw(&ha->reg->flash_data));
	printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, ctrl_status),
	       readw(&ha->reg->ctrl_status));

	/* u1 window: nvram on 4010; intr_mask/nvram/semaphore on 4022 */
	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
		       readw(&ha->reg->u1.isp4010.nvram));
	}

	else if (is_qla4022(ha)) {
		printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u1.isp4022.intr_mask),
		       readw(&ha->reg->u1.isp4022.intr_mask));
		printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
		       readw(&ha->reg->u1.isp4022.nvram));
		printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u1.isp4022.semaphore),
		       readw(&ha->reg->u1.isp4022.semaphore));
	}

	printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, req_q_in),
	       readw(&ha->reg->req_q_in));
	printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
	       (uint8_t) offsetof(struct isp_reg, rsp_q_out),
	       readw(&ha->reg->rsp_q_out));

	/* u2 window: fixed layout on 4010; paged (p0..p3) on 4022 */
	if (is_qla4010(ha)) {
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4010.ext_hw_conf),
		       readw(&ha->reg->u2.isp4010.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4010.port_ctrl),
		       readw(&ha->reg->u2.isp4010.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4010.port_status),
		       readw(&ha->reg->u2.isp4010.port_status));
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4010.req_q_out),
		       readw(&ha->reg->u2.isp4010.req_q_out));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
		       readw(&ha->reg->u2.isp4010.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
		       readw(&ha->reg->u2.isp4010.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4010.port_err_status),
		       readw(&ha->reg->u2.isp4010.port_err_status));
	}

	else if (is_qla4022(ha)) {
		printk(KERN_INFO "Page 0 Registers:\n");
		printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p0.ext_hw_conf),
		       readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
		printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p0.port_ctrl),
		       readw(&ha->reg->u2.isp4022.p0.port_ctrl));
		printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p0.port_status),
		       readw(&ha->reg->u2.isp4022.p0.port_status));
		printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p0.gp_out),
		       readw(&ha->reg->u2.isp4022.p0.gp_out));
		printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
		       readw(&ha->reg->u2.isp4022.p0.gp_in));
		printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p0.port_err_status),
		       readw(&ha->reg->u2.isp4022.p0.port_err_status));
		printk(KERN_INFO "Page 1 Registers:\n");
		/*
		 * Temporarily select register page 1 to read req_q_out,
		 * then restore page 0 before returning.
		 * NOTE(review): "HOST_MEM_CFG_PAGE & set_rmask(...)" clears
		 * the upper write-enable mask bits; verify the intended
		 * expression is not set_rmask(HOST_MEM_CFG_PAGE) -- confirm
		 * against the ctrl_status write-mask semantics.
		 */
		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
		printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
		       (uint8_t) offsetof(struct isp_reg,
					  u2.isp4022.p1.req_q_out),
		       readw(&ha->reg->u2.isp4022.p1.req_q_out));
		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
		       &ha->reg->ctrl_status);
	}
}
/*
 * qla4xxx_dump_mbox_registers - print mailbox registers 1..7
 * @ha: adapter to dump
 *
 * Takes ha->hardware_lock around the reads.  The loop starts at index 1;
 * mailbox[0] (the command/status register) is presumably reported by the
 * caller already -- confirm.
 */
void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
{
	unsigned long flags = 0;
	int i = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 1; i < MBOX_REG_COUNT; i++)
		printk(KERN_INFO "  Mailbox[%d] = %08x\n", i,
		       readw(&ha->reg->mailbox[i]));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * qla4xxx_dump_registers - locked wrapper around __dump_registers()
 * @ha: adapter to dump
 *
 * Acquires ha->hardware_lock so the register snapshot is consistent.
 */
void qla4xxx_dump_registers(struct scsi_qla_host *ha)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	__dump_registers(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/*
 * qla4xxx_dump_buffer - hex-dump a buffer to the console
 * @b: buffer to dump
 * @size: number of bytes to print
 *
 * Prints 16 bytes per row under a fixed column header.
 *
 * Fix: the original broke the line when (cnt % 16) == 0, i.e. AFTER the
 * first byte and then after bytes 16, 32, ... -- so the first row held a
 * single byte and every subsequent row was offset by one.  Break after
 * the 16th byte of each row instead ((cnt % 16) == 15).
 */
void qla4xxx_dump_buffer(void *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t *c = b;

	printk(" 0 1 2 3 4 5 6 7 8 9 Ah Bh Ch Dh Eh "
	       "Fh\n");
	printk("------------------------------------------------------------"
	       "--\n");
	for (cnt = 0; cnt < size; cnt++, c++) {
		printk(KERN_DEBUG "%02x", *c);
		if (cnt % 16 == 15)	/* end of a 16-byte row */
			printk(KERN_DEBUG "\n");
		else
			printk(KERN_DEBUG " ");
	}
	/* Terminate a partial final row. */
	if (cnt % 16)
		printk(KERN_DEBUG "\n");
}

View file

@ -0,0 +1,55 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
 * Driver debug definitions.
 *
 * Each DEBUGn(x) macro either executes the statement list 'x' or
 * compiles to nothing, depending on which QL_DEBUG_LEVEL_n symbols are
 * defined below.  QL_DEBUG_LEVEL_2 (error messages) is always on.
 */
/* #define QL_DEBUG */ /* DEBUG messages */
/* #define QL_DEBUG_LEVEL_3 */ /* Output function tracing */
/* #define QL_DEBUG_LEVEL_4 */
/* #define QL_DEBUG_LEVEL_5 */
/* #define QL_DEBUG_LEVEL_9 */
#define QL_DEBUG_LEVEL_2 /* Always enable error messages */
#if defined(QL_DEBUG)
#define DEBUG(x) do {x;} while (0);
#else
#define DEBUG(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_2)
#define DEBUG2(x) do {if(extended_error_logging == 2) x;} while (0);
#else
#define DEBUG2(x) do {} while (0);
#endif
/*
 * DEBUG2_3() fires when either level-2 or level-3 logging is built in.
 * Define it here, from both symbols at once: the original nested layout
 * left DEBUG2_3 undefined (a compile error at any use site) whenever
 * QL_DEBUG_LEVEL_3 was enabled without QL_DEBUG_LEVEL_2.
 */
#if defined(QL_DEBUG_LEVEL_2) || defined(QL_DEBUG_LEVEL_3)
#define DEBUG2_3(x) do {x;} while (0);
#else
#define DEBUG2_3(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_3)
#define DEBUG3(x) do {if(extended_error_logging == 3) x;} while (0);
#else
#define DEBUG3(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_4)
#define DEBUG4(x) do {x;} while (0);
#else
#define DEBUG4(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_5)
#define DEBUG5(x) do {x;} while (0);
#else
#define DEBUG5(x) do {} while (0);
#endif
#if defined(QL_DEBUG_LEVEL_9)
#define DEBUG9(x) do {x;} while (0);
#else
#define DEBUG9(x) do {} while (0);
#endif

View file

@ -0,0 +1,586 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QL4_DEF_H
#define __QL4_DEF_H
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dmapool.h>
#include <linux/mempool.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_iscsi.h>
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
#define PCI_DEVICE_ID_QLOGIC_ISP4010 0x4010
#endif
#ifndef PCI_DEVICE_ID_QLOGIC_ISP4022
#define PCI_DEVICE_ID_QLOGIC_ISP4022 0x4022
#endif /* */
#define QLA_SUCCESS 0
#define QLA_ERROR 1
/*
* Data bit definitions
*/
#define BIT_0 0x1
#define BIT_1 0x2
#define BIT_2 0x4
#define BIT_3 0x8
#define BIT_4 0x10
#define BIT_5 0x20
#define BIT_6 0x40
#define BIT_7 0x80
#define BIT_8 0x100
#define BIT_9 0x200
#define BIT_10 0x400
#define BIT_11 0x800
#define BIT_12 0x1000
#define BIT_13 0x2000
#define BIT_14 0x4000
#define BIT_15 0x8000
#define BIT_16 0x10000
#define BIT_17 0x20000
#define BIT_18 0x40000
#define BIT_19 0x80000
#define BIT_20 0x100000
#define BIT_21 0x200000
#define BIT_22 0x400000
#define BIT_23 0x800000
#define BIT_24 0x1000000
#define BIT_25 0x2000000
#define BIT_26 0x4000000
#define BIT_27 0x8000000
#define BIT_28 0x10000000
#define BIT_29 0x20000000
#define BIT_30 0x40000000
#define BIT_31 0x80000000
/*
* Host adapter default definitions
***********************************/
#define MAX_HBAS 16
#define MAX_BUSES 1
#define MAX_TARGETS (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
#define MAX_LUNS 0xffff
#define MAX_AEN_ENTRIES 256 /* should be > EXT_DEF_MAX_AEN_QUEUE */
#define MAX_DDB_ENTRIES (MAX_PRST_DEV_DB_ENTRIES + MAX_DEV_DB_ENTRIES)
#define MAX_PDU_ENTRIES 32
#define INVALID_ENTRY 0xFFFF
#define MAX_CMDS_TO_RISC 1024
#define MAX_SRBS MAX_CMDS_TO_RISC
#define MBOX_AEN_REG_COUNT 5
#define MAX_INIT_RETRIES 5
#define IOCB_HIWAT_CUSHION 16
/*
* Buffer sizes
*/
#define REQUEST_QUEUE_DEPTH MAX_CMDS_TO_RISC
#define RESPONSE_QUEUE_DEPTH 64
#define QUEUE_SIZE 64
#define DMA_BUFFER_SIZE 512
/*
* Misc
*/
#define MAC_ADDR_LEN 6 /* in bytes */
#define IP_ADDR_LEN 4 /* in bytes */
#define DRIVER_NAME "qla4xxx"
#define MAX_LINKED_CMDS_PER_LUN 3
#define MAX_REQS_SERVICED_PER_INTR 16
#define ISCSI_IPADDR_SIZE 4 /* IP address size */
#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alias name size */
#define ISCSI_NAME_SIZE 255 /* ISCSI Name size -
* usually a string */
#define LSDW(x) ((u32)((u64)(x)))
#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
/*
* Retry & Timeout Values
*/
#define MBOX_TOV 60
#define SOFT_RESET_TOV 30
#define RESET_INTR_TOV 3
#define SEMAPHORE_TOV 10
#define ADAPTER_INIT_TOV 120
#define ADAPTER_RESET_TOV 180
#define EXTEND_CMD_TOV 60
#define WAIT_CMD_TOV 30
#define EH_WAIT_CMD_TOV 120
#define FIRMWARE_UP_TOV 60
#define RESET_FIRMWARE_TOV 30
#define LOGOUT_TOV 10
#define IOCB_TOV_MARGIN 10
#define RELOGIN_TOV 18
#define ISNS_DEREG_TOV 5
#define MAX_RESET_HA_RETRIES 2
/*
* SCSI Request Block structure (srb) that is placed
* on cmd->SCp location of every I/O [We have 22 bytes available]
*/
struct srb {
	struct list_head list;	/* (8) */
	struct scsi_qla_host *ha;	/* HA the SP is queued on */
	struct ddb_entry *ddb;
	uint16_t flags;		/* (1) Status flags. */
#define SRB_DMA_VALID BIT_3	/* DMA Buffer mapped. */
#define SRB_GOT_SENSE BIT_4	/* sense data received. */
	uint8_t state;		/* (1) Status flags. */
#define SRB_NO_QUEUE_STATE 0	/* Request is in between states */
#define SRB_FREE_STATE 1
#define SRB_ACTIVE_STATE 3
#define SRB_ACTIVE_TIMEOUT_STATE 4
#define SRB_SUSPENDED_STATE 7	/* Request in suspended state */
	struct scsi_cmnd *cmd;	/* (4) SCSI command block */
	dma_addr_t dma_handle;	/* (4) for unmap of single transfers */
	atomic_t ref_count;	/* reference count for this srb */
	uint32_t fw_ddb_index;
	uint8_t err_id;		/* error id */
#define SRB_ERR_PORT 1		/* Request failed because "port down" */
#define SRB_ERR_LOOP 2		/* Request failed because "loop down" */
#define SRB_ERR_DEVICE 3	/* Request failed because "device error" */
#define SRB_ERR_OTHER 4
	uint16_t reserved;
	uint16_t iocb_tov;	/* IOCB timeout value */
	uint16_t iocb_cnt;	/* Number of used iocbs */
	uint16_t cc_stat;
	u_long r_start;		/* Time we receive a cmd from OS */
	u_long u_start;		/* Time when we handed the cmd to F/W */
};
/*
* Device Database (DDB) structure
*/
struct ddb_entry {
	struct list_head list;	/* ddb list */
	struct scsi_qla_host *ha;	/* adapter this entry belongs to */
	struct iscsi_cls_session *sess;	/* iSCSI transport-class session */
	struct iscsi_cls_conn *conn;	/* iSCSI transport-class connection */
	atomic_t state;		/* DDB State */
	unsigned long flags;	/* DDB Flags */
	unsigned long dev_scan_wait_to_start_relogin;
	unsigned long dev_scan_wait_to_complete_relogin;
	uint16_t os_target_id;	/* Target ID */
	uint16_t fw_ddb_index;	/* DDB firmware index */
	uint8_t reserved[2];
	uint32_t fw_ddb_device_state;	/* F/W Device State -- see ql4_fw.h */
	uint32_t CmdSn;
	uint16_t target_session_id;
	uint16_t connection_id;
	uint16_t exe_throttle;	/* Max number of cmds outstanding
				 * simultaneously */
	uint16_t task_mgmt_timeout;	/* Min time for task mgmt cmds to
					 * complete */
	uint16_t default_relogin_timeout;	/* Max time to wait for
						 * relogin to complete */
	uint16_t tcp_source_port_num;
	uint32_t default_time2wait;	/* Default Min time between
					 * relogins (+aens) */
	atomic_t port_down_timer;	/* Device connection timer */
	atomic_t retry_relogin_timer;	/* Min Time between relogins
					 * (4000 only) */
	atomic_t relogin_timer;	/* Max Time to wait for relogin to complete */
	atomic_t relogin_retry_count;	/* Num of times relogin has been
					 * retried */
	uint16_t port;
	uint32_t tpgt;
	uint8_t ip_addr[ISCSI_IPADDR_SIZE];
	uint8_t iscsi_name[ISCSI_NAME_SIZE];	/* 72 x48 */
	uint8_t iscsi_alias[0x20];
};
/*
* DDB states.
*/
#define DDB_STATE_DEAD 0 /* We can no longer talk to
* this device */
#define DDB_STATE_ONLINE 1 /* Device ready to accept
* commands */
#define DDB_STATE_MISSING 2 /* Device logged off, trying
* to re-login */
/*
* DDB flags.
*/
#define DF_RELOGIN 0 /* Relogin to device */
#define DF_NO_RELOGIN 1 /* Do not relogin if IOCTL
* logged it out */
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
#define DF_FO_MASKED 3
/*
* Asynchronous Event Queue structure
*/
/* One queued asynchronous-event entry: the raw mailbox registers
 * captured when the AEN arrived (see ha->aen_q below). */
struct aen {
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
};
#include "ql4_fw.h"
#include "ql4_nvram.h"
/*
* Linux Host Adapter structure
*/
struct scsi_qla_host {
	/* Linux adapter configuration data */
	struct Scsi_Host *host;	/* pointer to host data */
	uint32_t tot_ddbs;

	/* Adapter state flags (bit numbers; hex values in comments). */
	unsigned long flags;
#define AF_ONLINE 0		/* 0x00000001 */
#define AF_INIT_DONE 1		/* 0x00000002 */
#define AF_MBOX_COMMAND 2	/* 0x00000004 */
#define AF_MBOX_COMMAND_DONE 3	/* 0x00000008 */
#define AF_INTERRUPTS_ON 6	/* 0x00000040 Not Used */
#define AF_GET_CRASH_RECORD 7	/* 0x00000080 */
#define AF_LINK_UP 8		/* 0x00000100 */
#define AF_TOPCAT_CHIP_PRESENT 9	/* 0x00000200 */
#define AF_IRQ_ATTACHED 10	/* 0x00000400 */
#define AF_ISNS_CMD_IN_PROCESS 12	/* 0x00001000 */
#define AF_ISNS_CMD_DONE 13	/* 0x00002000 */

	/* Work bits consumed by the dpc_work handler below. */
	unsigned long dpc_flags;
#define DPC_RESET_HA 1		/* 0x00000002 */
#define DPC_RETRY_RESET_HA 2	/* 0x00000004 */
#define DPC_RELOGIN_DEVICE 3	/* 0x00000008 */
#define DPC_RESET_HA_DESTROY_DDB_LIST 4	/* 0x00000010 */
#define DPC_RESET_HA_INTR 5	/* 0x00000020 */
#define DPC_ISNS_RESTART 7	/* 0x00000080 */
#define DPC_AEN 9		/* 0x00000200 */
#define DPC_GET_DHCP_IP_ADDR 15	/* 0x00008000 */

	uint16_t iocb_cnt;
	uint16_t iocb_hiwat;

	/* SRB cache. */
#define SRB_MIN_REQ 128
	mempool_t *srb_mempool;

	/* pci information */
	struct pci_dev *pdev;
	struct isp_reg __iomem *reg;	/* Base I/O address */
	unsigned long pio_address;
	unsigned long pio_length;
#define MIN_IOBASE_LEN 0x100

	uint16_t req_q_count;
	uint8_t marker_needed;
	uint8_t rsvd1;
	unsigned long host_no;

	/* NVRAM registers */
	struct eeprom_data *nvram;
	spinlock_t hardware_lock ____cacheline_aligned;
	spinlock_t list_lock;
	uint32_t eeprom_cmd_data;

	/* Counters for general statistics */
	uint64_t adapter_error_count;
	uint64_t device_error_count;
	uint64_t total_io_count;
	uint64_t total_mbytes_xferred;
	uint64_t link_failure_count;
	uint64_t invalid_crc_count;
	uint32_t spurious_int_count;
	uint32_t aborted_io_count;
	uint32_t io_timeout_count;
	uint32_t mailbox_timeout_count;
	uint32_t seconds_since_last_intr;
	uint32_t seconds_since_last_heartbeat;
	uint32_t mac_index;

	/* Info Needed for Management App */
	/* --- From GetFwVersion --- */
	uint32_t firmware_version[2];
	uint32_t patch_number;
	uint32_t build_number;

	/* --- From Init_FW --- */
	/* init_cb_t *init_cb; */
	uint16_t firmware_options;
	uint16_t tcp_options;
	uint8_t ip_address[IP_ADDR_LEN];
	uint8_t subnet_mask[IP_ADDR_LEN];
	uint8_t gateway[IP_ADDR_LEN];
	uint8_t alias[32];
	uint8_t name_string[256];
	uint8_t heartbeat_interval;
	uint8_t rsvd;

	/* --- From FlashSysInfo --- */
	uint8_t my_mac[MAC_ADDR_LEN];
	uint8_t serial_number[16];

	/* --- From GetFwState --- */
	uint32_t firmware_state;
	uint32_t board_id;
	uint32_t addl_fw_state;

	/* Linux kernel thread */
	struct workqueue_struct *dpc_thread;
	struct work_struct dpc_work;

	/* Linux timer thread */
	struct timer_list timer;
	uint32_t timer_active;

	/* Recovery Timers */
	uint32_t port_down_retry_count;
	uint32_t discovery_wait;
	atomic_t check_relogin_timeouts;
	uint32_t retry_reset_ha_cnt;
	uint32_t isp_reset_timer;	/* reset test timer */
	uint32_t nic_reset_timer;	/* simulated nic reset test timer */
	int eh_start;

	struct list_head free_srb_q;
	uint16_t free_srb_q_count;
	uint16_t num_srbs_allocated;

	/* DMA Memory Block */
	void *queues;
	dma_addr_t queues_dma;
	unsigned long queues_len;
#define MEM_ALIGN_VALUE \
	((max(REQUEST_QUEUE_DEPTH, RESPONSE_QUEUE_DEPTH)) * \
	 sizeof(struct queue_entry))

	/* request and response queue variables */
	dma_addr_t request_dma;
	struct queue_entry *request_ring;
	struct queue_entry *request_ptr;
	dma_addr_t response_dma;
	struct queue_entry *response_ring;
	struct queue_entry *response_ptr;
	dma_addr_t shadow_regs_dma;
	struct shadow_regs *shadow_regs;
	uint16_t request_in;	/* Current indexes. */
	uint16_t request_out;
	uint16_t response_in;
	uint16_t response_out;

	/* aen queue variables */
	uint16_t aen_q_count;	/* Number of available aen_q entries */
	uint16_t aen_in;	/* Current indexes */
	uint16_t aen_out;
	struct aen aen_q[MAX_AEN_ENTRIES];

	/* This mutex protects several threads to do mailbox commands
	 * concurrently.
	 */
	struct mutex mbox_sem;
	wait_queue_head_t mailbox_wait_queue;

	/* temporary mailbox status registers */
	volatile uint8_t mbox_status_count;
	volatile uint32_t mbox_status[MBOX_REG_COUNT];

	/* local device database list (contains internal ddb entries) */
	struct list_head ddb_list;

	/* Map ddb_list entry by FW ddb index */
	struct ddb_entry *fw_ddb_index_map[MAX_DDB_ENTRIES];
};
/* True when the adapter is an ISP4010, judged by PCI device ID. */
static inline int is_qla4010(struct scsi_qla_host *ha)
{
	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4010;
}
/* True when the adapter is an ISP4022, judged by PCI device ID. */
static inline int is_qla4022(struct scsi_qla_host *ha)
{
	return ha->pdev->device == PCI_DEVICE_ID_QLOGIC_ISP4022;
}
/* The adapter is usable only when it is both online and has link. */
static inline int adapter_up(struct scsi_qla_host *ha)
{
	if (!test_bit(AF_ONLINE, &ha->flags))
		return 0;
	return test_bit(AF_LINK_UP, &ha->flags) != 0;
}
/* Driver-private adapter data lives in the Scsi_Host hostdata area. */
static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
{
	return (struct scsi_qla_host *)shost->hostdata;
}
/* Chip-dependent address of the hardware semaphore register. */
static inline void __iomem* isp_semaphore(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u1.isp4022.semaphore;
	return &ha->reg->u1.isp4010.nvram;
}
/* Chip-dependent address of the NVRAM access register. */
static inline void __iomem* isp_nvram(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u1.isp4022.nvram;
	return &ha->reg->u1.isp4010.nvram;
}
/* Chip-dependent address of the external hardware config register. */
static inline void __iomem* isp_ext_hw_conf(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u2.isp4022.p0.ext_hw_conf;
	return &ha->reg->u2.isp4010.ext_hw_conf;
}
/* Chip-dependent address of the port status register. */
static inline void __iomem* isp_port_status(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u2.isp4022.p0.port_status;
	return &ha->reg->u2.isp4010.port_status;
}
/* Chip-dependent address of the port control register. */
static inline void __iomem* isp_port_ctrl(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u2.isp4022.p0.port_ctrl;
	return &ha->reg->u2.isp4010.port_ctrl;
}
/* Chip-dependent address of the port error status register. */
static inline void __iomem* isp_port_error_status(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u2.isp4022.p0.port_err_status;
	return &ha->reg->u2.isp4010.port_err_status;
}
/* Chip-dependent address of the general-purpose output register. */
static inline void __iomem * isp_gp_out(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return &ha->reg->u2.isp4022.p0.gp_out;
	return &ha->reg->u2.isp4010.gp_out;
}
/* Word (16-bit) offset of ext_hw_conf within the chip's EEPROM layout. */
static inline int eeprom_ext_hw_conf_offset(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return offsetof(struct eeprom_data, isp4022.ext_hw_conf) / 2;
	return offsetof(struct eeprom_data, isp4010.ext_hw_conf) / 2;
}
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask);
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits);
static inline int ql4xxx_lock_flash(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_spinlock(a, QL4022_FLASH_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 13);
else
return ql4xxx_sem_spinlock(a, QL4010_FLASH_SEM_MASK,
QL4010_FLASH_SEM_BITS);
}
/* Release the flash hardware semaphore. */
static inline void ql4xxx_unlock_flash(struct scsi_qla_host *a)
{
	ql4xxx_sem_unlock(a, is_qla4022(a) ? QL4022_FLASH_SEM_MASK :
			  QL4010_FLASH_SEM_MASK);
}
static inline int ql4xxx_lock_nvram(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_spinlock(a, QL4022_NVRAM_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 10);
else
return ql4xxx_sem_spinlock(a, QL4010_NVRAM_SEM_MASK,
QL4010_NVRAM_SEM_BITS);
}
/* Release the NVRAM hardware semaphore. */
static inline void ql4xxx_unlock_nvram(struct scsi_qla_host *a)
{
	ql4xxx_sem_unlock(a, is_qla4022(a) ? QL4022_NVRAM_SEM_MASK :
			  QL4010_NVRAM_SEM_MASK);
}
static inline int ql4xxx_lock_drvr(struct scsi_qla_host *a)
{
if (is_qla4022(a))
return ql4xxx_sem_lock(a, QL4022_DRVR_SEM_MASK,
(QL4022_RESOURCE_BITS_BASE_CODE |
(a->mac_index)) << 1);
else
return ql4xxx_sem_lock(a, QL4010_DRVR_SEM_MASK,
QL4010_DRVR_SEM_BITS);
}
/* Release the driver hardware semaphore. */
static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
{
	ql4xxx_sem_unlock(a, is_qla4022(a) ? QL4022_DRVR_SEM_MASK :
			  QL4010_DRVR_SEM_MASK);
}
/*---------------------------------------------------------------------------*/
/* Defines for qla4xxx_initialize_adapter() and qla4xxx_recover_adapter() */
#define PRESERVE_DDB_LIST 0
#define REBUILD_DDB_LIST 1
/* Defines for process_aen() */
#define PROCESS_ALL_AENS 0
#define FLUSH_DDB_CHANGED_AENS 1
#define RELOGIN_DDB_CHANGED_AENS 2
#include "ql4_version.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
#endif /* __QL4_DEF_H */

View file

@ -0,0 +1,843 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QLA4X_FW_H
#define _QLA4X_FW_H
#define MAX_PRST_DEV_DB_ENTRIES 64
#define MIN_DISC_DEV_DB_ENTRY MAX_PRST_DEV_DB_ENTRIES
#define MAX_DEV_DB_ENTRIES 512
/*************************************************************************
*
* ISP 4010 I/O Register Set Structure and Definitions
*
*************************************************************************/
/*
 * Port control/status register page (ISP4022 page 0; see isp_reg.u2).
 * Field comments give "<decimal offset> x<hex offset>" from the start of
 * the register file.
 */
struct port_ctrl_stat_regs {
	__le32 ext_hw_conf;	/* 80 x50 R/W */
	__le32 intChipConfiguration;	/* 84 x54 */
	__le32 port_ctrl;	/* 88 x58 */
	__le32 port_status;	/* 92 x5c */
	__le32 HostPrimMACHi;	/* 96 x60 */
	__le32 HostPrimMACLow;	/* 100 x64 */
	__le32 HostSecMACHi;	/* 104 x68 */
	__le32 HostSecMACLow;	/* 108 x6c */
	__le32 EPPrimMACHi;	/* 112 x70 */
	__le32 EPPrimMACLow;	/* 116 x74 */
	__le32 EPSecMACHi;	/* 120 x78 */
	__le32 EPSecMACLow;	/* 124 x7c */
	__le32 HostPrimIPHi;	/* 128 x80 */
	__le32 HostPrimIPMidHi;	/* 132 x84 */
	__le32 HostPrimIPMidLow;	/* 136 x88 */
	__le32 HostPrimIPLow;	/* 140 x8c */
	__le32 HostSecIPHi;	/* 144 x90 */
	__le32 HostSecIPMidHi;	/* 148 x94 */
	__le32 HostSecIPMidLow;	/* 152 x98 */
	__le32 HostSecIPLow;	/* 156 x9c */
	__le32 EPPrimIPHi;	/* 160 xa0 */
	__le32 EPPrimIPMidHi;	/* 164 xa4 */
	__le32 EPPrimIPMidLow;	/* 168 xa8 */
	__le32 EPPrimIPLow;	/* 172 xac */
	__le32 EPSecIPHi;	/* 176 xb0 */
	__le32 EPSecIPMidHi;	/* 180 xb4 */
	__le32 EPSecIPMidLow;	/* 184 xb8 */
	__le32 EPSecIPLow;	/* 188 xbc */
	__le32 IPReassemblyTimeout;	/* 192 xc0 */
	__le32 EthMaxFramePayload;	/* 196 xc4 */
	__le32 TCPMaxWindowSize;	/* 200 xc8 */
	__le32 TCPCurrentTimestampHi;	/* 204 xcc */
	__le32 TCPCurrentTimestampLow;	/* 208 xd0 */
	__le32 LocalRAMAddress;	/* 212 xd4 */
	__le32 LocalRAMData;	/* 216 xd8 */
	__le32 PCSReserved1;	/* 220 xdc */
	__le32 gp_out;		/* 224 xe0 */
	__le32 gp_in;		/* 228 xe4 */
	__le32 ProbeMuxAddr;	/* 232 xe8 */
	__le32 ProbeMuxData;	/* 236 xec */
	__le32 ERMQueueBaseAddr0;	/* 240 xf0 */
	__le32 ERMQueueBaseAddr1;	/* 244 xf4 */
	__le32 MACConfiguration;	/* 248 xf8 */
	__le32 port_err_status;	/* 252 xfc COR */
};
/*
 * Host memory configuration register page (ISP4022 page 1; see
 * isp_reg.u2).  Holds the request/response and network queue pointers.
 */
struct host_mem_cfg_regs {
	__le32 NetRequestQueueOut;	/* 80 x50 */
	__le32 NetRequestQueueOutAddrHi;	/* 84 x54 */
	__le32 NetRequestQueueOutAddrLow;	/* 88 x58 */
	__le32 NetRequestQueueBaseAddrHi;	/* 92 x5c */
	__le32 NetRequestQueueBaseAddrLow;	/* 96 x60 */
	__le32 NetRequestQueueLength;	/* 100 x64 */
	__le32 NetResponseQueueIn;	/* 104 x68 */
	__le32 NetResponseQueueInAddrHi;	/* 108 x6c */
	__le32 NetResponseQueueInAddrLow;	/* 112 x70 */
	__le32 NetResponseQueueBaseAddrHi;	/* 116 x74 */
	__le32 NetResponseQueueBaseAddrLow;	/* 120 x78 */
	__le32 NetResponseQueueLength;	/* 124 x7c */
	__le32 req_q_out;	/* 128 x80 */
	__le32 RequestQueueOutAddrHi;	/* 132 x84 */
	__le32 RequestQueueOutAddrLow;	/* 136 x88 */
	__le32 RequestQueueBaseAddrHi;	/* 140 x8c */
	__le32 RequestQueueBaseAddrLow;	/* 144 x90 */
	__le32 RequestQueueLength;	/* 148 x94 */
	__le32 ResponseQueueIn;	/* 152 x98 */
	__le32 ResponseQueueInAddrHi;	/* 156 x9c */
	__le32 ResponseQueueInAddrLow;	/* 160 xa0 */
	__le32 ResponseQueueBaseAddrHi;	/* 164 xa4 */
	__le32 ResponseQueueBaseAddrLow;	/* 168 xa8 */
	__le32 ResponseQueueLength;	/* 172 xac */
	__le32 NetRxLargeBufferQueueOut;	/* 176 xb0 */
	__le32 NetRxLargeBufferQueueBaseAddrHi;	/* 180 xb4 */
	__le32 NetRxLargeBufferQueueBaseAddrLow;	/* 184 xb8 */
	__le32 NetRxLargeBufferQueueLength;	/* 188 xbc */
	__le32 NetRxLargeBufferLength;	/* 192 xc0 */
	__le32 NetRxSmallBufferQueueOut;	/* 196 xc4 */
	__le32 NetRxSmallBufferQueueBaseAddrHi;	/* 200 xc8 */
	__le32 NetRxSmallBufferQueueBaseAddrLow;	/* 204 xcc */
	__le32 NetRxSmallBufferQueueLength;	/* 208 xd0 */
	__le32 NetRxSmallBufferLength;	/* 212 xd4 */
	__le32 HMCReserved0[10];	/* 216 xd8 */
};
/*
 * Local RAM configuration register page (ISP4022 page 2; see isp_reg.u2).
 */
struct local_ram_cfg_regs {
	__le32 BufletSize;	/* 80 x50 */
	__le32 BufletMaxCount;	/* 84 x54 */
	__le32 BufletCurrCount;	/* 88 x58 */
	__le32 BufletPauseThresholdCount;	/* 92 x5c */
	__le32 BufletTCPWinThresholdHi;	/* 96 x60 */
	__le32 BufletTCPWinThresholdLow;	/* 100 x64 */
	__le32 IPHashTableBaseAddr;	/* 104 x68 */
	__le32 IPHashTableSize;	/* 108 x6c */
	__le32 TCPHashTableBaseAddr;	/* 112 x70 */
	__le32 TCPHashTableSize;	/* 116 x74 */
	__le32 NCBAreaBaseAddr;	/* 120 x78 */
	__le32 NCBMaxCount;	/* 124 x7c */
	__le32 NCBCurrCount;	/* 128 x80 */
	__le32 DRBAreaBaseAddr;	/* 132 x84 */
	__le32 DRBMaxCount;	/* 136 x88 */
	__le32 DRBCurrCount;	/* 140 x8c */
	__le32 LRCReserved[28];	/* 144 x90 */
};
/*
 * Protocol statistics register page (ISP4022 page 3; see isp_reg.u2).
 * All counters are read-only ("R" in the field comments).
 */
struct prot_stat_regs {
	__le32 MACTxFrameCount;	/* 80 x50 R */
	__le32 MACTxByteCount;	/* 84 x54 R */
	__le32 MACRxFrameCount;	/* 88 x58 R */
	__le32 MACRxByteCount;	/* 92 x5c R */
	__le32 MACCRCErrCount;	/* 96 x60 R */
	__le32 MACEncErrCount;	/* 100 x64 R */
	__le32 MACRxLengthErrCount;	/* 104 x68 R */
	__le32 IPTxPacketCount;	/* 108 x6c R */
	__le32 IPTxByteCount;	/* 112 x70 R */
	__le32 IPTxFragmentCount;	/* 116 x74 R */
	__le32 IPRxPacketCount;	/* 120 x78 R */
	__le32 IPRxByteCount;	/* 124 x7c R */
	__le32 IPRxFragmentCount;	/* 128 x80 R */
	__le32 IPDatagramReassemblyCount;	/* 132 x84 R */
	__le32 IPV6RxPacketCount;	/* 136 x88 R */
	__le32 IPErrPacketCount;	/* 140 x8c R */
	__le32 IPReassemblyErrCount;	/* 144 x90 R */
	__le32 TCPTxSegmentCount;	/* 148 x94 R */
	__le32 TCPTxByteCount;	/* 152 x98 R */
	__le32 TCPRxSegmentCount;	/* 156 x9c R */
	__le32 TCPRxByteCount;	/* 160 xa0 R */
	__le32 TCPTimerExpCount;	/* 164 xa4 R */
	__le32 TCPRxAckCount;	/* 168 xa8 R */
	__le32 TCPTxAckCount;	/* 172 xac R */
	__le32 TCPRxErrOOOCount;	/* 176 xb0 R */
	__le32 PSReserved0;	/* 180 xb4 */
	__le32 TCPRxWindowProbeUpdateCount;	/* 184 xb8 R */
	__le32 ECCErrCorrectionCount;	/* 188 xbc R */
	__le32 PSReserved1[16];	/* 192 xc0 */
};
/* remote register set (access via PCI memory read/write) */
/*
 * Layout diverges by chip in two places: u1 (nvram/semaphore window)
 * and u2.  On the ISP4022 the u2 window is paged -- the active page
 * (p0..p3) is selected via the CSR_SCSI_PAGE_SELECT bits of
 * ctrl_status.
 */
struct isp_reg {
#define MBOX_REG_COUNT 8
	__le32 mailbox[MBOX_REG_COUNT];
	__le32 flash_address;	/* 0x20 */
	__le32 flash_data;
	__le32 ctrl_status;
	union {
		struct {
			__le32 nvram;
			__le32 reserved1[2];	/* 0x30 */
		} __attribute__ ((packed)) isp4010;
		struct {
			__le32 intr_mask;
			__le32 nvram;	/* 0x30 */
			__le32 semaphore;
		} __attribute__ ((packed)) isp4022;
	} u1;
	__le32 req_q_in;	/* SCSI Request Queue Producer Index */
	__le32 rsp_q_out;	/* SCSI Completion Queue Consumer Index */
	__le32 reserved2[4];	/* 0x40 */
	union {
		struct {
			__le32 ext_hw_conf;	/* 0x50 */
			__le32 flow_ctrl;
			__le32 port_ctrl;
			__le32 port_status;
			__le32 reserved3[8];	/* 0x60 */
			__le32 req_q_out;	/* 0x80 */
			__le32 reserved4[23];	/* 0x84 */
			__le32 gp_out;	/* 0xe0 */
			__le32 gp_in;
			__le32 reserved5[5];
			__le32 port_err_status;	/* 0xfc */
		} __attribute__ ((packed)) isp4010;
		struct {
			union {
				struct port_ctrl_stat_regs p0;
				struct host_mem_cfg_regs p1;
				struct local_ram_cfg_regs p2;
				struct prot_stat_regs p3;
				__le32 r_union[44];
			};
		} __attribute__ ((packed)) isp4022;
	} u2;
};				/* 256 x100 */
/* Semaphore Defines for 4010 */
#define QL4010_DRVR_SEM_BITS 0x00000030
#define QL4010_GPIO_SEM_BITS 0x000000c0
#define QL4010_SDRAM_SEM_BITS 0x00000300
#define QL4010_PHY_SEM_BITS 0x00000c00
#define QL4010_NVRAM_SEM_BITS 0x00003000
#define QL4010_FLASH_SEM_BITS 0x0000c000
#define QL4010_DRVR_SEM_MASK 0x00300000
#define QL4010_GPIO_SEM_MASK 0x00c00000
#define QL4010_SDRAM_SEM_MASK 0x03000000
#define QL4010_PHY_SEM_MASK 0x0c000000
#define QL4010_NVRAM_SEM_MASK 0x30000000
#define QL4010_FLASH_SEM_MASK 0xc0000000
/* Semaphore Defines for 4022 */
#define QL4022_RESOURCE_MASK_BASE_CODE 0x7
#define QL4022_RESOURCE_BITS_BASE_CODE 0x4
#define QL4022_DRVR_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (1+16))
#define QL4022_DDR_RAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (4+16))
#define QL4022_PHY_GIO_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (7+16))
#define QL4022_NVRAM_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (10+16))
#define QL4022_FLASH_SEM_MASK (QL4022_RESOURCE_MASK_BASE_CODE << (13+16))
/* Page # defines for 4022 */
#define PORT_CTRL_STAT_PAGE 0 /* 4022 */
#define HOST_MEM_CFG_PAGE 1 /* 4022 */
#define LOCAL_RAM_CFG_PAGE 2 /* 4022 */
#define PROT_STAT_PAGE 3 /* 4022 */
/* Register Mask - sets corresponding mask bits in the upper word */
/* Build a masked-write value: duplicate the low-halfword bits into the
 * upper halfword, which the hardware treats as the write mask so only
 * those bits of the register are modified. */
static inline uint32_t set_rmask(uint32_t val)
{
	uint32_t mask = val << 16;

	return mask | (val & 0xffff);
}
/* Build a masked-clear value: the mask bits go in the upper halfword
 * while the lower (data) halfword stays zero, so the selected register
 * bits are cleared.  (The original `0 |` was a no-op and is dropped.) */
static inline uint32_t clr_rmask(uint32_t val)
{
	return val << 16;
}
/* ctrl_status definitions */
#define CSR_SCSI_PAGE_SELECT 0x00000003
#define CSR_SCSI_INTR_ENABLE 0x00000004 /* 4010 */
#define CSR_SCSI_RESET_INTR 0x00000008
#define CSR_SCSI_COMPLETION_INTR 0x00000010
#define CSR_SCSI_PROCESSOR_INTR 0x00000020
#define CSR_INTR_RISC 0x00000040
#define CSR_BOOT_ENABLE 0x00000080
#define CSR_NET_PAGE_SELECT 0x00000300 /* 4010 */
#define CSR_FUNC_NUM 0x00000700 /* 4022 */
#define CSR_NET_RESET_INTR 0x00000800 /* 4010 */
#define CSR_FORCE_SOFT_RESET 0x00002000 /* 4022 */
#define CSR_FATAL_ERROR 0x00004000
#define CSR_SOFT_RESET 0x00008000
#define ISP_CONTROL_FN_MASK CSR_FUNC_NUM
#define ISP_CONTROL_FN0_SCSI 0x0500
#define ISP_CONTROL_FN1_SCSI 0x0700
#define INTR_PENDING (CSR_SCSI_COMPLETION_INTR |\
CSR_SCSI_PROCESSOR_INTR |\
CSR_SCSI_RESET_INTR)
/* ISP InterruptMask definitions */
#define IMR_SCSI_INTR_ENABLE 0x00000004 /* 4022 */
/* ISP 4022 nvram definitions */
#define NVR_WRITE_ENABLE 0x00000010 /* 4022 */
/* ISP port_status definitions */
/* ISP Semaphore definitions */
/* ISP General Purpose Output definitions */
#define GPOR_TOPCAT_RESET 0x00000004
/* shadow registers (DMA'd from HA to system memory. read only) */
/* Shadow registers: the HBA DMAs these queue indexes into host memory
 * so the driver can read them without a slow PCI register read.
 * Read-only from the host's point of view. */
struct shadow_regs {
	/* SCSI Request Queue Consumer Index */
	__le32 req_q_out;	/* 0 x0 R */
	/* SCSI Completion Queue Producer Index */
	__le32 rsp_q_in;	/* 4 x4 R */
};		  /* 8 x8 */
/* External hardware configuration register */
/* Image of the ext_hw_conf register (see isp_reg.u2.isp4010).  The
 * bitfield view and the raw 32-bit view alias the same word.  bMask
 * occupies the upper 16 bits -- presumably the register write mask, as
 * with set_rmask(); confirm against the hardware manual. */
union external_hw_config_reg {
	struct {
		/* FIXME: Do we even need this? All values are
		 * referred to by 16 bit quantities. Platform and
		 * endianess issues. */
		__le32 bReserved0:1;
		__le32 bSDRAMProtectionMethod:2;
		__le32 bSDRAMBanks:1;
		__le32 bSDRAMChipWidth:1;
		__le32 bSDRAMChipSize:2;
		__le32 bParityDisable:1;
		__le32 bExternalMemoryType:1;
		__le32 bFlashBIOSWriteEnable:1;
		__le32 bFlashUpperBankSelect:1;
		__le32 bWriteBurst:2;
		__le32 bReserved1:3;
		__le32 bMask:16;
	};
	uint32_t Asuint32_t;
};
/*************************************************************************
*
* Mailbox Commands Structures and Definitions
*
*************************************************************************/
/* Mailbox command definitions */
#define MBOX_CMD_ABOUT_FW 0x0009
#define MBOX_CMD_LUN_RESET 0x0016
#define MBOX_CMD_GET_FW_STATUS 0x001F
#define MBOX_CMD_SET_ISNS_SERVICE 0x0021
#define ISNS_DISABLE 0
#define ISNS_ENABLE 1
#define MBOX_CMD_READ_FLASH 0x0026
#define MBOX_CMD_CLEAR_DATABASE_ENTRY 0x0031
#define MBOX_CMD_CONN_CLOSE_SESS_LOGOUT 0x0056
#define LOGOUT_OPTION_CLOSE_SESSION 0x01
#define LOGOUT_OPTION_RELOGIN 0x02
#define MBOX_CMD_EXECUTE_IOCB_A64 0x005A
#define MBOX_CMD_INITIALIZE_FIRMWARE 0x0060
#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK 0x0061
#define MBOX_CMD_REQUEST_DATABASE_ENTRY 0x0062
#define MBOX_CMD_SET_DATABASE_ENTRY 0x0063
#define MBOX_CMD_GET_DATABASE_ENTRY 0x0064
#define DDB_DS_UNASSIGNED 0x00
#define DDB_DS_NO_CONNECTION_ACTIVE 0x01
#define DDB_DS_SESSION_ACTIVE 0x04
#define DDB_DS_SESSION_FAILED 0x06
#define DDB_DS_LOGIN_IN_PROCESS 0x07
#define MBOX_CMD_GET_FW_STATE 0x0069
/* Mailbox 1 */
#define FW_STATE_READY 0x0000
#define FW_STATE_CONFIG_WAIT 0x0001
#define FW_STATE_ERROR 0x0004
#define FW_STATE_DHCP_IN_PROGRESS 0x0008
/* Mailbox 3 */
#define FW_ADDSTATE_OPTICAL_MEDIA 0x0001
#define FW_ADDSTATE_DHCP_ENABLED 0x0002
#define FW_ADDSTATE_LINK_UP 0x0010
#define FW_ADDSTATE_ISNS_SVC_ENABLED 0x0020
#define MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS 0x006B
#define MBOX_CMD_CONN_OPEN_SESS_LOGIN 0x0074
#define MBOX_CMD_GET_CRASH_RECORD 0x0076 /* 4010 only */
#define MBOX_CMD_GET_CONN_EVENT_LOG 0x0077
/* Mailbox status definitions */
#define MBOX_COMPLETION_STATUS 4
#define MBOX_STS_BUSY 0x0007
#define MBOX_STS_INTERMEDIATE_COMPLETION 0x1000
#define MBOX_STS_COMMAND_COMPLETE 0x4000
#define MBOX_STS_COMMAND_ERROR 0x4005
#define MBOX_ASYNC_EVENT_STATUS 8
#define MBOX_ASTS_SYSTEM_ERROR 0x8002
#define MBOX_ASTS_REQUEST_TRANSFER_ERROR 0x8003
#define MBOX_ASTS_RESPONSE_TRANSFER_ERROR 0x8004
#define MBOX_ASTS_PROTOCOL_STATISTIC_ALARM 0x8005
#define MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED 0x8006
#define MBOX_ASTS_LINK_UP 0x8010
#define MBOX_ASTS_LINK_DOWN 0x8011
#define MBOX_ASTS_DATABASE_CHANGED 0x8014
#define MBOX_ASTS_UNSOLICITED_PDU_RECEIVED 0x8015
#define MBOX_ASTS_SELF_TEST_FAILED 0x8016
#define MBOX_ASTS_LOGIN_FAILED 0x8017
#define MBOX_ASTS_DNS 0x8018
#define MBOX_ASTS_HEARTBEAT 0x8019
#define MBOX_ASTS_NVRAM_INVALID 0x801A
#define MBOX_ASTS_MAC_ADDRESS_CHANGED 0x801B
#define MBOX_ASTS_IP_ADDRESS_CHANGED 0x801C
#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
#define ISNS_EVENT_DATA_RECEIVED 0x0000
#define ISNS_EVENT_CONNECTION_OPENED 0x0001
#define ISNS_EVENT_CONNECTION_FAILED 0x0002
#define MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR 0x8022
#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
/*************************************************************************/
/* Host Adapter Initialization Control Block (from host) */
/* Host Adapter Initialization Control Block: 512-byte block downloaded
 * to the adapter with MBOX_CMD_INITIALIZE_FIRMWARE and read back with
 * MBOX_CMD_GET_INIT_FW_CTRL_BLOCK.  Offsets in the comments are byte
 * offsets within the block.  NOTE(review): multi-byte fields are plain
 * uintN_t, not __leN -- confirm whether callers byte-swap before
 * handing this to the firmware. */
struct init_fw_ctrl_blk {
	uint8_t Version;	/* 00 */
	uint8_t Control;	/* 01 */
	uint16_t FwOptions;	/* 02-03 */
#define	 FWOPT_HEARTBEAT_ENABLE		  0x1000
#define	 FWOPT_SESSION_MODE		  0x0040
#define	 FWOPT_INITIATOR_MODE		  0x0020
#define	 FWOPT_TARGET_MODE		  0x0010
	uint16_t ExecThrottle;	/* 04-05 */
	uint8_t RetryCount;	/* 06 */
	uint8_t RetryDelay;	/* 07 */
	uint16_t MaxEthFrPayloadSize;	/* 08-09 */
	uint16_t AddFwOptions;	/* 0A-0B */
	uint8_t HeartbeatInterval;	/* 0C */
	uint8_t InstanceNumber;	/* 0D */
	uint16_t RES2;	/* 0E-0F */
	uint16_t ReqQConsumerIndex;	/* 10-11 */
	uint16_t ComplQProducerIndex;	/* 12-13 */
	uint16_t ReqQLen;	/* 14-15 */
	uint16_t ComplQLen;	/* 16-17 */
	uint32_t ReqQAddrLo;	/* 18-1B */
	uint32_t ReqQAddrHi;	/* 1C-1F */
	uint32_t ComplQAddrLo;	/* 20-23 */
	uint32_t ComplQAddrHi;	/* 24-27 */
	uint32_t ShadowRegBufAddrLo;	/* 28-2B */
	uint32_t ShadowRegBufAddrHi;	/* 2C-2F */
	uint16_t iSCSIOptions;	/* 30-31 */
	uint16_t TCPOptions;	/* 32-33 */
	uint16_t IPOptions;	/* 34-35 */
	uint16_t MaxPDUSize;	/* 36-37 */
	uint16_t RcvMarkerInt;	/* 38-39 */
	uint16_t SndMarkerInt;	/* 3A-3B */
	uint16_t InitMarkerlessInt;	/* 3C-3D */
	uint16_t FirstBurstSize;	/* 3E-3F */
	uint16_t DefaultTime2Wait;	/* 40-41 */
	uint16_t DefaultTime2Retain;	/* 42-43 */
	uint16_t MaxOutStndngR2T;	/* 44-45 */
	uint16_t KeepAliveTimeout;	/* 46-47 */
	uint16_t PortNumber;	/* 48-49 */
	uint16_t MaxBurstSize;	/* 4A-4B */
	uint32_t RES4;		/* 4C-4F */
	uint8_t IPAddr[4];	/* 50-53 */
	uint8_t RES5[12];	/* 54-5F */
	uint8_t SubnetMask[4];	/* 60-63 */
	uint8_t RES6[12];	/* 64-6F */
	uint8_t GatewayIPAddr[4];	/* 70-73 */
	uint8_t RES7[12];	/* 74-7F */
	uint8_t PriDNSIPAddr[4];	/* 80-83 */
	uint8_t SecDNSIPAddr[4];	/* 84-87 */
	uint8_t RES8[8];	/* 88-8F */
	uint8_t Alias[32];	/* 90-AF */
	uint8_t TargAddr[8];	/* B0-B7 *//* /FIXME: Remove?? */
	uint8_t CHAPNameSecretsTable[8];	/* B8-BF */
	uint8_t EthernetMACAddr[6];	/* C0-C5 */
	uint16_t TargetPortalGroup;	/* C6-C7 */
	uint8_t SendScale;	/* C8 */
	uint8_t RecvScale;	/* C9 */
	uint8_t TypeOfService;	/* CA */
	uint8_t Time2Live;	/* CB */
	uint16_t VLANPriority;	/* CC-CD */
	uint16_t Reserved8;	/* CE-CF */
	uint8_t SecIPAddr[4];	/* D0-D3 */
	uint8_t Reserved9[12];	/* D4-DF */
	uint8_t iSNSIPAddr[4];	/* E0-E3 */
	uint16_t iSNSServerPortNumber;	/* E4-E5 */
	uint8_t Reserved10[10];	/* E6-EF */
	uint8_t SLPDAIPAddr[4];	/* F0-F3 */
	uint8_t Reserved11[12];	/* F4-FF */
	uint8_t iSCSINameString[256];	/* 100-1FF */
};
/*************************************************************************/
/* Firmware Device Database (DDB) entry: one per iSCSI target/session,
 * exchanged with MBOX_CMD_GET/SET_DATABASE_ENTRY.  Offsets in comments
 * are byte offsets within the entry. */
struct dev_db_entry {
	uint8_t options;	/* 00 */
#define DDB_OPT_DISC_SESSION  0x10
#define DDB_OPT_TARGET	      0x02 /* device is a target */
	uint8_t control;	/* 01 */
	uint16_t exeThrottle;	/* 02-03 */
	uint16_t exeCount;	/* 04-05 */
	uint8_t retryCount;	/* 06 */
	uint8_t retryDelay;	/* 07 */
	uint16_t iSCSIOptions;	/* 08-09 */
	uint16_t TCPOptions;	/* 0A-0B */
	uint16_t IPOptions;	/* 0C-0D */
	uint16_t maxPDUSize;	/* 0E-0F */
	uint16_t rcvMarkerInt;	/* 10-11 */
	uint16_t sndMarkerInt;	/* 12-13 */
	uint16_t iSCSIMaxSndDataSegLen;	/* 14-15 */
	uint16_t firstBurstSize;	/* 16-17 */
	uint16_t minTime2Wait;	/* 18-19 : RA :default_time2wait */
	uint16_t maxTime2Retain;	/* 1A-1B */
	uint16_t maxOutstndngR2T;	/* 1C-1D */
	uint16_t keepAliveTimeout;	/* 1E-1F */
	uint8_t ISID[6];	/* 20-25 big-endian, must be converted
				 * to little-endian */
	uint16_t TSID;		/* 26-27 */
	uint16_t portNumber;	/* 28-29 */
	uint16_t maxBurstSize;	/* 2A-2B */
	uint16_t taskMngmntTimeout;	/* 2C-2D */
	uint16_t reserved1;	/* 2E-2F */
	uint8_t ipAddr[0x10];	/* 30-3F */
	uint8_t iSCSIAlias[0x20];	/* 40-5F */
	uint8_t targetAddr[0x20];	/* 60-7F */
	uint8_t userID[0x20];	/* 80-9F */
	uint8_t password[0x20];	/* A0-BF */
	uint8_t iscsiName[0x100];	/* C0-1BF : xxzzy Make this a
					 * pointer to a string so we
					 * don't have to reserve soooo
					 * much RAM */
	uint16_t ddbLink;	/* 1C0-1C1 */
	uint16_t CHAPTableIndex; /* 1C2-1C3 */
	uint16_t TargetPortalGroup; /* 1C4-1C5 */
	uint16_t reserved2[2];	/* 1C6-1C7 */
	uint32_t statSN;	/* 1C8-1CB */
	uint32_t expStatSN;	/* 1CC-1CF */
	/* NOTE(review): 0x2C uint16_t elements is 0x58 bytes, but the
	 * comment range 1D0-1FB spans only 0x2C bytes -- confirm the
	 * intended element count against the firmware spec. */
	uint16_t reserved3[0x2C]; /* 1D0-1FB */
	uint16_t ddbValidCookie; /* 1FC-1FD */
	uint16_t ddbValidSize;	/* 1FE-1FF */
};
/*************************************************************************/
/* Flash definitions */
#define FLASH_OFFSET_SYS_INFO 0x02000000
#define FLASH_DEFAULTBLOCKSIZE 0x20000
#define FLASH_EOF_OFFSET (FLASH_DEFAULTBLOCKSIZE-8) /* 4 bytes
* for EOF
* signature */
/* One MAC address record in the flash system-information area; padded
 * to 8 bytes. */
struct sys_info_phys_addr {
	uint8_t address[6];	/* 00-05 */
	uint8_t filler[2];	/* 06-07 */
};
/* System-information block stored in flash at FLASH_OFFSET_SYS_INFO;
 * 512 (0x200) bytes total.  Offsets in comments are byte offsets. */
struct flash_sys_info {
	uint32_t cookie;	/* 00-03 */
	uint32_t physAddrCount; /* 04-07 */
	struct sys_info_phys_addr physAddr[4]; /* 08-27 */
	uint8_t vendorId[128];	/* 28-A7 */
	uint8_t productId[128]; /* A8-127 */
	uint32_t serialNumber;	/* 128-12B */

	/*  PCI Configuration values */
	uint32_t pciDeviceVendor;	/* 12C-12F */
	uint32_t pciDeviceId;	/* 130-133 */
	uint32_t pciSubsysVendor;	/* 134-137 */
	uint32_t pciSubsysId;	/* 138-13B */

	/*  This validates version 1. */
	uint32_t crumbs;	/* 13C-13F */

	uint32_t enterpriseNumber;	/* 140-143 */

	uint32_t mtu;		/* 144-147 */
	uint32_t reserved0;	/* 148-14b */
	uint32_t crumbs2;	/* 14c-14f */
	uint8_t acSerialNumber[16];	/* 150-15f */
	uint32_t crumbs3;	/* 160-163 */

	/* Leave this last in the struct so it is declared invalid if
	 * any new items are added.
	 */
	uint32_t reserved1[39]; /* 164-1ff */
};	/* 200 */
/* Firmware crash record, retrieved with MBOX_CMD_GET_CRASH_RECORD
 * (4010 only).  A variable-length RISC stack dump follows the fixed
 * part (zero-length-array idiom; [] in C99). */
struct crash_record {
	uint16_t fw_major_version;	/* 00 - 01 */
	uint16_t fw_minor_version;	/* 02 - 03 */
	uint16_t fw_patch_version;	/* 04 - 05 */
	uint16_t fw_build_version;	/* 06 - 07 */

	uint8_t build_date[16]; /* 08 - 17 */
	uint8_t build_time[16]; /* 18 - 27 */
	uint8_t build_user[16]; /* 28 - 37 */
	uint8_t card_serial_num[16];	/* 38 - 47 */

	uint32_t time_of_crash_in_secs; /* 48 - 4B */
	uint32_t time_of_crash_in_ms;	/* 4C - 4F */

	uint16_t out_RISC_sd_num_frames;	/* 50 - 51 */
	uint16_t OAP_sd_num_words;	/* 52 - 53 */
	uint16_t IAP_sd_num_frames;	/* 54 - 55 */
	uint16_t in_RISC_sd_num_words;	/* 56 - 57 */

	/* NOTE(review): the comment range 58-7F spans 0x28 (40) bytes,
	 * but the array is 28 bytes, which puts the register dumps at
	 * 0x74/0x174 rather than the commented 0x80/0x180 -- confirm
	 * against the firmware spec before relying on either. */
	uint8_t reserved1[28];	/* 58 - 7F */

	uint8_t out_RISC_reg_dump[256]; /* 80 -17F */
	uint8_t in_RISC_reg_dump[256];	/*180 -27F */
	uint8_t in_out_RISC_stack_dump[0];	/*280 - ??? */
};
/* One firmware connection-event log record, retrieved with
 * MBOX_CMD_GET_CONN_EVENT_LOG.  Timestamps are relative to adapter
 * boot. */
struct conn_event_log_entry {
#define MAX_CONN_EVENT_LOG_ENTRIES	100
	uint32_t timestamp_sec; /* 00 - 03 seconds since boot */
	uint32_t timestamp_ms;	/* 04 - 07 milliseconds since boot */
	uint16_t device_index;	/* 08 - 09 */
	uint16_t fw_conn_state; /* 0A - 0B */
	uint8_t event_type;	/* 0C - 0C */
	uint8_t error_code;	/* 0D - 0D */
	uint16_t error_code_detail;	/* 0E - 0F */
	uint8_t num_consecutive_events; /* 10 - 10 */
	uint8_t rsvd[3];	/* 11 - 13 */
};
/*************************************************************************
*
* IOCB Commands Structures and Definitions
*
*************************************************************************/
#define IOCB_MAX_CDB_LEN 16 /* Bytes in a CBD */
#define IOCB_MAX_SENSEDATA_LEN 32 /* Bytes of sense data */
/* IOCB header structure */
/* Common 4-byte header at the start of every IOCB/IOSB on the request
 * and response queues; entryType selects which ET_* layout follows. */
struct qla4_header {
	uint8_t entryType;
#define ET_STATUS		 0x03
#define ET_MARKER		 0x04
#define ET_CONT_T1		 0x0A
#define ET_STATUS_CONTINUATION	 0x10
#define ET_CMND_T3		 0x19
#define ET_PASSTHRU0		 0x3A
#define ET_PASSTHRU_STATUS	 0x3C

	uint8_t entryStatus;
	/* driver-defined byte (e.g. set for continuation entries in
	 * qla4xxx_alloc_cont_entry) */
	uint8_t systemDefined;
	uint8_t entryCount;	/* number of queue entries used by this IOCB */

	/* SystemDefined definition */
};
/* Generic queue entry structure*/
/* Generic 64-byte ring slot, used when stepping through the queues
 * before the concrete IOCB type is known. */
struct queue_entry {
	uint8_t data[60];
	uint32_t signature;
};
/* 64 bit addressing segment counts*/
#define COMMAND_SEG_A64 1
#define CONTINUE_SEG_A64 5
/* 64 bit addressing segment definition*/
/* One 64-bit-address scatter/gather element: split DMA base address
 * plus byte count. */
struct data_seg_a64 {
	struct {
		uint32_t addrLow;
		uint32_t addrHigh;
	} base;
	uint32_t count;
};
/* Command Type 3 entry structure*/
/* Command Type 3 IOCB (ET_CMND_T3): carries one SCSI command with up
 * to COMMAND_SEG_A64 inline data segments; additional segments go in
 * continuation_t1_entry IOCBs.  Offsets in comments are byte offsets. */
struct command_t3_entry {
	struct qla4_header hdr; /* 00-03 */

	uint32_t handle;	/* 04-07 */
	uint16_t target;	/* 08-09 */
	uint16_t connection_id; /* 0A-0B */

	uint8_t control_flags;	/* 0C */

	/* data direction  (bits 5-6) */
#define CF_WRITE		0x20
#define CF_READ			0x40
#define CF_NO_DATA		0x00

	/* task attributes (bits 2-0) */
#define CF_HEAD_TAG		0x03
#define CF_ORDERED_TAG		0x02
#define CF_SIMPLE_TAG		0x01

	/* STATE FLAGS FIELD IS A PLACE HOLDER. THE FW WILL SET BITS
	 * IN THIS FIELD AS THE COMMAND IS PROCESSED. WHEN THE IOCB IS
	 * CHANGED TO AN IOSB THIS FIELD WILL HAVE THE STATE FLAGS SET
	 * PROPERLY.
	 */
	uint8_t state_flags;	/* 0D */
	uint8_t cmdRefNum;	/* 0E */
	uint8_t reserved1;	/* 0F */
	uint8_t cdb[IOCB_MAX_CDB_LEN];	/* 10-1F */
	struct scsi_lun lun;	/* FCP LUN (BE). */
	uint32_t cmdSeqNum;	/* 28-2B */
	uint16_t timeout;	/* 2C-2D */
	uint16_t dataSegCnt;	/* 2E-2F */
	uint32_t ttlByteCnt;	/* 30-33 */
	struct data_seg_a64 dataseg[COMMAND_SEG_A64];	/* 34-3F */
};
/* Continuation Type 1 entry structure*/
/* Continuation Type 1 IOCB (ET_CONT_T1): holds up to CONTINUE_SEG_A64
 * extra data segments for a command that overflowed its inline slots. */
struct continuation_t1_entry {
	struct qla4_header hdr;

	struct data_seg_a64 dataseg[CONTINUE_SEG_A64];
};
/* Parameterize for 64 or 32 bits */
#define COMMAND_SEG COMMAND_SEG_A64
#define CONTINUE_SEG CONTINUE_SEG_A64
#define ET_COMMAND ET_CMND_T3
#define ET_CONTINUE ET_CONT_T1
/* Marker entry structure*/
/* Marker IOCB (ET_MARKER): posted after a reset so the firmware can
 * resynchronize command processing for the target/LUN; see
 * qla4xxx_send_marker_iocb. */
struct marker_entry {
	struct qla4_header hdr; /* 00-03 */

	uint32_t system_defined; /* 04-07 */
	uint16_t target;	/* 08-09 */
	uint16_t modifier;	/* 0A-0B */
#define MM_LUN_RESET	     0

	uint16_t flags;		/* 0C-0D */
	uint16_t reserved1;	/* 0E-0F */
	struct scsi_lun lun;	/* FCP LUN (BE). */
	uint64_t reserved2;	/* 18-1F */
	uint64_t reserved3;	/* 20-27 */
	uint64_t reserved4;	/* 28-2F */
	uint64_t reserved5;	/* 30-37 */
	uint64_t reserved6;	/* 38-3F */
};
/* Status entry structure*/
/* Status IOSB (ET_STATUS): firmware completion for one command; handle
 * matches the command_t3_entry that was issued.  SCS_* are firmware
 * completion codes translated by the ISR into Linux SCSI results. */
struct status_entry {
	struct qla4_header hdr; /* 00-03 */

	uint32_t handle;	/* 04-07 */

	uint8_t scsiStatus;	/* 08 */
#define SCSI_CHECK_CONDITION		  0x02

	uint8_t iscsiFlags;	/* 09 */
#define ISCSI_FLAG_RESIDUAL_UNDER	  0x02
#define ISCSI_FLAG_RESIDUAL_OVER	  0x04

	uint8_t iscsiResponse;	/* 0A */

	uint8_t completionStatus;	/* 0B */
#define SCS_COMPLETE			  0x00
#define SCS_INCOMPLETE			  0x01
#define SCS_RESET_OCCURRED		  0x04
#define SCS_ABORTED			  0x05
#define SCS_TIMEOUT			  0x06
#define SCS_DATA_OVERRUN		  0x07
#define SCS_DATA_UNDERRUN		  0x15
#define SCS_QUEUE_FULL			  0x1C
#define SCS_DEVICE_UNAVAILABLE		  0x28
#define SCS_DEVICE_LOGGED_OUT		  0x29

	uint8_t reserved1;	/* 0C */

	/* state_flags MUST be at the same location as state_flags in
	 * the Command_T3/4_Entry */
	uint8_t state_flags;	/* 0D */

	uint16_t senseDataByteCnt;	/* 0E-0F */
	uint32_t residualByteCnt;	/* 10-13 */
	uint32_t bidiResidualByteCnt;	/* 14-17 */
	uint32_t expSeqNum;	/* 18-1B */
	uint32_t maxCmdSeqNum;	/* 1C-1F */
	uint8_t senseData[IOCB_MAX_SENSEDATA_LEN];	/* 20-3F */
};
/* Passthrough IOCB (ET_PASSTHRU0): sends a raw PDU/frame to the
 * firmware, with separate out/in data segments.  Note that
 * PT_FLAG_ETHERNET_FRAME and PT_FLAG_ISNS_PDU intentionally share the
 * same bit (0x8000); which meaning applies depends on the connection. */
struct passthru0 {
	struct qla4_header hdr; /* 00-03 */
	uint32_t handle;	/* 04-07 */
	uint16_t target;	/* 08-09 */
	uint16_t connectionID;	/* 0A-0B */
#define ISNS_DEFAULT_SERVER_CONN_ID	((uint16_t)0x8000)

	uint16_t controlFlags;	/* 0C-0D */
#define PT_FLAG_ETHERNET_FRAME		0x8000
#define PT_FLAG_ISNS_PDU		0x8000
#define PT_FLAG_SEND_BUFFER		0x0200
#define PT_FLAG_WAIT_4_RESPONSE		0x0100

	uint16_t timeout;	/* 0E-0F */
#define PT_DEFAULT_TIMEOUT		30 /* seconds */

	struct data_seg_a64 outDataSeg64;	/* 10-1B */
	uint32_t res1;		/* 1C-1F */
	struct data_seg_a64 inDataSeg64;	/* 20-2B */
	uint8_t res2[20];	/* 2C-3F */
};
/* Passthrough status IOSB (ET_PASSTHRU_STATUS): completion for a
 * passthru0 IOCB, including residual counts for both directions. */
struct passthru_status {
	struct qla4_header hdr; /* 00-03 */
	uint32_t handle;	/* 04-07 */
	uint16_t target;	/* 08-09 */
	uint16_t connectionID;	/* 0A-0B */

	uint8_t completionStatus;	/* 0C */
#define PASSTHRU_STATUS_COMPLETE		0x01

	uint8_t residualFlags;	/* 0D */

	uint16_t timeout;	/* 0E-0F */
	uint16_t portNumber;	/* 10-11 */
	uint8_t res1[10];	/* 12-1B */
	uint32_t outResidual;	/* 1C-1F */
	uint8_t res2[12];	/* 20-2B */
	uint32_t inResidual;	/* 2C-2F */
	uint8_t res4[16];	/* 30-3F */
};
#endif /* _QLA4X_FW_H */

View file

@ -0,0 +1,78 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef __QLA4x_GBL_H
#define __QLA4x_GBL_H
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
int qla4xxx_initialize_adapter(struct scsi_qla_host * ha,
uint8_t renew_ddb_list);
int qla4xxx_soft_reset(struct scsi_qla_host *ha);
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs);
void qla4xxx_free_ddb_list(struct scsi_qla_host * ha);
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen);
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha);
int qla4xxx_relogin_device(struct scsi_qla_host * ha,
struct ddb_entry * ddb_entry);
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
int lun);
int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
uint32_t offset, uint32_t len);
int qla4xxx_get_firmware_status(struct scsi_qla_host * ha);
int qla4xxx_get_firmware_state(struct scsi_qla_host * ha);
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha);
/* FIXME: Goodness! this really wants a small struct to hold the
* parameters. On x86 the args will get passed on the stack! */
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
uint16_t fw_ddb_index,
struct dev_db_entry *fw_ddb_entry,
dma_addr_t fw_ddb_entry_dma,
uint32_t *num_valid_ddb_entries,
uint32_t *next_ddb_index,
uint32_t *fw_ddb_device_state,
uint32_t *conn_err_detail,
uint16_t *tcp_source_port_num,
uint16_t *connection_id);
struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host * ha,
uint32_t fw_ddb_index);
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
dma_addr_t fw_ddb_entry_dma);
void qla4xxx_mark_device_missing(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry);
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset);
void qla4xxx_get_crash_record(struct scsi_qla_host * ha);
struct ddb_entry *qla4xxx_alloc_sess(struct scsi_qla_host *ha);
int qla4xxx_add_sess(struct ddb_entry *);
void qla4xxx_destroy_sess(struct ddb_entry *ddb_entry);
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
uint16_t fw_ddb_index,
uint16_t connection_id,
uint16_t option);
int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
uint16_t fw_ddb_index);
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha);
int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
uint32_t intr_status);
int qla4xxx_init_rings(struct scsi_qla_host * ha);
void qla4xxx_dump_buffer(void *b, uint32_t size);
struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index);
void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
uint32_t fw_ddb_index, uint32_t state);
extern int extended_error_logging;
extern int ql4xdiscoverywait;
extern int ql4xdontresethba;
#endif /* _QLA4x_GBL_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,84 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/*
*
* qla4xxx_lookup_ddb_by_fw_index
* This routine locates a device handle given the firmware device
* database index. If device doesn't exist, returns NULL.
*
* Input:
* ha - Pointer to host adapter structure.
* fw_ddb_index - Firmware's device database index
*
* Returns:
* Pointer to the corresponding internal device database structure
*/
static inline struct ddb_entry *
qla4xxx_lookup_ddb_by_fw_index(struct scsi_qla_host *ha, uint32_t fw_ddb_index)
{
	struct ddb_entry *found = NULL;

	/* Out-of-range indexes and unassigned map slots both yield NULL. */
	if (fw_ddb_index < MAX_DDB_ENTRIES) {
		struct ddb_entry *slot = ha->fw_ddb_index_map[fw_ddb_index];

		if (slot != (struct ddb_entry *) INVALID_ENTRY)
			found = slot;
	}

	DEBUG3(printk("scsi%d: %s: index [%d], ddb_entry = %p\n",
		      ha->host_no, __func__, fw_ddb_index, found));

	return found;
}
/* Enable SCSI interrupts with ha->hardware_lock already held.  The
 * 4022 has a dedicated interrupt-mask register; the 4010 enables
 * interrupts through the control/status register.  Each write is read
 * back to push the posted PCI write to the chip. */
static inline void
__qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
	if (!is_qla4022(ha)) {
		writel(set_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	} else {
		writel(set_rmask(IMR_SCSI_INTR_ENABLE),
		       &ha->reg->u1.isp4022.intr_mask);
		readl(&ha->reg->u1.isp4022.intr_mask);
	}

	set_bit(AF_INTERRUPTS_ON, &ha->flags);
}
/* Disable SCSI interrupts with ha->hardware_lock already held; mirror
 * image of __qla4xxx_enable_intrs using clr_rmask. */
static inline void
__qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
	if (!is_qla4022(ha)) {
		writel(clr_rmask(CSR_SCSI_INTR_ENABLE), &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	} else {
		writel(clr_rmask(IMR_SCSI_INTR_ENABLE),
		       &ha->reg->u1.isp4022.intr_mask);
		readl(&ha->reg->u1.isp4022.intr_mask);
	}

	clear_bit(AF_INTERRUPTS_ON, &ha->flags);
}
/* Locked wrapper: take the hardware lock around the raw interrupt
 * enable. */
static inline void
qla4xxx_enable_intrs(struct scsi_qla_host *ha)
{
	unsigned long iflags;

	spin_lock_irqsave(&ha->hardware_lock, iflags);
	__qla4xxx_enable_intrs(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, iflags);
}
/* Locked wrapper: take the hardware lock around the raw interrupt
 * disable. */
static inline void
qla4xxx_disable_intrs(struct scsi_qla_host *ha)
{
	unsigned long iflags;

	spin_lock_irqsave(&ha->hardware_lock, iflags);
	__qla4xxx_disable_intrs(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, iflags);
}

View file

@ -0,0 +1,368 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
#include <scsi/scsi_tcq.h>
/**
 * qla4xxx_get_req_pkt - returns a valid entry in request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 *
 * On QLA_SUCCESS, *queue_entry points at a zeroed ring slot and the
 * ring pointers have been advanced; on QLA_ERROR the ring is left as
 * it was.  NOTE(review): callers in this file invoke this with
 * ha->hardware_lock held -- confirm that holds for all call sites.
 **/
int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			struct queue_entry **queue_entry)
{
	uint16_t request_in;
	uint8_t status = QLA_SUCCESS;

	*queue_entry = ha->request_ptr;

	/* get the latest request_in and request_out index */
	request_in = ha->request_in;
	ha->request_out = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);

	/* Advance request queue pointer and check for queue full */
	if (request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		/* wrap to the start of the ring */
		request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		request_in++;
		ha->request_ptr++;
	}

	/* request queue is full, try again later */
	if ((ha->iocb_cnt + 1) >= ha->iocb_hiwat) {
		/* restore request pointer */
		ha->request_ptr = *queue_entry;
		status = QLA_ERROR;
	} else {
		/* commit the advanced index and hand back a clean slot */
		ha->request_in = request_in;
		memset(*queue_entry, 0, sizeof(**queue_entry));
	}

	return status;
}
/**
 * qla4xxx_send_marker_iocb - issues marker iocb to HBA
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 *
 * Posts a marker IOCB with the MM_LUN_RESET modifier for the given
 * target/LUN and rings the request-queue doorbell.  Takes and releases
 * ha->hardware_lock internally.  Returns QLA_SUCCESS, or QLA_ERROR
 * when no request-ring slot is available.
 **/
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, int lun)
{
	struct marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(MM_LUN_RESET);
	int_to_scsilun(lun, &marker_entry->lun);
	/* make the IOCB contents visible before the doorbell write */
	wmb();

	/* Tell ISP it's got a new I/O request */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}
/**
 * qla4xxx_alloc_cont_entry - take a continuation IOCB off the request ring
 * @ha: Pointer to host adapter structure.
 *
 * Hands back the current request-ring slot as a Continuation Type 1
 * entry, advances the ring pointers, and pre-fills the header.  No
 * queue-full check is made here; the caller has already reserved
 * enough entries (see qla4xxx_send_command_to_isp) and holds
 * ha->hardware_lock.
 **/
struct continuation_t1_entry* qla4xxx_alloc_cont_entry(
	struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	/* Advance request queue pointer */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	/* NOTE(review): the (uint8_t) cast keeps the low byte of the
	 * swapped value on little-endian hosts but the HIGH byte on
	 * big-endian ones -- confirm the intended systemDefined
	 * semantics before touching this. */
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);
	return cont_entry;
}
/**
 * qla4xxx_calc_request_entries - request-ring entries needed for a command
 * @dsds: number of DMA data segments for the command
 *
 * One command entry holds COMMAND_SEG segments; every further
 * CONTINUE_SEG segments (rounded up) need one continuation entry.
 **/
uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t extra = 0;

	if (dsds > COMMAND_SEG) {
		uint16_t remaining = dsds - COMMAND_SEG;

		/* ceiling division: one continuation per CONTINUE_SEG */
		extra = (remaining + CONTINUE_SEG - 1) / CONTINUE_SEG;
	}

	return 1 + extra;
}
/**
 * qla4xxx_build_scsi_iocbs - fill in the data segments of a command IOCB
 * @srb: SCSI request block being issued
 * @cmd_entry: partially-built Command Type 3 IOCB on the request ring
 * @tot_dsds: number of DMA segments mapped for this command
 *
 * Walks the command's scatter/gather list (or its single mapped
 * buffer) and writes 64-bit-address data segments into the command
 * entry, pulling continuation entries off the request ring whenever
 * the current entry's segment slots run out.  Caller must hold
 * ha->hardware_lock, since the ring pointers are advanced here via
 * qla4xxx_alloc_cont_entry.
 **/
void qla4xxx_build_scsi_iocbs(struct srb *srb,
			      struct command_t3_entry *cmd_entry,
			      uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;

	cmd = srb->cmd;
	ha = srb->ha;

	if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);

	/* Load data segments */
	if (cmd->use_sg) {
		struct scatterlist *cur_seg;
		struct scatterlist *end_seg;

		cur_seg = (struct scatterlist *)cmd->request_buffer;
		end_seg = cur_seg + tot_dsds;
		while (cur_seg < end_seg) {
			dma_addr_t sle_dma;

			/* Allocate additional continuation packets? */
			if (avail_dsds == 0) {
				struct continuation_t1_entry *cont_entry;

				cont_entry = qla4xxx_alloc_cont_entry(ha);
				cur_dsd =
					(struct data_seg_a64 *)
					&cont_entry->dataseg[0];
				avail_dsds = CONTINUE_SEG;
			}

			/* split the 64-bit DMA address into lo/hi words */
			sle_dma = sg_dma_address(cur_seg);
			cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
			cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
			cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
			avail_dsds--;

			cur_dsd++;
			cur_seg++;
		}
	} else {
		/* single contiguous buffer mapped earlier by the caller */
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle));
		cur_dsd->count = cpu_to_le32(cmd->request_bufflen);
	}
}
/**
 * qla4xxx_send_command_to_isp - issues command to HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to ISP
 *
 * This routine is called by qla4xxx_queuecommand to build an ISP
 * command and pass it to the ISP for execution.
 *
 * Returns QLA_SUCCESS once the command is on the request ring and the
 * doorbell has been written, or QLA_ERROR when DMA mapping fails or
 * there is no ring/IOCB budget; any DMA mappings made here are undone
 * on the error path.
 **/
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	struct scatterlist *sg = NULL;

	uint16_t tot_dsds;
	uint16_t req_cnt;

	unsigned long flags;
	uint16_t cnt;
	uint32_t index;
	char tag[2];

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	/* Send marker(s) if needed. */
	if (ha->marker_needed == 1) {
		if (qla4xxx_send_marker_iocb(ha, ddb_entry,
					     cmd->device->lun) != QLA_SUCCESS)
			return QLA_ERROR;

		ha->marker_needed = 0;
	}
	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* the block-layer tag doubles as the IOCB handle */
	index = (uint32_t)cmd->request->tag;

	/* Calculate the number of request entries needed. */
	if (cmd->use_sg) {
		sg = (struct scatterlist *)cmd->request_buffer;
		tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg,
				      cmd->sc_data_direction);
		if (tot_dsds == 0)
			goto queuing_error;
	} else if (cmd->request_bufflen) {
		dma_addr_t req_dma;

		req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
					 cmd->request_bufflen,
					 cmd->sc_data_direction);
		if (dma_mapping_error(req_dma))
			goto queuing_error;

		srb->dma_handle = req_dma;
		tot_dsds = 1;
	}
	req_cnt = qla4xxx_calc_request_entries(tot_dsds);

	/* Ring nearly full?  Refresh the cached consumer index from the
	 * shadow registers and recompute the free-slot count. */
	if (ha->req_q_count < (req_cnt + 2)) {
		cnt = (uint16_t) le32_to_cpu(ha->shadow_regs->req_q_out);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
				(ha->request_in - cnt);
	}

	if (ha->req_q_count < (req_cnt + 2))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));

	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	cmd_entry->connection_id = cpu_to_le16(ddb_entry->connection_id);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
	cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen);
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits iff there is data to be
	 *	 transferred, as the data direction bit is sometimes filled
	 *	 in when there is no data to be transferred */
	cmd_entry->control_flags = CF_NO_DATA;
	if (cmd->request_bufflen) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;
	if (scsi_populate_tag_msg(cmd, tag))
		switch (tag[0]) {
		case MSG_HEAD_TAG:
			cmd_entry->control_flags |= CF_HEAD_TAG;
			break;
		case MSG_ORDERED_TAG:
			cmd_entry->control_flags |= CF_ORDERED_TAG;
			break;
		}

	/* Advance request queue pointer */
	ha->request_in++;
	if (ha->request_in == REQUEST_QUEUE_DEPTH) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else
		ha->request_ptr++;

	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	/* make the IOCBs visible before the doorbell write below */
	wmb();

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue. If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* stash the srb so the completion path can find it */
	srb->cmd->host_scribble = (unsigned char *)srb;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCB used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	/* Debug print statements */
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return QLA_SUCCESS;

queuing_error:
	/* undo whichever DMA mapping was made above */
	if (cmd->use_sg && tot_dsds) {
		sg = (struct scatterlist *) cmd->request_buffer;
		pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
			     cmd->sc_data_direction);
	} else if (tot_dsds)
		pci_unmap_single(ha->pdev, srb->dma_handle,
				 cmd->request_bufflen, cmd->sc_data_direction);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}

View file

@ -0,0 +1,797 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 *
 * Completes the active SRB at @index with DID_OK.  An index that does
 * not map to an active SRB means driver and firmware disagree about
 * outstanding commands, so a host reset is scheduled via DPC_RESET_HA.
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
					      uint32_t index)
{
	struct srb *srb = qla4xxx_del_from_active_array(ha, index);

	if (!srb) {
		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
			      "%d\n", ha->host_no, index));
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	/* Save ISP completion status */
	srb->cmd->result = DID_OK << 16;
	qla4xxx_srb_compl(ha, srb);
}
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 *
 * Translates the firmware completion status (SCS_*) of a status IOCB
 * into a Linux SCSI result (DID_* | scsi_status), copies sense data
 * when present, and completes the srb.  Runs with hardware_lock held,
 * called from the response-queue processing path.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;
	/* Fast path: complete success, nothing to translate. */
	if (sts_entry->completionStatus == SCS_COMPLETE &&
	    sts_entry->scsiStatus == 0) {
		qla4xxx_process_completed_request(ha,
						  le32_to_cpu(sts_entry->
							      handle));
		return;
	}
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		return;
	}
	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}
	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		/* No target behind this srb any more. */
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}
	residual = le32_to_cpu(sts_entry->residualByteCnt);
	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:
		if (scsi_status == 0) {
			cmd->result = DID_OK << 16;
			break;
		}
		if (sts_entry->iscsiFlags &
		    (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
			cmd->resid = residual;
		cmd->result = DID_OK << 16 | scsi_status;
		if (scsi_status != SCSI_CHECK_CONDITION)
			break;
		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;
		/* Bounded copy: never exceed the midlayer sense buffer. */
		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min(sensebytecnt,
			   (uint16_t) sizeof(cmd->sense_buffer)));
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));
		srb->flags |= SRB_GOT_SENSE;
		break;
	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;
	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));
		cmd->result = DID_RESET << 16;
		break;
	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));
		/* Aborts are reported upward as DID_RESET here. */
		cmd->result = DID_RESET << 16;
		break;
	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));
		cmd->result = DID_BUS_BUSY << 16;
		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;
	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));
			cmd->result = DID_ERROR << 16;
			break;
		}
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
			/*
			 * Firmware detected a SCSI transport underrun
			 * condition
			 */
			cmd->resid = residual;
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
				      "detected, xferlen = 0x%x, residual = "
				      "0x%x\n",
				      ha->host_no, cmd->device->channel,
				      cmd->device->id,
				      cmd->device->lun, __func__,
				      cmd->request_bufflen,
				      residual));
		}
		/*
		 * If there is scsi_status, it takes precedense over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;
			if (scsi_status != SCSI_CHECK_CONDITION)
				break;
			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0,
			       sizeof(cmd->sense_buffer));
			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;
			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min(sensebytecnt,
				   (uint16_t) sizeof(cmd->sense_buffer)));
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((cmd->request_bufflen - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will appearantly handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.	 In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      cmd->request_bufflen, residual));
				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;
	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		cmd->result = DID_BUS_BUSY << 16;
		break;
	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;
	default:
		/* Unknown firmware completion status. */
		cmd->result = DID_ERROR << 16;
		break;
	}
status_entry_exit:
	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * Drains the response ring up to the producer index published by the
 * firmware in the shadow registers, dispatching each entry by type.
 * On exit the new consumer index is written back to the chip (for
 * QLA4010 this also clears the interrupt).
 * Hardware_lock locked upon entry; runs in interrupt context.
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;
	/* Process all responses from response queue */
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;
		/* Advance pointers for next entry (ring wraps at depth) */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}
		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;
		case ET_PASSTHRU_STATUS:
			/* Passthru completions are not handled here. */
			break;
		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;
		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */
			srb = qla4xxx_del_from_active_array(ha,
						    le32_to_cpu(sts_entry->
								handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;
			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));
			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;
		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;
		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue \n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}
	/*
	 * Done with responses, update the ISP For QLA4010, this also clears
	 * the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
	return;
exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));
exit_prq_error:
	/* Still ack what we consumed, then request a full HA reset. */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.  Command
 * completions wake the waiter in qla4xxx_mailbox_command(); async
 * events (AENs) are either handled immediately or, for database
 * change events, queued for the DPC routine.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;
		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);
			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
			wake_up(&ha->mailbox_wait_queue);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s:Dont Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;
		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			/* Fatal-class events: schedule adapter reset. */
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;
		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;
		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;
		case MBOX_ASTS_HEARTBEAT:
			/* Firmware is alive; reset the watchdog counter. */
			ha->seconds_since_last_heartbeat = 0;
			break;
		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;
		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							   * mode
							   * only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;
		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;
		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;
		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {
				/* advance pointer (ring of MAX_AEN_ENTRIES) */
				if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
					ha->aen_in = 0;
				else
					ha->aen_in++;
				/* decrement available counter */
				ha->aen_q_count--;
				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);
				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;
				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].  mbox_sts[4]));
				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				/* Queue full: the event is dropped. */
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));
				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));
				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;
		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));
		ha->mbox_status[0] = mbox_status;
	}
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: control/status register snapshot to service.
 *
 * Main interrupt service routine: drains the response queue when a
 * completion interrupt is pending, then decodes and acknowledges any
 * mailbox/async-event interrupt.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* SCSI completion interrupt: walk the response ring. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Mailbox/async-event interrupt. */
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		uint32_t mbox_status = readl(&ha->reg->mailbox[0]);

		qla4xxx_isr_decode_mailbox(ha, mbox_status);

		/* Ack the mailbox interrupt; read back to flush the write. */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 * @regs: Unused
 *
 * Services up to MAX_REQS_SERVICED_PER_INTR interrupt causes per
 * invocation.  Fatal errors and chip resets break out immediately and
 * defer recovery to the DPC.  Returns IRQ_NONE only when dev_id is
 * NULL; otherwise IRQ_HANDLED.
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;
	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}
	spin_lock_irqsave(&ha->hardware_lock, flags);
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status.  Prefer the (cheaper) shadow
		 * register check for pending completions before touching
		 * the control/status register.
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);
		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			/* Nothing pending; count spurious wakeups. */
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}
		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status (ha))));
			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}
			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);
			__qla4xxx_disable_intrs(ha);
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			/* Chip reset seen: take the HA offline and let the
			 * DPC finish the reset-recovery. */
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);
			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);
			set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);
			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
			intr_status = 0;
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 *
 * Drains the AEN ring filled by qla4xxx_isr_decode_mailbox().  The
 * hardware_lock is dropped while each event is handled (the handlers
 * may sleep or take other locks) and re-acquired before advancing.
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;
	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		/* Advance pointers for next entry */
		if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
			ha->aen_out = 0;
		else
			ha->aen_out++;
		ha->aen_q_count++;
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure so the ring slot
		 * can be reused once the lock is dropped */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x "
			     "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out,
			     mbox_sts[0], mbox_sts[2], mbox_sts[3],
			     mbox_sts[1], mbox_sts[4]));
		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				/* Discard mode: just log and drop. */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;
				ddb_entry =
					/* FIXME: name length? */
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;
				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);
				DEBUG2(printk("scsi%ld: ddb index [%d] initate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}
			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

View file

@ -0,0 +1,930 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
/**
 * qla4xxx_mailbox_command - issues mailbox commands
 * @ha: Pointer to host adapter structure.
 * @inCount: number of mailbox registers to load.
 * @outCount: number of mailbox registers to return.
 * @mbx_cmd: data pointer for mailbox in registers.
 * @mbx_sts: data pointer for mailbox out registers.
 *
 * This routine issues mailbox commands and waits for completion.
 * If outCount is 0, this routine completes successfully WITHOUT waiting
 * for the mailbox command to complete.
 *
 * Serialized by ha->mbox_sem.  Returns QLA_SUCCESS or QLA_ERROR; on
 * timeout mbx_sts[0] is set to -1 and an adapter reset is scheduled.
 **/
int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
			    uint8_t outCount, uint32_t *mbx_cmd,
			    uint32_t *mbx_sts)
{
	int status = QLA_ERROR;
	uint8_t i;
	u_long wait_count;
	uint32_t intr_status;
	unsigned long flags = 0;
	DECLARE_WAITQUEUE(wait, current);
	mutex_lock(&ha->mbox_sem);
	/* Mailbox code active */
	set_bit(AF_MBOX_COMMAND, &ha->flags);
	/* Make sure that pointers are valid */
	if (!mbx_cmd || !mbx_sts) {
		DEBUG2(printk("scsi%ld: %s: Invalid mbx_cmd or mbx_sts "
			      "pointer\n", ha->host_no, __func__));
		goto mbox_exit;
	}
	/* To prevent overwriting mailbox registers for a command that has
	 * not yet been serviced, check to see if a previously issued
	 * mailbox command is interrupting.
	 * -----------------------------------------------------------------
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	intr_status = readl(&ha->reg->ctrl_status);
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		/* Service existing interrupt */
		qla4xxx_interrupt_service_routine(ha, intr_status);
		clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
	}
	/* Send the mailbox command to the firmware */
	ha->mbox_status_count = outCount;
	for (i = 0; i < outCount; i++)
		ha->mbox_status[i] = 0;
	/* Load all mailbox registers, except mailbox 0. */
	for (i = 1; i < inCount; i++)
		writel(mbx_cmd[i], &ha->reg->mailbox[i]);
	/* Wakeup firmware: mailbox 0 last, then ring the RISC doorbell. */
	writel(mbx_cmd[0], &ha->reg->mailbox[0]);
	readl(&ha->reg->mailbox[0]);
	writel(set_rmask(CSR_INTR_RISC), &ha->reg->ctrl_status);
	readl(&ha->reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	/* Wait for completion */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&ha->mailbox_wait_queue, &wait);
	/*
	 * If we don't want status, don't wait for the mailbox command to
	 * complete.  For example, MBOX_CMD_RESET_FW doesn't return status,
	 * you must poll the inbound Interrupt Mask for completion.
	 */
	if (outCount == 0) {
		status = QLA_SUCCESS;
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ha->mailbox_wait_queue, &wait);
		goto mbox_exit;
	}
	/* Wait for command to complete (polls + services interrupts,
	 * since the ISR may be the one that sets AF_MBOX_COMMAND_DONE) */
	wait_count = jiffies + MBOX_TOV * HZ;
	while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
		if (time_after_eq(jiffies, wait_count))
			break;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		intr_status = readl(&ha->reg->ctrl_status);
		if (intr_status & INTR_PENDING) {
			/*
			 * Service the interrupt.
			 * The ISR will save the mailbox status registers
			 * to a temporary storage location in the adapter
			 * structure.
			 */
			ha->mbox_status_count = outCount;
			qla4xxx_interrupt_service_routine(ha, intr_status);
		}
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		msleep(10);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ha->mailbox_wait_queue, &wait);
	/* Check for mailbox timeout. */
	if (!test_bit(AF_MBOX_COMMAND_DONE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: Mailbox Cmd 0x%08X timed out ...,"
			      " Scheduling Adapter Reset\n", ha->host_no,
			      mbx_cmd[0]));
		ha->mailbox_timeout_count++;
		mbx_sts[0] = (-1);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		goto mbox_exit;
	}
	/*
	 * Copy the mailbox out registers to the caller's mailbox in/out
	 * structure.
	 */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (i = 0; i < outCount; i++)
		mbx_sts[i] = ha->mbox_status[i];
	/* Set return status and error flags (if applicable). */
	switch (ha->mbox_status[0]) {
	case MBOX_STS_COMMAND_COMPLETE:
		status = QLA_SUCCESS;
		break;
	case MBOX_STS_INTERMEDIATE_COMPLETION:
		status = QLA_SUCCESS;
		break;
	case MBOX_STS_BUSY:
		DEBUG2( printk("scsi%ld: %s: Cmd = %08X, ISP BUSY\n",
			       ha->host_no, __func__, mbx_cmd[0]));
		ha->mailbox_timeout_count++;
		break;
	default:
		DEBUG2(printk("scsi%ld: %s: **** FAILED, cmd = %08X, "
			      "sts = %08X ****\n", ha->host_no, __func__,
			      mbx_cmd[0], mbx_sts[0]));
		break;
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
mbox_exit:
	clear_bit(AF_MBOX_COMMAND, &ha->flags);
	clear_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
	mutex_unlock(&ha->mbox_sem);
	return status;
}
/**
 * qla4xxx_issue_iocb - issue mailbox iocb command
 * @ha: adapter state pointer.
 * @buffer: buffer pointer.
 * @phys_addr: physical address of buffer.
 * @size: size of buffer.
 *
 * Issues iocbs via mailbox commands (MBOX_CMD_EXECUTE_IOCB_A64 with a
 * 64-bit DMA address).  Returns the mailbox command status.
 * TARGET_QUEUE_LOCK must be released.
 * ADAPTER_STATE_LOCK must be released.
 **/
int
qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
		   dma_addr_t phys_addr, size_t size)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	memset(mbox_cmd, 0, sizeof(mbox_cmd));
	memset(mbox_sts, 0, sizeof(mbox_sts));

	/* Hand the firmware the 64-bit DMA address of the IOCB. */
	mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
	mbox_cmd[2] = LSDW(phys_addr);
	mbox_cmd[3] = MSDW(phys_addr);

	return qla4xxx_mailbox_command(ha, 4, 1, mbox_cmd, mbox_sts);
}
/**
 * qla4xxx_conn_close_sess_logout - close connection / logout session
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index.
 * @connection_id: Connection to operate on.
 * @option: Logout option (e.g. LOGOUT_OPTION_RELOGIN) for the firmware.
 *
 * Issues MBOX_CMD_CONN_CLOSE_SESS_LOGOUT.
 * Returns QLA_SUCCESS on completion, QLA_ERROR if the mailbox command
 * fails.
 **/
int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
				   uint16_t fw_ddb_index,
				   uint16_t connection_id,
				   uint16_t option)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
	mbox_cmd[1] = fw_ddb_index;
	mbox_cmd[2] = connection_id;
	/* Fix: honor the caller-supplied option; previously a hard-coded
	 * LOGOUT_OPTION_RELOGIN was sent and @option was ignored. */
	mbox_cmd[3] = option;

	if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		/* Fix: propagate the failure instead of always returning
		 * QLA_SUCCESS. */
		status = QLA_ERROR;
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
			      "option %04x failed sts %04X %04X",
			      ha->host_no, __func__,
			      option, mbox_sts[0], mbox_sts[1]));
		if (mbox_sts[0] == 0x4005)
			DEBUG2(printk("%s reason %04X\n", __func__,
				      mbox_sts[1]));
	}
	return status;
}
/**
 * qla4xxx_clear_database_entry - clears a firmware ddb entry
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index to clear.
 *
 * Issues MBOX_CMD_CLEAR_DATABASE_ENTRY for the given index.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
				 uint16_t fw_ddb_index)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int rval;

	memset(mbox_cmd, 0, sizeof(mbox_cmd));
	memset(mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
	mbox_cmd[1] = fw_ddb_index;

	rval = qla4xxx_mailbox_command(ha, 2, 5, mbox_cmd, mbox_sts);
	return (rval == QLA_SUCCESS) ? QLA_SUCCESS : QLA_ERROR;
}
/**
 * qla4xxx_initialize_fw_cb - initializes firmware control block.
 * @ha: Pointer to host adapter structure.
 *
 * Reads the firmware control block, programs the request/response
 * queue addresses and required options, caches network/iSCSI settings
 * in the adapter structure, and sends the block back to the firmware.
 * Returns QLA_SUCCESS or QLA_ERROR (fix: previously returned a magic
 * 10 on allocation failure, which no caller recognized).
 **/
int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
{
	struct init_fw_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_ERROR;

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct init_fw_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		DEBUG2(printk("scsi%ld: %s: Unable to alloc init_cb\n",
			      ha->host_no, __func__));
		return status;
	}
	memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));

	/* Get Initialize Firmware Control Block. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		dma_free_coherent(&ha->pdev->dev,
				  sizeof(struct init_fw_ctrl_blk),
				  init_fw_cb, init_fw_cb_dma);
		return status;
	}

	/* Initialize request and response queues. */
	qla4xxx_init_rings(ha);

	/* Fill in the request and response queue information. */
	init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out);
	init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in);
	init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
	init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
	init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma));
	init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma));
	init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma));
	init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma));
	init_fw_cb->ShadowRegBufAddrLo =
		cpu_to_le32(LSDW(ha->shadow_regs_dma));
	init_fw_cb->ShadowRegBufAddrHi =
		cpu_to_le32(MSDW(ha->shadow_regs_dma));

	/* Set up required options: initiator (not target) session mode. */
	init_fw_cb->FwOptions |=
		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
				       FWOPT_INITIATOR_MODE);
	init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);

	/* Save some info in adapter structure. */
	ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions);
	ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions);
	ha->heartbeat_interval = init_fw_cb->HeartbeatInterval;
	memcpy(ha->ip_address, init_fw_cb->IPAddr,
	       min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
	memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
	       min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
	memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
	       min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));
	memcpy(ha->name_string, init_fw_cb->iSCSINameString,
	       min(sizeof(ha->name_string),
		   sizeof(init_fw_cb->iSCSINameString)));
	memcpy(ha->alias, init_fw_cb->Alias,
	       min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));

	/* Save Command Line Paramater info */
	ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout);
	ha->discovery_wait = ql4xdiscoverywait;

	/* Send Initialize Firmware Control Block. */
	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
	mbox_cmd[1] = 0;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) ==
	    QLA_SUCCESS)
		status = QLA_SUCCESS;
	else {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_INITIALIZE_FIRMWARE "
			      "failed w/ status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
	}
	dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);
	return status;
}
/**
 * qla4xxx_get_dhcp_ip_address - gets HBA ip address via DHCP
 * @ha: Pointer to host adapter structure.
 *
 * Re-reads the firmware control block and caches the DHCP-assigned
 * IP address, subnet mask and gateway in the adapter structure.
 * Returns QLA_SUCCESS or QLA_ERROR (fix: previously returned a magic
 * 10 on allocation failure, which no caller recognized).
 **/
int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
{
	struct init_fw_ctrl_blk *init_fw_cb;
	dma_addr_t init_fw_cb_dma;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	init_fw_cb = dma_alloc_coherent(&ha->pdev->dev,
					sizeof(struct init_fw_ctrl_blk),
					&init_fw_cb_dma, GFP_KERNEL);
	if (init_fw_cb == NULL) {
		printk("scsi%ld: %s: Unable to alloc init_cb\n", ha->host_no,
		       __func__);
		return QLA_ERROR;
	}

	/* Get Initialize Firmware Control Block. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
	mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
	mbox_cmd[2] = LSDW(init_fw_cb_dma);
	mbox_cmd[3] = MSDW(init_fw_cb_dma);
	if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
			      ha->host_no, __func__));
		dma_free_coherent(&ha->pdev->dev,
				  sizeof(struct init_fw_ctrl_blk),
				  init_fw_cb, init_fw_cb_dma);
		return QLA_ERROR;
	}

	/* Save IP Address. */
	memcpy(ha->ip_address, init_fw_cb->IPAddr,
	       min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr)));
	memcpy(ha->subnet_mask, init_fw_cb->SubnetMask,
	       min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask)));
	memcpy(ha->gateway, init_fw_cb->GatewayIPAddr,
	       min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr)));

	dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
			  init_fw_cb, init_fw_cb_dma);
	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_firmware_state - gets firmware state of HBA
 * @ha: Pointer to host adapter structure.
 *
 * Issues MBOX_CMD_GET_FW_STATE and caches the firmware state, board
 * id and additional state words in the adapter structure.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Query current firmware state. */
	memset(mbox_cmd, 0, sizeof(mbox_cmd));
	memset(mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;

	if (qla4xxx_mailbox_command(ha, 1, 4, mbox_cmd, mbox_sts) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}

	/* Cache the returned state words. */
	ha->firmware_state = mbox_sts[1];
	ha->board_id = mbox_sts[2];
	ha->addl_fw_state = mbox_sts[3];
	DEBUG2(printk("scsi%ld: %s firmware_state=0x%x\n",
		      ha->host_no, __func__, ha->firmware_state));
	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_firmware_status - retrieves firmware status
 * @ha: Pointer to host adapter structure.
 *
 * Issues MBOX_CMD_GET_FW_STATUS and records the firmware IOCB
 * high-water mark (less a safety cushion) in ha->iocb_hiwat.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Get firmware status. */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
	if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}

	/* High-water mark of IOCBs; keep a cushion below the firmware's
	 * reported limit. */
	ha->iocb_hiwat = mbox_sts[2];
	if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
		ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;
	else
		/* Fix: message typo "firmare" -> "firmware". */
		dev_info(&ha->pdev->dev, "WARNING!!! You have less than %d "
			 "firmware IOCBs available (%d).\n",
			 IOCB_HIWAT_CUSHION, ha->iocb_hiwat);
	return QLA_SUCCESS;
}
/**
 * qla4xxx_get_fwddb_entry - retrieves firmware ddb entry
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index
 * @fw_ddb_entry: Pointer to firmware's device database entry structure
 * @fw_ddb_entry_dma: DMA address of the buffer @fw_ddb_entry maps
 * @num_valid_ddb_entries: Pointer to number of valid ddb entries
 * @next_ddb_index: Pointer to next valid device database index
 * @fw_ddb_device_state: Pointer to device state
 * @conn_err_detail: Pointer to connection error detail
 * @tcp_source_port_num: Pointer to the TCP source port in use
 * @connection_id: Pointer to the firmware connection id
 *
 * All output pointers are optional; pass NULL for any value not needed.
 * Returns QLA_SUCCESS or QLA_ERROR.
 **/
int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
			    uint16_t fw_ddb_index,
			    struct dev_db_entry *fw_ddb_entry,
			    dma_addr_t fw_ddb_entry_dma,
			    uint32_t *num_valid_ddb_entries,
			    uint32_t *next_ddb_index,
			    uint32_t *fw_ddb_device_state,
			    uint32_t *conn_err_detail,
			    uint16_t *tcp_source_port_num,
			    uint16_t *connection_id)
{
	int status = QLA_ERROR;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Make sure the device index is valid */
	if (fw_ddb_index >= MAX_DDB_ENTRIES) {
		DEBUG2(printk("scsi%ld: %s: index [%d] out of range.\n",
			      ha->host_no, __func__, fw_ddb_index));
		goto exit_get_fwddb;
	}
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
	if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) ==
	    QLA_ERROR) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
			      " with status 0x%04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		goto exit_get_fwddb;
	}
	/* Firmware echoes the requested index back in status mailbox 1. */
	if (fw_ddb_index != mbox_sts[1]) {
		DEBUG2(printk("scsi%ld: %s: index mismatch [%d] != [%d].\n",
			      ha->host_no, __func__, fw_ddb_index,
			      mbox_sts[1]));
		goto exit_get_fwddb;
	}
	if (fw_ddb_entry) {
		dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
			 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
			 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
			 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0],
			 fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2],
			 fw_ddb_entry->ipAddr[3],
			 le16_to_cpu(fw_ddb_entry->portNumber),
			 fw_ddb_entry->iscsiName);
	}
	if (num_valid_ddb_entries)
		*num_valid_ddb_entries = mbox_sts[2];
	if (next_ddb_index)
		*next_ddb_index = mbox_sts[3];
	if (fw_ddb_device_state)
		*fw_ddb_device_state = mbox_sts[4];
	/*
	 * RA: This mailbox has been changed to pass connection error and
	 * details. Its true for ISP4010 as per Version E - Not sure when it
	 * was changed. Get the time2wait from the fw_dd_entry field :
	 * default_time2wait which we call it as minTime2Wait DEV_DB_ENTRY
	 * struct.
	 */
	if (conn_err_detail)
		*conn_err_detail = mbox_sts[5];
	if (tcp_source_port_num)
		/* BUGFIX: the original cast mbox_sts[6] to uint16_t *before*
		 * shifting right by 16 (cast binds tighter than >>), which
		 * always produced 0.  Shift first, then truncate. */
		*tcp_source_port_num = (uint16_t) (mbox_sts[6] >> 16);
	if (connection_id)
		*connection_id = (uint16_t) (mbox_sts[6] & 0x00FF);
	status = QLA_SUCCESS;

exit_get_fwddb:
	return status;
}
/**
 * qla4xxx_set_ddb_entry - sets a ddb entry.
 * @ha: Pointer to host adapter structure.
 * @fw_ddb_index: Firmware's device database index
 * @fw_ddb_entry_dma: DMA address of a dev_db_entry buffer, previously
 *	populated by the caller, to download to the firmware.
 *
 * This routine initializes or updates the adapter's device database
 * entry for the specified device, and triggers a login to that device.
 * (The original kernel-doc named the function qla4xxx_set_fwddb_entry
 * and documented a fw_ddb_entry pointer parameter that does not exist;
 * corrected here.)
 **/
int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
			  dma_addr_t fw_ddb_entry_dma)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Do not wait for completion. The firmware will send us an
	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_SET_DATABASE_ENTRY;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
	mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
	return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
}
/*
 * Initiate a login to the target at firmware DDB index @fw_ddb_index.
 * Fire-and-forget: the firmware reports the login result later via an
 * ASTS_DATABASE_CHANGED (0x8014) AEN.  Returns the mailbox status.
 */
int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
				    uint16_t fw_ddb_index)
{
	int status = QLA_ERROR;
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];

	/* Do not wait for completion. The firmware will send us an
	 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
	mbox_cmd[1] = (uint32_t) fw_ddb_index;
	mbox_cmd[2] = 0;
	mbox_cmd[3] = 0;
	/* NOTE(review): mbox_cmd[4] is cleared, but only 4 inbound
	 * registers are passed to qla4xxx_mailbox_command() below, so
	 * register 4 is presumably never transmitted — confirm. */
	mbox_cmd[4] = 0;
	status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
	/* outCount is 0, so mbox_sts[0]/[1] below presumably still hold
	 * the zeros from the memset above — TODO confirm intent. */
	DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
		      __func__, fw_ddb_index, status, mbox_sts[0],
		      mbox_sts[1]);)
	return status;
}
/**
 * qla4xxx_get_crash_record - retrieves crash record.
 * @ha: Pointer to host adapter structure.
 *
 * This routine retrieves a crash record from the QLA4010 after an 8002h aen.
 * The record is fetched into a temporary DMA buffer which is freed before
 * returning; failures are silently ignored beyond debug logging.
 **/
void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct crash_record *crash_record = NULL;
	dma_addr_t crash_record_dma = 0;
	uint32_t crash_record_size = 0;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	/* BUGFIX: original cleared mbox_sts with sizeof(mbox_cmd) —
	 * harmless only because the arrays happen to be the same size. */
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	/* Get size of crash record. */
	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
	if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
			      ha->host_no, __func__));
		goto exit_get_crash_record;
	}
	crash_record_size = mbox_sts[4];
	if (crash_record_size == 0) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Crash record size is 0!\n",
			      ha->host_no, __func__));
		goto exit_get_crash_record;
	}

	/* Alloc Memory for Crash Record. */
	crash_record = dma_alloc_coherent(&ha->pdev->dev, crash_record_size,
					  &crash_record_dma, GFP_KERNEL);
	if (crash_record == NULL)
		goto exit_get_crash_record;

	/* Get Crash Record. */
	mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
	mbox_cmd[2] = LSDW(crash_record_dma);
	mbox_cmd[3] = MSDW(crash_record_dma);
	mbox_cmd[4] = crash_record_size;
	if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS)
		goto exit_get_crash_record;

	/* Dump Crash Record. */

exit_get_crash_record:
	if (crash_record)
		dma_free_coherent(&ha->pdev->dev, crash_record_size,
				  crash_record, crash_record_dma);
}
/**
 * qla4xxx_get_conn_event_log - retrieves connection event log
 * @ha: Pointer to host adapter structure.
 *
 * Fetches the firmware's connection event log into a temporary DMA
 * buffer and, when extended_error_logging == 3, dumps each entry in
 * chronological order, accounting for wrap of the circular buffer.
 **/
void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	struct conn_event_log_entry *event_log = NULL;
	dma_addr_t event_log_dma = 0;
	uint32_t event_log_size = 0;
	uint32_t num_valid_entries;
	uint32_t oldest_entry = 0;
	uint32_t max_event_log_entries;
	/* BUGFIX: was uint8_t — the loops below run up to
	 * max_event_log_entries, which can exceed 255 and would then
	 * never terminate. */
	uint32_t i;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	/* BUGFIX: original cleared mbox_sts with sizeof(mbox_cmd). */
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	/* Get size of connection event log. */
	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
	if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS)
		goto exit_get_event_log;
	event_log_size = mbox_sts[4];
	if (event_log_size == 0)
		goto exit_get_event_log;

	/* Alloc Memory for Event Log. */
	event_log = dma_alloc_coherent(&ha->pdev->dev, event_log_size,
				       &event_log_dma, GFP_KERNEL);
	if (event_log == NULL)
		goto exit_get_event_log;

	/* Get Event Log. */
	mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
	mbox_cmd[2] = LSDW(event_log_dma);
	mbox_cmd[3] = MSDW(event_log_dma);
	if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) !=
	    QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
			      "log!\n", ha->host_no, __func__));
		goto exit_get_event_log;
	}

	/* Dump Event Log. */
	num_valid_entries = mbox_sts[1];
	max_event_log_entries = event_log_size /
		sizeof(struct conn_event_log_entry);
	/* Once the log has wrapped, the oldest entry is not at index 0. */
	if (num_valid_entries > max_event_log_entries)
		oldest_entry = num_valid_entries % max_event_log_entries;
	DEBUG3(printk("scsi%ld: Connection Event Log Dump (%d entries):\n",
		      ha->host_no, num_valid_entries));
	if (extended_error_logging == 3) {
		if (oldest_entry == 0) {
			/* Circular Buffer has not wrapped around */
			for (i = 0; i < num_valid_entries; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
		} else {
			/* Circular Buffer has wrapped around -
			 * display accordingly */
			for (i = oldest_entry; i < max_event_log_entries; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
			for (i = 0; i < oldest_entry; i++) {
				qla4xxx_dump_buffer((uint8_t *)event_log +
						    (i * sizeof(*event_log)),
						    sizeof(*event_log));
			}
		}
	}

exit_get_event_log:
	if (event_log)
		dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
				  event_log_dma);
}
/**
 * qla4xxx_reset_lun - issues LUN Reset
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: lun number to reset
 *
 * This routine performs a LUN RESET on the specified target/lun.
 * The caller must ensure that the ddb_entry pointer is valid before
 * calling this routine.  (Fixed @db_entry/@un_entry typos in the
 * original kernel-doc; there is no lun_entry parameter.)
 **/
int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
		      int lun)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int status = QLA_SUCCESS;

	DEBUG2(printk("scsi%ld:%d:%d: lun reset issued\n", ha->host_no,
		      ddb_entry->os_target_id, lun));

	/*
	 * Send lun reset command to ISP, so that the ISP will return all
	 * outstanding requests with RESET status
	 */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_LUN_RESET;
	mbox_cmd[1] = ddb_entry->fw_ddb_index;
	/* LUN is carried in the upper byte of mailbox 2. */
	mbox_cmd[2] = lun << 8;
	mbox_cmd[5] = 0x01;	/* Immediate Command Enable */
	qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]);
	/* NOTE(review): MBOX_STS_COMMAND_ERROR is also treated as
	 * success here — presumably the firmware returns it when no
	 * commands are outstanding; confirm against the firmware spec. */
	if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
	    mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
		status = QLA_ERROR;

	return status;
}
/*
 * Read @len bytes of the adapter's flash starting at @offset into the
 * DMA buffer at @dma_addr.  Returns QLA_SUCCESS or QLA_ERROR.
 */
int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
		      uint32_t offset, uint32_t len)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int rval;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_READ_FLASH;
	mbox_cmd[1] = LSDW(dma_addr);
	mbox_cmd[2] = MSDW(dma_addr);
	mbox_cmd[3] = offset;
	mbox_cmd[4] = len;

	rval = qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]);
	if (rval == QLA_SUCCESS)
		return QLA_SUCCESS;

	DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
		      "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
		      __func__, mbox_sts[0], mbox_sts[1], offset, len));
	return QLA_ERROR;
}
/**
 * qla4xxx_get_fw_version - gets firmware version
 * @ha: Pointer to host adapter structure.
 *
 * Retrieves the firmware version on HBA. In QLA4010, mailboxes 2 & 3 may
 * hold an address for data. Make sure that we write 0 to those mailboxes,
 * if unused.
 **/
int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int rval;

	/* All command mailboxes start out zeroed (see note above). */
	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));
	mbox_cmd[0] = MBOX_CMD_ABOUT_FW;

	rval = qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]);
	if (rval != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
			      "status %04X\n", ha->host_no, __func__,
			      mbox_sts[0]));
		return QLA_ERROR;
	}

	/* Save firmware version information. */
	ha->firmware_version[0] = mbox_sts[1];
	ha->firmware_version[1] = mbox_sts[2];
	ha->patch_number = mbox_sts[3];
	ha->build_number = mbox_sts[4];

	return QLA_SUCCESS;
}
/*
 * Fetch the firmware's default device-database entry template into the
 * caller-supplied DMA buffer at @dma_addr.
 */
int qla4xxx_get_default_ddb(struct scsi_qla_host *ha, dma_addr_t dma_addr)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int rval;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY_DEFAULTS;
	mbox_cmd[2] = LSDW(dma_addr);
	mbox_cmd[3] = MSDW(dma_addr);

	rval = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
	if (rval != QLA_SUCCESS) {
		DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
			      ha->host_no, __func__, mbox_sts[0]));
		return QLA_ERROR;
	}
	return QLA_SUCCESS;
}
/*
 * Request a device-database index from the firmware.  On mailbox
 * success *ddb_index is set to MAX_PRST_DEV_DB_ENTRIES; when the
 * mailbox fails with MBOX_STS_COMMAND_ERROR, *ddb_index is taken from
 * status mailbox 2 instead.  Any other failure returns QLA_ERROR.
 */
int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
{
	uint32_t mbox_cmd[MBOX_REG_COUNT];
	uint32_t mbox_sts[MBOX_REG_COUNT];
	int rval;

	memset(&mbox_cmd, 0, sizeof(mbox_cmd));
	memset(&mbox_sts, 0, sizeof(mbox_sts));

	mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
	mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;

	rval = qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]);
	if (rval == QLA_SUCCESS) {
		*ddb_index = MAX_PRST_DEV_DB_ENTRIES;
		return QLA_SUCCESS;
	}
	if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
		*ddb_index = mbox_sts[2];
		return QLA_SUCCESS;
	}
	DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
		      ha->host_no, __func__, mbox_sts[0]));
	return QLA_ERROR;
}
/*
 * Build a discovery-session DDB aimed at @ip:@port from the firmware's
 * default template and download it (which triggers a login).
 */
int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
{
	struct dev_db_entry *fw_ddb_entry;
	dma_addr_t fw_ddb_entry_dma;
	uint32_t ddb_index;
	int ret_val;

	fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
					  sizeof(*fw_ddb_entry),
					  &fw_ddb_entry_dma, GFP_KERNEL);
	if (fw_ddb_entry == NULL) {
		DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n",
			      ha->host_no, __func__));
		/* BUGFIX: return directly instead of jumping to the exit
		 * label, which called dma_free_coherent() with a NULL
		 * cpu_addr. */
		return QLA_ERROR;
	}

	ret_val = qla4xxx_get_default_ddb(ha, fw_ddb_entry_dma);
	if (ret_val != QLA_SUCCESS)
		goto qla4xxx_send_tgts_exit;

	ret_val = qla4xxx_req_ddb_entry(ha, &ddb_index);
	if (ret_val != QLA_SUCCESS)
		goto qla4xxx_send_tgts_exit;

	/* Blank the identity fields of the template, then fill in the
	 * requested target address. */
	memset(fw_ddb_entry->iSCSIAlias, 0, sizeof(fw_ddb_entry->iSCSIAlias));
	memset(fw_ddb_entry->iscsiName, 0, sizeof(fw_ddb_entry->iscsiName));
	memset(fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr));
	memset(fw_ddb_entry->targetAddr, 0, sizeof(fw_ddb_entry->targetAddr));

	fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
	fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port));
	fw_ddb_entry->ipAddr[0] = *ip;
	fw_ddb_entry->ipAddr[1] = *(ip + 1);
	fw_ddb_entry->ipAddr[2] = *(ip + 2);
	fw_ddb_entry->ipAddr[3] = *(ip + 3);

	ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);

qla4xxx_send_tgts_exit:
	dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
			  fw_ddb_entry, fw_ddb_entry_dma);
	return ret_val;
}

View file

@ -0,0 +1,224 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#include "ql4_def.h"
/* EEPROM size in 16-bit words: ISP4022 carries the larger FM93C86A
 * part, ISP4010 the FM93C66A. */
static inline int eeprom_size(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return FM93C86A_SIZE_16;
	return FM93C66A_SIZE_16;
}
/* Number of address bits to clock out per transaction, per EEPROM part. */
static inline int eeprom_no_addr_bits(struct scsi_qla_host *ha)
{
	if (is_qla4022(ha))
		return FM93C86A_NO_ADDR_BITS_16;
	return FM93C56A_NO_ADDR_BITS_16;
}
/* Number of data bits per EEPROM word; always 16 for both parts
 * (the @ha parameter is kept for symmetry with the helpers above). */
static inline int eeprom_no_data_bits(struct scsi_qla_host *ha)
{
	return FM93C56A_DATA_BITS_16;
}
/*
 * Assert chip-select to the serial EEPROM and cache the CS/command
 * bits in ha->eeprom_cmd_data so subsequent clocking keeps CS asserted.
 * Always returns 1.
 */
static int fm93c56a_select(struct scsi_qla_host * ha)
{
	DEBUG5(printk(KERN_ERR "fm93c56a_select:\n"));

	/* 0x000f0000: presumably the NVRAM register's write-enable mask
	 * for the low control nibble — TODO confirm against ISP4xxx
	 * register documentation. */
	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_1 | 0x000f0000;
	writel(ha->eeprom_cmd_data, isp_nvram(ha));
	readl(isp_nvram(ha));	/* read back, presumably to flush the write */
	return 1;
}
/*
 * Bit-bang a command opcode (@cmd) followed by an address (@addr) out
 * to the serial EEPROM, MSB first, one bit per CLK rise/fall pair on
 * the NVRAM register.  The DO line is only rewritten when the bit value
 * changes.  Always returns 1.
 */
static int fm93c56a_cmd(struct scsi_qla_host * ha, int cmd, int addr)
{
	int i;
	int mask;
	int dataBit;
	int previousBit;

	/* Clock in a zero, then do the start bit. */
	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1, isp_nvram(ha));
	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
	       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
	writel(ha->eeprom_cmd_data | AUBURN_EEPROM_DO_1 |
	       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
	readl(isp_nvram(ha));	/* read back, presumably to flush writes */

	/* Clock out the fixed-width command opcode, MSB first. */
	mask = 1 << (FM93C56A_CMD_BITS - 1);
	/* Force the previous data bit to be different. */
	previousBit = 0xffff;
	for (i = 0; i < FM93C56A_CMD_BITS; i++) {
		dataBit =
		    (cmd & mask) ? AUBURN_EEPROM_DO_1 : AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
			previousBit = dataBit;
		}
		writel(ha->eeprom_cmd_data | dataBit |
		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
		writel(ha->eeprom_cmd_data | dataBit |
		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
		readl(isp_nvram(ha));
		cmd = cmd << 1;
	}

	/* Clock out the address, MSB first; width differs per EEPROM part. */
	mask = 1 << (eeprom_no_addr_bits(ha) - 1);
	/* Force the previous data bit to be different. */
	previousBit = 0xffff;
	for (i = 0; i < eeprom_no_addr_bits(ha); i++) {
		dataBit = addr & mask ? AUBURN_EEPROM_DO_1 :
		    AUBURN_EEPROM_DO_0;
		if (previousBit != dataBit) {
			/*
			 * If the bit changed, then change the DO state to
			 * match.
			 */
			writel(ha->eeprom_cmd_data | dataBit, isp_nvram(ha));
			previousBit = dataBit;
		}
		writel(ha->eeprom_cmd_data | dataBit |
		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
		writel(ha->eeprom_cmd_data | dataBit |
		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
		readl(isp_nvram(ha));
		addr = addr << 1;
	}
	return 1;
}
/*
 * Deassert chip-select, ending the current EEPROM transaction.
 * Always returns 1.
 */
static int fm93c56a_deselect(struct scsi_qla_host * ha)
{
	ha->eeprom_cmd_data = AUBURN_EEPROM_CS_0 | 0x000f0000;
	writel(ha->eeprom_cmd_data, isp_nvram(ha));
	readl(isp_nvram(ha));	/* read back, presumably to flush the write */
	return 1;
}
/*
 * Clock in one data word from the EEPROM, MSB first, sampling the DI
 * bit after each CLK rise/fall pair.  Stores the word in *@value and
 * always returns 1.
 */
static int fm93c56a_datain(struct scsi_qla_host * ha, unsigned short *value)
{
	int i;
	int data = 0;
	int dataBit;

	/* Read the data bits
	 * The first bit is a dummy. Clock right over it. */
	for (i = 0; i < eeprom_no_data_bits(ha); i++) {
		writel(ha->eeprom_cmd_data |
		       AUBURN_EEPROM_CLK_RISE, isp_nvram(ha));
		writel(ha->eeprom_cmd_data |
		       AUBURN_EEPROM_CLK_FALL, isp_nvram(ha));
		dataBit =
		    (readw(isp_nvram(ha)) & AUBURN_EEPROM_DI_1) ? 1 : 0;
		data = (data << 1) | dataBit;	/* shift in MSB first */
	}

	*value = data;
	return 1;
}
/*
 * Perform one complete EEPROM READ transaction: select the chip, clock
 * out the READ opcode plus @eepromAddr, clock the data word into
 * *@value, then deselect.  Always returns 1.
 */
static int eeprom_readword(int eepromAddr, u16 * value,
			   struct scsi_qla_host * ha)
{
	fm93c56a_select(ha);
	fm93c56a_cmd(ha, FM93C56A_READ, eepromAddr);
	fm93c56a_datain(ha, value);
	fm93c56a_deselect(ha);
	return 1;
}
/*
 * Read one 16-bit word from the serial EEPROM at word offset @offset.
 * Hardware_lock must be held by the caller.
 */
u16 rd_nvram_word(struct scsi_qla_host * ha, int offset)
{
	u16 val;

	/* NOTE: NVRAM uses half-word addresses */
	eeprom_readword(offset, &val, ha);
	return val;
}
/*
 * Validate the NVRAM contents: the 16-bit sum of every word in the
 * EEPROM must be zero.  Returns QLA_SUCCESS when valid, else QLA_ERROR.
 */
int qla4xxx_is_nvram_configuration_valid(struct scsi_qla_host * ha)
{
	uint16_t sum = 0;
	uint32_t word;
	unsigned long flags;

	/* rd_nvram_word() requires hardware_lock to be held. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (word = 0; word < eeprom_size(ha); word++)
		sum += rd_nvram_word(ha, word);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (sum == 0) ? QLA_SUCCESS : QLA_ERROR;
}
/*************************************************************************
 *
 * Hardware Semaphore routines
 *
 *************************************************************************/

/*
 * Acquire a hardware semaphore, retrying once per second for up to 30
 * seconds.  @sem_mask selects the semaphore field and @sem_bits is the
 * ownership code written to it; acquisition succeeded when the
 * read-back value, masked with (sem_mask >> 16), equals @sem_bits.
 * Returns QLA_SUCCESS on acquisition, QLA_ERROR on timeout.
 * Sleeps — must not be called from atomic context.
 */
int ql4xxx_sem_spinlock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;
	unsigned int seconds = 30;

	DEBUG2(printk("scsi%ld : Trying to get SEM lock - mask= 0x%x, code = "
		      "0x%x\n", ha->host_no, sem_mask, sem_bits));
	do {
		/* hardware_lock serializes access to the semaphore register */
		spin_lock_irqsave(&ha->hardware_lock, flags);
		writel((sem_mask | sem_bits), isp_semaphore(ha));
		value = readw(isp_semaphore(ha));
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
		if ((value & (sem_mask >> 16)) == sem_bits) {
			DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, "
				      "code = 0x%x\n", ha->host_no,
				      sem_mask, sem_bits));
			return QLA_SUCCESS;
		}
		ssleep(1);
	} while (--seconds);
	return QLA_ERROR;
}
/*
 * Release a hardware semaphore by writing @sem_mask alone (i.e. with a
 * zero ownership code) to the semaphore register.
 */
void ql4xxx_sem_unlock(struct scsi_qla_host * ha, u32 sem_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel(sem_mask, isp_semaphore(ha));
	readl(isp_semaphore(ha));	/* presumably flushes the write */
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	DEBUG2(printk("scsi%ld : UNLOCK SEM - mask= 0x%x\n", ha->host_no,
		      sem_mask));
}
/*
 * Single-attempt, non-sleeping variant of ql4xxx_sem_spinlock().
 * Returns 1 when the semaphore was acquired, 0 otherwise (note: plain
 * 1/0, not the QLA_SUCCESS/QLA_ERROR codes used by the spinlock
 * variant).
 */
int ql4xxx_sem_lock(struct scsi_qla_host * ha, u32 sem_mask, u32 sem_bits)
{
	uint32_t value;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	writel((sem_mask | sem_bits), isp_semaphore(ha));
	value = readw(isp_semaphore(ha));
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	if ((value & (sem_mask >> 16)) == sem_bits) {
		DEBUG2(printk("scsi%ld : Got SEM LOCK - mask= 0x%x, code = "
			      "0x%x, sema code=0x%x\n", ha->host_no,
			      sem_mask, sem_bits, value));
		return 1;
	}
	return 0;
}

View file

@ -0,0 +1,256 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
#ifndef _QL4XNVRM_H_
#define _QL4XNVRM_H_

/*
 * Serial EEPROM (FM93C56A / FM93C66A / FM93C86A) definitions.
 * NOTE(review): the original comment said "AM29LV Flash definitions",
 * but every constant below describes the FM93Cxx serial EEPROM parts
 * bit-banged by ql4_nvram.c.
 */

/* Part sizes in 8-bit or 16-bit words */
#define FM93C56A_SIZE_8 0x100
#define FM93C56A_SIZE_16 0x80
#define FM93C66A_SIZE_8 0x200
#define FM93C66A_SIZE_16 0x100	/* 4010 */
#define FM93C86A_SIZE_16 0x400	/* 4022 */

#define FM93C56A_START 0x1

/* Commands */
#define FM93C56A_READ 0x2
#define FM93C56A_WEN 0x0
#define FM93C56A_WRITE 0x1
#define FM93C56A_WRITE_ALL 0x0
#define FM93C56A_WDS 0x0
#define FM93C56A_ERASE 0x3
#define FM93C56A_ERASE_ALL 0x0

/* Command Extensions */
#define FM93C56A_WEN_EXT 0x3
#define FM93C56A_WRITE_ALL_EXT 0x1
#define FM93C56A_WDS_EXT 0x0
#define FM93C56A_ERASE_ALL_EXT 0x2

/* Address Bits */
#define FM93C56A_NO_ADDR_BITS_16 8	/* 4010 */
#define FM93C56A_NO_ADDR_BITS_8 9	/* 4010 */
#define FM93C86A_NO_ADDR_BITS_16 10	/* 4022 */

/* Data Bits */
#define FM93C56A_DATA_BITS_16 16
#define FM93C56A_DATA_BITS_8 8

/* Special Bits */
#define FM93C56A_READ_DUMMY_BITS 1
#define FM93C56A_READY 0
#define FM93C56A_BUSY 1
#define FM93C56A_CMD_BITS 2

/* Auburn Bits: positions in the ISP NVRAM register used by ql4_nvram.c
 * to drive/sample the EEPROM's DI, DO, CS, and CLK pins. */
#define AUBURN_EEPROM_DI 0x8
#define AUBURN_EEPROM_DI_0 0x0
#define AUBURN_EEPROM_DI_1 0x8
#define AUBURN_EEPROM_DO 0x4
#define AUBURN_EEPROM_DO_0 0x0
#define AUBURN_EEPROM_DO_1 0x4
#define AUBURN_EEPROM_CS 0x2
#define AUBURN_EEPROM_CS_0 0x0
#define AUBURN_EEPROM_CS_1 0x2
#define AUBURN_EEPROM_CLK_RISE 0x1
#define AUBURN_EEPROM_CLK_FALL 0x0
/* */
/* EEPROM format */
/* */

/* Per-function BIOS boot parameters as stored in the EEPROM. */
struct bios_params {
	uint16_t SpinUpDelay:1;
	uint16_t BIOSDisable:1;
	uint16_t MMAPEnable:1;
	uint16_t BootEnable:1;
	uint16_t Reserved0:12;
	uint8_t bootID0:7;
	uint8_t bootID0Valid:1;
	uint8_t bootLUN0[8];
	uint8_t bootID1:7;
	uint8_t bootID1Valid:1;
	uint8_t bootLUN1[8];
	uint16_t MaxLunsPerTarget;
	uint8_t Reserved1[10];
};
/* Per-port Ethernet configuration (MTU and pause thresholds) stored in
 * the ISP4022 EEPROM map. */
struct eeprom_port_cfg {
	/* MTU MAC 0 */
	u16 etherMtu_mac;

	/* Flow Control MAC 0 */
	u16 pauseThreshold_mac;
	u16 resumeThreshold_mac;
	u16 reserved[13];
};
/* Per-PCI-function identity (MAC addresses and PCI subsystem IDs)
 * stored in the ISP4022 EEPROM map. */
struct eeprom_function_cfg {
	u8 reserved[30];

	/* MAC ADDR */
	u8 macAddress[6];
	u8 macAddressSecondary[6];
	u16 subsysVendorId;
	u16 subsysDeviceId;
};
/*
 * On-EEPROM layout.  A union of two overlaid images: the ISP4010 map
 * (isp4010) and the larger ISP4022 map (isp4022).  The trailing /* xNN */
 * comments are byte offsets within the part; do not reorder or resize
 * any field — the layout must match the EEPROM contents exactly.
 */
struct eeprom_data {
	union {
		struct {	/* isp4010 */
			u8 asic_id[4];	/* x00 */
			u8 version;	/* x04 */
			u8 reserved;	/* x05 */
			u16 board_id;	/* x06 */
#define EEPROM_BOARDID_ELDORADO 1
#define EEPROM_BOARDID_PLACER 2

#define EEPROM_SERIAL_NUM_SIZE 16
			u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */

			/* ExtHwConfig: */
			/* Offset = 24bytes
			 *
			 * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP  |  |
			 * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0|
			 * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
			 */
			u16 ext_hw_conf;	/* x18 */
			u8 mac0[6];	/* x1A */
			u8 mac1[6];	/* x20 */
			u8 mac2[6];	/* x26 */
			u8 mac3[6];	/* x2C */
			u16 etherMtu;	/* x32 */
			u16 macConfig;	/* x34 */
#define MAC_CONFIG_ENABLE_ANEG 0x0001
#define MAC_CONFIG_ENABLE_PAUSE 0x0002
			u16 phyConfig;	/* x36 */
#define PHY_CONFIG_PHY_ADDR_MASK 0x1f
#define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20
			u16 topcat;	/* x38 */
#define TOPCAT_PRESENT 0x0100
#define TOPCAT_MASK 0xFF00

#define EEPROM_UNUSED_1_SIZE 2
			u8 unused_1[EEPROM_UNUSED_1_SIZE];	/* x3A */
			u16 bufletSize;	/* x3C */
			u16 bufletCount;	/* x3E */
			u16 bufletPauseThreshold;	/* x40 */
			u16 tcpWindowThreshold50;	/* x42 */
			u16 tcpWindowThreshold25;	/* x44 */
			u16 tcpWindowThreshold0;	/* x46 */
			u16 ipHashTableBaseHi;	/* x48 */
			u16 ipHashTableBaseLo;	/* x4A */
			u16 ipHashTableSize;	/* x4C */
			u16 tcpHashTableBaseHi;	/* x4E */
			u16 tcpHashTableBaseLo;	/* x50 */
			u16 tcpHashTableSize;	/* x52 */
			u16 ncbTableBaseHi;	/* x54 */
			u16 ncbTableBaseLo;	/* x56 */
			u16 ncbTableSize;	/* x58 */
			u16 drbTableBaseHi;	/* x5A */
			u16 drbTableBaseLo;	/* x5C */
			u16 drbTableSize;	/* x5E */

#define EEPROM_UNUSED_2_SIZE 4
			u8 unused_2[EEPROM_UNUSED_2_SIZE];	/* x60 */
			u16 ipReassemblyTimeout;	/* x64 */
			u16 tcpMaxWindowSizeHi;	/* x66 */
			u16 tcpMaxWindowSizeLo;	/* x68 */
			u32 net_ip_addr0;	/* x6A Added for TOE
						 * functionality. */
			u32 net_ip_addr1;	/* x6E */
			u32 scsi_ip_addr0;	/* x72 */
			u32 scsi_ip_addr1;	/* x76 */
#define EEPROM_UNUSED_3_SIZE 128	/* changed from 144 to account
					 * for ip addresses */
			u8 unused_3[EEPROM_UNUSED_3_SIZE];	/* x7A */
			u16 subsysVendorId_f0;	/* xFA */
			u16 subsysDeviceId_f0;	/* xFC */

			/* Address = 0x7F */
#define FM93C56A_SIGNATURE 0x9356
#define FM93C66A_SIGNATURE 0x9366
			u16 signature;	/* xFE */

#define EEPROM_UNUSED_4_SIZE 250
			u8 unused_4[EEPROM_UNUSED_4_SIZE];	/* x100 */
			u16 subsysVendorId_f1;	/* x1FA */
			u16 subsysDeviceId_f1;	/* x1FC */
			u16 checksum;	/* x1FE */
		} __attribute__ ((packed)) isp4010;
		struct {	/* isp4022 */
			u8 asicId[4];	/* x00 */
			u8 version;	/* x04 */
			u8 reserved_5;	/* x05 */
			u16 boardId;	/* x06 */
			u8 boardIdStr[16];	/* x08 */
			u8 serialNumber[16];	/* x18 */

			/* External Hardware Configuration */
			u16 ext_hw_conf;	/* x28 */

			/* MAC 0 CONFIGURATION */
			struct eeprom_port_cfg macCfg_port0;	/* x2A */

			/* MAC 1 CONFIGURATION */
			struct eeprom_port_cfg macCfg_port1;	/* x4A */

			/* DDR SDRAM Configuration */
			u16 bufletSize;	/* x6A */
			u16 bufletCount;	/* x6C */
			u16 tcpWindowThreshold50;	/* x6E */
			u16 tcpWindowThreshold25;	/* x70 */
			u16 tcpWindowThreshold0;	/* x72 */
			u16 ipHashTableBaseHi;	/* x74 */
			u16 ipHashTableBaseLo;	/* x76 */
			u16 ipHashTableSize;	/* x78 */
			u16 tcpHashTableBaseHi;	/* x7A */
			u16 tcpHashTableBaseLo;	/* x7C */
			u16 tcpHashTableSize;	/* x7E */
			u16 ncbTableBaseHi;	/* x80 */
			u16 ncbTableBaseLo;	/* x82 */
			u16 ncbTableSize;	/* x84 */
			u16 drbTableBaseHi;	/* x86 */
			u16 drbTableBaseLo;	/* x88 */
			u16 drbTableSize;	/* x8A */
			u16 reserved_142[4];	/* x8C */

			/* TCP/IP Parameters */
			u16 ipReassemblyTimeout;	/* x94 */
			u16 tcpMaxWindowSize;	/* x96 */
			u16 ipSecurity;	/* x98 */
			u8 reserved_156[294];	/* x9A */
			u16 qDebug[8];	/* QLOGIC USE ONLY   x1C0 */
			struct eeprom_function_cfg funcCfg_fn0;	/* x1D0 */
			u16 reserved_510;	/* x1FE */

			/* Address = 512 */
			u8 oemSpace[432];	/* x200 */
			struct bios_params sBIOSParams_fn1;	/* x3B0 */
			struct eeprom_function_cfg funcCfg_fn1;	/* x3D0 */
			u16 reserved_1022;	/* x3FE */

			/* Address = 1024 */
			u8 reserved_1024[464];	/* x400 */
			struct eeprom_function_cfg funcCfg_fn2;	/* x5D0 */
			u16 reserved_1534;	/* x5FE */

			/* Address = 1536 */
			u8 reserved_1536[432];	/* x600 */
			struct bios_params sBIOSParams_fn3;	/* x7B0 */
			struct eeprom_function_cfg funcCfg_fn3;	/* x7D0 */
			u16 checksum;	/* x7FE */
		} __attribute__ ((packed)) isp4022;
	};
};
#endif /* _QL4XNVRM_H_ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,13 @@
/*
* QLogic iSCSI HBA Driver
* Copyright (c) 2003-2006 QLogic Corporation
*
* See LICENSE.qla4xxx for copyright and licensing details.
*/
/* Driver version string; the numeric components below must stay in
 * sync with it (5.00.05b9 = major.minor.patch + beta). */
#define QLA4XXX_DRIVER_VERSION	"5.00.05b9-k"

#define QL4_DRIVER_MAJOR_VER	5
#define QL4_DRIVER_MINOR_VER	0
#define QL4_DRIVER_PATCH_VER	5
#define QL4_DRIVER_BETA_VER	9