Example 1
void 
qla2x00_dump_regs(scsi_qla_host_t *ha) 
{
	device_reg_t __iomem *reg = ha->iobase;

	printk("Mailbox registers:\n");
	printk("scsi(%ld): mbox 0 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 0));
	printk("scsi(%ld): mbox 1 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 1));
	printk("scsi(%ld): mbox 2 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 2));
	printk("scsi(%ld): mbox 3 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 3));
	printk("scsi(%ld): mbox 4 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 4));
	printk("scsi(%ld): mbox 5 0x%04x \n",
	    ha->host_no, RD_MAILBOX_REG(ha, reg, 5));
}
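The six back-to-back printk() calls differ only in the mailbox index, so the same register dump can be expressed as a loop. A minimal sketch, assuming the RD_MAILBOX_REG() macro and scsi_qla_host_t fields used above (the function name here is hypothetical):

void
qla2x00_dump_mailbox_regs(scsi_qla_host_t *ha)
{
	int i;
	device_reg_t __iomem *reg = ha->iobase;

	printk("Mailbox registers:\n");
	/* Mailboxes 0-5, exactly the set dumped above. */
	for (i = 0; i < 6; i++)
		printk("scsi(%ld): mbox %d 0x%04x\n",
		    ha->host_no, i, RD_MAILBOX_REG(ha, reg, i));
}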
Example 2
/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @ha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */
void
qla2300_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
	int		rval;
	uint32_t	cnt, timer;
	uint32_t	risc_address;
	uint16_t	mb0, mb2;

	uint32_t	stat;
	device_reg_t __iomem *reg = ha->iobase;
	uint16_t __iomem *dmp_reg;
	unsigned long	flags;
	struct qla2300_fw_dump	*fw;
	uint32_t	dump_size, data_ram_cnt;

	risc_address = data_ram_cnt = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->fw_dump != NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2300_fw_dump_failed;
	}

	/* Allocate (large) dump buffer. */
	dump_size = sizeof(struct qla2300_fw_dump);
	dump_size += (ha->fw_memory_size - 0x11000) * sizeof(uint16_t);
	ha->fw_dump_order = get_order(dump_size);
	ha->fw_dump = (struct qla2300_fw_dump *) __get_free_pages(GFP_ATOMIC,
	    ha->fw_dump_order);
	if (ha->fw_dump == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocated memory for firmware dump (%d/%d).\n",
		    ha->fw_dump_order, dump_size);
		goto qla2300_fw_dump_failed;
	}
	fw = ha->fw_dump;

	rval = QLA_SUCCESS;
	fw->hccr = RD_REG_WORD(&reg->hccr);

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
			rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = (uint16_t __iomem *)(reg + 0);
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 
			fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++);

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++) 
			fw->risc_host_reg[cnt] = RD_REG_WORD(dmp_reg++);

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x40);
		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++) 
			fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x40);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->resp_dma_reg) / 2; cnt++) 
			fw->resp_dma_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x50);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 
			fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 
			fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2000); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 
			fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2200); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 
			fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2400); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 
			fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2600); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 
			fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2800); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 
			fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2A00); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 
			fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2C00); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 
			fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2E00); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 
			fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x10); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 
			fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x20); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 
			fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x30); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 
			fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		/* Reset RISC. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x800;
 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
 		WRT_MAILBOX_REG(ha, reg, 1, (uint16_t)risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
 			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = mb2;
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get stack SRAM. */
		risc_address = 0x10000;
 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->stack_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
 		WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
 		WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
 			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->stack_ram[cnt] = mb2;
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get data SRAM. */
		risc_address = 0x11000;
		data_ram_cnt = ha->fw_memory_size - risc_address + 1;
 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_EXTENDED);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < data_ram_cnt && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
 		WRT_MAILBOX_REG(ha, reg, 1, LSW(risc_address));
 		WRT_MAILBOX_REG(ha, reg, 8, MSW(risc_address));
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
 			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->data_ram[cnt] = mb2;
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}


	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to dump firmware (%x)!!!\n", rval);

		free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);
		ha->fw_dump = NULL;
	} else {
		qla_printk(KERN_INFO, ha,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    ha->host_no, ha->fw_dump);
	}

qla2300_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
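The allocation at the top of qla2300_fw_dump() sizes the buffer as the fixed register snapshot plus one 16-bit word for every firmware RAM location above 0x11000, then rounds up to whole pages via get_order(). That sizing step in isolation, as a sketch that assumes the driver's existing definitions are in scope (the helper name is hypothetical):

static struct qla2300_fw_dump *
qla2300_alloc_fw_dump(scsi_qla_host_t *ha, int *order)
{
	uint32_t dump_size;

	/* Register snapshot plus data RAM beyond 0x11000, one word each. */
	dump_size = sizeof(struct qla2300_fw_dump);
	dump_size += (ha->fw_memory_size - 0x11000) * sizeof(uint16_t);

	/* Round the size up to a power-of-two number of pages. */
	*order = get_order(dump_size);
	return (struct qla2300_fw_dump *)
	    __get_free_pages(GFP_ATOMIC, *order);
}

On failure the buffer is released with free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order), as in the error path above.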
Example 3
/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @ha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */
void
qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
	int		rval;
	uint32_t	cnt, timer;
	uint16_t	risc_address;
	uint16_t	mb0, mb2;
	device_reg_t __iomem *reg = ha->iobase;
	uint16_t __iomem *dmp_reg;
	unsigned long	flags;
	struct qla2100_fw_dump	*fw;

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (ha->fw_dump != NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}

	/* Allocate (large) dump buffer. */
	ha->fw_dump_order = get_order(sizeof(struct qla2100_fw_dump));
	ha->fw_dump = (struct qla2100_fw_dump *) __get_free_pages(GFP_ATOMIC,
	    ha->fw_dump_order);
	if (ha->fw_dump == NULL) {
		qla_printk(KERN_WARNING, ha,
		    "Unable to allocated memory for firmware dump (%d/%Zd).\n",
		    ha->fw_dump_order, sizeof(struct qla2100_fw_dump));
		goto qla2100_fw_dump_failed;
	}
	fw = ha->fw_dump;

	rval = QLA_SUCCESS;
	fw->hccr = RD_REG_WORD(&reg->hccr);

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = (uint16_t __iomem *)(reg + 0);
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++) 
			fw->pbiu_reg[cnt] = RD_REG_WORD(dmp_reg++);

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x10);
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8) {
				dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xe0);
			}
			fw->mailbox_reg[cnt] = RD_REG_WORD(dmp_reg++);
		}

		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x20);
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++) 
			fw->dma_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0xA0);
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++) 
			fw->risc_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2000); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp0_reg) / 2; cnt++) 
			fw->risc_gp0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2100); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp1_reg) / 2; cnt++) 
			fw->risc_gp1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2200); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp2_reg) / 2; cnt++) 
			fw->risc_gp2_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2300); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp3_reg) / 2; cnt++) 
			fw->risc_gp3_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2400); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp4_reg) / 2; cnt++) 
			fw->risc_gp4_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2500); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp5_reg) / 2; cnt++) 
			fw->risc_gp5_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2600); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp6_reg) / 2; cnt++) 
			fw->risc_gp6_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->pcr, 0x2700); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->risc_gp7_reg) / 2; cnt++) 
			fw->risc_gp7_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x10); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->frame_buf_hdw_reg) / 2; cnt++) 
			fw->frame_buf_hdw_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x20); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b0_reg) / 2; cnt++) 
			fw->fpm_b0_reg[cnt] = RD_REG_WORD(dmp_reg++);

		WRT_REG_WORD(&reg->ctrl_status, 0x30); 
		dmp_reg = (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);
		for (cnt = 0; cnt < sizeof(fw->fpm_b1_reg) / 2; cnt++) 
			fw->fpm_b1_reg[cnt] = RD_REG_WORD(dmp_reg++);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC); 
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
 		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = mb2;
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to dump firmware (%x)!!!\n", rval);

		free_pages((unsigned long)ha->fw_dump, ha->fw_dump_order);
		ha->fw_dump = NULL;
	} else {
		qla_printk(KERN_INFO, ha,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    ha->host_no, ha->fw_dump);
	}

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
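Both dump routines repeat the same per-word handshake for reading RISC RAM: write the address into mailbox 1, raise the host interrupt, poll for the mailbox completion, and collect mailbox 0 (status) and mailbox 2 (data). A hypothetical helper that factors out the 2100/2200 flavour of that handshake, assuming mailbox 0 has already been loaded with the read command and using only the macros that appear above:

/*
 * Read one word of RISC RAM.  Mailbox 0 must already contain
 * MBC_READ_RAM_WORD (or MBC_READ_RAM_EXTENDED with mailbox 8 set up).
 * Returns the masked mailbox-0 status, i.e. QLA_SUCCESS on completion.
 */
static int
qla2100_read_ram_word(scsi_qla_host_t *ha, device_reg_t __iomem *reg,
    uint16_t risc_address, uint16_t *data)
{
	uint32_t timer;
	uint16_t mb0 = 0, mb2 = 0;

	WRT_MAILBOX_REG(ha, reg, 1, risc_address);
	WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

	for (timer = 6000000; timer != 0; timer--) {
		/* Check for pending interrupts. */
		if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
			if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
				set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

				mb0 = RD_MAILBOX_REG(ha, reg, 0);
				mb2 = RD_MAILBOX_REG(ha, reg, 2);

				/* Release mailbox registers. */
				WRT_REG_WORD(&reg->semaphore, 0);
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
				break;
			}
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
		udelay(5);
	}

	if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags))
		return QLA_FUNCTION_FAILED;

	*data = mb2;
	return mb0 & MBS_MASK;
}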
Example 4
/**
 * qla2x00_intr_handler() - Process interrupts for the ISP.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 * @regs: CPU register context at interrupt time (unused)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2x00_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	scsi_qla_host_t	*ha;
	device_reg_t	*reg;
	uint32_t	mbx;
	int		status = 0;
	unsigned long	flags = 0;
	unsigned long	mbx_flags = 0;
	unsigned long	intr_iter;
	uint32_t	stat;
	uint16_t	hccr;

	/* Don't loop forever, interrupts are OFF */
	intr_iter = 50; 

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = ha->iobase;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	for (;;) {
		/* Relax CPU! */
		if (!(intr_iter--))
			break;

		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
				break;

			if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);

				/* Get mailbox data. */
				mbx = RD_MAILBOX_REG(ha, reg, 0);
				if (mbx > 0x3fff && mbx < 0x8000) {
					qla2x00_mbx_completion(ha,
					    (uint16_t)mbx);
					status |= MBX_INTERRUPT;
				} else if (mbx > 0x7fff && mbx < 0xc000) {
					qla2x00_async_event(ha, mbx);
				} else {
					/*EMPTY*/
					DEBUG2(printk("scsi(%ld): Unrecognized "
					    "interrupt type (%d)\n",
					    ha->host_no, mbx));
				}
				/* Release mailbox registers. */
				WRT_REG_WORD(&reg->semaphore, 0);
				/* Workaround for ISP2100 chip. */
				if (IS_QLA2100(ha))
					RD_REG_WORD(&reg->semaphore);
			} else {
				qla2x00_process_response_queue(ha);
	
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
		} else /* IS_QLA23XX(ha) */ {
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if ((stat & HSR_RISC_INT) == 0)
				break;

			mbx = MSW(stat);
			switch (stat & 0xff) {
			case 0x13:
				qla2x00_process_response_queue(ha);
				break;
			case 0x1:
			case 0x2:
			case 0x10:
			case 0x11:
				qla2x00_mbx_completion(ha, (uint16_t)mbx);
				status |= MBX_INTERRUPT;

				/* Release mailbox registers. */
				WRT_REG_WORD(&reg->semaphore, 0);
				break;
			case 0x12:
				qla2x00_async_event(ha, mbx);
				break;
			case 0x15:
				mbx = mbx << 16 | MBA_CMPLT_1_16BIT;
				qla2x00_async_event(ha, mbx);
				break;
			case 0x16:
				mbx = mbx << 16 | MBA_SCSI_COMPLETION;
				qla2x00_async_event(ha, mbx);
				break;
			default:
				hccr = RD_REG_WORD(&reg->hccr);
				if (hccr & HCCR_RISC_PAUSE) {
					qla_printk(KERN_INFO, ha,
					    "RISC paused, dumping HCCR=%x\n",
					    hccr);

					/*
					 * Issue a "HARD" reset in order for
					 * the RISC interrupt bit to be
					 * cleared.  Schedule a big hammer to
					 * get out of the RISC PAUSED state.
					 */
					WRT_REG_WORD(&reg->hccr,
					    HCCR_RESET_RISC);
					RD_REG_WORD(&reg->hccr);
					set_bit(ISP_ABORT_NEEDED,
					    &ha->dpc_flags);
					break;
				} else {
					DEBUG2(printk("scsi(%ld): Unrecognized "
					    "interrupt type (%d)\n",
					    ha->host_no, stat & 0xff));
				}
				break;
			}
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	qla2x00_next(ha);
	ha->last_irq_cpu = smp_processor_id();
	ha->total_isr_cnt++;

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {

		/* There was a mailbox completion */
		DEBUG3(printk("%s(%ld): Going to get mbx reg lock.\n",
		    __func__, ha->host_no));

		spin_lock_irqsave(&ha->mbx_reg_lock, mbx_flags);

		if (ha->mcp == NULL) {
			DEBUG3(printk("%s(%ld): Error mbx pointer.\n",
			    __func__, ha->host_no));
		} else {
			DEBUG3(printk("%s(%ld): Going to set mbx intr flags. "
			    "cmd=%x.\n",
			    __func__, ha->host_no, ha->mcp->mb[0]));
		}
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

		DEBUG3(printk("%s(%ld): Going to wake up mbx function for "
		    "completion.\n",
		    __func__, ha->host_no));

		up(&ha->mbx_intr_sem);

		DEBUG3(printk("%s(%ld): Going to release mbx reg lock.\n",
		    __func__, ha->host_no));

		spin_unlock_irqrestore(&ha->mbx_reg_lock, mbx_flags);
	}

	if (!list_empty(&ha->done_queue))
		qla2x00_done(ha);

	/* Wakeup the DPC routine */
	if ((!ha->flags.mbox_busy &&
	    (test_bit(ISP_ABORT_NEEDED, &ha->dpc_flags) ||
		test_bit(RESET_MARKER_NEEDED, &ha->dpc_flags) ||
		test_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags))) &&
		    ha->dpc_wait && !ha->dpc_active) {

		up(ha->dpc_wait);
	}

	return (IRQ_HANDLED);
}
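A handler with this signature is installed through request_irq() during adapter setup. The sketch below shows the general shape of that registration; it assumes the pre-2.6.19 request_irq() prototype (matching the struct pt_regs * parameter above), the old SA_SHIRQ/SA_INTERRUPT flag names of that era, and that ha->pdev points at the adapter's PCI device, none of which is taken from this excerpt:

/* Sketch only, not the driver's actual probe code. */
static int
qla2x00_request_irq_example(scsi_qla_host_t *ha)
{
	int ret;

	/* Flag names assumed from kernels of this era (pre-IRQF_*). */
	ret = request_irq(ha->pdev->irq, qla2x00_intr_handler,
	    SA_INTERRUPT | SA_SHIRQ, "qla2xxx", ha);
	if (ret)
		printk(KERN_WARNING
		    "qla2xxx: failed to request IRQ %d (%d)\n",
		    ha->pdev->irq, ret);
	return ret;
}

The matching teardown would be free_irq(ha->pdev->irq, ha), passing the same dev_id cookie so the handler's (scsi_qla_host_t *) cast stays valid.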
Example 5
/**
 * qla2x00_reset_chip() - Reset ISP chip.
 * @vha: HA context
 */
void
qla2x00_reset_chip(scsi_qla_host_t *vha)
{
	unsigned long   flags = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	cnt;
	uint16_t	cmd;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return;

	ha->isp_ops->disable_intrs(ha);

	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Turn off master enable */
	cmd = 0;
	pci_read_config_word(ha->pdev, PCI_COMMAND, &cmd);
	cmd &= ~PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	if (!IS_QLA2100(ha)) {
		/* Pause RISC. */
		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		if (IS_QLA2200(ha) || IS_QLA2300(ha)) {
			for (cnt = 0; cnt < 30000; cnt++) {
				if ((RD_REG_WORD(&reg->hccr) &
				    HCCR_RISC_PAUSE) != 0)
					break;
				udelay(100);
			}
		} else {
			RD_REG_WORD(&reg->hccr);	/* PCI Posting. */
			udelay(10);
		}

		/* Select FPM registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* FPM Soft Reset. */
		WRT_REG_WORD(&reg->fpm_diag_config, 0x100);
		RD_REG_WORD(&reg->fpm_diag_config);	/* PCI Posting. */

		/* Toggle Fpm Reset. */
		if (!IS_QLA2200(ha)) {
			WRT_REG_WORD(&reg->fpm_diag_config, 0x0);
			RD_REG_WORD(&reg->fpm_diag_config); /* PCI Posting. */
		}

		/* Select frame buffer registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset frame buffer FIFOs. */
		if (IS_QLA2200(ha)) {
			WRT_FB_CMD_REG(ha, reg, 0xa000);
			RD_FB_CMD_REG(ha, reg);		/* PCI Posting. */
		} else {
			WRT_FB_CMD_REG(ha, reg, 0x00fc);

			/* Read back fb_cmd until zero or 3 seconds max */
			for (cnt = 0; cnt < 3000; cnt++) {
				if ((RD_FB_CMD_REG(ha, reg) & 0xff) == 0)
					break;
				udelay(100);
			}
		}

		/* Select RISC module registers. */
		WRT_REG_WORD(&reg->ctrl_status, 0);
		RD_REG_WORD(&reg->ctrl_status);		/* PCI Posting. */

		/* Reset RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */

		/* Release RISC processor. */
		WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
	WRT_REG_WORD(&reg->hccr, HCCR_CLR_HOST_INT);

	/* Reset ISP chip. */
	WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);

	/* Wait for RISC to recover from reset. */
	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		/*
		 * It is necessary to delay here since the card doesn't
		 * respond to PCI reads during a reset. On some architectures
		 * this will result in an MCA.
		 */
		udelay(20);
		for (cnt = 30000; cnt; cnt--) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;
			udelay(100);
		}
	} else
		udelay(10);

	/* Reset RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);

	WRT_REG_WORD(&reg->semaphore, 0);

	/* Release RISC processor. */
	WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
	RD_REG_WORD(&reg->hccr);			/* PCI Posting. */

	if (IS_QLA2100(ha) || IS_QLA2200(ha) || IS_QLA2300(ha)) {
		for (cnt = 0; cnt < 30000; cnt++) {
			if (RD_MAILBOX_REG(ha, reg, 0) != MBS_BUSY)
				break;

			udelay(100);
		}
	} else
		udelay(100);

	/* Turn on master enable */
	cmd |= PCI_COMMAND_MASTER;
	pci_write_config_word(ha->pdev, PCI_COMMAND, cmd);

	/* Disable RISC pause on FPM parity error. */
	if (!IS_QLA2100(ha)) {
		WRT_REG_WORD(&reg->hccr, HCCR_DISABLE_PARITY_PAUSE);
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
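Most writes in this routine are followed by a read of the same register, flagged with the /* PCI Posting. */ comments: on PCI the write may be held in a posting buffer, and only a read from the device forces it to complete before the subsequent delay or state change. A generic illustration of the pattern with the raw MMIO accessors (the wrapper name is hypothetical; RD_REG_WORD/WRT_REG_WORD presumably reduce to something similar):

/* Write a 16-bit register and flush the posted write by reading it back. */
static inline void
example_write_word_flush(uint16_t __iomem *addr, uint16_t val)
{
	writew(val, addr);	/* may be posted by the PCI bridge */
	(void) readw(addr);	/* read forces the write to reach the device */
}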
Example 6
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mbx: combined status value (event code in LSW, mailbox 1 in MSW)
 */
static void
qla2x00_async_event(scsi_qla_host_t *ha, uint32_t mbx)
{
	static char	*link_speeds[5] = { "1", "2", "4", "?", "10" };
	char		*link_speed;
	uint16_t	mb[4];
	uint16_t	handle_cnt;
	uint16_t	cnt;
	uint32_t	handles[5];
	device_reg_t	*reg = ha->iobase;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	mb[0] = LSW(mbx);
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			handles[0] = le32_to_cpu(
			    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) |
			    RD_MAILBOX_REG(ha, reg, 1));
		else
			handles[0] = le32_to_cpu(
			    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) |
			    MSW(mbx));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1);
		else
			handles[0] = MSW(mbx);
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1);
		handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2);
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1);
		handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2);
		handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3);
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1);
		handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2);
		handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3);
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = (uint32_t)RD_MAILBOX_REG(ha, reg, 1);
		handles[1] = (uint32_t)RD_MAILBOX_REG(ha, reg, 2);
		handles[2] = (uint32_t)RD_MAILBOX_REG(ha, reg, 3);
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 2) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 1));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	mb[0] = LSW(mbx);
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			qla2100_fw_dump(ha, 1);
		else
	    		qla2300_fw_dump(ha, 1);
		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);

		DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_LIP_OCCURRED, NULL);

		ha->total_lip_cnt++;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);

		ha->link_data_rate = 0;
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
		} else {
			link_speed = link_speeds[3];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_LOOP_UP, NULL);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected.\n");

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = 0;

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_LOOP_DOWN, NULL);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);

		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occured (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_LIP_RESET, NULL);

		ha->total_lip_cnt++;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		mb[1] = RD_MAILBOX_REG(ha, reg, 1);

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);  
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);

		/*
		 * If a single remote port just logged into (or logged out of)
		 * us, create a new entry in our rscn fcports list and handle
		 * the event like an RSCN.
		 */
		if (!IS_QLA2100(ha) && !IS_QLA2200(ha) && !IS_QLA6312(ha) &&
		    !IS_QLA6322(ha) && ha->flags.init_done && mb[1] != 0xffff &&
		    ((ha->operating_mode == P2P && mb[1] != 0) ||
		    (ha->operating_mode != P2P && mb[1] !=
			SNS_FIRST_LOOP_ID)) && (mb[2] == 6 || mb[2] == 7)) {
			int rval;
			fc_port_t *rscn_fcport;

			/* Create new fcport for login. */
			rscn_fcport = qla2x00_alloc_rscn_fcport(ha, GFP_ATOMIC);
			if (rscn_fcport) {
				DEBUG14(printk("scsi(%ld): Port Update -- "
				    "creating RSCN fcport %p for login.\n",
				    ha->host_no, rscn_fcport));

				rscn_fcport->loop_id = mb[1];
				rscn_fcport->d_id.b24 = INVALID_PORT_ID;
				atomic_set(&rscn_fcport->state,
				    FCS_DEVICE_LOST);
				list_add_tail(&rscn_fcport->list,
				    &ha->rscn_fcports);

				rval = qla2x00_handle_port_rscn(ha, 0,
				    rscn_fcport, 1);
				if (rval == QLA_SUCCESS)
					break;
			} else {
				DEBUG14(printk("scsi(%ld): Port Update -- "
				    "-- unable to allocate RSCN fcport "
				    "login.\n", ha->host_no));
			}
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored.\n", ha->host_no));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		atomic_set(&ha->loop_down_timer, 0);
		qla2x00_mark_all_devices_lost(ha);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_PORT_UPDATE, NULL);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);

		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}

		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);

		/* Update AEN queue. */
		qla2x00_enqueue_aen(ha, MBA_RSCN_UPDATE, &mb[0]);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		qla2x00_process_response_queue(ha);
		break;
	}
}
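In this variant of qla2x00_async_event() a single 32-bit mbx argument carries both halves of the hardware status: the event code travels in the low 16 bits and the outbound mailbox-1 payload in the high 16 bits, which is why MBA_CMPLT_1_16BIT pulls its handle out of MSW(mbx). A small illustration of that packing, assuming LSW()/MSW() extract the low and high 16-bit halves (the function name is hypothetical):

static void
example_decode_async_mbx(uint32_t mbx)
{
	uint16_t event = LSW(mbx);	/* e.g. MBA_CMPLT_1_16BIT */
	uint16_t mb1   = MSW(mbx);	/* mailbox 1: handle, LIP code, ... */

	printk("async event %04x, mailbox1 %04x\n", event, mb1);
}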
Example 7
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 * @regs: CPU register context at interrupt time (unused)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	flags;
	unsigned long	iter;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(ha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(ha, mb);
			} else {
				/*EMPTY*/
				DEBUG2(printk("scsi(%ld): Unrecognized "
				    "interrupt type (%d).\n",
				    ha->host_no, mb[0]));
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(ha);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);

		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);

		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}
Example 8
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
static void
qla2x00_async_event(scsi_qla_host_t *ha, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char	*link_speeds[5] = { "1", "2", "?", "4", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt;
	uint32_t	handles[5];
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}

	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!ha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(ha, handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n", ha->host_no));

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mb[1] = RD_MAILBOX_REG(ha, reg, 1);
		mb[2] = RD_MAILBOX_REG(ha, reg, 2);
		mb[3] = RD_MAILBOX_REG(ha, reg, 3);

		qla_printk(KERN_INFO, ha,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
		    mb[1], mb[2], mb[3]);

		ha->isp_ops.fw_dump(ha, 1);

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				qla_printk(KERN_ERR, ha,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				ha->flags.online = 0;
			} else
				set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		} else if (mb[1] == 0) {
			qla_printk(KERN_INFO, ha,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			ha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
		    ha->host_no));
		qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
		    ha->host_no));
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		DEBUG2(printk("scsi(%ld): LIP occured (%x).\n", ha->host_no,
		    mb[1]));
		qla_printk(KERN_INFO, ha, "LIP occured (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = LDR_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			ha->link_data_rate = mb[1];
		}

		DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
		    ha->host_no, link_speed));
		qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
		    link_speed);

		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			ha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		ha->flags.management_server_logged_in = 0;
		ha->link_data_rate = LDR_UNKNOWN;
		if (ql2xfdmienable)
			set_bit(REGISTER_FDMI_NEEDED, &ha->dpc_flags);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
		    ha->host_no, mb[1]));
		qla_printk(KERN_INFO, ha,
		    "LIP reset occured (%x).\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			atomic_set(&ha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);

		ha->operating_mode = LOOP;
		ha->flags.management_server_logged_in = 0;
		break;

	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE received.\n",
		    ha->host_no));

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &ha->dpc_flags))) {
			set_bit(RESET_MARKER_NEEDED, &ha->dpc_flags);
		}
		set_bit(REGISTER_FC4_NEEDED, &ha->dpc_flags);
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
		    "received.\n",
		    ha->host_no));
		qla_printk(KERN_INFO, ha,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&ha->loop_state) != LOOP_DOWN) {
			atomic_set(&ha->loop_state, LOOP_DOWN);
			if (!atomic_read(&ha->loop_down_timer))
				atomic_set(&ha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(ha, 1);
		}

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and wait for RSCN to come in.
		 */
		atomic_set(&ha->loop_down_timer, 0);
		if (atomic_read(&ha->loop_state) != LOOP_DOWN &&
		    atomic_read(&ha->loop_state) != LOOP_DEAD) {
			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
			    "ignored %04x/%04x/%04x.\n", ha->host_no, mb[1],
			    mb[2], mb[3]));
			break;
		}

		DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): Port database changed %04x %04x %04x.\n",
		    ha->host_no, mb[1], mb[2], mb[3]));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&ha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(ha, 1);

		ha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &ha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): RSCN database changed -- %04x %04x.\n",
		    ha->host_no, mb[1], mb[2]));

		rscn_entry = (mb[1] << 16) | mb[2];
		host_pid = (ha->d_id.b.domain << 16) | (ha->d_id.b.area << 8) |
		    ha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			DEBUG(printk(KERN_INFO
			    "scsi(%ld): Ignoring RSCN update to local host "
			    "port ID (%06x)\n",
			    ha->host_no, host_pid));
			break;
		}

		rscn_queue_index = ha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != ha->rscn_out_ptr) {
			ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
			ha->rscn_in_ptr = rscn_queue_index;
		} else {
			ha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&ha->loop_state, LOOP_UPDATE);
		atomic_set(&ha->loop_down_timer, 0);
		ha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &ha->dpc_flags);
		set_bit(RSCN_UPDATE, &ha->dpc_flags);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		DEBUG2(printk("scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));
		DEBUG(printk(KERN_INFO
		    "scsi(%ld): [R|Z]IO update completion.\n",
		    ha->host_no));

		if (IS_QLA24XX(ha) || IS_QLA54XX(ha))
			qla24xx_process_response_queue(ha);
		else
			qla2x00_process_response_queue(ha);
		break;

	case MBA_DISCARD_RND_FRAME:
		DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
		    "%04x.\n", ha->host_no, mb[1], mb[2], mb[3]));
		break;

	case MBA_TRACE_NOTIFICATION:
		DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
		ha->host_no, mb[1], mb[2]));
		break;
	}
}
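The MBA_RSCN_UPDATE arm implements a small circular queue: the producer index advances with wrap-around, and if it would collide with the consumer index the entry is dropped and rscn_queue_overflow is raised so a full fabric rescan happens later. The same logic, lifted into a standalone sketch (the helper name is hypothetical):

static void
example_rscn_enqueue(scsi_qla_host_t *ha, uint32_t rscn_entry)
{
	uint8_t next = ha->rscn_in_ptr + 1;

	/* Wrap the producer index. */
	if (next == MAX_RSCN_COUNT)
		next = 0;

	if (next != ha->rscn_out_ptr) {
		/* Room available: store the entry, publish the new index. */
		ha->rscn_queue[ha->rscn_in_ptr] = rscn_entry;
		ha->rscn_in_ptr = next;
	} else {
		/* Queue full: remember the overflow for later processing. */
		ha->flags.rscn_queue_overflow = 1;
	}
}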
Example 9
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 * @regs: CPU register context at interrupt time (unused)
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id, struct pt_regs *regs)
{
	scsi_qla_host_t	*ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	flags;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];

	ha = (scsi_qla_host_t *) dev_id;
	if (!ha) {
		printk(KERN_INFO
		    "%s(): NULL host pointer\n", __func__);
		return (IRQ_NONE);
	}

	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				qla_printk(KERN_INFO, ha,
				    "Parity error -- HCCR=%x.\n", hccr);
			else
				qla_printk(KERN_INFO, ha,
				    "RISC paused -- HCCR=%x.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);
			set_bit(ISP_ABORT_NEEDED, &ha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(ha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(ha, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(ha);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(ha, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(ha, mb);
			break;
		default:
			DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
			    "(%d).\n",
			    ha->host_no, stat & 0xff));
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		spin_lock_irqsave(&ha->mbx_reg_lock, flags);

		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		up(&ha->mbx_intr_sem);

		spin_unlock_irqrestore(&ha->mbx_reg_lock, flags);
	}

	return (IRQ_HANDLED);
}
Example 10
/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @ha: HA context
 * @hardware_locked: non-zero if the caller already holds the hardware_lock
 */
void
qla2100_fw_dump(scsi_qla_host_t *ha, int hardware_locked)
{
	int		rval;
	uint32_t	cnt, timer;
	uint16_t	risc_address;
	uint16_t	mb0, mb2;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long	flags;
	struct qla2100_fw_dump	*fw;

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla2100_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
 		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
 		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to dump firmware (%x)!!!\n", rval);
		ha->fw_dumped = 0;

	} else {
		qla_printk(KERN_INFO, ha,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    ha->host_no, ha->fw_dump);
		ha->fw_dumped = 1;
	}

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
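Compared with Example 3, the register-window loops are folded into qla2xxx_read_window(), whose definition is not part of this excerpt. Judging from the older open-coded loops, it reads count consecutive 16-bit registers from the window that the preceding ctrl_status/pcr write selected and stores them big-endian for the dump format. A plausible stand-in, treating the 0x80 window offset as an assumption carried over from the older code:

/* Hypothetical stand-in for qla2xxx_read_window(). */
static void
example_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
    uint16_t *buf)
{
	/* Window offset assumed from the older open-coded dump loops. */
	uint16_t __iomem *dmp_reg =
	    (uint16_t __iomem *)((uint8_t __iomem *)reg + 0x80);

	for ( ; count--; dmp_reg++)
		*buf++ = htons(RD_REG_WORD(dmp_reg));
}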