Example #1
/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}
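
The pattern that Example #1 (and most of the probes below) follows can be distilled into the short sketch here. This is not code from any driver in this listing: my_host_template, my_priv and my_probe are placeholder names and the limits are arbitrary; only the call order matters — allocate, describe the adapter, scsi_add_host(), scan, and drop the reference with scsi_host_put() if registration fails.

#include <linux/device.h>
#include <scsi/scsi_host.h>

struct my_priv {
	void __iomem *regs;	/* whatever per-host state the driver needs */
};

static struct scsi_host_template my_host_template;	/* .name, .queuecommand, ... assumed set up as in the examples */

static int my_probe(struct device *dev)
{
	struct Scsi_Host *shost;
	int err;

	/* Allocate the host; the midlayer reserves sizeof(struct my_priv)
	 * bytes of per-host private data, reachable via shost_priv(). */
	shost = scsi_host_alloc(&my_host_template, sizeof(struct my_priv));
	if (!shost)
		return -ENOMEM;

	/* Describe the adapter's limits before registering it. */
	shost->max_id = 8;
	shost->max_lun = 8;
	shost->max_channel = 0;

	/* Register with the midlayer.  On failure the only cleanup owed here
	 * is dropping the reference taken by scsi_host_alloc(). */
	err = scsi_add_host(shost, dev);
	if (err) {
		scsi_host_put(shost);
		return err;
	}

	/* Trigger device discovery. */
	scsi_scan_host(shost);
	return 0;
}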
Example #2
/**
 * fcoe_sw_shost_config - sets up fc_lport->host
 * @lp : ptr to the fc_lport
 * @shost : ptr to the associated scsi host
 * @dev : device associated to scsi host
 *
 * Must be called after fcoe_sw_lport_config() and fcoe_sw_netdev_config()
 *
 * Returns : 0 for success
 *
 */
static int fcoe_sw_shost_config(struct fc_lport *lp, struct Scsi_Host *shost,
				struct device *dev)
{
	int rc = 0;

	/* lport scsi host config */
	lp->host = shost;

	lp->host->max_lun = FCOE_MAX_LUN;
	lp->host->max_id = FCOE_MAX_FCP_TARGET;
	lp->host->max_channel = 0;
	lp->host->transportt = scsi_transport_fcoe_sw;

	/* add the new host to the SCSI-ml */
	rc = scsi_add_host(lp->host, dev);
	if (rc) {
		FC_DBG("fcoe_sw_shost_config:error on scsi_add_host\n");
		return rc;
	}
	sprintf(fc_host_symbolic_name(lp->host), "%s v%s over %s",
		FCOE_SW_NAME, FCOE_SW_VERSION,
		fcoe_netdev(lp)->name);

	return 0;
}
Example #3
static int __devinit a2091_probe(struct zorro_dev *z,
				 const struct zorro_device_id *ent)
{
	struct Scsi_Host *instance;
	int error;
	struct a2091_scsiregs *regs;
	wd33c93_regs wdregs;
	struct a2091_hostdata *hdata;

	if (!request_mem_region(z->resource.start, 256, "wd33c93"))
		return -EBUSY;

	instance = scsi_host_alloc(&a2091_scsi_template,
				   sizeof(struct a2091_hostdata));
	if (!instance) {
		error = -ENOMEM;
		goto fail_alloc;
	}

	instance->irq = IRQ_AMIGA_PORTS;
	instance->unique_id = z->slotaddr;

	regs = (struct a2091_scsiregs *)ZTWO_VADDR(z->resource.start);
	regs->DAWR = DAWR_A2091;

	wdregs.SASR = &regs->SASR;
	wdregs.SCMD = &regs->SCMD;

	hdata = shost_priv(instance);
	hdata->wh.no_sync = 0xff;
	hdata->wh.fast = 0;
	hdata->wh.dma_mode = CTRL_DMA;
	hdata->regs = regs;

	wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_8_10);
	error = request_irq(IRQ_AMIGA_PORTS, a2091_intr, IRQF_SHARED,
			    "A2091 SCSI", instance);
	if (error)
		goto fail_irq;

	regs->CNTR = CNTR_PDMD | CNTR_INTEN;

	error = scsi_add_host(instance, NULL);
	if (error)
		goto fail_host;

	zorro_set_drvdata(z, instance);

	scsi_scan_host(instance);
	return 0;

fail_host:
	free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
	scsi_host_put(instance);
fail_alloc:
	release_mem_region(z->resource.start, 256);
	return error;
}
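
Example #3 reaches its per-host state through shost_priv(), while Example #1 stored a pointer in hostdata[0]; both conventions recur throughout this listing. The sketch below contrasts them — struct my_dev and the helper names are placeholders, not symbols from any of the drivers shown.

#include <scsi/scsi_host.h>

struct my_dev {
	int id;
};

/* Convention (a): the state is embedded in the Scsi_Host allocation itself.
 * scsi_host_alloc(tmpl, sizeof(struct my_dev)) reserved the space, and
 * shost_priv() returns a pointer to it (Examples #3, #4, #5, #16, ...). */
static struct my_dev *embedded_style(struct Scsi_Host *shost)
{
	return shost_priv(shost);
}

/* Convention (b): the state is allocated elsewhere and only a pointer is
 * parked in hostdata[0] (Examples #1, #13, #23); callers cast it back. */
static struct my_dev *pointer_style(struct Scsi_Host *shost)
{
	return (struct my_dev *)shost->hostdata[0];
}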
Example #4
static int __init mvme147_init(void)
{
	wd33c93_regs regs;
	struct WD33C93_hostdata *hdata;
	int error = -ENOMEM;

	if (!MACH_IS_MVME147)
		return 0;

	mvme147_shost = scsi_host_alloc(&mvme147_host_template,
			sizeof(struct WD33C93_hostdata));
	if (!mvme147_shost)
		goto err_out;
	mvme147_shost->base = 0xfffe4000;
	mvme147_shost->irq = MVME147_IRQ_SCSI_PORT;

	regs.SASR = (volatile unsigned char *)0xfffe4000;
	regs.SCMD = (volatile unsigned char *)0xfffe4001;

	hdata = shost_priv(mvme147_shost);
	hdata->no_sync = 0xff;
	hdata->fast = 0;
	hdata->dma_mode = CTRL_DMA;

	wd33c93_init(mvme147_shost, regs, dma_setup, dma_stop, WD33C93_FS_8_10);

	error = request_irq(MVME147_IRQ_SCSI_PORT, mvme147_intr, 0,
			"MVME147 SCSI PORT", mvme147_shost);
	if (error)
		goto err_unregister;
	error = request_irq(MVME147_IRQ_SCSI_DMA, mvme147_intr, 0,
			"MVME147 SCSI DMA", mvme147_shost);
	if (error)
		goto err_free_irq;
#if 0	/* Disabled; causes problems booting */
	m147_pcc->scsi_interrupt = 0x10;	/* Assert SCSI bus reset */
	udelay(100);
	m147_pcc->scsi_interrupt = 0x00;	/* Negate SCSI bus reset */
	udelay(2000);
	m147_pcc->scsi_interrupt = 0x40;	/* Clear bus reset interrupt */
#endif
	m147_pcc->scsi_interrupt = 0x09;	/* Enable interrupt */

	m147_pcc->dma_cntrl = 0x00;	/* ensure DMA is stopped */
	m147_pcc->dma_intr = 0x89;	/* Ack and enable ints */

	error = scsi_add_host(mvme147_shost, NULL);
	if (error)
		goto err_free_dma_irq;
	scsi_scan_host(mvme147_shost);
	return 0;

err_free_dma_irq:
	free_irq(MVME147_IRQ_SCSI_DMA, mvme147_shost);
err_free_irq:
	free_irq(MVME147_IRQ_SCSI_PORT, mvme147_shost);
err_unregister:
	scsi_host_put(mvme147_shost);
err_out:
	return error;
}
Example #5
static int dmx3191d_probe_one(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	struct Scsi_Host *shost;
	struct NCR5380_hostdata *hostdata;
	unsigned long io;
	int error = -ENODEV;

	if (pci_enable_device(pdev))
		goto out;

	io = pci_resource_start(pdev, 0);
	if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
		printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
				io, io + DMX3191D_REGION_LEN);
		goto out_disable_device;
	}

	shost = scsi_host_alloc(&dmx3191d_driver_template,
			sizeof(struct NCR5380_hostdata));
	if (!shost)
		goto out_release_region;       

	hostdata = shost_priv(shost);
	hostdata->base = io;

	/* This card does not seem to raise an interrupt on pdev->irq.
	 * Steam-powered SCSI controllers run without an IRQ anyway.
	 */
	shost->irq = NO_IRQ;

	error = NCR5380_init(shost, 0);
	if (error)
		goto out_host_put;

	NCR5380_maybe_reset_bus(shost);

	pci_set_drvdata(pdev, shost);

	error = scsi_add_host(shost, &pdev->dev);
	if (error)
		goto out_exit;

	scsi_scan_host(shost);
	return 0;

out_exit:
	NCR5380_exit(shost);
out_host_put:
	scsi_host_put(shost);
 out_release_region:
	release_region(io, DMX3191D_REGION_LEN);
 out_disable_device:
	pci_disable_device(pdev);
 out:
	return error;
}
Example #6
static int __devinit
cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
	if (!host)
		goto out;

	host->io_port = ecard_address(ec, ECARD_IOC, ECARD_SLOW) + 0x800;
	host->irq = ec->irq;

	NCR5380_init(host, 0);

	host->n_io_port = 255;
	if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
		ret = -EBUSY;
		goto out_free;
	}

	((struct NCR5380_hostdata *)host->hostdata)->ctrl = 0;
	outb(0x00, host->io_port - 577);

	ret = request_irq(host->irq, cumanascsi_intr, SA_INTERRUPT,
			  "CumanaSCSI-1", host);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		    host->host_no, host->irq, ret);
		goto out_release;
	}

	printk("scsi%d: at port 0x%08lx irq %d",
		host->host_no, host->io_port, host->irq);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
		host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_free_irq;

	scsi_scan_host(host);
	goto out;

 out_free_irq:
	free_irq(host->irq, host);
 out_release:
	release_region(host->io_port, host->n_io_port);
 out_free:
	scsi_host_put(host);
 out:
	return ret;
}
Example #7
static struct Scsi_Host *qlogic_detect(struct scsi_host_template *host,
				struct pcmcia_device *link, int qbase, int qlirq)
{
	int qltyp;		/* type of chip */
	int qinitid;
	struct Scsi_Host *shost;	/* registered host structure */
	struct qlogicfas408_priv *priv;

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	/* if no ID, use 7 */

	qlogicfas408_setup(qbase, qinitid, INT_TYPE);

	host->name = qlogic_name;
	shost = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!shost)
		goto err;
	shost->io_port = qbase;
	shost->n_io_port = 16;
	shost->dma_channel = -1;
	if (qlirq != -1)
		shost->irq = qlirq;

	priv = get_priv_by_host(shost);
	priv->qlirq = qlirq;
	priv->qbase = qbase;
	priv->qinitid = qinitid;
	priv->shost = shost;
	priv->int_type = INT_TYPE;					

	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogic_name, shost))
		goto free_scsi_host;

	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);

	if (scsi_add_host(shost, NULL))
		goto free_interrupt;

	scsi_scan_host(shost);

	return shost;

free_interrupt:
	free_irq(qlirq, shost);

free_scsi_host:
	scsi_host_put(shost);
	
err:
	return NULL;
}
Example #8
static int __devinit dmx3191d_probe_one(struct pci_dev *pdev,
        const struct pci_device_id *id)
{
    struct Scsi_Host *shost;
    unsigned long io;
    int error = -ENODEV;

    if (pci_enable_device(pdev))
        goto out;

    io = pci_resource_start(pdev, 0);
    if (!request_region(io, DMX3191D_REGION_LEN, DMX3191D_DRIVER_NAME)) {
        printk(KERN_ERR "dmx3191: region 0x%lx-0x%lx already reserved\n",
                io, io + DMX3191D_REGION_LEN);
        goto out_disable_device;
    }

    shost = scsi_host_alloc(&dmx3191d_driver_template,
            sizeof(struct NCR5380_hostdata));
    if (!shost)
        goto out_release_region;       
    shost->io_port = io;
    shost->irq = pdev->irq;

    NCR5380_init(shost, FLAG_NO_PSEUDO_DMA | FLAG_DTC3181E);

    if (request_irq(pdev->irq, NCR5380_intr, IRQF_SHARED,
                DMX3191D_DRIVER_NAME, shost)) {
        /*
         * Steam powered scsi controllers run without an IRQ anyway
         */
        printk(KERN_WARNING "dmx3191: IRQ %d not available - "
                    "switching to polled mode.\n", pdev->irq);
        shost->irq = SCSI_IRQ_NONE;
    }

    pci_set_drvdata(pdev, shost);

    error = scsi_add_host(shost, &pdev->dev);
    if (error)
        goto out_free_irq;

    scsi_scan_host(shost);
    return 0;

 out_free_irq:
    if (shost->irq != SCSI_IRQ_NONE)
        free_irq(shost->irq, shost);
 out_release_region:
    release_region(io, DMX3191D_REGION_LEN);
 out_disable_device:
    pci_disable_device(pdev);
 out:
    return error;
}
Example #9
static int __devinit
oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
	if (!host) {
		ret = -ENOMEM;
		goto release;
	}

	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
				   ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!priv(host)->base) {
		ret = -ENOMEM;
		goto unreg;
	}

	host->irq = IRQ_NONE;
	host->n_io_port = 255;

	NCR5380_init(host, 0);

	printk("scsi%d: at port 0x%08lx irqs disabled",
		host->host_no, host->io_port);
	printk(" options CAN_QUEUE=%d  CMD_PER_LUN=%d release=%d",
		host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_unmap;

	scsi_scan_host(host);
	goto out;

 out_unmap:
	iounmap(priv(host)->base);
 unreg:
	scsi_host_put(host);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
Example #10
File: oak.c Project: 020gzh/linux
static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
	if (!host) {
		ret = -ENOMEM;
		goto release;
	}

	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
				   ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!priv(host)->base) {
		ret = -ENOMEM;
		goto unreg;
	}

	host->irq = NO_IRQ;
	host->n_io_port = 255;

	ret = NCR5380_init(host, 0);
	if (ret)
		goto out_unmap;

	NCR5380_maybe_reset_bus(host);

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_exit;

	scsi_scan_host(host);
	goto out;

 out_exit:
	NCR5380_exit(host);
 out_unmap:
	iounmap(priv(host)->base);
 unreg:
	scsi_host_put(host);
 release:
	ecard_release_resources(ec);
 out:
	return ret;
}
Example #11
/**
 * megaraid_io_attach - attach a device with the IO subsystem
 * @adapter		: controller's soft state
 *
 * Attach this device with the IO subsystem.
 */
static int
megaraid_io_attach(adapter_t *adapter)
{
	struct Scsi_Host	*host;

	// Initialize SCSI Host structure
	host = scsi_host_alloc(&megaraid_template_g, 8);
	if (!host) {
		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_register failed\n"));

		return -1;
	}

	SCSIHOST2ADAP(host)	= (caddr_t)adapter;
	adapter->host		= host;

	host->irq		= adapter->irq;
	host->unique_id		= adapter->unique_id;
	host->can_queue		= adapter->max_cmds;
	host->this_id		= adapter->init_id;
	host->sg_tablesize	= adapter->sglen;
	host->max_sectors	= adapter->max_sectors;
	host->cmd_per_lun	= adapter->cmd_per_lun;
	host->max_channel	= adapter->max_channel;
	host->max_id		= adapter->max_target;
	host->max_lun		= adapter->max_lun;


	// notify mid-layer about the new controller
	if (scsi_add_host(host, &adapter->pdev->dev)) {

		con_log(CL_ANN, (KERN_WARNING
			"megaraid mbox: scsi_add_host failed\n"));

		scsi_host_put(host);

		return -1;
	}

	scsi_scan_host(host);

	return 0;
}
Example #12
static int fdomain_config(struct pcmcia_device *link)
{
    scsi_info_t *info = link->priv;
    int ret;
    char str[22];
    struct Scsi_Host *host;

    dev_dbg(&link->dev, "fdomain_config\n");

    ret = pcmcia_loop_config(link, fdomain_config_check, NULL);
    if (ret)
	    goto failed;

    if (!link->irq)
	    goto failed;
    ret = pcmcia_enable_device(link);
    if (ret)
	    goto failed;

    /* A bad hack... */
    release_region(link->resource[0]->start, resource_size(link->resource[0]));

    /* Set configuration options for the fdomain driver */
    sprintf(str, "%d,%d", (unsigned int) link->resource[0]->start, link->irq);
    fdomain_setup(str);

    host = __fdomain_16x0_detect(&fdomain_driver_template);
    if (!host) {
        printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
	goto failed;
    }

    if (scsi_add_host(host, NULL))
	    goto failed;
    scsi_scan_host(host);

    info->host = host;

    return 0;

failed:
    fdomain_release(link);
    return -ENODEV;
} /* fdomain_config */
Example #13
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error, host_prot;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		pr_err("Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

	host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
		    SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
		    SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;

	scsi_host_set_prot(sh, host_prot);
	scsi_host_set_guard(sh, SHOST_DIX_GUARD_CRC);

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		pr_err("%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}
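
A note on the protection setup above: the SHOST_DIF_*/SHOST_DIX_* bits passed to scsi_host_set_prot() advertise which T10 DIF/DIX protection types the host can handle, and scsi_host_set_guard() selects the guard-tag format (CRC here). As in this example, both are configured on the freshly allocated host before scsi_add_host(), so the capabilities are already in place when devices are scanned.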
Example #14
static int __devinit
oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret = -ENOMEM;

	host = scsi_host_alloc(&oakscsi_template, sizeof(struct NCR5380_hostdata));
	if (!host)
		goto out;

	host->io_port = ecard_address(ec, ECARD_MEMC, 0);
	host->irq = IRQ_NONE;
	host->n_io_port = 255;

	ret = -EBUSY;
	if (!request_region (host->io_port, host->n_io_port, "Oak SCSI"))
		goto unreg;

	NCR5380_init(host, 0);

	printk("scsi%d: at port 0x%08lx irqs disabled",
		host->host_no, host->io_port);
	printk(" options CAN_QUEUE=%d  CMD_PER_LUN=%d release=%d",
		host->can_queue, host->cmd_per_lun, OAKSCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_release;

	scsi_scan_host(host);
	goto out;

 out_release:
	release_region(host->io_port, host->n_io_port);
 unreg:
	scsi_host_put(host);
 out:
	return ret;
}
Example #15
static int tcm_loop_driver_probe(struct device *dev)
{
	struct tcm_loop_hba *tl_hba;
	struct Scsi_Host *sh;
	int error;

	tl_hba = to_tcm_loop_hba(dev);

	sh = scsi_host_alloc(&tcm_loop_driver_template,
			sizeof(struct tcm_loop_hba));
	if (!sh) {
		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
		return -ENODEV;
	}
	tl_hba->sh = sh;

	/*
	 * Assign the struct tcm_loop_hba pointer to struct Scsi_Host->hostdata
	 */
	*((struct tcm_loop_hba **)sh->hostdata) = tl_hba;
	/*
	 * Setup single ID, Channel and LUN for now..
	 */
	sh->max_id = 2;
	sh->max_lun = 0;
	sh->max_channel = 0;
	sh->max_cmd_len = TL_SCSI_MAX_CMD_LEN;

	error = scsi_add_host(sh, &tl_hba->dev);
	if (error) {
		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
		scsi_host_put(sh);
		return -ENODEV;
	}
	return 0;
}
Example #16
static int __init amiga_a3000_scsi_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct Scsi_Host *instance;
	int error;
	struct a3000_scsiregs *regs;
	wd33c93_regs wdregs;
	struct a3000_hostdata *hdata;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (!request_mem_region(res->start, resource_size(res), "wd33c93"))
		return -EBUSY;

	instance = scsi_host_alloc(&amiga_a3000_scsi_template,
				   sizeof(struct a3000_hostdata));
	if (!instance) {
		error = -ENOMEM;
		goto fail_alloc;
	}

	instance->irq = IRQ_AMIGA_PORTS;

	regs = (struct a3000_scsiregs *)ZTWO_VADDR(res->start);
	regs->DAWR = DAWR_A3000;

	wdregs.SASR = &regs->SASR;
	wdregs.SCMD = &regs->SCMD;

	hdata = shost_priv(instance);
	hdata->wh.no_sync = 0xff;
	hdata->wh.fast = 0;
	hdata->wh.dma_mode = CTRL_DMA;
	hdata->regs = regs;

	wd33c93_init(instance, wdregs, dma_setup, dma_stop, WD33C93_FS_12_15);
	error = request_irq(IRQ_AMIGA_PORTS, a3000_intr, IRQF_SHARED,
			    "A3000 SCSI", instance);
	if (error)
		goto fail_irq;

	regs->CNTR = CNTR_PDMD | CNTR_INTEN;

	error = scsi_add_host(instance, NULL);
	if (error)
		goto fail_host;

	platform_set_drvdata(pdev, instance);

	scsi_scan_host(instance);
	return 0;

fail_host:
	free_irq(IRQ_AMIGA_PORTS, instance);
fail_irq:
	scsi_host_put(instance);
fail_alloc:
	release_mem_region(res->start, resource_size(res));
	return error;
}
Example #17
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock protects (struct zfcp_fsf_req *)->data against concurrent
	 * processing in the abort handler and in the normal command handler.
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

/**
 * zfcp_scsi_task_mgmt_function() - Send a task management function (sync).
 * @sdev: Pointer to SCSI device to send the task management command to.
 * @tm_flags: Task management flags,
 *	      here we only handle %FCP_TMF_TGT_RESET or %FCP_TMF_LUN_RESET.
 */
static int zfcp_scsi_task_mgmt_function(struct scsi_device *sdev, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(sdev, tm_flags);
		if (fsf_req)
			break;

		zfcp_dbf_scsi_devreset("wait", sdev, tm_flags, NULL);
		zfcp_erp_wait(adapter);
		ret = fc_block_rport(rport);
		if (ret) {
			zfcp_dbf_scsi_devreset("fiof", sdev, tm_flags, NULL);
			return ret;
		}

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", sdev, tm_flags, NULL);
			return SUCCESS;
		}
	}
	if (!fsf_req) {
		zfcp_dbf_scsi_devreset("reqf", sdev, tm_flags, NULL);
		return FAILED;
	}

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", sdev, tm_flags, fsf_req);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", sdev, tm_flags, fsf_req);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_device *sdev = scpnt->device;

	return zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	struct scsi_target *starget = scsi_target(scpnt->device);
	struct fc_rport *rport = starget_to_rport(starget);
	struct Scsi_Host *shost = rport_to_shost(rport);
	struct scsi_device *sdev = NULL, *tmp_sdev;
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret;

	shost_for_each_device(tmp_sdev, shost) {
		if (tmp_sdev->id == starget->id) {
			sdev = tmp_sdev;
			break;
		}
	}
	if (!sdev) {
		ret = FAILED;
		zfcp_dbf_scsi_eh("tr_nosd", adapter, starget->id, ret);
		return ret;
	}

	ret = zfcp_scsi_task_mgmt_function(sdev, FCP_TMF_TGT_RESET);

	/* release reference from above shost_for_each_device */
	if (sdev)
		scsi_device_put(tmp_sdev);

	return ret;
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret = SUCCESS, fc_ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	fc_ret = fc_block_scsi_eh(scpnt);
	if (fc_ret)
		ret = fc_ret;

	zfcp_dbf_scsi_eh("schrh_r", adapter, ~0, ret);
	return ret;
}

/**
 * zfcp_scsi_sysfs_host_reset() - Support scsi_host sysfs attribute host_reset.
 * @shost: Pointer to Scsi_Host to perform action on.
 * @reset_type: We support %SCSI_ADAPTER_RESET but not %SCSI_FIRMWARE_RESET.
 *
 * Return: 0 on %SCSI_ADAPTER_RESET, -%EOPNOTSUPP otherwise.
 *
 * This is similar to zfcp_sysfs_adapter_failed_store().
 */
static int zfcp_scsi_sysfs_host_reset(struct Scsi_Host *shost, int reset_type)
{
	struct zfcp_adapter *adapter =
		(struct zfcp_adapter *)shost->hostdata[0];
	int ret = 0;

	if (reset_type != SCSI_ADAPTER_RESET) {
		ret = -EOPNOTSUPP;
		zfcp_dbf_scsi_eh("scshr_n", adapter, ~0, ret);
		return ret;
	}

	zfcp_erp_adapter_reset_sync(adapter, "scshr_y");
	return ret;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_timed_out		 = fc_eh_timed_out,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.host_reset		 = zfcp_scsi_sysfs_host_reset,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	/* report size limit per scatter-gather segment */
	.max_segment_size	 = ZFCP_QDIO_SBALE_LEN,
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
	.supported_mode		 = MODE_INITIATOR,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}
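
zfcp_scsi_adapter_unregister() above shows the Fibre Channel variant of teardown: fc_remove_host() before scsi_remove_host(), and scsi_host_put() last. For a plain host like the ones in Examples #3-#10, the remove path is usually the mirror image of probe. The sketch below is an assumption-laden illustration, not code from this listing — my_priv and my_remove are placeholders, and it presumes probe stored the MMIO mapping in the hostdata area and passed the Scsi_Host pointer as the IRQ dev_id.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <scsi/scsi_host.h>

struct my_priv {
	void __iomem *regs;
};

static void my_remove(struct Scsi_Host *shost)
{
	struct my_priv *priv = shost_priv(shost);

	scsi_remove_host(shost);	/* detach from the midlayer first */
	free_irq(shost->irq, shost);	/* then quiesce and release the hardware */
	iounmap(priv->regs);
	scsi_host_put(shost);		/* drop the last reference; frees the host */
}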

static struct fc_host_statistics*
zfcp_scsi_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

static void zfcp_scsi_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
					   struct fsf_qtcb_bottom_port *data,
					   struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_scsi_set_fc_host_stats(struct fc_host_statistics *fc_stats,
					struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
Example #18
static void fdomain_config(dev_link_t *link)
{
    client_handle_t handle = link->handle;
    scsi_info_t *info = link->priv;
    tuple_t tuple;
    cisparse_t parse;
    int i, last_ret, last_fn;
    u_char tuple_data[64];
    char str[16];
    struct Scsi_Host *host;

    DEBUG(0, "fdomain_config(0x%p)\n", link);

    tuple.DesiredTuple = CISTPL_CONFIG;
    tuple.TupleData = tuple_data;
    tuple.TupleDataMax = 64;
    tuple.TupleOffset = 0;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
    CS_CHECK(GetTupleData, pcmcia_get_tuple_data(handle, &tuple));
    CS_CHECK(ParseTuple, pcmcia_parse_tuple(handle, &tuple, &parse));
    link->conf.ConfigBase = parse.config.base;

    /* Configure card */
    link->state |= DEV_CONFIG;
    
    tuple.DesiredTuple = CISTPL_CFTABLE_ENTRY;
    CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(handle, &tuple));
    while (1) {
	if (pcmcia_get_tuple_data(handle, &tuple) != 0 ||
		pcmcia_parse_tuple(handle, &tuple, &parse) != 0)
	    goto next_entry;
	link->conf.ConfigIndex = parse.cftable_entry.index;
	link->io.BasePort1 = parse.cftable_entry.io.win[0].base;
	i = pcmcia_request_io(handle, &link->io);
	if (i == CS_SUCCESS) break;
    next_entry:
	CS_CHECK(GetNextTuple, pcmcia_get_next_tuple(handle, &tuple));
    }

    CS_CHECK(RequestIRQ, pcmcia_request_irq(handle, &link->irq));
    CS_CHECK(RequestConfiguration, pcmcia_request_configuration(handle, &link->conf));
    
    /* A bad hack... */
    release_region(link->io.BasePort1, link->io.NumPorts1);

    /* Set configuration options for the fdomain driver */
    sprintf(str, "%d,%d", link->io.BasePort1, link->irq.AssignedIRQ);
    fdomain_setup(str);
    
    host = __fdomain_16x0_detect(&fdomain_driver_template);
    if (!host) {
        printk(KERN_INFO "fdomain_cs: no SCSI devices found\n");
	goto cs_failed;
    }
 
    scsi_add_host(host, NULL); /* XXX handle failure */
    scsi_scan_host(host);

    sprintf(info->node.dev_name, "scsi%d", host->host_no);
    link->dev = &info->node;
    info->host = host;
    
    link->state &= ~DEV_CONFIG_PENDING;
    return;
    
cs_failed:
    cs_error(link->handle, last_fn, last_ret);
    fdomain_release(link);
    return;
    
} /* fdomain_config */
Example #19
static int rtsx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	printk(KERN_INFO "--- %s ---\n", DRIVER_MAKE_TIME);

	err = pci_enable_device(pci);
	if (err < 0) {
		printk(KERN_ERR "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		printk(KERN_ERR "PCI request regions for %s failed!\n", CR_DRIVER_NAME);
		pci_disable_device(pci);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		printk(KERN_ERR "Unable to allocate the scsi host\n");
		pci_release_regions(pci);
		pci_disable_device(pci);
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = (struct rtsx_chip *)kmalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (dev->chip == NULL) {
		err = -ENOMEM;
		goto errout;
	}
	memset(dev->chip, 0, sizeof(struct rtsx_chip));

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	sema_init(&(dev->sema), 0);
	init_completion(&(dev->notify));
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	printk(KERN_INFO "Resource length: 0x%x\n", (unsigned int)pci_resource_len(pci,0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci,0));
	if (dev->remap_addr == NULL) {
		printk(KERN_ERR "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	printk(KERN_INFO "Original address: 0x%lx, remapped address: 0x%lx\n", 
			(unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dma_alloc_coherent(&(pci->dev), RTSX_RESV_BUF_LEN, 
			&(dev->rtsx_resv_buf_addr), GFP_KERNEL);
	if (dev->rtsx_resv_buf == NULL) {
		printk(KERN_ERR "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;
	
	rtsx_init_options(dev->chip);

	printk(KERN_INFO "pci->irq = %d\n", pci->irq);
	
	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0) {
			dev->chip->msi_en = 0;
		}
	}
	
	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		printk(KERN_ERR "Unable to add the scsi host\n");
		goto errout;
	}

	rtsx_init_chip(dev->chip);
	
	
	th = kthread_create(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the control thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);
	
	
	th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-scanning thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the scanning thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	
	th = kthread_create(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		printk(KERN_ERR "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Take a reference to the host for the polling thread and
	 * count it among all the threads we have launched.  Then
	 * start it up. */
	scsi_host_get(rtsx_to_host(dev));
	atomic_inc(&total_threads);
	wake_up_process(th);

	pci_set_drvdata(pci, dev);

	return 0;

	
errout:
	printk(KERN_ERR "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
Example #20
static int rtsx_probe(struct pci_dev *pci,
				const struct pci_device_id *pci_id)
{
	struct Scsi_Host *host;
	struct rtsx_dev *dev;
	int err = 0;
	struct task_struct *th;

	dev_dbg(&pci->dev, "Realtek PCI-E card reader detected\n");

	err = pcim_enable_device(pci);
	if (err < 0) {
		dev_err(&pci->dev, "PCI enable device failed!\n");
		return err;
	}

	err = pci_request_regions(pci, CR_DRIVER_NAME);
	if (err < 0) {
		dev_err(&pci->dev, "PCI request regions for %s failed!\n",
			CR_DRIVER_NAME);
		return err;
	}

	/*
	 * Ask the SCSI layer to allocate a host structure, with extra
	 * space at the end for our private rtsx_dev structure.
	 */
	host = scsi_host_alloc(&rtsx_host_template, sizeof(*dev));
	if (!host) {
		dev_err(&pci->dev, "Unable to allocate the scsi host\n");
		return -ENOMEM;
	}

	dev = host_to_rtsx(host);
	memset(dev, 0, sizeof(struct rtsx_dev));

	dev->chip = kzalloc(sizeof(struct rtsx_chip), GFP_KERNEL);
	if (!dev->chip) {
		err = -ENOMEM;
		goto errout;
	}

	spin_lock_init(&dev->reg_lock);
	mutex_init(&(dev->dev_mutex));
	init_completion(&dev->cmnd_ready);
	init_completion(&dev->control_exit);
	init_completion(&dev->polling_exit);
	init_completion(&(dev->notify));
	init_completion(&dev->scanning_done);
	init_waitqueue_head(&dev->delay_wait);

	dev->pci = pci;
	dev->irq = -1;

	dev_info(&pci->dev, "Resource length: 0x%x\n",
		 (unsigned int)pci_resource_len(pci, 0));
	dev->addr = pci_resource_start(pci, 0);
	dev->remap_addr = ioremap_nocache(dev->addr, pci_resource_len(pci, 0));
	if (!dev->remap_addr) {
		dev_err(&pci->dev, "ioremap error\n");
		err = -ENXIO;
		goto errout;
	}

	/*
	 * Using "unsigned long" cast here to eliminate gcc warning in
	 * 64-bit system
	 */
	dev_info(&pci->dev, "Original address: 0x%lx, remapped address: 0x%lx\n",
		 (unsigned long)(dev->addr), (unsigned long)(dev->remap_addr));

	dev->rtsx_resv_buf = dmam_alloc_coherent(&pci->dev, RTSX_RESV_BUF_LEN,
			&dev->rtsx_resv_buf_addr, GFP_KERNEL);
	if (!dev->rtsx_resv_buf) {
		dev_err(&pci->dev, "alloc dma buffer fail\n");
		err = -ENXIO;
		goto errout;
	}
	dev->chip->host_cmds_ptr = dev->rtsx_resv_buf;
	dev->chip->host_cmds_addr = dev->rtsx_resv_buf_addr;
	dev->chip->host_sg_tbl_ptr = dev->rtsx_resv_buf + HOST_CMDS_BUF_LEN;
	dev->chip->host_sg_tbl_addr = dev->rtsx_resv_buf_addr +
				      HOST_CMDS_BUF_LEN;

	dev->chip->rtsx = dev;

	rtsx_init_options(dev->chip);

	dev_info(&pci->dev, "pci->irq = %d\n", pci->irq);

	if (dev->chip->msi_en) {
		if (pci_enable_msi(pci) < 0)
			dev->chip->msi_en = 0;
	}

	if (rtsx_acquire_irq(dev) < 0) {
		err = -EBUSY;
		goto errout;
	}

	pci_set_master(pci);
	synchronize_irq(dev->irq);

	rtsx_init_chip(dev->chip);

	/*
	 * set the supported max_lun and max_id for the scsi host
	 * NOTE: the minimal value of max_id is 1
	 */
	host->max_id = 1;
	host->max_lun = dev->chip->max_lun;

	/* Start up our control thread */
	th = kthread_run(rtsx_control_thread, dev, CR_DRIVER_NAME);
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start control thread\n");
		err = PTR_ERR(th);
		goto errout;
	}
	dev->ctl_thread = th;

	err = scsi_add_host(host, &pci->dev);
	if (err) {
		dev_err(&pci->dev, "Unable to add the scsi host\n");
		goto errout;
	}

	/* Start up the thread for delayed SCSI-device scanning */
	th = kthread_run(rtsx_scan_thread, dev, "rtsx-scan");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-scanning thread\n");
		complete(&dev->scanning_done);
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}

	/* Start up the thread for polling thread */
	th = kthread_run(rtsx_polling_thread, dev, "rtsx-polling");
	if (IS_ERR(th)) {
		dev_err(&pci->dev, "Unable to start the device-polling thread\n");
		quiesce_and_remove_host(dev);
		err = PTR_ERR(th);
		goto errout;
	}
	dev->polling_thread = th;

	pci_set_drvdata(pci, dev);

	return 0;

	/* We come here if there are any problems */
errout:
	dev_err(&pci->dev, "rtsx_probe() failed\n");
	release_everything(dev);

	return err;
}
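
Comparing this version of rtsx_probe() with Example #19: because this one relies on the managed pcim_enable_device() and dmam_alloc_coherent() helpers, its early failure paths simply return and the errout path only reports the failure and calls release_everything(); it does not have to undo the PCI enable, the region request, or the DMA buffer allocation by hand the way Example #19 does.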
Example #21
static int __devinit
cumanascsi1_probe(struct expansion_card *ec, const struct ecard_id *id)
{
	struct Scsi_Host *host;
	int ret;

	ret = ecard_request_resources(ec);
	if (ret)
		goto out;

	host = scsi_host_alloc(&cumanascsi_template, sizeof(struct NCR5380_hostdata));
	if (!host) {
		ret = -ENOMEM;
		goto out_release;
	}

	priv(host)->base = ioremap(ecard_resource_start(ec, ECARD_RES_IOCSLOW),
				   ecard_resource_len(ec, ECARD_RES_IOCSLOW));
	priv(host)->dma = ioremap(ecard_resource_start(ec, ECARD_RES_MEMC),
				  ecard_resource_len(ec, ECARD_RES_MEMC));
	if (!priv(host)->base || !priv(host)->dma) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	host->irq = ec->irq;

	NCR5380_init(host, 0);

	priv(host)->ctrl = 0;
	writeb(0, priv(host)->base + CTRL);

	host->n_io_port = 255;
	if (!(request_region(host->io_port, host->n_io_port, "CumanaSCSI-1"))) {
		ret = -EBUSY;
		goto out_unmap;
	}

	ret = request_irq(host->irq, cumanascsi_intr, IRQF_DISABLED,
			  "CumanaSCSI-1", host);
	if (ret) {
		printk("scsi%d: IRQ%d not free: %d\n",
		    host->host_no, host->irq, ret);
		goto out_unmap;
	}

	printk("scsi%d: at port 0x%08lx irq %d",
		host->host_no, host->io_port, host->irq);
	printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d",
		host->can_queue, host->cmd_per_lun, CUMANASCSI_PUBLIC_RELEASE);
	printk("\nscsi%d:", host->host_no);
	NCR5380_print_options(host);
	printk("\n");

	ret = scsi_add_host(host, &ec->dev);
	if (ret)
		goto out_free_irq;

	scsi_scan_host(host);
	goto out;

 out_free_irq:
	free_irq(host->irq, host);
 out_unmap:
	iounmap(priv(host)->base);
	iounmap(priv(host)->dma);
	scsi_host_put(host);
 out_release:
	ecard_release_resources(ec);
 out:
	return ret;
}
Example #22
static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host,
								int qbase,
								int qlirq)
{
	int qltyp;		
	int qinitid;
	struct Scsi_Host *hreg;	
	struct qlogicfas408_priv *priv;


	if (!qbase || qlirq == -1)
		goto err;

	if (!request_region(qbase, 0x10, qlogicfas_name)) {
		printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name,
							      qbase);
		goto err;
	}

	if (!qlogicfas408_detect(qbase, INT_TYPE)) {
		printk(KERN_WARNING "%s: probe failed for %#x\n",
								qlogicfas_name,
								qbase);
		goto err_release_mem;
	}

	printk(KERN_INFO "%s: Using preset base address of %03x,"
			 " IRQ %d\n", qlogicfas_name, qbase, qlirq);

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	

	qlogicfas408_setup(qbase, qinitid, INT_TYPE);

	hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!hreg)
		goto err_release_mem;
	priv = get_priv_by_host(hreg);
	hreg->io_port = qbase;
	hreg->n_io_port = 16;
	hreg->dma_channel = -1;
	if (qlirq != -1)
		hreg->irq = qlirq;
	priv->qbase = qbase;
	priv->qlirq = qlirq;
	priv->qinitid = qinitid;
	priv->shost = hreg;
	priv->int_type = INT_TYPE;

	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);
	host->name = qlogicfas_name;

	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg))
		goto free_scsi_host;

	if (scsi_add_host(hreg, NULL))
		goto free_interrupt;

	scsi_scan_host(hreg);

	return hreg;

free_interrupt:
	free_irq(qlirq, hreg);

free_scsi_host:
	scsi_host_put(hreg);

err_release_mem:
	release_region(qbase, 0x10);
err:
	return NULL;
}
Example #23
static int mts_usb_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	int i;
	int ep_out = -1;
	int ep_in_set[3]; /* this will break if we have more than three endpoints
			   which is why we check */
	int *ep_in_current = ep_in_set;
	int err_retval = -ENOMEM;

	struct mts_desc * new_desc;
	struct vendor_product const* p;
	struct usb_device *dev = interface_to_usbdev (intf);

	/* the current altsetting on the interface we're probing */
	struct usb_host_interface *altsetting;

	MTS_DEBUG_GOT_HERE();
	MTS_DEBUG( "usb-device descriptor at %x\n", (int)dev );

	MTS_DEBUG( "product id = 0x%x, vendor id = 0x%x\n",
		   le16_to_cpu(dev->descriptor.idProduct),
		   le16_to_cpu(dev->descriptor.idVendor) );

	MTS_DEBUG_GOT_HERE();

	p = &mts_supported_products[id - mts_usb_ids];

	MTS_DEBUG_GOT_HERE();

	MTS_DEBUG( "found model %s\n", p->name );
	if ( p->support_status != mts_sup_full )
		MTS_MESSAGE( "model %s is not known to be fully supported, reports welcome!\n",
			     p->name );

	/* the current altsetting on the interface we're probing */
	altsetting = intf->cur_altsetting;


	/* Check if the config is sane */

	if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) {
		MTS_WARNING( "expecting %d got %d endpoints! Bailing out.\n",
			     (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints );
		return -ENODEV;
	}

	for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) {
		if ((altsetting->endpoint[i].desc.bmAttributes &
		     USB_ENDPOINT_XFERTYPE_MASK) != USB_ENDPOINT_XFER_BULK) {

			MTS_WARNING( "can only deal with bulk endpoints; endpoint %d is not bulk.\n",
			     (int)altsetting->endpoint[i].desc.bEndpointAddress );
		} else {
			if (altsetting->endpoint[i].desc.bEndpointAddress &
			    USB_DIR_IN)
				*ep_in_current++
					= altsetting->endpoint[i].desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK;
			else {
				if ( ep_out != -1 ) {
					MTS_WARNING( "can only deal with one output endpoints. Bailing out." );
					return -ENODEV;
				}

				ep_out = altsetting->endpoint[i].desc.bEndpointAddress &
					USB_ENDPOINT_NUMBER_MASK;
			}
		}

	}


	if ( ep_out == -1 ) {
		MTS_WARNING( "couldn't find an output bulk endpoint. Bailing out.\n" );
		return -ENODEV;
	}


	new_desc = kzalloc(sizeof(struct mts_desc), GFP_KERNEL);
	if (!new_desc)
		goto out;

	new_desc->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!new_desc->urb)
		goto out_kfree;

	new_desc->context.scsi_status = kmalloc(1, GFP_KERNEL);
	if (!new_desc->context.scsi_status)
		goto out_kfree2;

	new_desc->usb_dev = dev;
	new_desc->usb_intf = intf;
	init_MUTEX(&new_desc->lock);

	/* endpoints */
	new_desc->ep_out = ep_out;
	new_desc->ep_response = ep_in_set[0];
	new_desc->ep_image = ep_in_set[1];

	if ( new_desc->ep_out != MTS_EP_OUT )
		MTS_WARNING( "will this work? Command EP is not usually %d\n",
			     (int)new_desc->ep_out );

	if ( new_desc->ep_response != MTS_EP_RESPONSE )
		MTS_WARNING( "will this work? Response EP is not usually %d\n",
			     (int)new_desc->ep_response );

	if ( new_desc->ep_image != MTS_EP_IMAGE )
		MTS_WARNING( "will this work? Image data EP is not usually %d\n",
			     (int)new_desc->ep_image );

	new_desc->host = scsi_host_alloc(&mts_scsi_host_template,
			sizeof(new_desc));
	if (!new_desc->host)
		goto out_kfree2;

	new_desc->host->hostdata[0] = (unsigned long)new_desc;
	if (scsi_add_host(new_desc->host, NULL)) {
		err_retval = -EIO;
		goto out_host_put;
	}
	scsi_scan_host(new_desc->host);

	usb_set_intfdata(intf, new_desc);
	return 0;

 out_host_put:
	scsi_host_put(new_desc->host);
 out_kfree2:
	kfree(new_desc->context.scsi_status);
 out_free_urb:
	usb_free_urb(new_desc->urb);
 out_kfree:
	kfree(new_desc);
 out:
	return err_retval;
}
Example #24
static int esas2r_probe(struct pci_dev *pcid,
			const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct esas2r_adapter *a;
	int err;

	size_t host_alloc_size = sizeof(struct esas2r_adapter)
				 + ((num_requests) +
				    1) * sizeof(struct esas2r_request);

	esas2r_log_dev(ESAS2R_LOG_DEBG, &(pcid->dev),
		       "esas2r_probe() 0x%02x 0x%02x 0x%02x 0x%02x",
		       pcid->vendor,
		       pcid->device,
		       pcid->subsystem_vendor,
		       pcid->subsystem_device);

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "before pci_enable_device() "
		       "enable_cnt: %d",
		       pcid->enable_cnt.counter);

	err = pci_enable_device(pcid);
	if (err != 0) {
		esas2r_log_dev(ESAS2R_LOG_CRIT, &(pcid->dev),
			       "pci_enable_device() FAIL (%d)",
			       err);
		return -ENODEV;
	}

	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "pci_enable_device() OK");
	esas2r_log_dev(ESAS2R_LOG_INFO, &(pcid->dev),
		       "after pci_enable_device() enable_cnt: %d",
		       pcid->enable_cnt.counter);

	host = scsi_host_alloc(&driver_template, host_alloc_size);
	if (host == NULL) {
		esas2r_log(ESAS2R_LOG_CRIT, "scsi_host_alloc() FAIL");
		return -ENODEV;
	}

	memset(host->hostdata, 0, host_alloc_size);

	a = (struct esas2r_adapter *)host->hostdata;

	esas2r_log(ESAS2R_LOG_INFO, "scsi_host_alloc() OK host: %p", host);

	/* override max LUN and max target id */

	host->max_id = ESAS2R_MAX_ID + 1;
	host->max_lun = 255;

	/* we can handle 16-byte CDbs */

	host->max_cmd_len = 16;

	host->can_queue = can_queue;
	host->cmd_per_lun = cmd_per_lun;
	host->this_id = host->max_id + 1;
	host->max_channel = 0;
	host->unique_id = found_adapters;
	host->sg_tablesize = sg_tablesize;
	host->max_sectors = esas2r_max_sectors;

	/* set to bus master for BIOses that don't do it for us */

	esas2r_log(ESAS2R_LOG_INFO, "pci_set_master() called");

	pci_set_master(pcid);

	if (!esas2r_init_adapter(host, pcid, found_adapters)) {
		esas2r_log(ESAS2R_LOG_CRIT,
			   "unable to initialize device at PCI bus %x:%x",
			   pcid->bus->number,
			   pcid->devfn);

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "scsi_host_put() called");

		scsi_host_put(host);

		return -ENODEV;

	}

	esas2r_log(ESAS2R_LOG_INFO, "pci_set_drvdata(%p, %p) called", pcid,
		   host->hostdata);

	pci_set_drvdata(pcid, host);

	esas2r_log(ESAS2R_LOG_INFO, "scsi_add_host() called");

	err = scsi_add_host(host, &pcid->dev);

	if (err) {
		esas2r_log(ESAS2R_LOG_CRIT, "scsi_add_host returned %d", err);
		esas2r_log_dev(ESAS2R_LOG_CRIT, &(host->shost_gendev),
			       "scsi_add_host() FAIL");

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "scsi_host_put() called");

		scsi_host_put(host);

		esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
			       "pci_set_drvdata(%p, NULL) called",
			       pcid);

		pci_set_drvdata(pcid, NULL);

		return -ENODEV;
	}


	esas2r_fw_event_on(a);

	esas2r_log_dev(ESAS2R_LOG_INFO, &(host->shost_gendev),
		       "scsi_scan_host() called");

	scsi_scan_host(host);

	/* Add sysfs binary files */
	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fw))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: fw");
	else
		a->sysfs_fw_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_fs))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: fs");
	else
		a->sysfs_fs_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_vda))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: vda");
	else
		a->sysfs_vda_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_hw))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: hw");
	else
		a->sysfs_hw_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj, &bin_attr_live_nvram))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: live_nvram");
	else
		a->sysfs_live_nvram_created = 1;

	if (sysfs_create_bin_file(&host->shost_dev.kobj,
				  &bin_attr_default_nvram))
		esas2r_log_dev(ESAS2R_LOG_WARN, &(host->shost_gendev),
			       "Failed to create sysfs binary file: default_nvram");
	else
		a->sysfs_default_nvram_created = 1;

	found_adapters++;

	return 0;
}
Example #25
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
				"Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			  "Unable to alloc shared tag map\n");
		goto err_out_dev_close;
	}

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
							(unsigned long)fnic);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
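
The fnic probe above releases everything it acquired through a goto ladder: each failure branches to the label that undoes only the steps completed so far, and the labels run in reverse order of setup. A minimal, self-contained sketch of that unwind pattern in plain C (acquire_regions/map_bar are hypothetical stand-ins, not driver functions):

#include <stdio.h>

/* Hypothetical stand-ins for two setup steps and their teardown. */
static int  acquire_regions(void) { puts("regions acquired"); return 0; }
static void release_regions(void) { puts("regions released"); }
static int  map_bar(void)         { puts("mapping BAR");      return -1; /* simulate failure */ }

static int probe_like(void)
{
	int err;

	err = acquire_regions();
	if (err)
		goto err_out;

	err = map_bar();
	if (err)
		goto err_release_regions;	/* undo only what already succeeded */

	return 0;

	/* labels run in reverse order of acquisition, as in the probe above */
err_release_regions:
	release_regions();
err_out:
	return err;
}

int main(void)
{
	return probe_like() ? 1 : 0;
}
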
Exemplo n.º 26
0
static void zfcp_scsi_forget_cmnds(struct zfcp_scsi_dev *zsdev, u8 tm_flags)
{
	struct zfcp_adapter *adapter = zsdev->port->adapter;
	struct zfcp_scsi_req_filter filter = {
		.tmf_scope = FCP_TMF_TGT_RESET,
		.port_handle = zsdev->port->handle,
	};
	unsigned long flags;

	if (tm_flags == FCP_TMF_LUN_RESET) {
		filter.tmf_scope = FCP_TMF_LUN_RESET;
		filter.lun_handle = zsdev->lun_handle;
	}

	/*
	 * abort_lock protects (struct zfcp_fsf_req *)->data against concurrent
	 * access from the abort handler and the normal command handler
	 */
	write_lock_irqsave(&adapter->abort_lock, flags);
	zfcp_reqlist_apply_for_all(adapter->req_list, zfcp_scsi_forget_cmnd,
				   &filter);
	write_unlock_irqrestore(&adapter->abort_lock, flags);
}

static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	struct zfcp_fsf_req *fsf_req = NULL;
	int retval = SUCCESS, ret;
	int retry = 3;

	while (retry--) {
		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
		if (fsf_req)
			break;

		zfcp_erp_wait(adapter);
		ret = fc_block_scsi_eh(scpnt);
		if (ret)
			return ret;

		if (!(atomic_read(&adapter->status) &
		      ZFCP_STATUS_COMMON_RUNNING)) {
			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
			return SUCCESS;
		}
	}
	if (!fsf_req)
		return FAILED;

	wait_for_completion(&fsf_req->completion);

	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
		retval = FAILED;
	} else {
		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
		zfcp_scsi_forget_cmnds(zfcp_sdev, tm_flags);
	}

	zfcp_fsf_req_free(fsf_req);
	return retval;
}

static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_LUN_RESET);
}

static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
{
	return zfcp_task_mgmt_function(scpnt, FCP_TMF_TGT_RESET);
}

static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
{
	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
	int ret;

	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1");
	zfcp_erp_wait(adapter);
	ret = fc_block_scsi_eh(scpnt);
	if (ret)
		return ret;

	return SUCCESS;
}

struct scsi_transport_template *zfcp_scsi_transport_template;

static struct scsi_host_template zfcp_scsi_host_template = {
	.module			 = THIS_MODULE,
	.name			 = "zfcp",
	.queuecommand		 = zfcp_scsi_queuecommand,
	.eh_abort_handler	 = zfcp_scsi_eh_abort_handler,
	.eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
	.eh_target_reset_handler = zfcp_scsi_eh_target_reset_handler,
	.eh_host_reset_handler	 = zfcp_scsi_eh_host_reset_handler,
	.slave_alloc		 = zfcp_scsi_slave_alloc,
	.slave_configure	 = zfcp_scsi_slave_configure,
	.slave_destroy		 = zfcp_scsi_slave_destroy,
	.change_queue_depth	 = scsi_change_queue_depth,
	.proc_name		 = "zfcp",
	.can_queue		 = 4096,
	.this_id		 = -1,
	.sg_tablesize		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2),
				   /* GCD, adjusted later */
	.max_sectors		 = (((QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
				     * ZFCP_QDIO_MAX_SBALS_PER_REQ) - 2) * 8,
				   /* GCD, adjusted later */
	.dma_boundary		 = ZFCP_QDIO_SBALE_LEN - 1,
	.use_clustering		 = 1,
	.shost_attrs		 = zfcp_sysfs_shost_attrs,
	.sdev_attrs		 = zfcp_sysfs_sdev_attrs,
	.track_queue_depth	 = 1,
};

/**
 * zfcp_scsi_adapter_register - Register SCSI and FC host with SCSI midlayer
 * @adapter: The zfcp adapter to register with the SCSI midlayer
 */
int zfcp_scsi_adapter_register(struct zfcp_adapter *adapter)
{
	struct ccw_dev_id dev_id;

	if (adapter->scsi_host)
		return 0;

	ccw_device_get_id(adapter->ccw_device, &dev_id);
	/* register adapter as SCSI host with mid layer of SCSI stack */
	adapter->scsi_host = scsi_host_alloc(&zfcp_scsi_host_template,
					     sizeof (struct zfcp_adapter *));
	if (!adapter->scsi_host) {
		dev_err(&adapter->ccw_device->dev,
			"Registering the FCP device with the "
			"SCSI stack failed\n");
		return -EIO;
	}

	/* tell the SCSI stack some characteristics of this adapter */
	adapter->scsi_host->max_id = 511;
	adapter->scsi_host->max_lun = 0xFFFFFFFF;
	adapter->scsi_host->max_channel = 0;
	adapter->scsi_host->unique_id = dev_id.devno;
	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
	adapter->scsi_host->transportt = zfcp_scsi_transport_template;

	adapter->scsi_host->hostdata[0] = (unsigned long) adapter;

	if (scsi_add_host(adapter->scsi_host, &adapter->ccw_device->dev)) {
		scsi_host_put(adapter->scsi_host);
		return -EIO;
	}

	return 0;
}

/**
 * zfcp_scsi_adapter_unregister - Unregister SCSI and FC host from SCSI midlayer
 * @adapter: The zfcp adapter to unregister.
 */
void zfcp_scsi_adapter_unregister(struct zfcp_adapter *adapter)
{
	struct Scsi_Host *shost;
	struct zfcp_port *port;

	shost = adapter->scsi_host;
	if (!shost)
		return;

	read_lock_irq(&adapter->port_list_lock);
	list_for_each_entry(port, &adapter->port_list, list)
		port->rport = NULL;
	read_unlock_irq(&adapter->port_list_lock);

	fc_remove_host(shost);
	scsi_remove_host(shost);
	scsi_host_put(shost);
	adapter->scsi_host = NULL;
}

static struct fc_host_statistics*
zfcp_init_fc_host_stats(struct zfcp_adapter *adapter)
{
	struct fc_host_statistics *fc_stats;

	if (!adapter->fc_stats) {
		fc_stats = kmalloc(sizeof(*fc_stats), GFP_KERNEL);
		if (!fc_stats)
			return NULL;
		adapter->fc_stats = fc_stats; /* freed in adapter_release */
	}
	memset(adapter->fc_stats, 0, sizeof(*adapter->fc_stats));
	return adapter->fc_stats;
}

static void zfcp_adjust_fc_host_stats(struct fc_host_statistics *fc_stats,
				      struct fsf_qtcb_bottom_port *data,
				      struct fsf_qtcb_bottom_port *old)
{
	fc_stats->seconds_since_last_reset =
		data->seconds_since_last_reset - old->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames - old->tx_frames;
	fc_stats->tx_words = data->tx_words - old->tx_words;
	fc_stats->rx_frames = data->rx_frames - old->rx_frames;
	fc_stats->rx_words = data->rx_words - old->rx_words;
	fc_stats->lip_count = data->lip - old->lip;
	fc_stats->nos_count = data->nos - old->nos;
	fc_stats->error_frames = data->error_frames - old->error_frames;
	fc_stats->dumped_frames = data->dumped_frames - old->dumped_frames;
	fc_stats->link_failure_count = data->link_failure - old->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync - old->loss_of_sync;
	fc_stats->loss_of_signal_count =
		data->loss_of_signal - old->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count =
		data->psp_error_counts - old->psp_error_counts;
	fc_stats->invalid_tx_word_count =
		data->invalid_tx_words - old->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs - old->invalid_crcs;
	fc_stats->fcp_input_requests =
		data->input_requests - old->input_requests;
	fc_stats->fcp_output_requests =
		data->output_requests - old->output_requests;
	fc_stats->fcp_control_requests =
		data->control_requests - old->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb - old->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb - old->output_mb;
}

static void zfcp_set_fc_host_stats(struct fc_host_statistics *fc_stats,
				   struct fsf_qtcb_bottom_port *data)
{
	fc_stats->seconds_since_last_reset = data->seconds_since_last_reset;
	fc_stats->tx_frames = data->tx_frames;
	fc_stats->tx_words = data->tx_words;
	fc_stats->rx_frames = data->rx_frames;
	fc_stats->rx_words = data->rx_words;
	fc_stats->lip_count = data->lip;
	fc_stats->nos_count = data->nos;
	fc_stats->error_frames = data->error_frames;
	fc_stats->dumped_frames = data->dumped_frames;
	fc_stats->link_failure_count = data->link_failure;
	fc_stats->loss_of_sync_count = data->loss_of_sync;
	fc_stats->loss_of_signal_count = data->loss_of_signal;
	fc_stats->prim_seq_protocol_err_count = data->psp_error_counts;
	fc_stats->invalid_tx_word_count = data->invalid_tx_words;
	fc_stats->invalid_crc_count = data->invalid_crcs;
	fc_stats->fcp_input_requests = data->input_requests;
	fc_stats->fcp_output_requests = data->output_requests;
	fc_stats->fcp_control_requests = data->control_requests;
	fc_stats->fcp_input_megabytes = data->input_mb;
	fc_stats->fcp_output_megabytes = data->output_mb;
}
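
zfcp_set_fc_host_stats() reports the adapter counters as absolute values, while zfcp_adjust_fc_host_stats() reports them relative to a snapshot taken when the user last reset the statistics. A tiny standalone sketch of that baseline-and-delta idea (the hw_counters/stats_view types and field names are made up for illustration, not the zfcp structures):

#include <stdio.h>

/* Hypothetical hardware counters and the user-visible statistics view. */
struct hw_counters { unsigned long tx_frames, rx_frames; };
struct stats_view  { unsigned long tx_frames, rx_frames; };

static struct hw_counters baseline;	/* snapshot taken when stats are reset */

static void stats_reset(const struct hw_counters *now)
{
	baseline = *now;	/* remember the current absolute values */
}

static void stats_get(const struct hw_counters *now, struct stats_view *out)
{
	/* report growth since the last reset, like zfcp_adjust_fc_host_stats() */
	out->tx_frames = now->tx_frames - baseline.tx_frames;
	out->rx_frames = now->rx_frames - baseline.rx_frames;
}

int main(void)
{
	struct hw_counters hw = { 100, 200 };
	struct stats_view v;

	stats_reset(&hw);
	hw.tx_frames += 5;
	hw.rx_frames += 7;
	stats_get(&hw, &v);
	printf("tx=%lu rx=%lu\n", v.tx_frames, v.rx_frames);	/* prints tx=5 rx=7 */
	return 0;
}
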
Exemplo n.º 27
0
static struct Scsi_Host *__qlogicfas_detect(struct scsi_host_template *host,
								int qbase,
								int qlirq)
{
	int qltyp;		/* type of chip */
	int qinitid;
	struct Scsi_Host *hreg;	/* registered host structure */
	struct qlogicfas408_priv *priv;

	/*	Qlogic Cards only exist at 0x230 or 0x330 (the chip itself
	 *	decodes the address - I check 230 first since MIDI cards are
	 *	typically at 0x330)
	 *
	 *	Theoretically, two Qlogic cards can coexist in the same system.
	 *	This should work by simply using this as a loadable module for
	 *	the second card, but I haven't tested this.
	 */

	if (!qbase || qlirq == -1)
		goto err;

	if (!request_region(qbase, 0x10, qlogicfas_name)) {
		printk(KERN_INFO "%s: address %#x is busy\n", qlogicfas_name,
							      qbase);
		goto err;
	}

	if (!qlogicfas408_detect(qbase, INT_TYPE)) {
		printk(KERN_WARNING "%s: probe failed for %#x\n",
								qlogicfas_name,
								qbase);
		goto err_release_mem;
	}

	printk(KERN_INFO "%s: Using preset base address of %03x,"
			 " IRQ %d\n", qlogicfas_name, qbase, qlirq);

	qltyp = qlogicfas408_get_chip_type(qbase, INT_TYPE);
	qinitid = host->this_id;
	if (qinitid < 0)
		qinitid = 7;	/* if no ID, use 7 */

	qlogicfas408_setup(qbase, qinitid, INT_TYPE);

	hreg = scsi_host_alloc(host, sizeof(struct qlogicfas408_priv));
	if (!hreg)
		goto err_release_mem;
	priv = get_priv_by_host(hreg);
	hreg->io_port = qbase;
	hreg->n_io_port = 16;
	hreg->dma_channel = -1;
	if (qlirq != -1)
		hreg->irq = qlirq;
	priv->qbase = qbase;
	priv->qlirq = qlirq;
	priv->qinitid = qinitid;
	priv->shost = hreg;
	priv->int_type = INT_TYPE;

	sprintf(priv->qinfo,
		"Qlogicfas Driver version 0.46, chip %02X at %03X, IRQ %d, TPdma:%d",
		qltyp, qbase, qlirq, QL_TURBO_PDMA);
	host->name = qlogicfas_name;

	if (request_irq(qlirq, qlogicfas408_ihandl, 0, qlogicfas_name, hreg))
		goto free_scsi_host;

	if (scsi_add_host(hreg, NULL))
		goto free_interrupt;

	scsi_scan_host(hreg);

	return hreg;

free_interrupt:
	free_irq(qlirq, hreg);

free_scsi_host:
	scsi_host_put(hreg);

err_release_mem:
	release_region(qbase, 0x10);
err:
	return NULL;
}
Exemplo n.º 28
0
static void scsifront_free(struct vscsifrnt_info *info)
{
	struct Scsi_Host *host = info->host;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
	if (host->shost_state != SHOST_DEL) {
#else
	if (!test_bit(SHOST_DEL, &host->shost_state)) {
#endif
		scsi_remove_host(info->host);
	}

	if (info->ring_ref != GRANT_INVALID_REF) {
		gnttab_end_foreign_access(info->ring_ref,
					(unsigned long)info->ring.sring);
		info->ring_ref = GRANT_INVALID_REF;
		info->ring.sring = NULL;
	}

	if (info->irq)
		unbind_from_irqhandler(info->irq, info);
	info->irq = 0;

	scsi_host_put(info->host);
}


static int scsifront_alloc_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct vscsiif_sring *sring;
	int err = -ENOMEM;


	info->ring_ref = GRANT_INVALID_REF;

	/***** Frontend to Backend ring start *****/
	sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
	if (!sring) {
		xenbus_dev_fatal(dev, err, "fail to allocate shared ring (Front to Back)");
		return err;
	}
	SHARED_RING_INIT(sring);
	FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(sring));
	if (err < 0) {
		free_page((unsigned long) sring);
		info->ring.sring = NULL;
		xenbus_dev_fatal(dev, err, "fail to grant shared ring (Front to Back)");
		goto free_sring;
	}
	info->ring_ref = err;

	err = bind_listening_port_to_irqhandler(
			dev->otherend_id, scsifront_intr,
			SA_SAMPLE_RANDOM, "scsifront", info);

	if (err <= 0) {
		xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
		goto free_sring;
	}
	info->irq = err;

	return 0;

/* free resource */
free_sring:
	scsifront_free(info);

	return err;
}


static int scsifront_init_ring(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct xenbus_transaction xbt;
	int err;

	DPRINTK("%s\n",__FUNCTION__);

	err = scsifront_alloc_ring(info);
	if (err)
		return err;
	DPRINTK("%u %u\n", info->ring_ref, info->evtchn);

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
	}

	err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
				info->ring_ref);
	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
		goto fail;
	}

	err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
				irq_to_evtchn_port(info->irq));

	if (err) {
		xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
		goto fail;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto free_sring;
	}

	return 0;

fail:
	xenbus_transaction_end(xbt, 1);
free_sring:
	/* free resource */
	scsifront_free(info);
	
	return err;
}


static int scsifront_probe(struct xenbus_device *dev,
				const struct xenbus_device_id *id)
{
	struct vscsifrnt_info *info;
	struct Scsi_Host *host;
	int i, err = -ENOMEM;
	char name[DEFAULT_TASK_COMM_LEN];

	host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
	if (!host) {
		xenbus_dev_fatal(dev, err, "fail to allocate scsi host");
		return err;
	}
	info = (struct vscsifrnt_info *) host->hostdata;
	info->host = host;


	dev->dev.driver_data = info;
	info->dev  = dev;

	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
		info->shadow[i].next_free = i + 1;
		init_waitqueue_head(&(info->shadow[i].wq_reset));
		info->shadow[i].wait_reset = 0;
	}
	info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;

	err = scsifront_init_ring(info);
	if (err) {
		scsi_host_put(host);
		return err;
	}

	init_waitqueue_head(&info->wq);
	spin_lock_init(&info->io_lock);
	spin_lock_init(&info->shadow_lock);

	snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);

	info->kthread = kthread_run(scsifront_schedule, info, name);
	if (IS_ERR(info->kthread)) {
		err = PTR_ERR(info->kthread);
		info->kthread = NULL;
		printk(KERN_ERR "scsifront: kthread start err %d\n", err);
		goto free_sring;
	}

	host->max_id      = VSCSIIF_MAX_TARGET;
	host->max_channel = 0;
	host->max_lun     = VSCSIIF_MAX_LUN;
	host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;

	err = scsi_add_host(host, &dev->dev);
	if (err) {
		printk(KERN_ERR "scsifront: fail to add scsi host %d\n", err);
		goto free_sring;
	}

	xenbus_switch_state(dev, XenbusStateInitialised);

	return 0;

free_sring:
	/* free resource */
	scsifront_free(info);
	return err;
}

static int scsifront_remove(struct xenbus_device *dev)
{
	struct vscsifrnt_info *info = dev->dev.driver_data;

	DPRINTK("%s: %s removed\n",__FUNCTION__ ,dev->nodename);

	if (info->kthread) {
		kthread_stop(info->kthread);
		info->kthread = NULL;
	}

	scsifront_free(info);
	
	return 0;
}


static int scsifront_disconnect(struct vscsifrnt_info *info)
{
	struct xenbus_device *dev = info->dev;
	struct Scsi_Host *host = info->host;

	DPRINTK("%s: %s disconnect\n",__FUNCTION__ ,dev->nodename);

	/*
	 * When this function is executed, all devices of the frontend
	 * have already been deleted, so there is no need to block I/O
	 * before removing the host.
	 */

	scsi_remove_host(host);
	xenbus_frontend_closed(dev);

	return 0;
}

#define VSCSIFRONT_OP_ADD_LUN	1
#define VSCSIFRONT_OP_DEL_LUN	2

static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
{
	struct xenbus_device *dev = info->dev;
	int i, err = 0;
	char str[64], state_str[64];
	char **dir;
	unsigned int dir_n = 0;
	unsigned int device_state;
	unsigned int hst, chn, tgt, lun;
	struct scsi_device *sdev;

	dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < dir_n; i++) {
		/* read status */
		snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
			&device_state);
		if (XENBUS_EXIST_ERR(err))
			continue;
		
		/* virtual SCSI device */
		snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
		err = xenbus_scanf(XBT_NIL, dev->otherend, str,
			"%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
		if (XENBUS_EXIST_ERR(err))
			continue;

		/* front device state path */
		snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);

		switch (op) {
		case VSCSIFRONT_OP_ADD_LUN:
			if (device_state == XenbusStateInitialised) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					printk(KERN_ERR "scsifront: Device already in use.\n");
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				} else {
					scsi_add_device(info->host, chn, tgt, lun);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateConnected);
				}
			}
			break;
		case VSCSIFRONT_OP_DEL_LUN:
			if (device_state == XenbusStateClosing) {
				sdev = scsi_device_lookup(info->host, chn, tgt, lun);
				if (sdev) {
					scsi_remove_device(sdev);
					scsi_device_put(sdev);
					xenbus_printf(XBT_NIL, dev->nodename,
						state_str, "%d", XenbusStateClosed);
				}
			}
			break;
		default:
			break;
		}
	}
	
	kfree(dir);
	return;
}
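
Each vscsi-devs entry is read back from XenStore as a "host:channel:target:lun" string and split with a "%u:%u:%u:%u" format before the LUN is added or removed. A small userspace illustration of the same parse (the v_dev string here is a hypothetical example, not a real XenStore value):

#include <stdio.h>

int main(void)
{
	/* same "%u:%u:%u:%u" format the hotplug code hands to xenbus_scanf() */
	const char *v_dev = "0:0:2:1";
	unsigned int hst, chn, tgt, lun;

	if (sscanf(v_dev, "%u:%u:%u:%u", &hst, &chn, &tgt, &lun) != 4) {
		fprintf(stderr, "malformed v-dev entry\n");
		return 1;
	}
	printf("host %u channel %u target %u lun %u\n", hst, chn, tgt, lun);
	return 0;
}
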




static void scsifront_backend_changed(struct xenbus_device *dev,
				enum xenbus_state backend_state)
{
	struct vscsifrnt_info *info = dev->dev.driver_data;

	DPRINTK("%p %u %u\n", dev, dev->state, backend_state);

	switch (backend_state) {
	case XenbusStateUnknown:
	case XenbusStateInitialising:
	case XenbusStateInitWait:
	case XenbusStateClosed:
		break;

	case XenbusStateInitialised:
		break;

	case XenbusStateConnected:
		if (xenbus_read_driver_state(dev->nodename) ==
			XenbusStateInitialised) {
			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		}
		
		if (dev->state == XenbusStateConnected)
			break;
			
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		scsifront_disconnect(info);
		break;

	case XenbusStateReconfiguring:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfiguring);
		break;

	case XenbusStateReconfigured:
		scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;
	}
}


static struct xenbus_device_id scsifront_ids[] = {
	{ "vscsi" },
	{ "" }
};
MODULE_ALIAS("xen:vscsi");

static struct xenbus_driver scsifront_driver = {
	.name			= "vscsi",
	.owner			= THIS_MODULE,
	.ids			= scsifront_ids,
	.probe			= scsifront_probe,
	.remove			= scsifront_remove,
/* 	.resume			= scsifront_resume, */
	.otherend_changed	= scsifront_backend_changed,
};

int scsifront_xenbus_init(void)
{
	return xenbus_register_frontend(&scsifront_driver);
}

void scsifront_xenbus_unregister(void)
{
	xenbus_unregister_driver(&scsifront_driver);
}
Exemplo n.º 29
0
// probe routine, as taught in the book
static int our_probe(struct usb_interface *intf,
		     const struct usb_device_id *id)
{
	struct us_data *us;
	int result;
	// device
	struct device *dev;
	// SCSI host
	struct Scsi_Host *host;
	// used to check whether this id/intf combination is supported
	struct us_unusual_dev *unusual_dev;

	unusual_dev = (id - usb_storage_usb_ids) + us_unusual_dev_list;
	if (usb_usual_check_type(id, USB_US_TYPE_STOR) ||
	    usb_usual_ignore_device(intf))
		return -ENXIO;

	printk(KERN_ALERT "probe: usb storage device detected!\n");

	// allocate a SCSI host
	host = scsi_host_alloc(&usb_stor_host_template, sizeof(*us));
	if (!host) {
		dev_warn(&intf->dev, "fail to allocate the scsi host\n");
		return -ENOMEM;
	}

	// basic initialization of the host
	host->max_cmd_len = 16;
	host->sg_tablesize = usb_stor_sg_tablesize(intf);
	us = host_to_us(host);	// us is kept in the host's private data
	// zero the us_data area
	memset(us, 0, sizeof(struct us_data));
	mutex_init(&(us->dev_mutex));
	init_completion(&us->cmnd_ready);
	init_completion(&(us->notify));
	init_waitqueue_head(&us->delay_wait);
	INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork);

	result = associate_dev(us, intf);
	if (result)
		goto Bad;
	result = get_device_info(us, id, unusual_dev);
	if (result)
		goto Bad;

	// pick the transport and protocol handlers
	get_transport(us);
	get_protocol(us);

	if (!us->transport || !us->proto_handler) {
		result = -ENXIO;
		goto Bad;
	}
	printk(KERN_ALERT "Transport: %s\n", us->transport_name);
	printk(KERN_ALERT "Protocol: %s\n", us->protocol_name);
	dev = &us->pusb_intf->dev;

	// restrict max_lun for single-LUN devices
	if (us->fflags & US_FL_SINGLE_LUN)
		us->max_lun = 0;

	// find the endpoints and set up the pipes
	result = get_pipes(us);
	if (result)
		goto Bad;

	// redo the initial READ(10) if this device needs it
	if (us->fflags & US_FL_INITIAL_READ10)
		set_bit(US_FLIDX_REDO_READ10, &us->dflags);

	// acquire the remaining resources and add the host
	result = usb_stor_acquire_resources(us);
	if (result)
		goto Bad;
	snprintf(us->scsi_name, sizeof(us->scsi_name), "our-usb-storage%s",
		 dev_name(&us->pusb_intf->dev));
	result = scsi_add_host(us_to_host(us), dev);
	if (result) {
		printk(KERN_ALERT "Unable to add the host\n");
		goto Bad;
	}

	// delayed SCSI device scan
	usb_autopm_get_interface_no_resume(us->pusb_intf);
	set_bit(US_FLIDX_SCAN_PENDING, &us->dflags);
	if (delay_use > 0)
		dev_dbg(dev, "waiting for device before scanning\n");
	queue_delayed_work(system_freezable_wq, &us->scan_dwork,
			   delay_use * HZ);

	return 0;

Bad:
	printk(KERN_ALERT "probe failed!\n");
	release_everything(us);
	return result;
}
Exemplo n.º 30
0
static int __devinit pvscsi_probe(struct pci_dev *pdev,
				  const struct pci_device_id *id)
{
	struct pvscsi_adapter *adapter;
	struct Scsi_Host *host;
	unsigned int i;
	unsigned long flags = 0;
	int error;

	error = -ENODEV;

	if (pci_enable_device(pdev))
		return error;

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
		   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
	} else {
		printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
		goto out_disable_device;
	}

	pvscsi_template.can_queue =
		min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
		PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	pvscsi_template.cmd_per_lun =
		min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
	host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
	if (!host) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
		goto out_disable_device;
	}

	adapter = shost_priv(host);
	memset(adapter, 0, sizeof(*adapter));
	adapter->dev  = pdev;
	adapter->host = host;

	spin_lock_init(&adapter->hw_lock);

	host->max_channel = 0;
	host->max_id      = 16;
	host->max_lun     = 1;
	host->max_cmd_len = 16;

	adapter->rev = pdev->revision;

	if (pci_request_regions(pdev, "vmw_pvscsi")) {
		printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
		goto out_free_host;
	}

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
			continue;

		if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
			continue;

		break;
	}

	if (i == DEVICE_COUNT_RESOURCE) {
		printk(KERN_ERR
		       "vmw_pvscsi: adapter has no suitable MMIO region\n");
		goto out_release_resources;
	}

	adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

	if (!adapter->mmioBase) {
		printk(KERN_ERR
		       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
		       i, PVSCSI_MEM_SPACE_SIZE);
		goto out_release_resources;
	}

	pci_set_master(pdev);
	pci_set_drvdata(pdev, host);

	ll_adapter_reset(adapter);

	adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

	error = pvscsi_allocate_rings(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
		goto out_release_resources;
	}

	/*
	 * From this point on we should reset the adapter if anything goes
	 * wrong.
	 */
	pvscsi_setup_all_rings(adapter);

	adapter->cmd_map = kcalloc(adapter->req_depth,
				   sizeof(struct pvscsi_ctx), GFP_KERNEL);
	if (!adapter->cmd_map) {
		printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
		error = -ENOMEM;
		goto out_reset_adapter;
	}

	INIT_LIST_HEAD(&adapter->cmd_pool);
	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = adapter->cmd_map + i;
		list_add(&ctx->list, &adapter->cmd_pool);
	}

	error = pvscsi_allocate_sg(adapter);
	if (error) {
		printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
		goto out_reset_adapter;
	}

	if (!pvscsi_disable_msix &&
	    pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
		adapter->use_msix = 1;
	} else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
		printk(KERN_INFO "vmw_pvscsi: using MSI\n");
		adapter->use_msi = 1;
		adapter->irq = pdev->irq;
	} else {
		printk(KERN_INFO "vmw_pvscsi: using INTx\n");
		adapter->irq = pdev->irq;
		flags = IRQF_SHARED;
	}

	error = request_irq(adapter->irq, pvscsi_isr, flags,
			    "vmw_pvscsi", adapter);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: unable to request IRQ: %d\n", error);
		adapter->irq = 0;
		goto out_reset_adapter;
	}

	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		printk(KERN_ERR
		       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
		goto out_reset_adapter;
	}

	dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
		 adapter->rev, host->host_no);

	pvscsi_unmask_intr(adapter);

	scsi_scan_host(host);

	return 0;

out_reset_adapter:
	ll_adapter_reset(adapter);
out_release_resources:
	pvscsi_release_resources(adapter);
out_free_host:
	scsi_host_put(host);
out_disable_device:
	pci_set_drvdata(pdev, NULL);
	pci_disable_device(pdev);

	return error;
}