};

static struct resource orion_ge00_resources[] = {
	{
		.name	= "ge00 irq",
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device orion_ge00 = {
	.name		= MV643XX_ETH_NAME,
	.id		= 0,
	.num_resources	= 1,
	.resource	= orion_ge00_resources,
	.dev		= {
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
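
/*
 * A minimal, self-contained sketch (not taken from any driver on this
 * page) of the pattern most of these examples repeat: a static u64
 * backing dev.dma_mask plus a matching coherent_dma_mask, both 32-bit.
 * All names are illustrative only.
 */
static u64 example_dmamask = DMA_BIT_MASK(32);

static struct platform_device example_device = {
	.name	= "example-dev",
	.id	= -1,
	.dev	= {
		.dma_mask		= &example_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};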

void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
			    unsigned long mapbase,
			    unsigned long irq,
			    unsigned long irq_err,
			    int tclk)
{
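	/* Fill the shared device's resources with the MMIO window at
	 * mapbase + 0x2000 (16 KiB) and the error IRQ; ge_complete() then
	 * attaches the platform data and registers both devices. */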
	fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
		       mapbase + 0x2000, SZ_16K - 1, irq_err);
	ge_complete(&orion_ge00_shared_data, tclk,
		    orion_ge00_resources, irq, &orion_ge00_shared,
		    eth_data, &orion_ge00);
}

/* From drivers/usb/host/ehci-orion.c: program the EHCI controller's mbus
 * address-decode windows to match the DRAM layout. */
static void ehci_orion_conf_mbus_windows(struct usb_hcd *hcd,
					 const struct mbus_dram_target_info *dram)
{
	int i;

	/* Disable all four decode windows first. */
	for (i = 0; i < 4; i++) {
		wrl(USB_WINDOW_CTRL(i), 0);
		wrl(USB_WINDOW_BASE(i), 0);
	}

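	/* Program one decode window per populated DRAM chip-select: size,
	 * attribute and target ID packed into the control register, with
	 * bit 0 as the enable. */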
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		wrl(USB_WINDOW_CTRL(i), ((cs->size - 1) & 0xffff0000) |
					(cs->mbus_attr << 8) |
					(dram->mbus_dram_target_id << 4) | 1);
		wrl(USB_WINDOW_BASE(i), cs->base);
	}
}

static u64 ehci_orion_dma_mask = DMA_BIT_MASK(32);

static int ehci_orion_drv_probe(struct platform_device *pdev)
{
	struct orion_ehci_data *pd = pdev->dev.platform_data;
	const struct mbus_dram_target_info *dram;
	struct resource *res;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct clk *clk;
	void __iomem *regs;
	int irq, err;
	enum orion_ehci_phy_ver phy_version;

	if (usb_disabled())
		return -ENODEV;
Example #3
	omap_init_sham();
	omap_init_aes();
	omap_init_vout();
	am33xx_register_edma();
	am33xx_init_pcm();

	return 0;
}
arch_initcall(omap2_init_devices);

#define AM33XX_CPSW_BASE		(0x4A100000)
#define AM33XX_CPSW_MDIO_BASE		(0x4A101000)
#define AM33XX_CPSW_SS_BASE		(0x4A101200)
#define AM33XX_EMAC_MDIO_FREQ		(1000000)

static u64 am33xx_cpsw_dmamask = DMA_BIT_MASK(32);
/* TODO : Verify the offsets */
static struct cpsw_slave_data am33xx_cpsw_slaves[] = {
	{
		.slave_reg_ofs  = 0x208,
		.sliver_reg_ofs = 0xd80,
		.phy_id		= "0:00",
	},
	{
		.slave_reg_ofs  = 0x308,
		.sliver_reg_ofs = 0xdc0,
		.phy_id		= "0:01",
	},
};

static struct cpsw_platform_data am33xx_cpsw_pdata = {
		.start = DMACH_AC97_MICIN,
		.end   = DMACH_AC97_MICIN,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.start = IRQ_AC97,
		.end   = IRQ_AC97,
		.flags = IORESOURCE_IRQ,
	},
};

static struct s3c_audio_pdata s3c_ac97_pdata = {
	.cfg_gpio = s5pv310_ac97_cfg_gpio,
};

static u64 s5pv310_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device s5pv310_device_ac97 = {
	.name             = "s3c-ac97",
	.id               = -1,
	.num_resources    = ARRAY_SIZE(s5pv310_ac97_resource),
	.resource         = s5pv310_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &s5pv310_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

static struct resource s5pv310_rp_resource[] = {
};
Example #5
    return !session_restart;
}

static struct musb_platform_ops dsps_ops = {
    .init		= dsps_musb_init,
    .exit		= dsps_musb_exit,

    .enable		= dsps_musb_enable,
    .disable	= dsps_musb_disable,

    .try_idle	= dsps_musb_try_idle,
    .set_mode	= dsps_musb_set_mode,
    .reset		= dsps_musb_reset,
};

static u64 musb_dmamask = DMA_BIT_MASK(32);

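/* Read an optional u32 OF property, treating an absent property as 0,
 * e.g. (hypothetical) int idx = get_int_prop(dn, "index"); */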
static int get_int_prop(struct device_node *dn, const char *s)
{
    int ret;
    u32 val;

    ret = of_property_read_u32(dn, s, &val);
    if (ret)
        return 0;
    return val;
}

static int get_musb_port_mode(struct device *dev)
{
    enum usb_dr_mode mode;
	for (stream = 0; stream < 2; stream++) {
		substream = pcm->streams[stream].substream;
		if (!substream)
			continue;

		buf = &substream->dma_buffer;
		if (!buf->area)
			continue;

		dma_free_writecombine(pcm->card->dev, buf->bytes,
				      buf->area, buf->addr);
		buf->area = NULL;
	}
}

static u64 sun4i_pcm_mask = DMA_BIT_MASK(32);

static int sun4i_pcm_new(struct snd_card *card,
			   struct snd_soc_dai *dai, struct snd_pcm *pcm)
{
	int ret = 0;
	
	if (!card->dev->dma_mask)
		card->dev->dma_mask = &sun4i_pcm_mask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	if (dai->driver->playback.channels_min) {
		ret = sun4i_pcm_preallocate_dma_buffer(pcm,
			SNDRV_PCM_STREAM_PLAYBACK);
		if (ret)
Example #7
static int octeon_mgmt_probe(struct platform_device *pdev)
{
    struct net_device *netdev;
    struct octeon_mgmt *p;
    const __be32 *data;
    const u8 *mac;
    struct resource *res_mix;
    struct resource *res_agl;
    struct resource *res_agl_prt_ctl;
    int len;
    int result;

    netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
    if (netdev == NULL)
        return -ENOMEM;

    SET_NETDEV_DEV(netdev, &pdev->dev);

    platform_set_drvdata(pdev, netdev);
    p = netdev_priv(netdev);
    netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
                   OCTEON_MGMT_NAPI_WEIGHT);

    p->netdev = netdev;
    p->dev = &pdev->dev;
    p->has_rx_tstamp = false;

    data = of_get_property(pdev->dev.of_node, "cell-index", &len);
    if (data && len == sizeof(*data)) {
        p->port = be32_to_cpup(data);
    } else {
        dev_err(&pdev->dev, "no 'cell-index' property\n");
        result = -ENXIO;
        goto err;
    }

    snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

    result = platform_get_irq(pdev, 0);
    if (result < 0)
        goto err;

    p->irq = result;

    res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (res_mix == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    if (res_agl == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
    if (res_agl_prt_ctl == NULL) {
        dev_err(&pdev->dev, "no 'reg' resource\n");
        result = -ENXIO;
        goto err;
    }

    p->mix_phys = res_mix->start;
    p->mix_size = resource_size(res_mix);
    p->agl_phys = res_agl->start;
    p->agl_size = resource_size(res_agl);
    p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
    p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);


    if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
                                 res_mix->name)) {
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_mix->name);
        result = -ENXIO;
        goto err;
    }

    if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
                                 res_agl->name)) {
        result = -ENXIO;
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_agl->name);
        goto err;
    }

    if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
                                 p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
        result = -ENXIO;
        dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
                res_agl_prt_ctl->name);
        goto err;
    }

    p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
    p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
    p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
                                       p->agl_prt_ctl_size);
    spin_lock_init(&p->lock);

    skb_queue_head_init(&p->tx_list);
    skb_queue_head_init(&p->rx_list);
    tasklet_init(&p->tx_clean_tasklet,
                 octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

    netdev->priv_flags |= IFF_UNICAST_FLT;

    netdev->netdev_ops = &octeon_mgmt_ops;
    netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

    mac = of_get_mac_address(pdev->dev.of_node);

    if (mac)
        memcpy(netdev->dev_addr, mac, ETH_ALEN);
    else
        eth_hw_addr_random(netdev);

    p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

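    /* dma_coerce_mask_and_coherent() forces dev->dma_mask to point at
     * coherent_dma_mask, then sets both masks to 64 bits. */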
    result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
    if (result)
        goto err;

    netif_carrier_off(netdev);
    result = register_netdev(netdev);
    if (result)
        goto err;

    dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
    return 0;

err:
    free_netdev(netdev);
    return result;
}
static struct ahci_platform_data exynos4_ahci_pdata = {
	.init = exynos4_ahci_init,
};

static struct resource exynos4_ahci_resource[] = {
	[0] = {
		.start	= EXYNOS4_PA_SATA,
		.end	= EXYNOS4_PA_SATA + SZ_64K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= EXYNOS4_IRQ_SATA,
		.end	= EXYNOS4_IRQ_SATA,
		.flags	= IORESOURCE_IRQ,
	},
};

static u64 exynos4_ahci_dmamask = DMA_BIT_MASK(32);

struct platform_device exynos4_device_ahci = {
	.name		= "ahci",
	.id		= -1,
	.resource	= exynos4_ahci_resource,
	.num_resources	= ARRAY_SIZE(exynos4_ahci_resource),
	.dev		= {
		.platform_data		= &exynos4_ahci_pdata,
		.dma_mask		= &exynos4_ahci_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
#include <asm/time.h>
#include <asm/netlogic/hal/nlm_hal.h>
#include <asm/netlogic/xlp_irq.h>
#include <asm/netlogic/xlp.h>

#define XLP_SOC_PCI_DRIVER 	"XLP SoC Driver"
#define DEV_IRT_INFO			0x3D

#define XLP_MAX_DEVICE			8
#define XLP_MAX_FUNC			8
#define MAX_NUM_UARTS			4
#define XLP_UART_PORTIO_OFFSET	0x1000

static struct plat_serial8250_port xlp_uart_port[MAX_NUM_UARTS];

static u64 xlp_dev_dmamask = DMA_BIT_MASK(32);

struct dev2drv {
	uint32_t 	devid;
	uint8_t 	drvname[16];
	uint8_t 	len;
	uint8_t 	id;
};

#ifdef CONFIG_SERIAL_8250
unsigned int xlp_uart_in(struct uart_port *p, int offset) {

	nlm_reg_t *mmio;
	unsigned int value;

	/* XLP uart does not need any mapping of regs ... */
Example #10
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		goto err_out_free_hba;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fail to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			/* shost_printk: "No usable DMA configuration ..." */
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			/* shost_printk: "Unable to obtain 32-bit DMA ..." */
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			/* shost_printk: "Unable to obtain 40-bit DMA ..." */
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		/* shost_printk: "Cannot memory-map BAR0 res hdr ..." */
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		/* shost_printk: "vNIC registration failed ..." */
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		/* shost_printk: "Get vNIC configuration failed ..." */
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		/* shost_printk: "Failed to set intr mode ..." */
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		/* shost_printk: "Failed to alloc vNIC resources ..." */
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
	} else {
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
	}
	fnic->state = FNIC_IN_FC_MODE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			/* shost_printk: "fnic_alloc_rq_frame can't alloc ..." */
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example #11
// chip-specific constructor
// (see "Management of Cards and Components")
static int
snd_vortex_create(struct snd_card *card, struct pci_dev *pci, vortex_t ** rchip)
{
	vortex_t *chip;
	int err;
	static struct snd_device_ops ops = {
		.dev_free = snd_vortex_dev_free,
	};

	*rchip = NULL;

	// check PCI availability (DMA).
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		dev_err(card->dev, "failed to set DMA mask\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;

	// initialize the stuff
	chip->pci_dev = pci;
	chip->io = pci_resource_start(pci, 0);
	chip->vendor = pci->vendor;
	chip->device = pci->device;
	chip->card = card;
	chip->irq = -1;

	// (1) PCI resource allocation
	// Get MMIO area
	//
	if ((err = pci_request_regions(pci, CARD_NAME_SHORT)) != 0)
		goto regions_out;

	chip->mmio = pci_ioremap_bar(pci, 0);
	if (!chip->mmio) {
		dev_err(card->dev, "MMIO area remap failed.\n");
		err = -ENOMEM;
		goto ioremap_out;
	}

	/* Init audio core.
	 * This must be done before we do request_irq otherwise we can get spurious
	 * interrupts that we do not handle properly and make a mess of things */
	if ((err = vortex_core_init(chip)) != 0) {
		dev_err(card->dev, "hw core init failed\n");
		goto core_out;
	}

	if ((err = request_irq(pci->irq, vortex_interrupt,
			       IRQF_SHARED, KBUILD_MODNAME,
	                       chip)) != 0) {
		dev_err(card->dev, "cannot grab irq\n");
		goto irq_out;
	}
	chip->irq = pci->irq;

	pci_set_master(pci);
	// End of PCI setup.

	// Register alsa root device.
	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		goto alloc_out;
	}

	*rchip = chip;

	return 0;

      alloc_out:
	free_irq(chip->irq, chip);
      irq_out:
	vortex_core_shutdown(chip);
      core_out:
	iounmap(chip->mmio);
      ioremap_out:
	pci_release_regions(chip->pci_dev);
      regions_out:
	pci_disable_device(chip->pci_dev);
	//FIXME: this not the right place to unregister the gameport
	vortex_gameport_unregister(chip);
	kfree(chip);
	return err;
}
Example #12
static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
    void __iomem *mem;
    struct ath_softc *sc;
    struct ieee80211_hw *hw;
    u8 csz;
    u32 val;
    int ret = 0;
    char hw_name[64];

    if (pci_enable_device(pdev))
        return -EIO;

    ret =  pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
    if (ret) {
        printk(KERN_ERR "ath9k: 32-bit DMA not available\n");
        goto err_dma;
    }

    ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
    if (ret) {
        printk(KERN_ERR "ath9k: 32-bit DMA consistent "
               "DMA enable failed\n");
        goto err_dma;
    }

    /*
     * Cache line size is used to size and align various
     * structures used to communicate with the hardware.
     */
    pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
    if (csz == 0) {
        /*
         * Linux 2.4.18 (at least) writes the cache line size
         * register as a 16-bit wide register which is wrong.
         * We must have this setup properly for rx buffer
         * DMA to work so force a reasonable value here if it
         * comes up zero.
         */
        csz = L1_CACHE_BYTES / sizeof(u32);
        pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
    }
    /*
     * The default setting of latency timer yields poor results,
     * set it to the value used by other systems. It may be worth
     * tweaking this setting more.
     */
    pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

    pci_set_master(pdev);

    /*
     * Disable the RETRY_TIMEOUT register (0x41) to keep
     * PCI Tx retries from interfering with C3 CPU state.
     */
    pci_read_config_dword(pdev, 0x40, &val);
    if ((val & 0x0000ff00) != 0)
        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);

    ret = pci_request_region(pdev, 0, "ath9k");
    if (ret) {
        dev_err(&pdev->dev, "PCI memory region reserve error\n");
        ret = -ENODEV;
        goto err_region;
    }

    mem = pci_iomap(pdev, 0, 0);
    if (!mem) {
        printk(KERN_ERR "PCI memory map error\n") ;
        ret = -EIO;
        goto err_iomap;
    }

    hw = ieee80211_alloc_hw(sizeof(struct ath_softc), &ath9k_ops);
    if (!hw) {
        dev_err(&pdev->dev, "No memory for ieee80211_hw\n");
        ret = -ENOMEM;
        goto err_alloc_hw;
    }

    SET_IEEE80211_DEV(hw, &pdev->dev);
    pci_set_drvdata(pdev, hw);

    sc = hw->priv;
    sc->hw = hw;
    sc->dev = &pdev->dev;
    sc->mem = mem;

    /* Will be cleared in ath9k_start() */
    sc->sc_flags |= SC_OP_INVALID;

    ret = request_irq(pdev->irq, ath_isr, IRQF_SHARED, "ath9k", sc);
    if (ret) {
        dev_err(&pdev->dev, "request_irq failed\n");
        goto err_irq;
    }

    sc->irq = pdev->irq;

    ret = ath9k_init_device(id->device, sc, &ath_pci_bus_ops);
    if (ret) {
        dev_err(&pdev->dev, "Failed to initialize device\n");
        goto err_init;
    }

    ath9k_hw_name(sc->sc_ah, hw_name, sizeof(hw_name));
    wiphy_info(hw->wiphy, "%s mem=0x%lx, irq=%d\n",
               hw_name, (unsigned long)mem, pdev->irq);

    return 0;

err_init:
    free_irq(sc->irq, sc);
err_irq:
    ieee80211_free_hw(hw);
err_alloc_hw:
    pci_iounmap(pdev, mem);
err_iomap:
    pci_release_region(pdev, 0);
err_region:
    /* Nothing */
err_dma:
    pci_disable_device(pdev);
    return ret;
}
Example #13
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 signature_val;
	u64 dma_mask;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	new_drvdata->clk = of_clk_get(np, 0);
	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base))
		return PTR_ERR(new_drvdata->cc_base);

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	new_drvdata->irq = platform_get_irq(plat_dev, 0);
	if (new_drvdata->irq < 0) {
		dev_err(dev, "Failed getting IRQ resource\n");
		return new_drvdata->irq;
	}

	rc = devm_request_irq(dev, new_drvdata->irq, cc_isr,
			      IRQF_SHARED, "arm_cc7x", new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n",
			new_drvdata->irq);
		return rc;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", new_drvdata->irq);

	init_completion(&new_drvdata->hw_queue_avail);

	if (!plat_dev->dev.dma_mask)
		plat_dev->dev.dma_mask = &plat_dev->dev.coherent_dma_mask;

	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
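	/* Walk the mask down one bit at a time from the configured width
	 * until the platform accepts it; give up below 32 bits. */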
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(&plat_dev->dev, dma_mask)) {
			rc = dma_set_coherent_mask(&plat_dev->dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=0x%llx\n", dma_mask);
		return rc;
	}

	rc = cc_clk_on(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	/* Verify correct mapping */
	signature_val = cc_ioread(new_drvdata, CC_REG(HOST_SIGNATURE));
	if (signature_val != CC_DEV_SIGNATURE) {
		dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
			signature_val, (u32)CC_DEV_SIGNATURE);
		rc = -EINVAL;
		goto post_clk_err;
	}
	dev_dbg(dev, "CC SIGNATURE=0x%08X\n", signature_val);

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X, Driver version %s\n",
		 CC_DEV_NAME_STR,
		 cc_ioread(new_drvdata, CC_REG(HOST_VERSION)),
		 DRV_MODULE_VERSION);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_clk_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "CC_FIPS_INIT failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		dev_err(dev, "Failed to alloc MLLI Sram buffer\n");
		rc = -ENOMEM;
		goto post_sram_mgr_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_sram_mgr_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	rc = cc_pm_init(new_drvdata);
	if (rc) {
		dev_err(dev, "ssi_power_mgr_init failed\n");
		goto post_buf_mgr_err;
	}

	rc = cc_ivgen_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_ivgen_init failed\n");
		goto post_power_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_ivgen_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS tests passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_ivgen_err:
	cc_ivgen_fini(new_drvdata);
post_power_mgr_err:
	cc_pm_fini(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_sram_mgr_err:
	cc_sram_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_clk_err:
	cc_clk_off(new_drvdata);
	return rc;
}
Example #14
}

struct snd_pcm_ops mxs_pcm_ops = {
	.open		= mxs_pcm_open,
	.close		= mxs_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= mxs_pcm_hw_params,
	.hw_free	= mxs_pcm_hw_free,
	.prepare	= mxs_pcm_prepare,
	.trigger	= mxs_pcm_trigger,
	.pointer	= mxs_pcm_pointer,
	.copy		= mxs_pcm_copy,
	.mmap		= mxs_pcm_mmap,
};

static u64 mxs_pcm_dma_mask = DMA_BIT_MASK(32);

static int mxs_pcm_new(struct snd_card *card,
			    struct snd_soc_dai *dai, struct snd_pcm *pcm)
{
	size_t size = mxs_pcm_hardware.buffer_bytes_max;

	if (!card->dev->dma_mask)
		card->dev->dma_mask = &mxs_pcm_dma_mask;

	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = DMA_BIT_MASK(32);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, NULL,
					      size, size);

	return 0;
}
	},
	{
		.start = DMOV_HSUART1_TX_CHAN,
		.end   = DMOV_HSUART1_RX_CHAN,
		.name  = "uartdm_channels",
		.flags = IORESOURCE_DMA,
	},
	{
		.start = DMOV_HSUART1_TX_CRCI,
		.end   = DMOV_HSUART1_RX_CRCI,
		.name  = "uartdm_crci",
		.flags = IORESOURCE_DMA,
	},
};

static u64 msm_uart_dm1_dma_mask = DMA_BIT_MASK(32);

struct platform_device msm_device_uart_dm1 = {
	.name = "msm_serial_hs",
	.id = 0,
	.num_resources = ARRAY_SIZE(msm_uart1_dm_resources),
	.resource = msm_uart1_dm_resources,
	.dev		= {
		.dma_mask = &msm_uart_dm1_dma_mask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

static struct resource msm_uart2_dm_resources[] = {
	{
		.start = MSM_UART2DM_PHYS,
/* TODO - Read fdt to get this info, add BCM65500 platform driver detection */
static int __init xlp_find_pci_dev(void)
{
	uint16_t i, j, base_id, id, num_devices, maxdevice=0;
	int idx;
	uint64_t mmio;
	uint32_t val, devid, irt, irq;
	struct platform_device* pplatdev;
	struct resource pres[2] = { {0} };
	int total=num_possible_nodes();

	for(i=0; i<total; i++) {
		j=node_online(i);
		if(!j)     continue; 
		maxdevice += XLP_MAX_DEVICE;
	}	

	printk(KERN_DEBUG "XLP platform devices:\n");

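	/* Walk every device/function in memory-mapped config space; an
	 * all-ones read of the ID register means no device is present. */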
	for (i=0; i<maxdevice; i++) {

		for (j=0; j<XLP_MAX_FUNC; j++) {

			mmio = nlm_hal_get_dev_base(0, 0, i, j);
			val  = nlm_hal_read_32bit_reg(mmio, 0);

			if(val == 0xFFFFFFFF)
				continue;		// No PCI device

			devid = (val & 0xFFFF0000) >> 16;
//			printk("PCI-e Device ID 0x%04X found at bus 0, device %d, function %d\n", devid, i, j);

			idx = get_dev2drv(devid);
			if(idx < 0)
				continue;		// Not found in table

			/* Register NAND only for other nodes.
			 * Remove if condition when other devices are supported on other nodes as well.
			 * */
			if(!((i>8 && (devid == XLP_DEVID_NAND) ) || (i<8)))
				continue;

			num_devices = 1;

			/* Handle PCI-e devices with multiple platform devices */
			if (devid == XLP2XX_DEVID_I2C) {
				if (is_nlm_xlp108() || is_nlm_xlp104() || is_nlm_xlp101())
					num_devices = 2;
				else
					num_devices = 4;
			}

			while(num_devices--) {		// Handle multiple IDs per PCI device

				base_id = dev2drv_table[idx].id++;
				id = base_id;

				/* Funny UART exception */
				if (devid == XLP_DEVID_UART)
					id += PLAT8250_DEV_PLATFORM;

				pplatdev = platform_device_alloc((const char*)dev2drv_table[idx].drvname, id);
				if (!pplatdev) {
					printk(KERN_WARNING "platform_device_alloc failed\n");
					continue;
				}

				if(devid == XLP_DEVID_UART) {
					pplatdev->dev.platform_data = &xlp_uart_port[base_id];
					xlp_init_uart(base_id);
				}


				irt = (nlm_hal_read_32bit_reg(mmio, DEV_IRT_INFO) & 0xFFFF);
				irq = xlp_irt_to_irq(0, irt);
				pres[0].start = irq;
				pres[0].end   = irq;
				pres[0].flags = IORESOURCE_IRQ;

				/* XLP2xx I2C devices share I/O memory - so let the platform driver manage
				 * it instead of each platform device (I2C bus).
				 */
				if(devid == XLP2XX_DEVID_I2C) {

					printk(KERN_DEBUG "%12s.%d (PCIe B/D/F = 0/0x%02X/%d), IRQ = %3d\n",
							dev2drv_table[idx].drvname, base_id, devid, j, irq);

					platform_device_add_resources(pplatdev, pres, 1);
				} else {
					pres[1].start = mmio;
					pres[1].end   = mmio + 0xFFF;
					pres[1].flags = IORESOURCE_MEM;

					printk(KERN_DEBUG "%12s.%d (PCIe B/D/F = 0/0x%02X/%d), IRQ = %3d, "
							"mem = 0x%llX-0x%llX,\n",
							dev2drv_table[idx].drvname, base_id, devid, j, irq,
							mmio, mmio + 0xFFF);

					platform_device_add_resources(pplatdev, pres, 2);
				}
				if (devid == XLP_DEVID_MMC){
					mmc_pplat_dev = pplatdev;
				}
				pplatdev->dev.dma_mask = &xlp_dev_dmamask;
				pplatdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
				platform_device_add(pplatdev);
			}
		}
	}

	return 0;
}
};

static struct resource vpfe_resources[] = {
	{
		.start          = IRQ_VDINT0,
		.end            = IRQ_VDINT0,
		.flags          = IORESOURCE_IRQ,
	},
	{
		.start          = IRQ_VDINT1,
		.end            = IRQ_VDINT1,
		.flags          = IORESOURCE_IRQ,
	},
};

static u64 dm644x_video_dma_mask = DMA_BIT_MASK(32);

static struct resource dm644x_ccdc_resource[] = {
	/* CCDC Base address */
	{
		.start          = 0x01c70400,
		.end            = 0x01c70400 + 0xff,
		.flags          = IORESOURCE_MEM,
	},
};

static struct platform_device dm644x_ccdc_dev = {
	.name           = "dm644x_ccdc",
	.id             = -1,
	.num_resources  = ARRAY_SIZE(dm644x_ccdc_resource),
	.resource       = dm644x_ccdc_resource,
};

static struct resource au1100_lcd_resources[] = {
	[0] = {
		.start	= AU1100_LCD_PHYS_ADDR,
		.end	= AU1100_LCD_PHYS_ADDR + 0x800 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1100_LCD_INT,
		.end	= AU1100_LCD_INT,
		.flags	= IORESOURCE_IRQ,
	}
};

static u64 au1100_lcd_dmamask = DMA_BIT_MASK(32);

static struct platform_device au1100_lcd_device = {
	.name		= "au1100-lcd",
	.id		= 0,
	.dev = {
		.dma_mask		= &au1100_lcd_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.num_resources	= ARRAY_SIZE(au1100_lcd_resources),
	.resource	= au1100_lcd_resources,
};

static struct resource alchemy_ac97c_res[] = {
	[0] = {
		.start	= AU1000_AC97_PHYS_ADDR,
Example #19
		.end	= 0xfe943fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= intcs_evt2irq(0x580),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device lcdc_device = {
	.name		= "sh_mobile_lcdc_fb",
	.num_resources	= ARRAY_SIZE(lcdc_resources),
	.resource	= lcdc_resources,
	.dev	= {
		.platform_data	= &lcdc_info,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};

/* Fixed 1.8V regulator to be used by MMCIF */
static struct regulator_consumer_supply fixed1v8_power_consumers[] =
{
	REGULATOR_SUPPLY("vmmc", "sh_mmcif.0"),
	REGULATOR_SUPPLY("vqmmc", "sh_mmcif.0"),
};

/* MMCIF */
static struct resource sh_mmcif_resources[] = {
	[0] = {
		.name	= "MMCIF",
		.start	= 0xe6bd0000,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB1_HS_IRQ,
		.end	= USB1_HS_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};

struct platform_device msm_device_otg = {
	.name		= "msm_otg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(resources_otg),
	.resource	= resources_otg,
	.dev		= {
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};

static struct resource resources_hsusb[] = {
	{
		.start	= MSM9615_HSUSB_PHYS,
		.end	= MSM9615_HSUSB_PHYS + MSM9615_HSUSB_SIZE - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= USB1_HS_IRQ,
		.end	= USB1_HS_IRQ,
		.flags	= IORESOURCE_IRQ,
	},
};
Example #21
INT_CFG(DM365,  INT_IMX0_DISABLE,    0,     1,    1,     false)
INT_CFG(DM365,  INT_HDVICP_ENABLE,   0,     1,    1,     false)
INT_CFG(DM365,  INT_HDVICP_DISABLE,  0,     1,    0,     false)
INT_CFG(DM365,  INT_IMX1_ENABLE,     24,    1,    1,     false)
INT_CFG(DM365,  INT_IMX1_DISABLE,    24,    1,    0,     false)
INT_CFG(DM365,  INT_NSF_ENABLE,      25,    1,    1,     false)
INT_CFG(DM365,  INT_NSF_DISABLE,     25,    1,    0,     false)

EVT_CFG(DM365,	EVT2_ASP_TX,         0,     1,    0,     false)
EVT_CFG(DM365,	EVT3_ASP_RX,         1,     1,    0,     false)
EVT_CFG(DM365,	EVT2_VC_TX,          0,     1,    1,     false)
EVT_CFG(DM365,	EVT3_VC_RX,          1,     1,    1,     false)
#endif
};

static u64 dm365_spi0_dma_mask = DMA_BIT_MASK(32);

static struct davinci_spi_platform_data dm365_spi0_pdata = {
	.version 	= SPI_VERSION_1,
	.num_chipselect = 2,
	.clk_internal	= 1,
	.cs_hold	= 1,
	.intr_level	= 0,
	.poll_mode	= 1,	/* 0 -> interrupt mode, 1 -> polling mode */
	.c2tdelay	= 0,
	.t2cdelay	= 0,
};

static struct resource dm365_spi0_resources[] = {
	{
		.start = 0x01c66000,
int
usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;
	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);

	/* usbnet already took usb runtime pm, so have to enable the feature
	 * for usb interface, otherwise usb_autopm_get_interface may return
	 * failure if USB_SUSPEND(RUNTIME_PM) is enabled.
	 */
	if (!driver->supports_autosuspend) {
		driver->supports_autosuspend = 1;
		pm_runtime_enable(&udev->dev);
	}

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		dev_dbg (&udev->dev, "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	/* netdev_printk() needs this so do it as early as possible */
	SET_NETDEV_DEV(net, &udev->dev);

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	skb_queue_head_init(&dev->rxq_pause);
	dev->bh.func = usbnet_bh;
	dev->bh.data = (unsigned long) dev;
	INIT_WORK (&dev->kevent, kevent);
	init_usb_anchor(&dev->deferred);
	dev->delay.function = usbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
	mutex_init (&dev->phy_mutex);

	dev->net = net;
	strcpy (net->name, "usb%d");
	memcpy (net->dev_addr, node_id, sizeof node_id);

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;
#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

	net->netdev_ops = &usbnet_netdev_ops;
	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &usbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	if (info->bind) {
		status = info->bind (dev, udev);
		if (status < 0)
			goto out1;

		// heuristic:  "usb%d" for links we know are two-host,
		// else "eth%d" when there's reasonable doubt.  userspace
		// can rename the link if it knows better.
		if ((dev->driver_info->flags & FLAG_ETHER) != 0 &&
		    ((dev->driver_info->flags & FLAG_POINTTOPOINT) == 0 ||
		     (net->dev_addr [0] & 0x02) == 0))
			strcpy (net->name, "eth%d");
		/* WLAN devices should always be named "wlan%d" */
		if ((dev->driver_info->flags & FLAG_WLAN) != 0)
			strcpy(net->name, "wlan%d");
		/* WWAN devices should always be named "wwan%d" */
		if ((dev->driver_info->flags & FLAG_WWAN) != 0)
			strcpy(net->name, "wwan%d");

		/* maybe the remote can't receive an Ethernet MTU */
		if (net->mtu > (dev->hard_mtu - net->hard_header_len))
			net->mtu = dev->hard_mtu - net->hard_header_len;
	} else if (!info->in || !info->out)
		status = usbnet_get_endpoints (dev, udev);
	else {
		dev->in = usb_rcvbulkpipe (xdev, info->in);
		dev->out = usb_sndbulkpipe (xdev, info->out);
		if (!(info->flags & FLAG_NO_SETINT))
			status = usb_set_interface (xdev,
				interface->desc.bInterfaceNumber,
				interface->desc.bAlternateSetting);
		else
			status = 0;

	}
	if (status >= 0 && dev->status)
		status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	/* urb size is equal to hard_mtu value unless the size was set in bind() */
	if (!dev->rx_urb_size) {
		dev->custom_rx_urb_size = false;
		dev->rx_urb_size = dev->hard_mtu;
	} else {
		dev->custom_rx_urb_size = true;
	}

	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wlan_type);
	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
		SET_NETDEV_DEVTYPE(net, &wwan_type);

	status = register_netdev (net);
	if (status)
		goto out3;
	netif_info(dev, probe, dev->net,
		   "register '%s' at usb-%s-%s, %s, %pM\n",
		   udev->dev.driver->name,
		   xdev->bus->bus_name, xdev->devpath,
		   dev->driver_info->description,
		   net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	netif_device_attach (net);

	if (dev->driver_info->flags & FLAG_LINK_INTR)
		netif_carrier_off(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}
Exemple #23
0
			IRQ_DOVE_XOR_00, IRQ_DOVE_XOR_01);
}

/*****************************************************************************
 * XOR 1
 ****************************************************************************/
static void __init dove_xor1_init(void)
{
	orion_xor1_init(DOVE_XOR1_PHYS_BASE, DOVE_XOR1_HIGH_PHYS_BASE,
			IRQ_DOVE_XOR_10, IRQ_DOVE_XOR_11);
}

/*****************************************************************************
 * SDIO
 ****************************************************************************/
static u64 sdio_dmamask = DMA_BIT_MASK(32);

static struct resource dove_sdio0_resources[] = {
	{
		.start	= DOVE_SDIO0_PHYS_BASE,
		.end	= DOVE_SDIO0_PHYS_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	}, {
		.start	= IRQ_DOVE_SDIO0,
		.end	= IRQ_DOVE_SDIO0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dove_sdio0 = {
	.name		= "sdhci-dove",
Example #24
static int __devinit chd_dec_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *entry)
{
	struct crystalhd_adp *pinfo;
	int rc;
	enum BC_STATUS sts = BC_STS_SUCCESS;

	BCMLOG(BCMLOG_DBG, "PCI_INFO: Vendor:0x%04x Device:0x%04x "
	       "s_vendor:0x%04x s_device: 0x%04x\n",
	       pdev->vendor, pdev->device, pdev->subsystem_vendor,
	       pdev->subsystem_device);

	/* FIXME: jarod: why atomic? */
	pinfo = kzalloc(sizeof(struct crystalhd_adp), GFP_ATOMIC);
	if (!pinfo) {
		BCMLOG_ERR("Failed to allocate memory\n");
		return -ENOMEM;
	}

	pinfo->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		BCMLOG_ERR("Failed to enable PCI device\n");
		return rc;
	}

	snprintf(pinfo->name, 31, "crystalhd_pci_e:%d:%d:%d",
		 pdev->bus->number, PCI_SLOT(pdev->devfn),
		 PCI_FUNC(pdev->devfn));

	rc = chd_pci_reserve_mem(pinfo);
	if (rc) {
		BCMLOG_ERR("Failed to setup memory regions.\n");
		return -ENOMEM;
	}

	pinfo->present	= 1;
	pinfo->drv_data = entry->driver_data;

	/* Setup adapter level lock.. */
	spin_lock_init(&pinfo->lock);

	/* setup api stuff.. */
	chd_dec_init_chdev(pinfo);
	rc = chd_dec_enable_int(pinfo);
	if (rc) {
		BCMLOG_ERR("_enable_int err:%d\n", rc);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	/* Set dma mask... */
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		pinfo->dmabits = 64;
	} else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		pinfo->dmabits = 32;
	} else {
		BCMLOG_ERR("Unable to set up DMA: %d\n", rc);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	sts = crystalhd_setup_cmd_context(&pinfo->cmds, pinfo);
	if (sts != BC_STS_SUCCESS) {
		BCMLOG_ERR("cmd setup :%d\n", sts);
		pci_disable_device(pdev);
		return -ENODEV;
	}

	pci_set_master(pdev);

	pci_set_drvdata(pdev, pinfo);

	g_adp_info = pinfo;

	return 0;
}
/*
 *    probe function - creates the card manager
 */
static int __devinit snd_mixart_probe(struct pci_dev *pci,
				      const struct pci_device_id *pci_id)
{
	static int dev;
	struct mixart_mgr *mgr;
	unsigned int i;
	int err;
	size_t size;

	/*
	 */
	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (! enable[dev]) {
		dev++;
		return -ENOENT;
	}

	/* enable PCI device */
	if ((err = pci_enable_device(pci)) < 0)
		return err;
	pci_set_master(pci);

	/* check if we can restrict PCI DMA transfers to 32 bits */
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		snd_printk(KERN_ERR "architecture does not support 32bit PCI busmaster DMA\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	/*
	 */
	mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);
	if (! mgr) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	mgr->pci = pci;
	mgr->irq = -1;

	/* resource assignment */
	if ((err = pci_request_regions(pci, CARD_NAME)) < 0) {
		kfree(mgr);
		pci_disable_device(pci);
		return err;
	}
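	/* Map both PCI memory BARs; the rest of the driver reaches the
	 * board through mgr->mem[0..1]. */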
	for (i = 0; i < 2; i++) {
		mgr->mem[i].phys = pci_resource_start(pci, i);
		mgr->mem[i].virt = pci_ioremap_bar(pci, i);
		if (!mgr->mem[i].virt) {
		        printk(KERN_ERR "unable to remap resource 0x%lx\n",
			       mgr->mem[i].phys);
			snd_mixart_free(mgr);
			return -EBUSY;
		}
	}

	if (request_irq(pci->irq, snd_mixart_interrupt, IRQF_SHARED,
			CARD_NAME, mgr)) {
		snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq);
		snd_mixart_free(mgr);
		return -EBUSY;
	}
	mgr->irq = pci->irq;

	sprintf(mgr->shortname, "Digigram miXart");
	sprintf(mgr->longname, "%s at 0x%lx & 0x%lx, irq %i", mgr->shortname, mgr->mem[0].phys, mgr->mem[1].phys, mgr->irq);

	/* ISR spinlock  */
	spin_lock_init(&mgr->lock);

	/* init mailbox  */
	mgr->msg_fifo_readptr = 0;
	mgr->msg_fifo_writeptr = 0;

	spin_lock_init(&mgr->msg_lock);
	mutex_init(&mgr->msg_mutex);
	init_waitqueue_head(&mgr->msg_sleep);
	atomic_set(&mgr->msg_processed, 0);

	/* init setup mutex*/
	mutex_init(&mgr->setup_mutex);

	/* init message tasklet */
	tasklet_init(&mgr->msg_taskq, snd_mixart_msg_tasklet, (unsigned long) mgr);

	/* card assignment */
	mgr->num_cards = MIXART_MAX_CARDS; /* 4  FIXME: configurable? */
	for (i = 0; i < mgr->num_cards; i++) {
		struct snd_card *card;
		char tmpid[16];
		int idx;

		if (index[dev] < 0)
			idx = index[dev];
		else
			idx = index[dev] + i;
		snprintf(tmpid, sizeof(tmpid), "%s-%d", id[dev] ? id[dev] : "MIXART", i);
		err = snd_card_create(idx, tmpid, THIS_MODULE, 0, &card);

		if (err < 0) {
			snd_printk(KERN_ERR "cannot allocate the card %d\n", i);
			snd_mixart_free(mgr);
			return err;
		}

		strcpy(card->driver, CARD_NAME);
		sprintf(card->shortname, "%s [PCM #%d]", mgr->shortname, i);
		sprintf(card->longname, "%s [PCM #%d]", mgr->longname, i);

		if ((err = snd_mixart_create(mgr, card, i)) < 0) {
			snd_card_free(card);
			snd_mixart_free(mgr);
			return err;
		}

		if(i==0) {
			/* init proc interface only for chip0 */
			snd_mixart_proc_init(mgr->chip[i]);
		}

		if ((err = snd_card_register(card)) < 0) {
			snd_mixart_free(mgr);
			return err;
		}
	}

	/* init firmware status (mgr->dsp_loaded reset in hwdep_new) */
	mgr->board_type = MIXART_DAUGHTER_TYPE_NONE;

	/* create array of streaminfo */
	size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
			    sizeof(struct mixart_flowinfo)) );
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				size, &mgr->flowinfo) < 0) {
		snd_mixart_free(mgr);
		return -ENOMEM;
	}
	/* init streaminfo_array */
	memset(mgr->flowinfo.area, 0, size);

	/* create array of bufferinfo */
	size = PAGE_ALIGN( (MIXART_MAX_STREAM_PER_CARD * MIXART_MAX_CARDS *
			    sizeof(struct mixart_bufferinfo)) );
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
				size, &mgr->bufferinfo) < 0) {
		snd_mixart_free(mgr);
		return -ENOMEM;
	}
	/* init bufferinfo_array */
	memset(mgr->bufferinfo.area, 0, size);

	/* set up firmware */
	err = snd_mixart_setup_firmware(mgr);
	if (err < 0) {
		snd_mixart_free(mgr);
		return err;
	}

	pci_set_drvdata(pci, mgr);
	dev++;
	return 0;
}
//#include <mach/irqs.h>
//#include <mach/map.h>

//#include <plat/devs.h>

static struct resource s3c_usb_hsotg_resources[] = {
	[0] = {
		.start	= S3C_PA_USB_HSOTG,
		.end	= S3C_PA_USB_HSOTG + 0x10000 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_OTG,
		.end	= IRQ_OTG,
		.flags	= IORESOURCE_IRQ,
	},
};

static u64 s3c_hsotg_dmamask = DMA_BIT_MASK(32);

struct platform_device s3c_device_usb_hsotg = {
	.name		= "s3c-hsotg",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(s3c_usb_hsotg_resources),
	.resource	= s3c_usb_hsotg_resources,
	.dev		= {
		.dma_mask		= &s3c_hsotg_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
};
#endif

static struct pps_gpio_platform_data pps_gpio_info = {
	.assert_falling_edge = false,
	.capture_clear = false,
	.gpio_pin = -1,
	.gpio_label = "PPS",
};

static struct platform_device pps_gpio_device = {
	.name = "pps-gpio",
	.id = PLATFORM_DEVID_NONE,
	.dev.platform_data = &pps_gpio_info,
};

static u64 fb_dmamask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON);

static struct platform_device bcm2708_fb_device = {
	.name = "bcm2708_fb",
	.id = -1,		/* only one bcm2708_fb */
	.resource = NULL,
	.num_resources = 0,
	.dev = {
		.dma_mask = &fb_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(DMA_MASK_BITS_COMMON),
		},
};

static struct resource bcm2708_usb_resources[] = {
	[0] = {
	       .start = USB_BASE,
	},
	[3] = {
		.start = DMACH_AC97_MICIN,
		.end   = DMACH_AC97_MICIN,
		.flags = IORESOURCE_DMA,
	},
	[4] = {
		.start = IRQ_AC97,
		.end   = IRQ_AC97,
		.flags = IORESOURCE_IRQ,
	},
};

static struct s3c_audio_pdata s3c_ac97_pdata;

static u64 s3c64xx_ac97_dmamask = DMA_BIT_MASK(32);

struct platform_device s3c64xx_device_ac97 = {
	.name		  = "s3c-ac97",
	.id		  = -1,
	.num_resources	  = ARRAY_SIZE(s3c64xx_ac97_resource),
	.resource	  = s3c64xx_ac97_resource,
	.dev = {
		.platform_data = &s3c_ac97_pdata,
		.dma_mask = &s3c64xx_ac97_dmamask,
		.coherent_dma_mask = DMA_BIT_MASK(32),
	},
};
EXPORT_SYMBOL(s3c64xx_device_ac97);

void __init s3c64xx_ac97_setup_gpio(int num)
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at572d940hf.h>
#include <mach/at572d940hf_matrix.h>
#include <mach/at91sam9_smc.h>

#include "generic.h"
#include "sam9_smc.h"


/* --------------------------------------------------------------------
 *  USB Host
 * -------------------------------------------------------------------- */

#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static struct at91_usbh_data usbh_data;

static struct resource usbh_resources[] = {
	[0] = {
		.start	= AT572D940HF_UHP_BASE,
		.end	= AT572D940HF_UHP_BASE + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT572D940HF_ID_UHP,
		.end	= AT572D940HF_ID_UHP,
		.flags	= IORESOURCE_IRQ,
	},
};
Example #30
static int
snd_ad1889_create(struct snd_card *card,
		  struct pci_dev *pci,
		  struct snd_ad1889 **rchip)
{
	int err;

	struct snd_ad1889 *chip;
	static struct snd_device_ops ops = {
		.dev_free = snd_ad1889_dev_free,
	};

	*rchip = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	/* check PCI availability (32bit DMA) */
	if (pci_set_dma_mask(pci, DMA_BIT_MASK(32)) < 0 ||
	    pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)) < 0) {
		dev_err(card->dev, "error setting 32-bit DMA mask.\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	/* allocate chip specific data with zero-filled memory */
	if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	card->private_data = chip;
	chip->pci = pci;
	chip->irq = -1;

	/* (1) PCI resource allocation */
	if ((err = pci_request_regions(pci, card->driver)) < 0)
		goto free_and_ret;

	chip->bar = pci_resource_start(pci, 0);
	chip->iobase = pci_ioremap_bar(pci, 0);
	if (chip->iobase == NULL) {
		dev_err(card->dev, "unable to reserve region.\n");
		err = -EBUSY;
		goto free_and_ret;
	}
	
	pci_set_master(pci);

	spin_lock_init(&chip->lock);	/* only now can we call ad1889_free */

	if (request_irq(pci->irq, snd_ad1889_interrupt,
			IRQF_SHARED, KBUILD_MODNAME, chip)) {
		dev_err(card->dev, "cannot obtain IRQ %d\n", pci->irq);
		snd_ad1889_free(chip);
		return -EBUSY;
	}

	chip->irq = pci->irq;
	synchronize_irq(chip->irq);

	/* (2) initialization of the chip hardware */
	if ((err = snd_ad1889_init(chip)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) {
		snd_ad1889_free(chip);
		return err;
	}

	*rchip = chip;

	return 0;

free_and_ret:
	kfree(chip);
	pci_disable_device(pci);

	return err;
}