Example #1
0
/* driver entry point */
static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret = -ENODEV;
	resource_size_t csr_base, mem_base;
	unsigned long csr_len, mem_len;
	struct denali_nand_info *denali;

	denali = kzalloc(sizeof(*denali), GFP_KERNEL);
	if (!denali)
		return -ENOMEM;

	ret = pci_enable_device(dev);
	if (ret) {
		printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
		goto failed_alloc_memory;
	}

	if (id->driver_data == INTEL_CE4100) {
		/* Due to a silicon limitation, we can only support
		 * ONFI timing mode 1 and below.
		 */
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			printk(KERN_ERR "Intel CE4100 only supports"
					" ONFI timing mode 1 or below\n");
			ret = -EINVAL;
			goto failed_enable_dev;
		}
		denali->platform = INTEL_CE4100;
		mem_base = pci_resource_start(dev, 0);
		mem_len = pci_resource_len(dev, 1);
		csr_base = pci_resource_start(dev, 1);
		csr_len = pci_resource_len(dev, 1);
	} else {
		denali->platform = INTEL_MRST;
		csr_base = pci_resource_start(dev, 0);
		csr_len = pci_resource_len(dev, 0);
		mem_base = pci_resource_start(dev, 1);
		mem_len = pci_resource_len(dev, 1);
		if (!mem_len) {
			mem_base = csr_base + csr_len;
			mem_len = csr_len;
		}
	}

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret) {
		printk(KERN_ERR "Spectra: no usable DMA configuration\n");
		goto failed_enable_dev;
	}
	denali->buf.dma_buf = dma_map_single(&dev->dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(&dev->dev, denali->buf.dma_buf)) {
		dev_err(&dev->dev, "Spectra: failed to map DMA buffer\n");
		goto failed_enable_dev;
	}

	pci_set_master(dev);
	denali->dev = &dev->dev;
	denali->mtd.dev.parent = &dev->dev;

	ret = pci_request_regions(dev, DENALI_NAND_NAME);
	if (ret) {
		printk(KERN_ERR "Spectra: Unable to request memory regions\n");
		goto failed_dma_map;
	}

	denali->flash_reg = ioremap_nocache(csr_base, csr_len);
	if (!denali->flash_reg) {
		printk(KERN_ERR "Spectra: Unable to remap memory region\n");
		ret = -ENOMEM;
		goto failed_req_regions;
	}

	denali->flash_mem = ioremap_nocache(mem_base, mem_len);
	if (!denali->flash_mem) {
		printk(KERN_ERR "Spectra: ioremap_nocache failed!");
		ret = -ENOMEM;
		goto failed_remap_reg;
	}

	denali_hw_init(denali);
	denali_drv_init(denali);

	/* denali_isr registration is done after all the hardware
	 * initialization is finished */
	if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
		ret = -ENODEV;
		goto failed_remap_mem;
	}

	/* now that our ISR is registered, we can enable interrupts */
	denali_set_intr_modes(denali, true);

	pci_set_drvdata(dev, denali);

	denali->mtd.name = "denali-nand";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	/* scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem */
	if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	/* MTD supported page sizes vary by kernel. We validate our
	 * kernel supports the device here.
	 */
	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		printk(KERN_ERR "Spectra: device size not supported by this "
			"version of MTD.");
		goto failed_req_irq;
	}

	/* Support for multiple NAND devices:
	 * MTD knows nothing about multiple NAND devices,
	 * so we should tell it the real page size
	 * and anything else necessary.
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;

	/* second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management. */

	/* Bad block management */
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	/* The Denali controller only supports 15-bit and 8-bit ECC in MRST,
	 * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
	 * for SLC if possible.
	 */
	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		/* if MLC OOB size is large enough, use 15bit ECC*/
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		printk(KERN_ERR "Your NAND chip OOB is not large enough to "
				"contain 8bit ECC correction codes\n");
		ret = -EINVAL;
		goto failed_req_irq;
	} else {
		denali->nand.ecc.layout = &nand_8bit_oob;
		denali->nand.ecc.bytes = ECC_8BITS;
		iowrite32(8, denali->flash_reg + ECC_CORRECTION);
	}

	denali->nand.ecc.bytes *= denali->devnum;
	denali->nand.ecc.layout->eccbytes *=
		denali->mtd.writesize / ECC_SECTOR_SIZE;
	denali->nand.ecc.layout->oobfree[0].offset =
		denali->bbtskipbytes + denali->nand.ecc.layout->eccbytes;
	denali->nand.ecc.layout->oobfree[0].length =
		denali->mtd.oobsize - denali->nand.ecc.layout->eccbytes -
		denali->bbtskipbytes;

	/* Let the driver know the total number of blocks and
	 * how many blocks are contained in each NAND chip.
	 * blksperchip helps the driver know how many
	 * blocks are taken by the FW.
	 */
	denali->totalblks = denali->mtd.size >>
				denali->nand.phys_erase_shift;
	denali->blksperchip = denali->totalblks / denali->nand.numchips;

	/* These functions are required by the NAND core framework, otherwise,
	 * the NAND core will assert. However, we don't need them, so we'll stub
	 * them out. */
	denali->nand.ecc.calculate = denali_ecc_calculate;
	denali->nand.ecc.correct = denali_ecc_correct;
	denali->nand.ecc.hwctl = denali_ecc_hwctl;

	/* override the default read operations */
	denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum;
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;
	denali->nand.erase_cmd = denali_erase;

	if (nand_scan_tail(&denali->mtd)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	ret = mtd_device_register(&denali->mtd, NULL, 0);
	if (ret) {
		dev_err(&dev->dev, "Spectra: Failed to register MTD: %d\n",
				ret);
		goto failed_req_irq;
	}
	return 0;

failed_req_irq:
	denali_irq_cleanup(dev->irq, denali);
failed_remap_mem:
	iounmap(denali->flash_mem);
failed_remap_reg:
	iounmap(denali->flash_reg);
failed_req_regions:
	pci_release_regions(dev);
failed_dma_map:
	dma_unmap_single(&dev->dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
			 DMA_BIDIRECTIONAL);
failed_enable_dev:
	pci_disable_device(dev);
failed_alloc_memory:
	kfree(denali);
	return ret;
}
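
The probe above sets a 32-bit streaming DMA mask before stream-mapping its driver-owned buffer, and unmaps it again under failed_dma_map. A minimal, self-contained sketch of that same pattern (the my_* names and buffer size are illustrative, not taken from the driver):

#include <linux/dma-mapping.h>

struct my_dev_priv {
	u8 buf[4096];			/* CPU-owned buffer */
	dma_addr_t dma_buf;		/* bus address after mapping */
};

static int my_setup_dma(struct device *dev, struct my_dev_priv *priv)
{
	int ret;

	/* Is 32-bit DMA supported? Bail out before touching the mapping. */
	ret = dma_set_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	priv->dma_buf = dma_map_single(dev, priv->buf, sizeof(priv->buf),
				       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, priv->dma_buf))
		return -EIO;

	return 0;
}

static void my_teardown_dma(struct device *dev, struct my_dev_priv *priv)
{
	/* Mirror of the failed_dma_map unwind step in the probe above. */
	dma_unmap_single(dev, priv->dma_buf, sizeof(priv->buf),
			 DMA_BIDIRECTIONAL);
}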
Example #2
0
static int hpt3x3_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
    static const struct ata_port_info info = {
        .flags = ATA_FLAG_SLAVE_POSS,
        .pio_mask = ATA_PIO4,
#if defined(CONFIG_PATA_HPT3X3_DMA)
        /* Further debug needed */
        .mwdma_mask = ATA_MWDMA2,
        .udma_mask = ATA_UDMA2,
#endif
        .port_ops = &hpt3x3_port_ops
    };
    /* Register offsets of taskfiles in BAR4 area */
    static const u8 offset_cmd[2] = { 0x20, 0x28 };
    static const u8 offset_ctl[2] = { 0x36, 0x3E };
    const struct ata_port_info *ppi[] = { &info, NULL };
    struct ata_host *host;
    int i, rc;
    void __iomem *base;

    hpt3x3_init_chipset(pdev);

    ata_print_version_once(&pdev->dev, DRV_VERSION);

    host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
    if (!host)
        return -ENOMEM;
    /* acquire resources and fill host */
    rc = pcim_enable_device(pdev);
    if (rc)
        return rc;

    /* Everything is relative to BAR4 if we set up this way */
    rc = pcim_iomap_regions(pdev, 1 << 4, DRV_NAME);
    if (rc == -EBUSY)
        pcim_pin_device(pdev);
    if (rc)
        return rc;
    host->iomap = pcim_iomap_table(pdev);
    rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
    if (rc)
        return rc;
    rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
    if (rc)
        return rc;

    base = host->iomap[4];	/* Bus mastering base */

    for (i = 0; i < host->n_ports; i++) {
        struct ata_port *ap = host->ports[i];
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ioaddr->cmd_addr = base + offset_cmd[i];
        ioaddr->altstatus_addr =
            ioaddr->ctl_addr = base + offset_ctl[i];
        ioaddr->scr_addr = NULL;
        ata_sff_std_ports(ioaddr);
        ioaddr->bmdma_addr = base + 8 * i;

        ata_port_pbar_desc(ap, 4, -1, "ioport");
        ata_port_pbar_desc(ap, 4, offset_cmd[i], "cmd");
    }
    pci_set_master(pdev);
    return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
                             IRQF_SHARED, &hpt3x3_sht);
}
Example #3
0
static int thunder_mmc_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct device_node *child_node;
	struct cvm_mmc_host *host;
	int ret, i = 0;

	host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	pci_set_drvdata(pdev, host);
	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret)
		return ret;

	host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!host->base)
		return -EINVAL;

	/* On ThunderX these are identical */
	host->dma_base = host->base;

	host->reg_off = 0x2000;
	host->reg_off_dma = 0x160;

	host->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(host->clk))
		return PTR_ERR(host->clk);

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;
	host->sys_freq = clk_get_rate(host->clk);

	spin_lock_init(&host->irq_handler_lock);
	sema_init(&host->mmc_serializer, 1);

	host->dev = dev;
	host->acquire_bus = thunder_mmc_acquire_bus;
	host->release_bus = thunder_mmc_release_bus;
	host->int_enable = thunder_mmc_int_enable;

	host->use_sg = true;
	host->big_dma_addr = true;
	host->need_irq_handler_lock = true;
	host->last_slot = -1;

	ret = dma_set_mask(dev, DMA_BIT_MASK(48));
	if (ret)
		goto error;

	/*
	 * Clear out any pending interrupts that may be left over from
	 * bootloader. Writing 1 to the bits clears them.
	 */
	writeq(127, host->base + MIO_EMM_INT_EN(host));
	writeq(3, host->base + MIO_EMM_DMA_INT_ENA_W1C(host));
	/* Clear DMA FIFO */
	writeq(BIT_ULL(16), host->base + MIO_EMM_DMA_FIFO_CFG(host));

	ret = thunder_mmc_register_interrupts(host, pdev);
	if (ret)
		goto error;

	for_each_child_of_node(node, child_node) {
		/*
		 * mmc_of_parse and devm* require one device per slot.
		 * Create a dummy device per slot and set the node pointer to
		 * the slot. The easiest way to get this is using
		 * of_platform_device_create.
		 */
		if (of_device_is_compatible(child_node, "mmc-slot")) {
			host->slot_pdev[i] = of_platform_device_create(child_node, NULL,
								       &pdev->dev);
			if (!host->slot_pdev[i])
				continue;

			ret = cvm_mmc_of_slot_probe(&host->slot_pdev[i]->dev, host);
			if (ret)
				goto error;
		}
		i++;
	}
	dev_info(dev, "probed\n");
	return 0;

error:
	for (i = 0; i < CAVIUM_MAX_MMC; i++) {
		if (host->slot[i])
			cvm_mmc_of_slot_remove(host->slot[i]);
		if (host->slot_pdev[i])
			of_platform_device_destroy(&host->slot_pdev[i]->dev, NULL);
	}
	clk_disable_unprepare(host->clk);
	return ret;
}
Example #4
0
static int
ahc_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char		 buf[80];
	const uint64_t	 mask_39bit = 0x7FFFFFFFFFULL;
	struct		 ahc_softc *ahc;
	ahc_dev_softc_t	 pci;
	struct		 ahc_pci_identity *entry;
	char		*name;
	int		 error;
	struct device	*dev = &pdev->dev;

	pci = pdev;
	entry = ahc_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahc_pci:%d:%d:%d",
		ahc_get_pci_bus(pci),
		ahc_get_pci_slot(pci),
		ahc_get_pci_function(pci));
	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	ahc = ahc_alloc(NULL, name);
	if (ahc == NULL)
		return (-ENOMEM);
	if (pci_enable_device(pdev)) {
		ahc_free(ahc);
		return (-ENODEV);
	}
	pci_set_master(pdev);

	if (sizeof(dma_addr_t) > 4
	    && ahc->features & AHC_LARGE_SCBS
	    && dma_set_mask(dev, mask_39bit) == 0
	    && dma_get_required_mask(dev) > DMA_32BIT_MASK) {
		ahc->flags |= AHC_39BIT_ADDRESSING;
	} else {
		if (dma_set_mask(dev, DMA_32BIT_MASK)) {
			ahc_free(ahc);
			printk(KERN_WARNING "aic7xxx: No suitable DMA available.\n");
			return (-ENODEV);
		}
	}
	ahc->dev_softc = pci;
	error = ahc_pci_config(ahc, entry);
	if (error != 0) {
		ahc_free(ahc);
		return (-error);
	}

	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahc->features & AHC_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
		ahc_linux_pci_inherit_flags(ahc);

	pci_set_drvdata(pdev, ahc);
	ahc_linux_register_host(ahc, &aic7xxx_driver_template);
	return (0);
}
Example #5
0
static int ath10k_ahb_resource_init(struct ath10k *ar)
{
	struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
	struct platform_device *pdev;
	struct resource *res;
	int ret;

	pdev = ar_ahb->pdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ath10k_err(ar, "failed to get memory resource\n");
		ret = -ENXIO;
		goto out;
	}

	ar_ahb->mem = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(ar_ahb->mem)) {
		ath10k_err(ar, "mem ioremap error\n");
		ret = PTR_ERR(ar_ahb->mem);
		goto out;
	}

	ar_ahb->mem_len = resource_size(res);

	ar_ahb->gcc_mem = ioremap_nocache(ATH10K_GCC_REG_BASE,
					  ATH10K_GCC_REG_SIZE);
	if (!ar_ahb->gcc_mem) {
		ath10k_err(ar, "gcc mem ioremap error\n");
		ret = -ENOMEM;
		goto err_mem_unmap;
	}

	ar_ahb->tcsr_mem = ioremap_nocache(ATH10K_TCSR_REG_BASE,
					   ATH10K_TCSR_REG_SIZE);
	if (!ar_ahb->tcsr_mem) {
		ath10k_err(ar, "tcsr mem ioremap error\n");
		ret = -ENOMEM;
		goto err_gcc_mem_unmap;
	}

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
		goto err_tcsr_mem_unmap;
	}

	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
			   ret);
		goto err_tcsr_mem_unmap;
	}

	ret = ath10k_ahb_clock_init(ar);
	if (ret)
		goto err_tcsr_mem_unmap;

	ret = ath10k_ahb_rst_ctrl_init(ar);
	if (ret)
		goto err_clock_deinit;

	ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
	if (ar_ahb->irq < 0) {
		ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
		ret = ar_ahb->irq;
		goto err_clock_deinit;
	}

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
		   ar_ahb->mem, ar_ahb->mem_len,
		   ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
	return 0;

err_clock_deinit:
	ath10k_ahb_clock_deinit(ar);

err_tcsr_mem_unmap:
	iounmap(ar_ahb->tcsr_mem);

err_gcc_mem_unmap:
	ar_ahb->tcsr_mem = NULL;
	iounmap(ar_ahb->gcc_mem);

err_mem_unmap:
	ar_ahb->gcc_mem = NULL;
	devm_iounmap(&pdev->dev, ar_ahb->mem);

out:
	ar_ahb->mem = NULL;
	return ret;
}
static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	int ret = 0;
	struct rmnet_mhi_private **rmnet_mhi_ctxt = NULL;
	enum MHI_STATUS r = MHI_STATUS_SUCCESS;

	memset(tx_interrupts_count, 0, sizeof(tx_interrupts_count));
	memset(rx_interrupts_count, 0, sizeof(rx_interrupts_count));
	memset(rx_interrupts_in_masked_irq, 0,
	       sizeof(rx_interrupts_in_masked_irq));
	memset(rx_napi_skb_burst_min, 0, sizeof(rx_napi_skb_burst_min));
	memset(rx_napi_skb_burst_max, 0, sizeof(rx_napi_skb_burst_max));
	memset(tx_cb_skb_free_burst_min, 0, sizeof(tx_cb_skb_free_burst_min));
	memset(tx_cb_skb_free_burst_max, 0, sizeof(tx_cb_skb_free_burst_max));
	memset(tx_ring_full_count, 0, sizeof(tx_ring_full_count));
	memset(tx_queued_packets_count, 0, sizeof(tx_queued_packets_count));
	memset(rx_napi_budget_overflow, 0, sizeof(rx_napi_budget_overflow));

	rmnet_log(MSG_INFO, "Entered.\n");

	if (rmnet_mhi_ptr == NULL) {
		rmnet_log(MSG_CRITICAL, "Bad input args.\n");
		return -EINVAL;
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;
	tx_cb_skb_free_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;

	skb_queue_head_init(&(rmnet_mhi_ptr->tx_buffers));
	skb_queue_head_init(&(rmnet_mhi_ptr->rx_buffers));

	if (rmnet_mhi_ptr->tx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening TX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->tx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start TX chan ret %d\n", r);
			goto mhi_tx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->tx_enabled = 1;
		}
	}
	if (rmnet_mhi_ptr->rx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening RX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->rx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start RX chan ret %d\n", r);
			goto mhi_rx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->rx_enabled = 1;
		}
	}
	rmnet_mhi_ptr->dev =
		alloc_netdev(sizeof(struct rmnet_mhi_private *),
			     RMNET_MHI_DEV_NAME,
			     NET_NAME_PREDICTABLE, rmnet_mhi_setup);
	if (!rmnet_mhi_ptr->dev) {
		rmnet_log(MSG_CRITICAL, "Network device allocation failed\n");
		ret = -ENOMEM;
		goto net_dev_alloc_fail;
	}

	rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev);
	*rmnet_mhi_ctxt = rmnet_mhi_ptr;

	ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev),
						MHI_DMA_MASK);
	if (ret)
		rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
	else
		rmnet_mhi_ptr->allocation_flags = GFP_DMA;

	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
	if (r) {
		rmnet_log(MSG_CRITICAL,
			"Failed to init inbound ret %d\n", r);
	}

	netif_napi_add(rmnet_mhi_ptr->dev, &(rmnet_mhi_ptr->napi),
		       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);

	rmnet_mhi_ptr->mhi_enabled = 1;
	ret = register_netdev(rmnet_mhi_ptr->dev);
	if (ret) {
		rmnet_log(MSG_CRITICAL,
			  "Network device registration failed\n");
		goto net_dev_reg_fail;
	}
	napi_enable(&(rmnet_mhi_ptr->napi));

	rmnet_log(MSG_INFO, "Exited.\n");

	return 0;

net_dev_reg_fail:
	netif_napi_del(&(rmnet_mhi_ptr->napi));
	free_netdev(rmnet_mhi_ptr->dev);
net_dev_alloc_fail:
	mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
	rmnet_mhi_ptr->dev = NULL;
mhi_rx_chan_start_fail:
	mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
mhi_tx_chan_start_fail:
	rmnet_log(MSG_INFO, "Exited ret %d.\n", ret);
	return ret;
}
Example #7
0
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * Return: 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	const struct mei_cfg *cfg = (struct mei_cfg *)(ent->driver_data);
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;


	if (!mei_me_quirk_probe(pdev, cfg))
		return -ENODEV;

	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev, cfg);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);

	err = mei_register(dev, &pdev->dev);
	if (err)
		goto release_irq;

	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	/*
	 * For HW that is not wake-capable, the runtime PM framework
	 * can't be used at the PCI device level.
	 * Use domain runtime PM callbacks instead.
	 */
	if (!pci_dev_run_wake(pdev))
		mei_me_set_pm_domain(dev);

	if (mei_pg_is_enabled(dev))
		pm_runtime_put_noidle(&pdev->dev);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
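
The 64-bit-then-32-bit fallback above issues dma_set_mask() and dma_set_coherent_mask() separately. A minimal sketch of the same fallback, assuming a kernel new enough to provide the combined dma_set_mask_and_coherent() helper:

#include <linux/dma-mapping.h>

static int my_set_dma_masks(struct device *dev)
{
	int err;

	/* Prefer 64-bit addressing for both streaming and coherent DMA. */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (err)
		/* Fall back to 32-bit if the platform cannot do 64-bit. */
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	return err;	/* non-zero: no usable DMA configuration */
}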
static int serial_hsu_plat_port_probe(struct platform_device *pdev)
{
	struct uart_hsu_port *up;
	int port = pdev->id, irq;
	struct resource *mem, *ioarea;
	const struct acpi_device_id *id;
	resource_size_t start, len;

#ifdef CONFIG_ACPI
	for (id = hsu_acpi_ids; id->id[0]; id++)
		if (!strncmp(id->id, dev_name(&pdev->dev), strlen(id->id))) {
			acpi_status status;
			unsigned long long tmp;

			status = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
					"_UID", NULL, &tmp);
			if (ACPI_FAILURE(status))
				return -ENODEV;
			port = tmp - 1;
		}
#endif

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -EINVAL;
	}
	start = mem->start;
	len = resource_size(mem);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return irq; /* -ENXIO */
	}

	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "HSU region already claimed\n");
		return -EBUSY;
	}

	up = serial_hsu_port_setup(&pdev->dev, port, start, len,
			irq);
	if (IS_ERR(up)) {
		release_mem_region(mem->start, resource_size(mem));
		dev_err(&pdev->dev, "failed to setup HSU\n");
		return -EINVAL;
	}

	platform_set_drvdata(pdev, up);

	if (!pdev->dev.dma_mask) {
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}
	dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_allow(&pdev->dev);

	return 0;
}
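
serial_hsu_plat_port_probe() above (like the xhci platform probes later in this listing) has to bootstrap dev->dma_mask before calling dma_set_mask(), because platform devices do not necessarily arrive with that pointer set. A minimal sketch of just that step (the helper name is illustrative):

#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

static int my_plat_init_dma(struct platform_device *pdev)
{
	/* Point dma_mask somewhere valid before asking for a 32-bit mask. */
	if (!pdev->dev.dma_mask) {
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	}

	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}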
int rmnet_mhi_probe(struct platform_device *dev)
{
	int ret = 0, index = 0, cleanup_index = 0;
	struct rmnet_mhi_private *rmnet_mhi_ptr = 0;

	memset(tx_interrupts_count, 0, sizeof(tx_interrupts_count));
	memset(rx_interrupts_count, 0, sizeof(rx_interrupts_count));
	memset(rx_interrupts_in_masked_irq, 0,
	       sizeof(rx_interrupts_in_masked_irq));
	memset(rx_napi_skb_burst_min, 0, sizeof(rx_napi_skb_burst_min));
	memset(rx_napi_skb_burst_max, 0, sizeof(rx_napi_skb_burst_max));
	memset(tx_cb_skb_free_burst_min, 0, sizeof(tx_cb_skb_free_burst_min));
	memset(tx_cb_skb_free_burst_max, 0, sizeof(tx_cb_skb_free_burst_max));
	memset(tx_ring_full_count, 0, sizeof(tx_ring_full_count));
	memset(tx_bounce_buffers_count, 0, sizeof(tx_bounce_buffers_count));
	memset(tx_queued_packets_count, 0, sizeof(tx_queued_packets_count));
	memset(rx_napi_budget_overflow, 0, sizeof(rx_napi_budget_overflow));

	for (index = 0; index < MHI_RMNET_DEVICE_COUNT; index++) {
		mhi_rmnet_devices[index] =
			alloc_netdev(sizeof(struct rmnet_mhi_private),
				     RMNET_MHI_DEV_NAME, rmnet_mhi_setup);
		if (!mhi_rmnet_devices[index]) {
			pr_err("%s: Network device allocation failed",
			       __func__);
			ret = -ENOMEM;
			goto fail;
		}

		rmnet_mhi_ptr = netdev_priv(mhi_rmnet_devices[index]);

		ret = dma_set_mask(&(mhi_rmnet_devices[index]->dev), MHI_DMA_MASK);
		if (0 != ret) {
			/* Not supported for now */
			pr_info("%s: dma_set_mask has failed, error %d",
				__func__, ret);
			rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
		} else {
			/* We can use the DMA flag! */
			rmnet_mhi_ptr->allocation_flags = GFP_DMA;
		}

		rmnet_mhi_ptr->tx_channel = MHI_CLIENT_IP_HW_0_OUT +
				(MHI_CLIENT_CHANNEL)(index * 2);
		rmnet_mhi_ptr->rx_channel = MHI_CLIENT_IP_HW_0_IN +
				(MHI_CLIENT_CHANNEL)((index * 2));
		rmnet_mhi_ptr->tx_client_handle = 0;
		rmnet_mhi_ptr->rx_client_handle = 0;
		rmnet_mhi_ptr->mru = MHI_DEFAULT_MRU;
		rmnet_mhi_ptr->dev_index = index;

		netif_napi_add(mhi_rmnet_devices[index], &(rmnet_mhi_ptr->napi),
			       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);

		ret = register_netdev(mhi_rmnet_devices[index]);
		if (ret) {
			pr_err("%s: Network device registration failed",
			       __func__);
			goto fail;
		}

		rx_napi_skb_burst_min[index] = UINT_MAX;
		tx_cb_skb_free_burst_min[index] = UINT_MAX;
	}
	return 0;

fail:
	for (cleanup_index = 0; cleanup_index <= index; cleanup_index++) {
		if (0 != mhi_rmnet_devices[cleanup_index]) {
			netif_napi_del(&(rmnet_mhi_ptr->napi));
			unregister_netdev(mhi_rmnet_devices[cleanup_index]);
			free_netdev(mhi_rmnet_devices[cleanup_index]);
			mhi_rmnet_devices[cleanup_index] = 0;
		}
	}

	return ret;
}
Example #10
0
static int xhci_plat_probe(struct platform_device *pdev)
{
	struct device_node	*node = pdev->dev.of_node;
	struct usb_xhci_pdata	*pdata = dev_get_platdata(&pdev->dev);
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	struct clk              *clk;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	if (of_device_is_compatible(pdev->dev.of_node,
				    "marvell,armada-375-xhci") ||
	    of_device_is_compatible(pdev->dev.of_node,
				    "marvell,armada-380-xhci")) {
		ret = xhci_mvebu_mbus_init_quirk(pdev);
		if (ret)
			return ret;
	}

	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	else
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	hcd->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto put_hcd;
	}

	/*
	 * Not all platforms have a clk so it is not an error if the
	 * clock does not exist.
	 */
	clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(clk)) {
		ret = clk_prepare_enable(clk);
		if (ret)
			goto put_hcd;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto disable_clk;

	device_wakeup_enable(hcd->self.controller);

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = platform_get_drvdata(pdev);
	xhci = hcd_to_xhci(hcd);
	xhci->clk = clk;
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
			(pdata && pdata->usb3_lpm_capable))
		xhci->quirks |= XHCI_LPM_SUPPORT;
	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
		xhci->shared_hcd->can_do_streams = 1;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

disable_clk:
	if (!IS_ERR(clk))
		clk_disable_unprepare(clk);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
Example #11
0
static struct resource_table * qproc_find_rsc_table(struct rproc *rproc,
						    const struct firmware *fw,
						    int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}

static int qproc_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qproc *qproc = rproc->priv;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading MBA from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	qproc->mba_va = ptr;
	qproc->mba_da = phys;
	qproc->mba_size = fw->size;
	qproc->mba_attrs = attrs;

	return 0;
}

static const struct rproc_fw_ops qproc_fw_ops = {
	.find_rsc_table = qproc_find_rsc_table,
	.load = qproc_load,
	.sanity_check = qproc_sanity_check,
};

static void q6v5proc_reset(struct qproc *qproc)
{
	u32 val;

	/* Assert resets, stop core */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	mb();
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Bring core out of reset */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_CORE_ARES;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Turn on core clock */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_GFMUX_CTL);
	val |= Q6SS_CLK_ENA;

#if 0
	/* Need a different clock source for v5.2.0 */
	if (qproc->qdsp6v5_2_0) {
		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
		val |= Q6SS_CLK_SRC_SEL_C;
	}

#endif
	/* force clock on during source switch */
//	if (qproc->qdsp6v56)
		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;

	writel_relaxed(val, qproc->reg_base + QDSP6SS_GFMUX_CTL);

	/* Start core execution */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_STOP_CORE;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
}
Example #12
0
static int ssb_ohci_attach(struct ssb_device *dev)
{
	struct ssb_ohci_device *ohcidev;
	struct usb_hcd *hcd;
	int err = -ENOMEM;
	u32 tmp, flags = 0;

	if (dma_set_mask(dev->dma_dev, DMA_BIT_MASK(32)) ||
	    dma_set_coherent_mask(dev->dma_dev, DMA_BIT_MASK(32)))
		return -EOPNOTSUPP;

	if (dev->id.coreid == SSB_DEV_USB11_HOSTDEV) {
		/* Put the device into host-mode. */
		flags |= SSB_OHCI_TMSLOW_HOSTMODE;
		ssb_device_enable(dev, flags);
	} else if (dev->id.coreid == SSB_DEV_USB20_HOST) {
		/*
		 * USB 2.0 special considerations:
		 *
		 * In addition to the standard SSB reset sequence, the Host
		 * Control Register must be programmed to bring the USB core
		 * and various phy components out of reset.
		 */
		ssb_device_enable(dev, 0);
		ssb_write32(dev, 0x200, 0x7ff);

		/* Change Flush control reg */
		tmp = ssb_read32(dev, 0x400);
		tmp &= ~8;
		ssb_write32(dev, 0x400, tmp);
		tmp = ssb_read32(dev, 0x400);

		/* Change Shim control reg */
		tmp = ssb_read32(dev, 0x304);
		tmp &= ~0x100;
		ssb_write32(dev, 0x304, tmp);
		tmp = ssb_read32(dev, 0x304);

		udelay(1);

		/* Work around for 5354 failures */
		if (dev->id.revision == 2 && dev->bus->chip_id == 0x5354) {
			/* Change syn01 reg */
			tmp = 0x00fe00fe;
			ssb_write32(dev, 0x894, tmp);

			/* Change syn03 reg */
			tmp = ssb_read32(dev, 0x89c);
			tmp |= 0x1;
			ssb_write32(dev, 0x89c, tmp);
		}
	} else
		ssb_device_enable(dev, 0);

	hcd = usb_create_hcd(&ssb_ohci_hc_driver, dev->dev,
			dev_name(dev->dev));
	if (!hcd)
		goto err_dev_disable;
	ohcidev = hcd_to_ssb_ohci(hcd);
	ohcidev->enable_flags = flags;

	tmp = ssb_read32(dev, SSB_ADMATCH0);
	hcd->rsrc_start = ssb_admatch_base(tmp);
	hcd->rsrc_len = ssb_admatch_size(tmp);
	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs)
		goto err_put_hcd;
	err = usb_add_hcd(hcd, dev->irq, IRQF_DISABLED | IRQF_SHARED);
	if (err)
		goto err_iounmap;

	ssb_set_drvdata(dev, hcd);

	return err;

err_iounmap:
	iounmap(hcd->regs);
err_put_hcd:
	usb_put_hcd(hcd);
err_dev_disable:
	ssb_device_disable(dev, flags);
	return err;
}
Example #13
0
static int atp867x_ata_pci_sff_init_host(struct ata_host *host)
{
	struct device *gdev = host->dev;
	struct pci_dev *pdev = to_pci_dev(gdev);
	unsigned int mask = 0;
	int i, rc;

	/*
	 * do not map rombase
	 */
	rc = pcim_iomap_regions(pdev, 1 << ATP867X_BAR_IOBASE, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;
	host->iomap = pcim_iomap_table(pdev);

#ifdef	ATP867X_DEBUG
	atp867x_check_res(pdev);

	for (i = 0; i < PCI_ROM_RESOURCE; i++)
		printk(KERN_DEBUG "ATP867X: iomap[%d]=0x%llx\n", i,
			(unsigned long long)(host->iomap[i]));
#endif

	/*
	 * request, iomap BARs and init port addresses accordingly
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_ioports *ioaddr = &ap->ioaddr;

		ioaddr->cmd_addr = ATP867X_IO_PORTBASE(ap, i);
		ioaddr->ctl_addr = ioaddr->altstatus_addr
				 = ATP867X_IO_ALTSTATUS(ap, i);
		ioaddr->bmdma_addr = ATP867X_IO_DMABASE(ap, i);

		ata_sff_std_ports(ioaddr);
		rc = atp867x_set_priv(ap);
		if (rc)
			return rc;

#ifdef	ATP867X_DEBUG
		atp867x_check_ports(ap, i);
#endif
		ata_port_desc(ap, "cmd 0x%lx ctl 0x%lx",
			(unsigned long)ioaddr->cmd_addr,
			(unsigned long)ioaddr->ctl_addr);
		ata_port_desc(ap, "bmdma 0x%lx",
			(unsigned long)ioaddr->bmdma_addr);

		mask |= 1 << i;
	}

	if (!mask) {
		dev_err(gdev, "no available native port\n");
		return -ENODEV;
	}

	atp867x_fixup(host);

	rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
	if (rc)
		return rc;

	rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
	return rc;
}
Example #14
0
/* called during probe() after chip reset completes */
static int xhci_pci_setup(struct usb_hcd *hcd)
{
	struct xhci_hcd		*xhci;
	struct pci_dev		*pdev = to_pci_dev(hcd->self.controller);
	int			retval;
	u32			temp;

	hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;

	if (usb_hcd_is_primary_hcd(hcd)) {
		xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
		if (!xhci)
			return -ENOMEM;
		*((struct xhci_hcd **) hcd->hcd_priv) = xhci;
		xhci->main_hcd = hcd;
		/* Mark the first roothub as being USB 2.0.
		 * The xHCI driver will register the USB 3.0 roothub.
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
		/*
		 * USB 2.0 roothub under xHCI has an integrated TT,
		 * (rate matching hub) as opposed to having an OHCI/UHCI
		 * companion controller.
		 */
		hcd->has_tt = 1;
	} else {
		/* xHCI private pointer was set in xhci_pci_probe for the second
		 * registered roothub.
		 */
		xhci = hcd_to_xhci(hcd);
		temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
		if (HCC_64BIT_ADDR(temp)) {
			xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
		} else {
			dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
		}
		return 0;
	}

	xhci->cap_regs = hcd->regs;
	xhci->op_regs = hcd->regs +
		HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
	xhci->run_regs = hcd->regs +
		(xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
	/* Cache read-only capability registers */
	xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
	xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
	xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
	xhci->hci_version = HC_VERSION(xhci->hcc_params);
	xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	xhci_print_registers(xhci);

	/* Look for vendor-specific quirks */
	if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
			(pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK ||
			 pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_FL1400)) {
		if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
				pdev->revision == 0x0) {
			xhci->quirks |= XHCI_RESET_EP_QUIRK;
			xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
					" endpoint cmd after reset endpoint\n");
		}
		/* Fresco Logic confirms: all revisions of this chip do not
		 * support MSI, even though some of them claim to in their PCI
		 * capabilities.
		 */
		xhci->quirks |= XHCI_BROKEN_MSI;
		xhci_dbg(xhci, "QUIRK: Fresco Logic revision %u "
				"has broken MSI implementation\n",
				pdev->revision);
		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
	}

	if (pdev->vendor == PCI_VENDOR_ID_NEC)
		xhci->quirks |= XHCI_NEC_HOST;

	if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
		xhci->quirks |= XHCI_AMD_0x96_HOST;

	/* AMD PLL quirk */
	if (pdev->vendor == PCI_VENDOR_ID_AMD && usb_amd_find_chipset_info())
		xhci->quirks |= XHCI_AMD_PLL_FIX;
	if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
			pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
		xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
		xhci->limit_active_eps = 64;
		/*
		 * PPT desktop boards DH77EB and DH77DF will power back on after
		 * a few seconds of being shutdown.  The fix for this is to
		 * switch the ports from xHCI to EHCI on shutdown.  We can't use
		 * DMI information to find those particular boards (since each
		 * vendor will change the board name), so we have to key off all
		 * PPT chipsets.
		 */
		xhci->quirks |= XHCI_SPURIOUS_REBOOT;
		xhci->quirks |= XHCI_AVOID_BEI;
	}
	if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
			pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
		xhci->quirks |= XHCI_RESET_ON_RESUME;
		xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
		xhci->quirks |= XHCI_BROKEN_STREAMS;
	}
	if (pdev->vendor == PCI_VENDOR_ID_VIA)
		xhci->quirks |= XHCI_RESET_ON_RESUME;

	if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
			pdev->device == 0x1042)
		xhci->quirks |= XHCI_BROKEN_STREAMS;

	/* xHCI controllers that follow the xHCI 1.0 spec give a spurious
	 * success event after a short transfer. This quirk ignores such
	 * spurious events.
	 */
	if (xhci->hci_version > 0x96)
		xhci->quirks |= XHCI_SPURIOUS_SUCCESS;

	/* Make sure the HC is halted. */
	retval = xhci_halt(xhci);
	if (retval)
		goto error;

	xhci_dbg(xhci, "Resetting HCD\n");
	/* Reset the internal HC memory state and registers. */
	retval = xhci_reset(xhci);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Reset complete\n");

	temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
	if (HCC_64BIT_ADDR(temp)) {
		xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
	} else {
		dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
	}

	xhci_dbg(xhci, "Calling HCD init\n");
	/* Initialize HCD and host controller data structures. */
	retval = xhci_init(hcd);
	if (retval)
		goto error;
	xhci_dbg(xhci, "Called HCD init\n");

	pci_read_config_byte(pdev, XHCI_SBRN_OFFSET, &xhci->sbrn);
	xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);

	/* Find any debug ports */
	retval = xhci_pci_reinit(xhci, pdev);
	if (!retval)
		return retval;

error:
	kfree(xhci);
	return retval;
}
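
xhci_pci_setup() above widens the DMA mask to 64 bits only when the controller's HCC_64BIT_ADDR capability bit is set. The decision reduced to a sketch (the boolean parameter stands in for the capability-register read, which is not reproduced here):

#include <linux/dma-mapping.h>

static int my_pick_xhci_dma_mask(struct device *dev, bool hw_is_64bit_capable)
{
	/* Only request 64-bit DMA when the controller advertises it. */
	if (hw_is_64bit_capable)
		return dma_set_mask(dev, DMA_BIT_MASK(64));

	return dma_set_mask(dev, DMA_BIT_MASK(32));
}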
static int xhci_plat_probe(struct platform_device *pdev)
{
	struct device_node	*node = pdev->dev.of_node;
	struct usb_xhci_pdata	*pdata = dev_get_platdata(&pdev->dev);
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
	ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;
	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
	else
		dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;
	device_wakeup_enable(hcd->self.controller);

	/* USB 2.0 roothub is stored in the platform_device now. */
	hcd = platform_get_drvdata(pdev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	if ((node && of_property_read_bool(node, "usb3-lpm-capable")) ||
			(pdata && pdata->usb3_lpm_capable))
		xhci->quirks |= XHCI_LPM_SUPPORT;
	/*
	 * Set the xHCI pointer before xhci_plat_setup() (aka hcd_driver.reset)
	 * is called by usb_add_hcd().
	 */
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
Example #16
0
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;
	struct gen_pool *pool = NULL;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have a couple of channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdev->base = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(tdev->base))
		return PTR_ERR(tdev->base);

	INIT_LIST_HEAD(&tdev->device.channels);

	if (pdev->dev.of_node)
		pool = of_gen_pool_get(pdev->dev.of_node, "asram", 0);
	else
		pool = sram_get_gpool("asram");
	if (!pool) {
		dev_err(&pdev->dev, "asram pool not available\n");
		return -ENOMEM;
	}

	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, 0, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type, pool);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_config = mmp_tdma_config;
	tdev->device.device_pause = mmp_tdma_pause_chan;
	tdev->device.device_resume = mmp_tdma_resume_chan;
	tdev->device.device_terminate_all = mmp_tdma_terminate_all;
	tdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dma_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	if (pdev->dev.of_node) {
		ret = of_dma_controller_register(pdev->dev.of_node,
							mmp_tdma_xlate, tdev);
		if (ret) {
			dev_err(tdev->device.dev,
				"failed to register controller\n");
			dma_async_device_unregister(&tdev->device);
		}
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
			 GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobR's regs.
	 * The driver has already iomapped the entire space; we just
	 * need to add in the offset to this JobR. Don't know if I
	 * like this long-term, but it'll run.
	 */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected queue */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}

	jrpriv->jr_pdev = jr_pdev;
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	if (sizeof(dma_addr_t) == sizeof(u64))
		if (of_device_is_compatible(np, "fsl,sec-v5.0-job-ring"))
			dma_set_mask(jrdev, DMA_BIT_MASK(40));
		else
			dma_set_mask(jrdev, DMA_BIT_MASK(36));
	else
		dma_set_mask(jrdev, DMA_BIT_MASK(32));

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(np, 0);

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		of_device_unregister(jr_pdev);
		kfree(jrpriv);
		return error;
	}

	return error;
}
Example #18
0
static int mmp_tdma_probe(struct platform_device *pdev)
{
	enum mmp_tdma_type type;
	const struct of_device_id *of_id;
	struct mmp_tdma_device *tdev;
	struct resource *iores;
	int i, ret;
	int irq = 0, irq_num = 0;
	int chan_num = TDMA_CHANNEL_NUM;

	of_id = of_match_device(mmp_tdma_dt_ids, &pdev->dev);
	if (of_id)
		type = (enum mmp_tdma_type) of_id->data;
	else
		type = platform_get_device_id(pdev)->driver_data;

	/* always have a couple of channels */
	tdev = devm_kzalloc(&pdev->dev, sizeof(*tdev), GFP_KERNEL);
	if (!tdev)
		return -ENOMEM;

	tdev->dev = &pdev->dev;

	for (i = 0; i < chan_num; i++) {
		if (platform_get_irq(pdev, i) > 0)
			irq_num++;
	}

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	tdev->base = devm_request_and_ioremap(&pdev->dev, iores);
	if (!tdev->base)
		return -EADDRNOTAVAIL;

	INIT_LIST_HEAD(&tdev->device.channels);

	if (irq_num != chan_num) {
		irq = platform_get_irq(pdev, 0);
		ret = devm_request_irq(&pdev->dev, irq,
			mmp_tdma_int_handler, IRQF_DISABLED, "tdma", tdev);
		if (ret)
			return ret;
	}

	/* initialize channel parameters */
	for (i = 0; i < chan_num; i++) {
		irq = (irq_num != chan_num) ? 0 : platform_get_irq(pdev, i);
		ret = mmp_tdma_chan_init(tdev, i, irq, type);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, tdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdev->device.cap_mask);
	tdev->device.dev = &pdev->dev;
	tdev->device.device_alloc_chan_resources =
					mmp_tdma_alloc_chan_resources;
	tdev->device.device_free_chan_resources =
					mmp_tdma_free_chan_resources;
	tdev->device.device_prep_dma_cyclic = mmp_tdma_prep_dma_cyclic;
	tdev->device.device_tx_status = mmp_tdma_tx_status;
	tdev->device.device_issue_pending = mmp_tdma_issue_pending;
	tdev->device.device_control = mmp_tdma_control;
	tdev->device.copy_align = TDMA_ALIGNMENT;

	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
	platform_set_drvdata(pdev, tdev);

	ret = dma_async_device_register(&tdev->device);
	if (ret) {
		dev_err(tdev->device.dev, "unable to register\n");
		return ret;
	}

	dev_info(tdev->device.dev, "initialized\n");
	return 0;
}
Example #19
0
/**
 * mei_me_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_me_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering  */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) ||
	    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			err = dma_set_coherent_mask(&pdev->dev,
						    DMA_BIT_MASK(32));
	}
	if (err) {
		dev_err(&pdev->dev, "No usable DMA configuration, aborting\n");
		goto release_regions;
	}


	/* allocates and initializes the mei dev structure */
	dev = mei_me_dev_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	hw = to_me_hw(dev);
	/* mapping  IO device memory */
	hw->mem_addr = pci_iomap(pdev, 0, 0);
	if (!hw->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	 /* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_me_irq_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_me_irq_quick_handler,
			mei_me_irq_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
		       pdev->irq);
		goto disable_msi;
	}

	if (mei_start(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = mei_register(dev);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);

	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	dev_dbg(&pdev->dev, "initialization successful.\n");

	return 0;

release_irq:
	mei_cancel_work(dev);
	mei_disable_interrupts(dev);
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, hw->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}
Example #20
0
static int xhci_mtk_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	struct xhci_hcd_mtk *mtk;
	const struct hc_driver *driver;
	struct xhci_hcd *xhci;
	struct resource *res;
	struct usb_hcd *hcd;
	struct phy *phy;
	int phy_num;
	int ret = -ENODEV;
	int irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_mtk_hc_driver;
	mtk = devm_kzalloc(dev, sizeof(*mtk), GFP_KERNEL);
	if (!mtk)
		return -ENOMEM;

	mtk->dev = dev;
	mtk->vbus = devm_regulator_get(dev, "vbus");
	if (IS_ERR(mtk->vbus)) {
		dev_err(dev, "fail to get vbus\n");
		return PTR_ERR(mtk->vbus);
	}

	mtk->vusb33 = devm_regulator_get(dev, "vusb33");
	if (IS_ERR(mtk->vusb33)) {
		dev_err(dev, "fail to get vusb33\n");
		return PTR_ERR(mtk->vusb33);
	}

	mtk->sys_clk = devm_clk_get(dev, "sys_ck");
	if (IS_ERR(mtk->sys_clk)) {
		dev_err(dev, "fail to get sys_ck\n");
		return PTR_ERR(mtk->sys_clk);
	}

	mtk->lpm_support = of_property_read_bool(node, "usb3-lpm-capable");

	ret = usb_wakeup_of_property_parse(mtk, node);
	if (ret)
		return ret;

	mtk->num_phys = of_count_phandle_with_args(node,
			"phys", "#phy-cells");
	if (mtk->num_phys > 0) {
		mtk->phys = devm_kcalloc(dev, mtk->num_phys,
					sizeof(*mtk->phys), GFP_KERNEL);
		if (!mtk->phys)
			return -ENOMEM;
	} else {
		mtk->num_phys = 0;
	}
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	device_enable_async_suspend(dev);

	ret = xhci_mtk_ldos_enable(mtk);
	if (ret)
		goto disable_pm;

	ret = xhci_mtk_clks_enable(mtk);
	if (ret)
		goto disable_ldos;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto disable_clk;

	/* Initialize dma_mask and coherent_dma_mask to 32-bits */
	ret = dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
	if (ret)
		goto disable_clk;

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;
	else
		dma_set_mask(dev, DMA_BIT_MASK(32));

	hcd = usb_create_hcd(driver, dev, dev_name(dev));
	if (!hcd) {
		ret = -ENOMEM;
		goto disable_clk;
	}

	/*
	 * USB 2.0 roothub is stored in the platform_device.
	 * Swap it with mtk HCD.
	 */
	mtk->hcd = platform_get_drvdata(pdev);
	platform_set_drvdata(pdev, mtk);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hcd->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hcd->regs)) {
		ret = PTR_ERR(hcd->regs);
		goto put_usb2_hcd;
	}
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	mtk->ippc_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(mtk->ippc_regs)) {
		ret = PTR_ERR(mtk->ippc_regs);
		goto put_usb2_hcd;
	}

	for (phy_num = 0; phy_num < mtk->num_phys; phy_num++) {
		phy = devm_of_phy_get_by_index(dev, node, phy_num);
		if (IS_ERR(phy)) {
			ret = PTR_ERR(phy);
			goto put_usb2_hcd;
		}
		mtk->phys[phy_num] = phy;
	}

	ret = xhci_mtk_phy_init(mtk);
	if (ret)
		goto put_usb2_hcd;

	ret = xhci_mtk_phy_power_on(mtk);
	if (ret)
		goto exit_phys;

	device_init_wakeup(dev, true);

	xhci = hcd_to_xhci(hcd);
	xhci->main_hcd = hcd;
	xhci->shared_hcd = usb_create_shared_hcd(driver, dev,
			dev_name(dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto power_off_phys;
	}

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	/* hcc_params is read by the xHCI core while usb_add_hcd() sets up
	 * the primary HCD, so check it only after that call succeeds.
	 */
	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
		xhci->shared_hcd->can_do_streams = 1;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto dealloc_usb2_hcd;

	return 0;

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

put_usb3_hcd:
	xhci_mtk_sch_exit(mtk);
	usb_put_hcd(xhci->shared_hcd);

power_off_phys:
	xhci_mtk_phy_power_off(mtk);
	device_init_wakeup(dev, false);

exit_phys:
	xhci_mtk_phy_exit(mtk);

put_usb2_hcd:
	usb_put_hcd(hcd);

disable_clk:
	xhci_mtk_clks_disable(mtk);

disable_ldos:
	xhci_mtk_ldos_disable(mtk);

disable_pm:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
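A platform probe such as xhci_mtk_probe() is normally wired into the kernel through a struct platform_driver. The following is only a sketch of that registration; the compatible string and the xhci_mtk_remove() callback name are assumptions, not taken from the example above.

/* Sketch only: typical registration for a platform probe like
 * xhci_mtk_probe(). The compatible string and xhci_mtk_remove()
 * are assumed, not shown in the example above.
 */
static const struct of_device_id mtk_xhci_of_match[] = {
	{ .compatible = "mediatek,mt8173-xhci" },
	{ },
};
MODULE_DEVICE_TABLE(of, mtk_xhci_of_match);

static struct platform_driver mtk_xhci_driver = {
	.probe  = xhci_mtk_probe,
	.remove = xhci_mtk_remove,
	.driver = {
		.name = "xhci-mtk",
		.of_match_table = of_match_ptr(mtk_xhci_of_match),
	},
};
module_platform_driver(mtk_xhci_driver);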
static int
ahd_linux_pci_dev_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	char		 buf[80];
	struct		 ahd_softc *ahd;
	ahd_dev_softc_t	 pci;
	const struct ahd_pci_identity *entry;
	char		*name;
	int		 error;
	struct device	*dev = &pdev->dev;

	pci = pdev;
	entry = ahd_find_pci_device(pci);
	if (entry == NULL)
		return (-ENODEV);

	/*
	 * Allocate a softc for this card and
	 * set it up for attachment by our
	 * common detect routine.
	 */
	sprintf(buf, "ahd_pci:%d:%d:%d",
		ahd_get_pci_bus(pci),
		ahd_get_pci_slot(pci),
		ahd_get_pci_function(pci));
	name = malloc(strlen(buf) + 1, M_DEVBUF, M_NOWAIT);
	if (name == NULL)
		return (-ENOMEM);
	strcpy(name, buf);
	ahd = ahd_alloc(NULL, name);
	if (ahd == NULL)
		return (-ENOMEM);
	if (pci_enable_device(pdev)) {
		ahd_free(ahd);
		return (-ENODEV);
	}
	pci_set_master(pdev);

	if (sizeof(dma_addr_t) > 4) {
		const u64 required_mask = dma_get_required_mask(dev);

		if (required_mask > DMA_BIT_MASK(39) &&
		    dma_set_mask(dev, DMA_BIT_MASK(64)) == 0)
			ahd->flags |= AHD_64BIT_ADDRESSING;
		else if (required_mask > DMA_BIT_MASK(32) &&
			 dma_set_mask(dev, DMA_BIT_MASK(39)) == 0)
			ahd->flags |= AHD_39BIT_ADDRESSING;
		else
			dma_set_mask(dev, DMA_BIT_MASK(32));
	} else {
		dma_set_mask(dev, DMA_BIT_MASK(32));
	}
	ahd->dev_softc = pci;
	error = ahd_pci_config(ahd, entry);
	if (error != 0) {
		ahd_free(ahd);
		return (-error);
	}

	/*
	 * Second Function PCI devices need to inherit some
	 * settings from function 0.
	 */
	if ((ahd->features & AHD_MULTI_FUNC) && PCI_FUNC(pdev->devfn) != 0)
		ahd_linux_pci_inherit_flags(ahd);

	pci_set_drvdata(pdev, ahd);

	ahd_linux_register_host(ahd, &aic79xx_driver_template);
	return (0);
}
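Probe functions like ahd_linux_pci_dev_probe() are hooked up through a struct pci_driver. A minimal sketch of that registration follows; the ID table entry and the ahd_linux_pci_dev_remove() name are assumptions, not part of the example above.

/* Sketch only: typical PCI driver registration for a probe such as
 * ahd_linux_pci_dev_probe(). The ID table entry and the remove
 * callback name are assumptions.
 */
static const struct pci_device_id ahd_linux_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, PCI_ANY_ID) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ahd_linux_pci_id_table);

static struct pci_driver aic79xx_pci_driver = {
	.name     = "aic79xx",
	.probe    = ahd_linux_pci_dev_probe,
	.remove   = ahd_linux_pci_dev_remove,
	.id_table = ahd_linux_pci_id_table,
};
module_pci_driver(aic79xx_pci_driver);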
int denali_init(struct denali_nand_info *denali)
{
	int ret;

	if (denali->platform == INTEL_CE4100) {
		/* Due to a silicon limitation, we can only support
		 * ONFI timing mode 1 and below.
		 */
		if (onfi_timing_mode < -1 || onfi_timing_mode > 1) {
			pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n");
			return -EINVAL;
		}
	}

	/* Is 32-bit DMA supported? */
	ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32));
	if (ret) {
		pr_err("Spectra: no usable DMA configuration\n");
		return ret;
	}
	denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf,
					     DENALI_BUF_SIZE,
					     DMA_BIDIRECTIONAL);

	if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) {
		dev_err(denali->dev, "Spectra: failed to map DMA buffer\n");
		return -EIO;
	}
	denali->mtd.dev.parent = denali->dev;
	denali_hw_init(denali);
	denali_drv_init(denali);

	/* denali_isr registration is done after all the hardware
	 * initialization is finished */
	if (request_irq(denali->irq, denali_isr, IRQF_SHARED,
			DENALI_NAND_NAME, denali)) {
		pr_err("Spectra: Unable to allocate IRQ\n");
		return -ENODEV;
	}

	/* now that our ISR is registered, we can enable interrupts */
	denali_set_intr_modes(denali, true);
	denali->mtd.name = "denali-nand";
	denali->mtd.owner = THIS_MODULE;
	denali->mtd.priv = &denali->nand;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.waitfunc = denali_waitfunc;

	/* scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem */
	if (nand_scan_ident(&denali->mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto failed_req_irq;
	}

	/* MTD supported page sizes vary by kernel. We validate our
	 * kernel supports the device here.
	 */
	if (denali->mtd.writesize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE) {
		ret = -ENODEV;
		pr_err("Spectra: device size not supported by this version of MTD.");
		goto failed_req_irq;
	}

	/* Support for multiple NAND devices:
	 * MTD knows nothing about multiple NAND devices,
	 * so we must tell it the real page size
	 * and anything else necessary.
	 */
	denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED);
	denali->nand.chipsize <<= (denali->devnum - 1);
	denali->nand.page_shift += (denali->devnum - 1);
	denali->nand.pagemask = (denali->nand.chipsize >>
						denali->nand.page_shift) - 1;
	denali->nand.bbt_erase_shift += (denali->devnum - 1);
	denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift;
	denali->nand.chip_shift += (denali->devnum - 1);
	denali->mtd.writesize <<= (denali->devnum - 1);
	denali->mtd.oobsize <<= (denali->devnum - 1);
	denali->mtd.erasesize <<= (denali->devnum - 1);
	denali->mtd.size = denali->nand.numchips * denali->nand.chipsize;
	denali->bbtskipbytes *= denali->devnum;

	/* second stage of the NAND scan
	 * this stage requires information regarding ECC and
	 * bad block management. */

	/* Bad block management */
	denali->nand.bbt_td = &bbt_main_descr;
	denali->nand.bbt_md = &bbt_mirror_descr;

	/* skip the scan for now until we have OOB read and write support */
	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
	denali->nand.options |= NAND_SKIP_BBTSCAN;
	denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;

	/* The Denali controller only supports 15-bit and 8-bit ECC on MRST,
	 * so just let the controller do 15-bit ECC for MLC and 8-bit ECC
	 * for SLC if possible.
	 */
	if (denali->nand.cellinfo & 0xc &&
			(denali->mtd.oobsize > (denali->bbtskipbytes +
			ECC_15BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE)))) {
		/* if MLC OOB size is large enough, use 15bit ECC*/
		denali->nand.ecc.strength = 15;
		denali->nand.ecc.layout = &nand_15bit_oob;
		denali->nand.ecc.bytes = ECC_15BITS;
		iowrite32(15, denali->flash_reg + ECC_CORRECTION);
	} else if (denali->mtd.oobsize < (denali->bbtskipbytes +
			ECC_8BITS * (denali->mtd.writesize /
			ECC_SECTOR_SIZE))) {
		pr_err("Your NAND chip OOB is not large enough to \
				contain 8bit ECC correction codes");
		goto failed_req_irq;
	} else {
Example #23
static int snd_als300_create(struct snd_card *card,
			     struct pci_dev *pci, int chip_type,
			     struct snd_als300 **rchip)
{
	struct snd_als300 *chip;
	void *irq_handler;
	int err;

	static struct snd_device_ops ops = {
		.dev_free = snd_als300_dev_free,
	};
	*rchip = NULL;

	if ((err = pci_enable_device(pci)) < 0)
		return err;

	if (dma_set_mask(&pci->dev, DMA_BIT_MASK(28)) < 0 ||
		dma_set_coherent_mask(&pci->dev, DMA_BIT_MASK(28)) < 0) {
		dev_err(card->dev, "error setting 28bit DMA mask\n");
		pci_disable_device(pci);
		return -ENXIO;
	}
	pci_set_master(pci);

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		pci_disable_device(pci);
		return -ENOMEM;
	}

	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;
	chip->chip_type = chip_type;
	spin_lock_init(&chip->reg_lock);

	if ((err = pci_request_regions(pci, "ALS300")) < 0) {
		kfree(chip);
		pci_disable_device(pci);
		return err;
	}
	chip->port = pci_resource_start(pci, 0);

	if (chip->chip_type == DEVICE_ALS300_PLUS)
		irq_handler = snd_als300plus_interrupt;
	else
		irq_handler = snd_als300_interrupt;

	if (request_irq(pci->irq, irq_handler, IRQF_SHARED,
			KBUILD_MODNAME, chip)) {
		dev_err(card->dev, "unable to grab IRQ %d\n", pci->irq);
		snd_als300_free(chip);
		return -EBUSY;
	}
	chip->irq = pci->irq;


	snd_als300_init(chip);

	err = snd_als300_ac97(chip);
	if (err < 0) {
		dev_err(card->dev, "Could not create ac97\n");
		snd_als300_free(chip);
		return err;
	}

	if ((err = snd_als300_new_pcm(chip)) < 0) {
		dev_err(card->dev, "Could not create PCM\n");
		snd_als300_free(chip);
		return err;
	}

	if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL,
						chip, &ops)) < 0) {
		snd_als300_free(chip);
		return err;
	}

	*rchip = chip;
	return 0;
}
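The two separate dma_set_mask() and dma_set_coherent_mask() calls in snd_als300_create() can be collapsed into a single call on kernels that provide dma_set_mask_and_coherent(). The fragment below is a sketch of that drop-in variant inside the same function, assuming the same 28-bit limit.

	/* Equivalent of the two mask calls above on kernels that provide
	 * the combined helper.
	 */
	if (dma_set_mask_and_coherent(&pci->dev, DMA_BIT_MASK(28))) {
		dev_err(card->dev, "error setting 28bit DMA mask\n");
		pci_disable_device(pci);
		return -ENXIO;
	}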
Example #24
/* called during probe() after chip reset completes */
static int ubi32_xhci_drv_setup(struct usb_hcd *hcd)
{
    struct xhci_hcd		*xhci;
    int			retval;
    u32			temp;

    hcd->self.sg_tablesize = TRBS_PER_SEGMENT - 2;

    if (usb_hcd_is_primary_hcd(hcd)) {
        xhci = kzalloc(sizeof(struct xhci_hcd), GFP_KERNEL);
        if (!xhci)
            return -ENOMEM;
        *((struct xhci_hcd **) hcd->hcd_priv) = xhci;
        xhci->main_hcd = hcd;
        /* Mark the first roothub as being USB 2.0.
         * The xHCI driver will register the USB 3.0 roothub.
         */
        hcd->speed = HCD_USB2;
        hcd->self.root_hub->speed = USB_SPEED_HIGH;
        /*
         * USB 2.0 roothub under xHCI has an integrated TT,
         * (rate matching hub) as opposed to having an OHCI/UHCI
         * companion controller.
         */
        hcd->has_tt = 1;
    } else {
        /* xHCI private pointer was set in xhci_pci_probe for the second
         * registered roothub.
         */
        xhci = hcd_to_xhci(hcd);
        temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
        if (HCC_64BIT_ADDR(temp)) {
            xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
            dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
        } else {
            dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
        }
        return 0;
    }

    xhci->cap_regs = hcd->regs;
    xhci->op_regs = hcd->regs +
                    HC_LENGTH(xhci_readl(xhci, &xhci->cap_regs->hc_capbase));
    xhci->run_regs = hcd->regs +
                     (xhci_readl(xhci, &xhci->cap_regs->run_regs_off) & RTSOFF_MASK);
    /* Cache read-only capability registers */
    xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
    xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
    xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
    xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
    xhci->hci_version = HC_VERSION(xhci->hcc_params);
    xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
    xhci_print_registers(xhci);

    /* Make sure the HC is halted. */
    retval = xhci_halt(xhci);
    if (retval)
        goto error;

    xhci_dbg(xhci, "Resetting HCD\n");
    /* Reset the internal HC memory state and registers. */
    retval = xhci_reset(xhci);
    if (retval)
        goto error;
    xhci_dbg(xhci, "Reset complete\n");

    temp = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
    if (HCC_64BIT_ADDR(temp)) {
        xhci_dbg(xhci, "Enabling 64-bit DMA addresses.\n");
        dma_set_mask(hcd->self.controller, DMA_BIT_MASK(64));
    } else {
        dma_set_mask(hcd->self.controller, DMA_BIT_MASK(32));
    }

    xhci_dbg(xhci, "Calling HCD init\n");
    /* Initialize HCD and host controller data structures. */
    retval = xhci_init(hcd);
    if (retval)
        goto error;
    xhci_dbg(xhci, "Called HCD init\n");

    if (!retval)
        return retval;
error:
    kfree(xhci);
    return retval;
}
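A setup routine such as ubi32_xhci_drv_setup() is normally handed to the USB core through the .reset hook of a struct hc_driver, so it runs from within usb_add_hcd(). The sketch below shows only the fields relevant to that path; the structure name, the flags, and the other xhci callbacks listed are assumptions here, not taken from the example above.

/* Sketch only: wiring a setup routine into the USB core. Field values
 * other than .reset are assumptions; remaining callbacks are omitted.
 */
static const struct hc_driver ubi32_xhci_hc_driver = {
	.description   = "ubi32-xhci",
	.product_desc  = "Ubicom32 xHCI Host Controller",
	.hcd_priv_size = sizeof(struct xhci_hcd *),
	.flags         = HCD_MEMORY | HCD_USB3,
	.reset         = ubi32_xhci_drv_setup,	/* called from usb_add_hcd() */
	.start         = xhci_run,
	.stop          = xhci_stop,
	/* urb_enqueue, hub_control, etc. omitted from this sketch */
};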