Example #1
/**
 * zynq_gpio_probe - Initialization method for a zynq_gpio device
 * @pdev:	platform device instance
 *
 * This function allocates memory resources for the gpio device and registers
 * all the banks of the device. It will also set up interrupts for the gpio
 * pins.
 * Note: Interrupts are disabled for all the banks during initialization.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int zynq_gpio_probe(struct platform_device *pdev)
{
	int ret, pin_num, bank_num, gpio_irq;
	unsigned int irq_num;
	struct zynq_gpio *gpio;
	struct gpio_chip *chip;
	struct resource *res;

	gpio = devm_kzalloc(&pdev->dev, sizeof(*gpio), GFP_KERNEL);
	if (!gpio)
		return -ENOMEM;

	platform_set_drvdata(pdev, gpio);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	gpio->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(gpio->base_addr))
		return PTR_ERR(gpio->base_addr);

	irq_num = platform_get_irq(pdev, 0);
	gpio->irq = irq_num;

	/* configure the gpio chip */
	chip = &gpio->chip;
	chip->label = "zynq_gpio";
	chip->owner = THIS_MODULE;
	chip->dev = &pdev->dev;
	chip->get = zynq_gpio_get_value;
	chip->set = zynq_gpio_set_value;
	chip->request = zynq_gpio_request;
	chip->free = zynq_gpio_free;
	chip->direction_input = zynq_gpio_dir_in;
	chip->direction_output = zynq_gpio_dir_out;
	chip->to_irq = zynq_gpio_to_irq;
	chip->dbg_show = NULL;
	chip->base = 0;		/* default pin base */
	chip->ngpio = ZYNQ_GPIO_NR_GPIOS;
	chip->can_sleep = 0;

	gpio->irq_base = irq_alloc_descs(-1, 0, chip->ngpio, 0);
	if (gpio->irq_base < 0) {
		dev_err(&pdev->dev, "Couldn't allocate IRQ numbers\n");
		return -ENODEV;
	}

	irq_domain = irq_domain_add_legacy(pdev->dev.of_node,
					   chip->ngpio, gpio->irq_base, 0,
					   &irq_domain_simple_ops, NULL);

	/* bail out if gpio chip registration fails */
	ret = gpiochip_add(chip);
	if (ret < 0)
		return ret;

	/* Enable GPIO clock */
	gpio->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(gpio->clk)) {
		dev_err(&pdev->dev, "input clock not found.\n");
		if (gpiochip_remove(chip))
			dev_err(&pdev->dev, "Failed to remove gpio chip\n");
		return PTR_ERR(gpio->clk);
	}
	ret = clk_prepare_enable(gpio->clk);
	if (ret) {
		dev_err(&pdev->dev, "Unable to enable clock.\n");
		if (gpiochip_remove(chip))
			dev_err(&pdev->dev, "Failed to remove gpio chip\n");
		return ret;
	}

	/* disable interrupts for all banks */
	for (bank_num = 0; bank_num < ZYNQ_GPIO_MAX_BANK; bank_num++) {
		zynq_gpio_writereg(gpio->base_addr +
				   ZYNQ_GPIO_INTDIS_OFFSET(bank_num),
				   ZYNQ_GPIO_IXR_DISABLE_ALL);
	}

	/*
	 * set the irq chip, handler and irq chip data for callbacks for
	 * each pin
	 */
	for (pin_num = 0; pin_num < min_t(int, ZYNQ_GPIO_NR_GPIOS,
					  (int)chip->ngpio); pin_num++) {
		gpio_irq = irq_find_mapping(irq_domain, pin_num);
		irq_set_chip_and_handler(gpio_irq, &zynq_gpio_irqchip,
					 handle_simple_irq);
		irq_set_chip_data(gpio_irq, (void *)gpio);
		set_irq_flags(gpio_irq, IRQF_VALID);
	}

	irq_set_handler_data(irq_num, (void *)gpio);
	irq_set_chained_handler(irq_num, zynq_gpio_irqhandler);

	pm_runtime_enable(&pdev->dev);

	device_set_wakeup_capable(&pdev->dev, 1);

	return 0;
}
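The probe above acquires its resources in a fixed order: drvdata, mapped registers, IRQ descriptors and domain, gpiochip, clock, chained handler, runtime PM. A matching remove callback would undo that work in roughly the reverse order. The sketch below is only an illustration inferred from the fields this probe sets up, not the driver's actual remove implementation (IRQ descriptor and domain teardown is omitted for brevity).

static int zynq_gpio_remove(struct platform_device *pdev)
{
	struct zynq_gpio *gpio = platform_get_drvdata(pdev);
	int ret;

	device_set_wakeup_capable(&pdev->dev, 0);
	pm_runtime_disable(&pdev->dev);
	clk_disable_unprepare(gpio->clk);
	ret = gpiochip_remove(&gpio->chip);	/* returns int on this kernel vintage */
	if (ret)
		dev_err(&pdev->dev, "Failed to remove gpio chip\n");
	return ret;
}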
Example #2
static int xiic_i2c_probe(struct platform_device *pdev)
{
	struct xiic_i2c *i2c;
	struct xiic_i2c_platform_data *pdata;
	struct resource *res;
	int ret, irq;
	u8 i;
	u32 sr;

	i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
	if (!i2c)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	i2c->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(i2c->base))
		return PTR_ERR(i2c->base);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	pdata = dev_get_platdata(&pdev->dev);

	/* hook up driver to tree */
	platform_set_drvdata(pdev, i2c);
	i2c->adap = xiic_adapter;
	i2c_set_adapdata(&i2c->adap, i2c);
	i2c->adap.dev.parent = &pdev->dev;
	i2c->adap.dev.of_node = pdev->dev.of_node;

	mutex_init(&i2c->lock);
	init_waitqueue_head(&i2c->wait);

	ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
					xiic_process, IRQF_ONESHOT,
					pdev->name, i2c);

	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot claim IRQ\n");
		return ret;
	}

	/*
	 * Detect endianness
	 * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
	 * set, assume that the endianness was wrong and swap.
	 */
	i2c->endianness = LITTLE;
	xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
	/* Reset is cleared in xiic_reinit */
	sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
	if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
		i2c->endianness = BIG;

	xiic_reinit(i2c);

	/* add i2c adapter to i2c tree */
	ret = i2c_add_adapter(&i2c->adap);
	if (ret) {
		dev_err(&pdev->dev, "Failed to add adapter\n");
		xiic_deinit(i2c);
		return ret;
	}

	if (pdata) {
		/* add in known devices to the bus */
		for (i = 0; i < pdata->num_devices; i++)
			i2c_new_device(&i2c->adap, pdata->devices + i);
	}

	return 0;
}
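The endianness probe above only stores a flag; it pays off in the register accessors. The helpers below are a sketch of how xiic_setreg32()/xiic_getreg32() could honour that flag (the field and constant names follow this probe, but the bodies are an assumption rather than the driver's exact implementation).

static void xiic_setreg32(struct xiic_i2c *i2c, int reg, u32 value)
{
	if (i2c->endianness == LITTLE)
		iowrite32(value, i2c->base + reg);
	else
		iowrite32be(value, i2c->base + reg);
}

static u32 xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
	if (i2c->endianness == LITTLE)
		return ioread32(i2c->base + reg);
	return ioread32be(i2c->base + reg);
}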
Example #3
static int __devinit esp_jazz_probe(struct platform_device *dev)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	struct resource *res;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = 8;
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = dev;
	esp->ops = &jazz_esp_ops;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		goto fail_unlink;

	esp->regs = (void __iomem *)res->start;
	if (!esp->regs)
		goto fail_unlink;

	res = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!res)
		goto fail_unlink;

	esp->dma_regs = (void __iomem *)res->start;

	esp->command_block = dma_alloc_coherent(esp->dev, 16,
						&esp->command_block_dma,
						GFP_KERNEL);
	if (!esp->command_block)
		goto fail_unmap_regs;

	host->irq = platform_get_irq(dev, 0);
	err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp->scsi_id = 7;
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
	esp->cfreq = 40000000;

	dev_set_drvdata(&dev->dev, esp);

	err = scsi_esp_register(esp, &dev->dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(esp->dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
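	/* nothing to unmap here: regs and dma_regs were taken straight from the
	 * resource start addresses rather than ioremap()ed, so this label just
	 * falls through to the host teardown below */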
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}
Example #4
static int __init msm_otg_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res;
	struct msm_otg *motg;
	struct otg_transceiver *otg;

	dev_info(&pdev->dev, "msm_otg probe\n");
	if (!pdev->dev.platform_data) {
		dev_err(&pdev->dev, "No platform data given. Bailing out\n");
		return -ENODEV;
	}

	motg = kzalloc(sizeof(struct msm_otg), GFP_KERNEL);
	if (!motg) {
		dev_err(&pdev->dev, "unable to allocate msm_otg\n");
		return -ENOMEM;
	}

	motg->pdata = pdev->dev.platform_data;
	otg = &motg->otg;
	otg->dev = &pdev->dev;

	motg->phy_reset_clk = clk_get(&pdev->dev, "usb_phy_clk");
	if (IS_ERR(motg->phy_reset_clk)) {
		dev_err(&pdev->dev, "failed to get usb_phy_clk\n");
		ret = PTR_ERR(motg->phy_reset_clk);
		goto free_motg;
	}

	motg->clk = clk_get(&pdev->dev, "usb_hs_clk");
	if (IS_ERR(motg->clk)) {
		dev_err(&pdev->dev, "failed to get usb_hs_clk\n");
		ret = PTR_ERR(motg->clk);
		goto put_phy_reset_clk;
	}

	motg->pclk = clk_get(&pdev->dev, "usb_hs_pclk");
	if (IS_ERR(motg->pclk)) {
		dev_err(&pdev->dev, "failed to get usb_hs_pclk\n");
		ret = PTR_ERR(motg->pclk);
		goto put_clk;
	}

	/*
	 * USB core clock is not present on all MSM chips. This
	 * clock is introduced to remove the dependency on AXI
	 * bus frequency.
	 */
	motg->core_clk = clk_get(&pdev->dev, "usb_hs_core_clk");
	if (IS_ERR(motg->core_clk))
		motg->core_clk = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get platform resource mem\n");
		ret = -ENODEV;
		goto put_core_clk;
	}

	motg->regs = ioremap(res->start, resource_size(res));
	if (!motg->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto put_core_clk;
	}
	dev_info(&pdev->dev, "OTG regs = %p\n", motg->regs);

	motg->irq = platform_get_irq(pdev, 0);
	if (!motg->irq) {
		dev_err(&pdev->dev, "platform_get_irq failed\n");
		ret = -ENODEV;
		goto free_regs;
	}

	clk_enable(motg->clk);
	clk_enable(motg->pclk);
	if (motg->core_clk)
		clk_enable(motg->core_clk);

	writel(0, USB_USBINTR);
	writel(0, USB_OTGSC);

	INIT_WORK(&motg->sm_work, msm_otg_sm_work);
	ret = request_irq(motg->irq, msm_otg_irq, IRQF_SHARED,
					"msm_otg", motg);
	if (ret) {
		dev_err(&pdev->dev, "request irq failed\n");
		goto disable_clks;
	}

	otg->init = msm_otg_reset;
	otg->set_host = msm_otg_set_host;
	otg->set_peripheral = msm_otg_set_peripheral;

	otg->io_ops = &msm_otg_io_ops;

	ret = otg_set_transceiver(&motg->otg);
	if (ret) {
		dev_err(&pdev->dev, "otg_set_transceiver failed\n");
		goto free_irq;
	}

	platform_set_drvdata(pdev, motg);
	device_init_wakeup(&pdev->dev, 1);

	if (motg->pdata->mode == USB_OTG &&
			motg->pdata->otg_control == OTG_USER_CONTROL) {
		ret = msm_otg_debugfs_init(motg);
		if (ret)
			dev_dbg(&pdev->dev,
				"mode debugfs file is not available\n");
	}

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	return 0;
free_irq:
	free_irq(motg->irq, motg);
disable_clks:
	clk_disable(motg->pclk);
	clk_disable(motg->clk);
free_regs:
	iounmap(motg->regs);
put_core_clk:
	if (motg->core_clk)
		clk_put(motg->core_clk);
	clk_put(motg->pclk);
put_clk:
	clk_put(motg->clk);
put_phy_reset_clk:
	clk_put(motg->phy_reset_clk);
free_motg:
	kfree(motg);
	return ret;
}
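This probe still pairs every clk_get() with an explicit clk_put() in the unwind ladder. For contrast, a sketch of the same lookups using managed helpers is shown below; it assumes motg itself were obtained with devm_kzalloc() (it is not in the code above), in which case the put_* labels would disappear from the error path.

	motg->phy_reset_clk = devm_clk_get(&pdev->dev, "usb_phy_clk");
	if (IS_ERR(motg->phy_reset_clk))
		return PTR_ERR(motg->phy_reset_clk);

	motg->clk = devm_clk_get(&pdev->dev, "usb_hs_clk");
	if (IS_ERR(motg->clk))
		return PTR_ERR(motg->clk);

	motg->pclk = devm_clk_get(&pdev->dev, "usb_hs_pclk");
	if (IS_ERR(motg->pclk))
		return PTR_ERR(motg->pclk);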
Example #5
/*
 * This function is called during the driver binding process. It requests
 * memory, initializes the net_device structure and registers it with the
 * kernel.
 *
 * @param   pdev  the device structure used to store device specific
 *                information that is used by the suspend, resume and remove
 *                functions
 *
 * @return  The function returns 0 on success and a non-zero value on failure
 */
static int mxc_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct mxc_irda *si;
	struct resource *uart_res;
	unsigned int baudrate_mask;
	int uart_irq;
	int err;

	uart_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	uart_irq = platform_get_irq(pdev, 0);
	if (!uart_res || uart_irq == NO_IRQ) {
		dev_err(&pdev->dev, "Unable to find resources\n");
		return -ENXIO;
	}

	if (!request_mem_region(uart_res->start,
				uart_res->end - uart_res->start + 1,
				pdev->name)) {
		dev_err(&pdev->dev, "Unable to request resource\n");
		return -ENOMEM;
	}
	mxcirda_debug("UART base: %x, IRQ: %d", uart_res->start, uart_irq);

	dev = alloc_irdadev(sizeof(struct mxc_irda));
	if (!dev) {
		dev_err(&pdev->dev, "Failed to request memory region\n");
		err = -ENOMEM;
		goto err_mem_1;
	}

	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->uart_res = uart_res;
	si->uart_irq = uart_irq;

	si->uart_base = (void *)IO_ADDRESS(uart_res->start);
	if (!(si->uart_base)) {
		dev_err(&pdev->dev, "Failed to remap memory region\n");
		err = -ENOMEM;
		goto err_mem_2;
	}

	/*
	 * Initialise the SIR buffers
	 */
	err = mxc_irda_init_iobuf(&si->rx_buff, UART_BUFF_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to request memory for rx buffer\n");
		goto err_mem_2;
	}

	err = mxc_irda_init_iobuf(&si->tx_buff, UART_BUFF_SIZE);
	if (err) {
		dev_err(&pdev->dev, "Failed to request memory for tx buffer\n");
		goto err_mem_3;
	}

	dev->hard_start_xmit	= mxc_irda_hard_xmit;
	dev->open		= mxc_irda_start;
	dev->stop		= mxc_irda_stop;
	dev->do_ioctl		= mxc_irda_ioctl;
	dev->get_stats		= mxc_irda_stats;
	dev->irq		= uart_irq;

	irda_init_max_qos_capabilies(&si->qos);

	/*
	 * We support SIR (9600, 19200, 38400, 57600 and 115200 bps) and,
	 * depending on max_rate, the higher MIR/FIR rates selected below.
	 * Min Turn Time is set to 1 ms or greater.
	 */
	baudrate_mask = IR_9600;
	/* deliberate fall-through: each case also enables every lower SIR rate */
	switch (max_rate) {
	case 4000000:	baudrate_mask |= IR_4000000 << 8;	/* fall through */
	case 1152000:	baudrate_mask |= IR_1152000;	/* fall through */
	case 576000:	baudrate_mask |= IR_576000;	/* fall through */
	case 115200:	baudrate_mask |= IR_115200;	/* fall through */
	case 57600:	baudrate_mask |= IR_57600;	/* fall through */
	case 38400:	baudrate_mask |= IR_38400;	/* fall through */
	case 19200:	baudrate_mask |= IR_19200;
	}
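	/*
	 * Worked example of the cascade above: with max_rate == 115200 the
	 * cases fall through from 115200 downwards, leaving
	 *	baudrate_mask == IR_9600 | IR_115200 | IR_57600 | IR_38400 | IR_19200
	 * i.e. every supported SIR rate up to and including max_rate.
	 */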
	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 0x7;
	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);
	if (err == 0) {
		printk("%s: is found on a MXC IrDA\n", dev->name);
		platform_set_drvdata(pdev, dev);
	} else {
		kfree(si->tx_buff.head);
err_mem_3:
		kfree(si->rx_buff.head);
err_mem_2:
		free_netdev(dev);
err_mem_1:
		release_mem_region(uart_res->start,
				uart_res->end - uart_res->start + 1);
	}
	return err;
}
Example #6
static int __init coh901327_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;
	u16 val;
	struct resource *res;

	parent = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	virtbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(virtbase))
		return PTR_ERR(virtbase);

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		dev_err(dev, "could not get clock\n");
		return ret;
	}
	ret = clk_prepare_enable(clk);
	if (ret) {
		dev_err(dev, "could not prepare and enable clock\n");
		goto out_no_clk_enable;
	}

	val = readw(virtbase + U300_WDOG_SR);
	switch (val) {
	case U300_WDOG_SR_STATUS_TIMED_OUT:
		dev_info(dev, "watchdog timed out since last chip reset!\n");
		coh901327_wdt.bootstatus |= WDIOF_CARDRESET;
		/* Status will be cleared below */
		break;
	case U300_WDOG_SR_STATUS_NORMAL:
		dev_info(dev, "in normal status, no timeouts have occurred.\n");
		break;
	default:
		dev_info(dev, "contains an illegal status code (%08x)\n", val);
		break;
	}

	val = readw(virtbase + U300_WDOG_D2R);
	switch (val) {
	case U300_WDOG_D2R_DISABLE_STATUS_DISABLED:
		dev_info(dev, "currently disabled.\n");
		break;
	case U300_WDOG_D2R_DISABLE_STATUS_ENABLED:
		dev_info(dev, "currently enabled! (disabling it now)\n");
		coh901327_disable();
		break;
	default:
		dev_err(dev, "contains an illegal enable/disable code (%08x)\n",
			val);
		break;
	}

	/* Reset the watchdog */
	writew(U300_WDOG_SR_RESET_STATUS_RESET, virtbase + U300_WDOG_SR);

	irq = platform_get_irq(pdev, 0);
	if (request_irq(irq, coh901327_interrupt, 0,
			DRV_NAME " Bark", pdev)) {
		ret = -EIO;
		goto out_no_irq;
	}

	watchdog_init_timeout(&coh901327_wdt, margin, dev);

	coh901327_wdt.parent = dev;
	ret = watchdog_register_device(&coh901327_wdt);
	if (ret)
		goto out_no_wdog;

	dev_info(dev, "initialized. (timeout=%d sec)\n",
			coh901327_wdt.timeout);
	return 0;

out_no_wdog:
	free_irq(irq, pdev);
out_no_irq:
	clk_disable_unprepare(clk);
out_no_clk_enable:
	clk_put(clk);
	return ret;
}
Example #7
static int tahvo_usb_probe(struct platform_device *pdev)
{
	struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
	struct tahvo_usb *tu;
	int ret;

	tu = devm_kzalloc(&pdev->dev, sizeof(*tu), GFP_KERNEL);
	if (!tu)
		return -ENOMEM;

	tu->phy.otg = devm_kzalloc(&pdev->dev, sizeof(*tu->phy.otg),
				   GFP_KERNEL);
	if (!tu->phy.otg)
		return -ENOMEM;

	tu->pt_dev = pdev;

	/* Default mode */
#ifdef CONFIG_TAHVO_USB_HOST_BY_DEFAULT
	tu->tahvo_mode = TAHVO_MODE_HOST;
#else
	tu->tahvo_mode = TAHVO_MODE_PERIPHERAL;
#endif

	mutex_init(&tu->serialize);

	tu->ick = devm_clk_get(&pdev->dev, "usb_l4_ick");
	if (!IS_ERR(tu->ick))
		clk_enable(tu->ick);

	/*
	 * Set initial state, so that we generate kevents only on state changes.
	 */
	tu->vbus_state = retu_read(rdev, TAHVO_REG_IDSR) & TAHVO_STAT_VBUS;

	tu->extcon.name = DRIVER_NAME;
	tu->extcon.supported_cable = tahvo_cable;
	tu->extcon.dev.parent = &pdev->dev;

	ret = extcon_dev_register(&tu->extcon);
	if (ret) {
		dev_err(&pdev->dev, "could not register extcon device: %d\n",
			ret);
		goto err_disable_clk;
	}

	/* Set the initial cable state. */
	extcon_set_cable_state(&tu->extcon, "USB-HOST",
			       tu->tahvo_mode == TAHVO_MODE_HOST);
	extcon_set_cable_state(&tu->extcon, "USB", tu->vbus_state);

	/* Create OTG interface */
	tahvo_usb_power_off(tu);
	tu->phy.dev = &pdev->dev;
	tu->phy.otg->state = OTG_STATE_UNDEFINED;
	tu->phy.label = DRIVER_NAME;
	tu->phy.set_suspend = tahvo_usb_set_suspend;

	tu->phy.otg->phy = &tu->phy;
	tu->phy.otg->set_host = tahvo_usb_set_host;
	tu->phy.otg->set_peripheral = tahvo_usb_set_peripheral;

	ret = usb_add_phy(&tu->phy, USB_PHY_TYPE_USB2);
	if (ret < 0) {
		dev_err(&pdev->dev, "cannot register USB transceiver: %d\n",
			ret);
		goto err_extcon_unreg;
	}

	dev_set_drvdata(&pdev->dev, tu);

	tu->irq = platform_get_irq(pdev, 0);
	ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
				   "tahvo-vbus", tu);
	if (ret) {
		dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
			ret);
		goto err_remove_phy;
	}

	/* Attributes */
	ret = sysfs_create_group(&pdev->dev.kobj, &tahvo_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "cannot create sysfs group: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(tu->irq, tu);
err_remove_phy:
	usb_remove_phy(&tu->phy);
err_extcon_unreg:
	extcon_dev_unregister(&tu->extcon);
err_disable_clk:
	if (!IS_ERR(tu->ick))
		clk_disable(tu->ick);

	return ret;
}
Example #8
static int img_ir_probe(struct platform_device *pdev)
{
	struct img_ir_priv *priv;
	struct resource *res_regs;
	int irq, error, error2;

	/* Get resources from platform device */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "cannot find IRQ resource\n");
		return irq;
	}

	/* Private driver data */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "cannot allocate device data\n");
		return -ENOMEM;
	}
	platform_set_drvdata(pdev, priv);
	priv->dev = &pdev->dev;
	spin_lock_init(&priv->lock);

	/* Ioremap the registers */
	res_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->reg_base = devm_ioremap_resource(&pdev->dev, res_regs);
	if (IS_ERR(priv->reg_base))
		return PTR_ERR(priv->reg_base);

	/* Get core clock */
	priv->clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(priv->clk))
		dev_warn(&pdev->dev, "cannot get core clock resource\n");

	/* Get sys clock */
	priv->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(priv->sys_clk))
		dev_warn(&pdev->dev, "cannot get sys clock resource\n");
	/*
	 * Enable the system clock before the register interface is accessed.
	 * The ISR must not run with the sys clock disabled, so a failure here
	 * aborts the probe with an error.
	 */
	if (!IS_ERR(priv->sys_clk)) {
		error = clk_prepare_enable(priv->sys_clk);
		if (error) {
			dev_err(&pdev->dev, "cannot enable sys clock\n");
			return error;
		}
	}

	/* Set up raw & hw decoder */
	error = img_ir_probe_raw(priv);
	error2 = img_ir_probe_hw(priv);
	if (error && error2) {
		if (error == -ENODEV)
			error = error2;
		goto err_probe;
	}

	/* Get the IRQ */
	priv->irq = irq;
	error = request_irq(priv->irq, img_ir_isr, 0, "img-ir", priv);
	if (error) {
		dev_err(&pdev->dev, "cannot register IRQ %u\n",
			priv->irq);
		error = -EIO;
		goto err_irq;
	}

	img_ir_ident(priv);
	img_ir_setup(priv);

	return 0;

err_irq:
	img_ir_remove_hw(priv);
	img_ir_remove_raw(priv);
err_probe:
	if (!IS_ERR(priv->sys_clk))
		clk_disable_unprepare(priv->sys_clk);
	return error;
}
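Both clocks here are optional, which is why every clk call is wrapped in an IS_ERR() check. On kernels that provide devm_clk_get_optional() the same intent can be expressed more directly, as in the sketch below (an alternative, not what this driver does): the helper returns NULL instead of an error when the clock is absent, and clk_prepare_enable(NULL) is a no-op.

	priv->sys_clk = devm_clk_get_optional(&pdev->dev, "sys");
	if (IS_ERR(priv->sys_clk))
		return PTR_ERR(priv->sys_clk);

	error = clk_prepare_enable(priv->sys_clk);	/* NULL clock: no-op */
	if (error) {
		dev_err(&pdev->dev, "cannot enable sys clock\n");
		return error;
	}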
Example #9
/**
 * usb_hcd_pxa27x_probe - initialize pxa27x-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 *
 */
int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd;
	struct pxaohci_platform_data *inf;
	struct pxa27x_ohci *ohci;
	struct resource *r;
	struct clk *usb_clk;

	inf = pdev->dev.platform_data;

	if (!inf)
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		pr_err("no resource of IORESOURCE_IRQ");
		return -ENXIO;
	}

	usb_clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(usb_clk))
		return PTR_ERR(usb_clk);

	hcd = usb_create_hcd (driver, &pdev->dev, "pxa27x");
	if (!hcd)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		pr_err("no resource of IORESOURCE_MEM");
		retval = -ENXIO;
		goto err1;
	}

	hcd->rsrc_start = r->start;
	hcd->rsrc_len = resource_size(r);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		pr_debug("request_mem_region failed");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		pr_debug("ioremap failed");
		retval = -ENOMEM;
		goto err2;
	}

	/* initialize "struct pxa27x_ohci" */
	ohci = (struct pxa27x_ohci *)hcd_to_ohci(hcd);
	ohci->dev = &pdev->dev;
	ohci->clk = usb_clk;
	ohci->mmio_base = (void __iomem *)hcd->regs;

	if ((retval = pxa27x_start_hc(ohci, &pdev->dev)) < 0) {
		pr_debug("pxa27x_start_hc failed");
		goto err3;
	}

	/* Select Power Management Mode */
	pxa27x_ohci_select_pmm(ohci, inf->port_mode);

	if (inf->power_budget)
		hcd->power_budget = inf->power_budget;

	ohci_hcd_init(hcd_to_ohci(hcd));

	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (retval == 0)
		return retval;

	pxa27x_stop_hc(ohci, &pdev->dev);
 err3:
	iounmap(hcd->regs);
 err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
 err1:
	usb_put_hcd(hcd);
	clk_put(usb_clk);
	return retval;
}
Example #10
static int do_pd_setup(struct fs_enet_private *fep)
{
#ifdef CONFIG_PPC_CPM_NEW_BINDING
	struct of_device *ofdev = to_of_device(fep->dev);
	struct fs_platform_info *fpi = fep->fpi;
	int ret = -EINVAL;

	fep->interrupt = of_irq_to_resource(ofdev->node, 0, NULL);
	if (fep->interrupt == NO_IRQ)
		goto out;

	fep->fcc.fccp = of_iomap(ofdev->node, 0);
	if (!fep->fcc.fccp)
		goto out;

	fep->fcc.ep = of_iomap(ofdev->node, 1);
	if (!fep->fcc.ep)
		goto out_fccp;

	fep->fcc.fcccp = of_iomap(ofdev->node, 2);
	if (!fep->fcc.fcccp)
		goto out_ep;

	fep->fcc.mem = (void __iomem *)cpm2_immr;
	fpi->dpram_offset = cpm_dpalloc(128, 8);
	if (IS_ERR_VALUE(fpi->dpram_offset)) {
		ret = fpi->dpram_offset;
		goto out_fcccp;
	}

	return 0;

out_fcccp:
	iounmap(fep->fcc.fcccp);
out_ep:
	iounmap(fep->fcc.ep);
out_fccp:
	iounmap(fep->fcc.fccp);
out:
	return ret;
#else
	struct platform_device *pdev = to_platform_device(fep->dev);
	struct resource *r;

	/* Fill out IRQ field */
	fep->interrupt = platform_get_irq(pdev, 0);
	if (fep->interrupt < 0)
		return -EINVAL;

	/* Attach the memory for the FCC Parameter RAM */
	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_pram");
	fep->fcc.ep = ioremap(r->start, r->end - r->start + 1);
	if (fep->fcc.ep == NULL)
		return -EINVAL;

	r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fcc_regs");
	fep->fcc.fccp = ioremap(r->start, r->end - r->start + 1);
	if (fep->fcc.fccp == NULL)
		return -EINVAL;

	if (fep->fpi->fcc_regs_c) {
		fep->fcc.fcccp = (void __iomem *)fep->fpi->fcc_regs_c;
	} else {
		r = platform_get_resource_byname(pdev, IORESOURCE_MEM,
				"fcc_regs_c");
		fep->fcc.fcccp = ioremap(r->start,
				r->end - r->start + 1);
	}

	if (fep->fcc.fcccp == NULL)
		return -EINVAL;

	fep->fcc.mem = (void __iomem *)fep->fpi->mem_offset;
	if (fep->fcc.mem == NULL)
		return -EINVAL;

	return 0;
#endif
}
Example #11
static int parport_ax88796_probe(struct platform_device *pdev)
{
	struct device *_dev = &pdev->dev;
	struct ax_drvdata *dd;
	struct parport *pp = NULL;
	struct resource *res;
	unsigned long size;
	int spacing;
	int irq;
	int ret;

	dd = kzalloc(sizeof(struct ax_drvdata), GFP_KERNEL);
	if (dd == NULL) {
		dev_err(_dev, "no memory for private data\n");
		return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(_dev, "no MEM specified\n");
		ret = -ENXIO;
		goto exit_mem;
	}

	size = resource_size(res);
	spacing = size / 3;

	dd->io = request_mem_region(res->start, size, pdev->name);
	if (dd->io == NULL) {
		dev_err(_dev, "cannot reserve memory\n");
		ret = -ENXIO;
		goto exit_mem;
	}

	dd->base = ioremap(res->start, size);
	if (dd->base == NULL) {
		dev_err(_dev, "cannot ioremap region\n");
		ret = -ENXIO;
		goto exit_res;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		irq = PARPORT_IRQ_NONE;

	pp = parport_register_port((unsigned long)dd->base, irq,
				   PARPORT_DMA_NONE,
				   &parport_ax88796_ops);

	if (pp == NULL) {
		dev_err(_dev, "failed to register parallel port\n");
		ret = -ENOMEM;
		goto exit_unmap;
	}

	pp->private_data = dd;
	dd->parport = pp;
	dd->dev = _dev;

	dd->spp_data = dd->base;
	dd->spp_spr  = dd->base + (spacing * 1);
	dd->spp_cpr  = dd->base + (spacing * 2);
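	/*
	 * The three register banks are assumed to be equally spaced inside the
	 * memory window: e.g. a 0x60-byte resource gives spacing == 0x20, so
	 * data, status and control land at base + 0x00, + 0x20 and + 0x40.
	 */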

	/* initialise the port controls */
	writeb(AX_CPR_STRB, dd->spp_cpr);

	if (irq >= 0) {
		/* request irq */
		ret = request_irq(irq, parport_irq_handler,
				  IRQF_TRIGGER_FALLING, pdev->name, pp);

		if (ret < 0)
			goto exit_port;

		dd->irq_enabled = 1;
	}

	platform_set_drvdata(pdev, pp);

	dev_info(_dev, "attached parallel port driver\n");
	parport_announce_port(pp);

	return 0;

 exit_port:
	parport_remove_port(pp);
 exit_unmap:
	iounmap(dd->base);
 exit_res:
	release_resource(dd->io);
	kfree(dd->io);
 exit_mem:
	kfree(dd);
	return ret;
}
Example #12
static int sbsa_gwdt_probe(struct platform_device *pdev)
{
	void __iomem *rf_base, *cf_base;
	struct device *dev = &pdev->dev;
	struct watchdog_device *wdd;
	struct sbsa_gwdt *gwdt;
	struct resource *res;
	int ret, irq;
	u32 status;

	gwdt = devm_kzalloc(dev, sizeof(*gwdt), GFP_KERNEL);
	if (!gwdt)
		return -ENOMEM;
	platform_set_drvdata(pdev, gwdt);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	cf_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(cf_base))
		return PTR_ERR(cf_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	rf_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(rf_base))
		return PTR_ERR(rf_base);

	/*
	 * Get the frequency of system counter from the cp15 interface of ARM
	 * Generic timer. We don't need to check it, because if it returns "0",
	 * system would panic in very early stage.
	 */
	gwdt->clk = arch_timer_get_cntfrq();
	gwdt->refresh_base = rf_base;
	gwdt->control_base = cf_base;

	wdd = &gwdt->wdd;
	wdd->parent = dev;
	wdd->info = &sbsa_gwdt_info;
	wdd->ops = &sbsa_gwdt_ops;
	wdd->min_timeout = 1;
	wdd->max_hw_heartbeat_ms = U32_MAX / gwdt->clk * 1000;
	wdd->timeout = DEFAULT_TIMEOUT;
	watchdog_set_drvdata(wdd, gwdt);
	watchdog_set_nowayout(wdd, nowayout);

	status = readl(cf_base + SBSA_GWDT_WCS);
	if (status & SBSA_GWDT_WCS_WS1) {
		dev_warn(dev, "System reset by WDT.\n");
		wdd->bootstatus |= WDIOF_CARDRESET;
	}
	if (status & SBSA_GWDT_WCS_EN)
		set_bit(WDOG_HW_RUNNING, &wdd->status);

	if (action) {
		irq = platform_get_irq(pdev, 0);
		if (irq < 0) {
			action = 0;
			dev_warn(dev, "unable to get ws0 interrupt.\n");
		} else {
			/*
			 * In case there is a pending ws0 interrupt, just ping
			 * the watchdog before registering the interrupt routine
			 */
			writel(0, rf_base + SBSA_GWDT_WRR);
			if (devm_request_irq(dev, irq, sbsa_gwdt_interrupt, 0,
					     pdev->name, gwdt)) {
				action = 0;
				dev_warn(dev, "unable to request IRQ %d.\n",
					 irq);
			}
		}
		if (!action)
			dev_warn(dev, "falling back to single stage mode.\n");
	}
	/*
	 * In single stage mode, the first signal (WS0) is ignored and the
	 * timeout is (WOR * 2), so the maximum timeout is doubled.
	 */
	if (!action)
		wdd->max_hw_heartbeat_ms *= 2;
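	/*
	 * Worked example: with a hypothetical 50 MHz system counter,
	 * max_hw_heartbeat_ms = U32_MAX / 50000000 * 1000 = 85 * 1000 = 85000 ms,
	 * and the doubling above raises it to 170000 ms in single stage mode.
	 */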

	watchdog_init_timeout(wdd, timeout, dev);
	/*
	 * Update timeout to WOR.
	 * Because of the explicit watchdog refresh mechanism,
	 * it's also a ping, if watchdog is enabled.
	 */
	sbsa_gwdt_set_timeout(wdd, wdd->timeout);

	ret = watchdog_register_device(wdd);
	if (ret)
		return ret;

	dev_info(dev, "Initialized with %ds timeout @ %u Hz, action=%d.%s\n",
		 wdd->timeout, gwdt->clk, action,
		 status & SBSA_GWDT_WCS_EN ? " [enabled]" : "");

	return 0;
}
Example #13
static int wakeup_event_thread(void *param)
{
	struct wakeup_ctrl *ctrl = (struct wakeup_ctrl *)param;
	struct sched_param sch_param = {.sched_priority = 1};
	u32 timeout = 0;

	sched_setscheduler(current, SCHED_RR, &sch_param);
	while (1) {
		wait_for_completion_interruptible(&ctrl->event);
		if (ctrl->thread_close) {
			while (!kthread_should_stop() && (timeout < 1000)) {
				timeout++;
				msleep(1);
			}
			break;
		}
		wakeup_event_handler(ctrl);
		enable_irq(ctrl->wakeup_irq);
		if ((ctrl->usb_irq > 0) && (ctrl->wakeup_irq != ctrl->usb_irq))
			enable_irq(ctrl->usb_irq);
	}
	return 0;
}

static int wakeup_dev_probe(struct platform_device *pdev)
{
	struct fsl_usb2_wakeup_platform_data *pdata;
	struct wakeup_ctrl *ctrl = NULL;
	int status;
	unsigned long interrupt_flag;

	printk(KERN_INFO "IMX usb wakeup probe\n");

	if (!pdev || !pdev->dev.platform_data)
		return -ENODEV;
	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;
	pdata = pdev->dev.platform_data;
	ctrl->pdata = pdata;
	init_waitqueue_head(&pdata->wq);
	pdata->usb_wakeup_is_pending = false;

	init_completion(&ctrl->event);
	/* Currently, both mx5x and mx6q use the usb controller's irq
	 * as the wakeup irq.
	 */
	ctrl->wakeup_irq = platform_get_irq(pdev, 1);
	ctrl->usb_irq = platform_get_irq(pdev, 1);
	ctrl->thread_close = false;
	if (ctrl->wakeup_irq != ctrl->usb_irq)
		interrupt_flag = IRQF_DISABLED;
	else
		interrupt_flag = IRQF_SHARED;
	status = request_irq(ctrl->wakeup_irq, usb_wakeup_handler, interrupt_flag, "usb_wakeup", (void *)ctrl);
	if (status)
		goto error1;

	ctrl->thread = kthread_run(wakeup_event_thread, (void *)ctrl, "usb_wakeup thread");
	status = IS_ERR(ctrl->thread) ? -1 : 0;
	if (status)
		goto error2;
	platform_set_drvdata(pdev, ctrl);

	printk(KERN_DEBUG "the wakeup pdata is 0x%p\n", pdata);
	return 0;
error2:
	free_irq(ctrl->wakeup_irq, (void *)ctrl);
error1:
	kfree(ctrl);
	return status;
}

static int  wakeup_dev_exit(struct platform_device *pdev)
{
	struct wakeup_ctrl *wctrl = platform_get_drvdata(pdev);

	wctrl->thread_close = true;
	complete(&wctrl->event);
	kthread_stop(wctrl->thread);
	free_irq(wctrl->wakeup_irq, (void *)wctrl);
	kfree(wctrl);

	return 0;
}
static struct platform_driver wakeup_d = {
	.probe   = wakeup_dev_probe,
	.remove  = wakeup_dev_exit,
	.driver = {
		.name = "usb-wakeup",
	},
};

static int __init wakeup_dev_init(void)
{
	return platform_driver_register(&wakeup_d);
}
static void __exit wakeup_dev_uninit(void)
{
	platform_driver_unregister(&wakeup_d);
}
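/*
 * The module wiring is presumably elsewhere in the original file; the usual
 * boilerplate would be
 *	module_init(wakeup_dev_init);
 *	module_exit(wakeup_dev_uninit);
 * or, on kernels that provide it, a single module_platform_driver(wakeup_d)
 * in place of the two helpers above.
 */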
Example #14
static int tsc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct tsc_data *ts;
	int error = 0;
	u32 rev = 0;

	ts = kzalloc(sizeof(struct tsc_data), GFP_KERNEL);
	if (!ts) {
		dev_err(dev, "cannot allocate device info\n");
		return -ENOMEM;
	}

	ts->dev = dev;
	spin_lock_init(&ts->lock);
	setup_timer(&ts->timer, tsc_poll, (unsigned long)ts);
	platform_set_drvdata(pdev, ts);

	ts->tsc_irq = platform_get_irq(pdev, 0);
	if (ts->tsc_irq < 0) {
		dev_err(dev, "cannot determine device interrupt\n");
		error = -ENODEV;
		goto error_res;
	}

	ts->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!ts->res) {
		dev_err(dev, "cannot determine register area\n");
		error = -ENODEV;
		goto error_res;
	}

	if (!request_mem_region(ts->res->start, resource_size(ts->res),
				pdev->name)) {
		dev_err(dev, "cannot claim register memory\n");
		ts->res = NULL;
		error = -EINVAL;
		goto error_res;
	}

	ts->regs = ioremap(ts->res->start, resource_size(ts->res));
	if (!ts->regs) {
		dev_err(dev, "cannot map register memory\n");
		error = -ENOMEM;
		goto error_map;
	}

	ts->clk = clk_get(dev, NULL);
	if (IS_ERR(ts->clk)) {
		dev_err(dev, "cannot claim device clock\n");
		error = PTR_ERR(ts->clk);
		goto error_clk;
	}

	error = request_threaded_irq(ts->tsc_irq, NULL, tsc_irq, IRQF_ONESHOT,
				     dev_name(dev), ts);
	if (error < 0) {
		dev_err(ts->dev, "Could not allocate ts irq\n");
		goto error_irq;
	}

	ts->input_dev = input_allocate_device();
	if (!ts->input_dev) {
		dev_err(dev, "cannot allocate input device\n");
		error = -ENOMEM;
		goto error_input;
	}
	input_set_drvdata(ts->input_dev, ts);

	ts->input_dev->name       = pdev->name;
	ts->input_dev->id.bustype = BUS_HOST;
	ts->input_dev->dev.parent = &pdev->dev;
	ts->input_dev->open	  = tsc_start;
	ts->input_dev->close	  = tsc_stop;

	clk_enable(ts->clk);
	rev = tsc_read(ts, rev);
	ts->input_dev->id.product = ((rev >>  8) & 0x07);
	ts->input_dev->id.version = ((rev >> 16) & 0xfff);
	clk_disable(ts->clk);

	__set_bit(EV_KEY,    ts->input_dev->evbit);
	__set_bit(EV_ABS,    ts->input_dev->evbit);
	__set_bit(BTN_TOUCH, ts->input_dev->keybit);

	input_set_abs_params(ts->input_dev, ABS_X, 0, 0xffff, 5, 0);
	input_set_abs_params(ts->input_dev, ABS_Y, 0, 0xffff, 5, 0);
	input_set_abs_params(ts->input_dev, ABS_PRESSURE, 0, 4095, 128, 0);

	error = input_register_device(ts->input_dev);
	if (error < 0) {
		dev_err(dev, "failed input device registration\n");
		goto error_reg;
	}

	return 0;

error_reg:
	input_free_device(ts->input_dev);
error_input:
	free_irq(ts->tsc_irq, ts);
error_irq:
	clk_put(ts->clk);
error_clk:
	iounmap(ts->regs);
error_map:
	release_mem_region(ts->res->start, resource_size(ts->res));
error_res:
	platform_set_drvdata(pdev, NULL);
	kfree(ts);

	return error;
}
Example #15
static int orion_mdio_probe(struct platform_device *pdev)
{
	struct resource *r;
	struct mii_bus *bus;
	struct orion_mdio_dev *dev;
	int i, ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "No SMI register address given\n");
		return -ENODEV;
	}

	bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
	if (!bus) {
		dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
		return -ENOMEM;
	}

	bus->name = "orion_mdio_bus";
	bus->read = orion_mdio_read;
	bus->write = orion_mdio_write;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii",
		 dev_name(&pdev->dev));
	bus->parent = &pdev->dev;

	bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!bus->irq) {
		mdiobus_free(bus);
		return -ENOMEM;
	}

	for (i = 0; i < PHY_MAX_ADDR; i++)
		bus->irq[i] = PHY_POLL;

	dev = bus->priv;
	dev->regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!dev->regs) {
		dev_err(&pdev->dev, "Unable to remap SMI register\n");
		ret = -ENODEV;
		goto out_mdio;
	}

	init_waitqueue_head(&dev->smi_busy_wait);

	dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (!IS_ERR(dev->clk))
		clk_prepare_enable(dev->clk);

	dev->err_interrupt = platform_get_irq(pdev, 0);
	if (dev->err_interrupt > 0) {
		ret = devm_request_irq(&pdev->dev, dev->err_interrupt,
					orion_mdio_err_irq,
					IRQF_SHARED, pdev->name, dev);
		if (ret)
			goto out_mdio;

		writel(MVMDIO_ERR_INT_SMI_DONE,
			dev->regs + MVMDIO_ERR_INT_MASK);

	} else if (dev->err_interrupt == -EPROBE_DEFER) {
		/* defer, but release the bus and irq table allocated above */
		ret = -EPROBE_DEFER;
		goto out_mdio;
	}

	mutex_init(&dev->lock);

	if (pdev->dev.of_node)
		ret = of_mdiobus_register(bus, pdev->dev.of_node);
	else
		ret = mdiobus_register(bus);
	if (ret < 0) {
		dev_err(&pdev->dev, "Cannot register MDIO bus (%d)\n", ret);
		goto out_mdio;
	}

	platform_set_drvdata(pdev, bus);

	return 0;

out_mdio:
	if (!IS_ERR(dev->clk))
		clk_disable_unprepare(dev->clk);
	kfree(bus->irq);
	mdiobus_free(bus);
	return ret;
}
Example #16
static int s3cfb_probe(struct platform_device *pdev)
{
	struct s3c_platform_fb *pdata = NULL;
	struct resource *res = NULL;
	struct s3cfb_global *fbdev[2];
	int ret = 0;
	int i = 0;
#ifdef CONFIG_FB_S3C_MDNIE
	u32 reg;
#endif

#ifdef CONFIG_S5PV310_DEV_PD
	/* to use the runtime PM helper functions */
	pm_runtime_enable(&pdev->dev);
	/* enable the power domain */
	pm_runtime_get_sync(&pdev->dev);
#endif
	fbfimd = kzalloc(sizeof(struct s3cfb_fimd_desc), GFP_KERNEL);
	if (!fbfimd)
		return -ENOMEM;

	if (FIMD_MAX == 2)
		fbfimd->dual = 1;
	else
		fbfimd->dual = 0;

	for (i = 0; i < FIMD_MAX; i++) {
		/* global structure */
		fbfimd->fbdev[i] = kzalloc(sizeof(struct s3cfb_global), GFP_KERNEL);
		fbdev[i] = fbfimd->fbdev[i];
		if (!fbdev[i]) {
			dev_err(&pdev->dev,
				"failed to allocate global fb structure fimd[%d]!\n", i);
			ret = -ENOMEM;
			goto err0;
		}

		fbdev[i]->dev = &pdev->dev;

		/* platform_data*/
		pdata = to_fb_plat(&pdev->dev);
		fbdev[i]->lcd = (struct s3cfb_lcd *)pdata->lcd;

		if (pdata->cfg_gpio)
			pdata->cfg_gpio(pdev);

		if (pdata->clk_on)
			pdata->clk_on(pdev, &fbdev[i]->clock);

		/* io memory */
		res = platform_get_resource(pdev, IORESOURCE_MEM, i);
		if (!res) {
			dev_err(fbdev[i]->dev,
				"failed to get io memory region\n");
			ret = -EINVAL;
			goto err1;
		}
		res = request_mem_region(res->start,
					res->end - res->start + 1, pdev->name);
		if (!res) {
			dev_err(fbdev[i]->dev,
				"failed to request io memory region\n");
			ret = -EINVAL;
			goto err1;
		}
		fbdev[i]->regs = ioremap(res->start, res->end - res->start + 1);
		if (!fbdev[i]->regs) {
			dev_err(fbdev[i]->dev, "failed to remap io region\n");
			ret = -EINVAL;
			goto err1;
		}

		fbdev[i]->wq_count = 0;
		init_waitqueue_head(&fbdev[i]->wq);

		/* irq */
		fbdev[i]->irq = platform_get_irq(pdev, 0);
		if (request_irq(fbdev[i]->irq, s3cfb_irq_frame, IRQF_SHARED,
				pdev->name, fbdev[i])) {
			dev_err(fbdev[i]->dev, "request_irq failed\n");
			ret = -EINVAL;
			goto err2;
		}

#ifdef CONFIG_FB_S3C_TRACE_UNDERRUN
		if (request_irq(platform_get_irq(pdev, 1), s3cfb_irq_fifo,
				IRQF_DISABLED, pdev->name, fbdev[i])) {
			dev_err(fbdev[i]->dev, "request_irq failed\n");
			ret = -EINVAL;
			goto err2;
		}

		s3cfb_set_fifo_interrupt(fbdev[i], 1);
		dev_info(fbdev[i]->dev, "fifo underrun trace\n");
#endif
#ifdef CONFIG_FB_S3C_MDNIE
		/* only FIMD0 is supported */
		if (i == 0)
			s3c_mdnie_setup();
#endif
		/* hw setting */
		s3cfb_init_global(fbdev[i]);

		/* alloc fb_info */
		if (s3cfb_alloc_framebuffer(fbdev[i], i)) {
			dev_err(fbdev[i]->dev, "alloc error fimd[%d]\n", i);
			goto err3;
		}

		/* register fb_info */
		if (s3cfb_register_framebuffer(fbdev[i])) {
			dev_err(fbdev[i]->dev, "register error fimd[%d]\n", i);
			goto err3;
		}

		/* enable display */
		s3cfb_set_clock(fbdev[i]);

		/* Set Alpha value width to 8-bit alpha value
		 * 1 : 8bit mode
		 * 2 : 4bit mode
		 */
		s3cfb_set_alpha_value(fbdev[i], 1);

#ifdef CONFIG_FB_S3C_MDNIE
		/* only FIMD0 is supported */
		if (i == 0) {
			reg = readl(S3C_VA_SYS + 0x0210);
			reg &= ~(1<<13);
			reg &= ~(1<<12);
			reg &= ~(3<<10);
			reg |= (1<<0);
			reg &= ~(1<<1);
			writel(reg, S3C_VA_SYS + 0x0210);
			writel(3, fbdev[i]->regs + 0x27c);

			s3c_mdnie_init_global(fbdev[i]);
			s3c_mdnie_start(fbdev[i]);
		}
#endif
		s3cfb_enable_window(fbdev[0], pdata->default_win);
		s3cfb_update_power_state(fbdev[i], pdata->default_win,
					FB_BLANK_UNBLANK);
		s3cfb_display_on(fbdev[i]);

		fbdev[i]->system_state = POWER_ON;
#ifdef CONFIG_HAS_WAKELOCK
#ifdef CONFIG_HAS_EARLYSUSPEND
		fbdev[i]->early_suspend.suspend = s3cfb_early_suspend;
		fbdev[i]->early_suspend.resume = s3cfb_late_resume;
		fbdev[i]->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
		register_early_suspend(&fbdev[i]->early_suspend);
#endif
#endif
	}
Example #17
static int __init omap4_l3_probe(struct platform_device *pdev)
{
	static struct omap4_l3		*l3;
	struct resource		*res;
	int			ret;
	int			irq;

	l3 = kzalloc(sizeof(*l3), GFP_KERNEL);
	if (!l3)
		return -ENOMEM;

	platform_set_drvdata(pdev, l3);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "couldn't find resource 0\n");
		ret = -ENODEV;
		goto err0;
	}

	l3->l3_base[0] = ioremap(res->start, resource_size(res));
	if (!l3->l3_base[0]) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err0;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!res) {
		dev_err(&pdev->dev, "couldn't find resource 1\n");
		ret = -ENODEV;
		goto err1;
	}

	l3->l3_base[1] = ioremap(res->start, resource_size(res));
	if (!l3->l3_base[1]) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err1;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	if (!res) {
		dev_err(&pdev->dev, "couldn't find resource 2\n");
		ret = -ENODEV;
		goto err2;
	}

	l3->l3_base[2] = ioremap(res->start, resource_size(res));
	if (!l3->l3_base[2]) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err2;
	}

	/*
	 * Setup interrupt Handlers
	 */
	l3->debug_irq = irq = platform_get_irq(pdev, 0);
	ret = request_irq(irq,
			l3_interrupt_handler,
			IRQF_DISABLED, "l3-dbg-irq", l3);
	if (ret) {
		pr_crit("L3: request_irq failed to register for 0x%x\n",
					 OMAP44XX_IRQ_L3_DBG);
		goto err3;
	}

	l3->app_irq = irq = platform_get_irq(pdev, 1);
	ret = request_irq(irq,
			l3_interrupt_handler,
			IRQF_DISABLED, "l3-app-irq", l3);
	if (ret) {
		pr_crit("L3: request_irq failed to register for 0x%x\n",
					 OMAP44XX_IRQ_L3_APP);
		goto err4;
	}

	return 0;

err4:
	free_irq(l3->debug_irq, l3);
err3:
	iounmap(l3->l3_base[2]);
err2:
	iounmap(l3->l3_base[1]);
err1:
	iounmap(l3->l3_base[0]);
err0:
	kfree(l3);
	return ret;
}
Example #18
static int __init msm_serial_probe(struct platform_device *pdev)
{
	struct msm_port *msm_port;
	struct resource *resource;
	struct uart_port *port;
	int irq;
#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
	struct msm_serial_platform_data *pdata = pdev->dev.platform_data;
#endif

	if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
		return -ENXIO;

	printk(KERN_INFO "msm_serial: detected port #%d\n", pdev->id);

	port = get_port_from_line(pdev->id);
	port->dev = &pdev->dev;
	msm_port = UART_TO_MSM(port);

	msm_port->clk = clk_get(&pdev->dev, "uart_clk");
	if (unlikely(IS_ERR(msm_port->clk)))
		return PTR_ERR(msm_port->clk);
	port->uartclk = clk_get_rate(msm_port->clk);

// BEGIN 0010274: [email protected] 2010-10-27
// Fixed a problem that system don't go to sleep mode.
#if defined(CONFIG_MACH_LGE_BRYCE)
	if(uart_det_flag){
		clk_enable(msm_port->clk);
		msm_uart_driver.cons = NULL;
	}
#endif	
// END 0010274: [email protected] 2010-10-27

	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(!resource))
		return -ENXIO;
	port->mapbase = resource->start;

	irq = platform_get_irq(pdev, 0);
	if (unlikely(irq < 0))
		return -ENXIO;
	port->irq = irq;

	platform_set_drvdata(pdev, port);

	if (unlikely(set_irq_wake(port->irq, 1)))
		return -ENXIO;

#ifdef CONFIG_SERIAL_MSM_RX_WAKEUP
	if (pdata == NULL)
		msm_port->wakeup.irq = -1;
	else {
		msm_port->wakeup.irq = pdata->wakeup_irq;
		msm_port->wakeup.ignore = 1;
		msm_port->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
		msm_port->wakeup.rx_to_inject = pdata->rx_to_inject;

		if (unlikely(msm_port->wakeup.irq <= 0))
			return -EINVAL;
	}
#endif

#ifdef CONFIG_SERIAL_MSM_CLOCK_CONTROL
	msm_port->clk_state = MSM_CLK_PORT_OFF;
	hrtimer_init(&msm_port->clk_off_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	msm_port->clk_off_timer.function = msm_serial_clock_off;
	msm_port->clk_off_delay = ktime_set(0, 1000000);  /* 1 ms */
#endif

	pm_runtime_enable(port->dev);
	return uart_add_one_port(&msm_uart_driver, port);
}
Example #19
static int __devinit imx_keypad_probe(struct platform_device *pdev)
{
	const struct matrix_keymap_data *keymap_data = pdev->dev.platform_data;
	struct imx_keypad *keypad;
	struct input_dev *input_dev;
	struct resource *res;
	int irq, error, i;

	if (keymap_data == NULL) {
		dev_err(&pdev->dev, "no keymap defined\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq defined in platform data\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "no I/O memory defined in platform data\n");
		return -EINVAL;
	}

	res = request_mem_region(res->start, resource_size(res), pdev->name);
	if (res == NULL) {
		dev_err(&pdev->dev, "failed to request I/O memory\n");
		return -EBUSY;
	}

	input_dev = input_allocate_device();
	if (!input_dev) {
		dev_err(&pdev->dev, "failed to allocate the input device\n");
		error = -ENOMEM;
		goto failed_rel_mem;
	}

	keypad = kzalloc(sizeof(struct imx_keypad), GFP_KERNEL);
	if (!keypad) {
		dev_err(&pdev->dev, "not enough memory for driver data\n");
		error = -ENOMEM;
		goto failed_free_input;
	}

	keypad->input_dev = input_dev;
	keypad->irq = irq;
	keypad->stable_count = 0;

	setup_timer(&keypad->check_matrix_timer,
		    imx_keypad_check_for_events, (unsigned long) keypad);

	keypad->mmio_base = ioremap(res->start, resource_size(res));
	if (keypad->mmio_base == NULL) {
		dev_err(&pdev->dev, "failed to remap I/O memory\n");
		error = -ENOMEM;
		goto failed_free_priv;
	}

	keypad->clk = clk_get(&pdev->dev, "kpp");
	if (IS_ERR(keypad->clk)) {
		dev_err(&pdev->dev, "failed to get keypad clock\n");
		error = PTR_ERR(keypad->clk);
		goto failed_unmap;
	}

	/* Search for rows and cols enabled */
	for (i = 0; i < keymap_data->keymap_size; i++) {
		keypad->rows_en_mask |= 1 << KEY_ROW(keymap_data->keymap[i]);
		keypad->cols_en_mask |= 1 << KEY_COL(keymap_data->keymap[i]);
	}

	if (keypad->rows_en_mask > ((1 << MAX_MATRIX_KEY_ROWS) - 1) ||
	   keypad->cols_en_mask > ((1 << MAX_MATRIX_KEY_COLS) - 1)) {
		dev_err(&pdev->dev,
			"invalid key data (too many rows or colums)\n");
		error = -EINVAL;
		goto failed_clock_put;
	}
	dev_dbg(&pdev->dev, "enabled rows mask: %x\n", keypad->rows_en_mask);
	dev_dbg(&pdev->dev, "enabled cols mask: %x\n", keypad->cols_en_mask);

	/* Init the Input device */
	input_dev->name = pdev->name;
	input_dev->id.bustype = BUS_HOST;
	input_dev->dev.parent = &pdev->dev;
	input_dev->open = imx_keypad_open;
	input_dev->close = imx_keypad_close;

	/*
	 * Do not set BIT_MASK(EV_REP) in evbit[0]: some keys (such as the
	 * power key) must not auto-repeat on a long press. The trade-off is
	 * that no key on this pad auto-repeats, which is acceptable, so the
	 * EV_REP flag is simply left out.
	 */
	input_dev->evbit[0] = BIT_MASK(EV_KEY);
	input_dev->keycode = keypad->keycodes;
	input_dev->keycodesize = sizeof(keypad->keycodes[0]);
	input_dev->keycodemax = ARRAY_SIZE(keypad->keycodes);

	matrix_keypad_build_keymap(keymap_data, MATRIX_ROW_SHIFT,
				keypad->keycodes, input_dev->keybit);

	input_set_capability(input_dev, EV_MSC, MSC_SCAN);
	input_set_drvdata(input_dev, keypad);

	/* Ensure that the keypad will stay dormant until opened */
	imx_keypad_inhibit(keypad);

	error = request_irq(irq, imx_keypad_irq_handler, IRQF_DISABLED,
			    pdev->name, keypad);
	if (error) {
		dev_err(&pdev->dev, "failed to request IRQ\n");
		goto failed_clock_put;
	}

	/* Register the input device */
	error = input_register_device(input_dev);
	if (error) {
		dev_err(&pdev->dev, "failed to register input device\n");
		goto failed_free_irq;
	}

	platform_set_drvdata(pdev, keypad);
	device_init_wakeup(&pdev->dev, 1);

	return 0;

failed_free_irq:
	free_irq(irq, keypad);
failed_clock_put:
	clk_put(keypad->clk);
failed_unmap:
	iounmap(keypad->mmio_base);
failed_free_priv:
	kfree(keypad);
failed_free_input:
	input_free_device(input_dev);
failed_rel_mem:
	release_mem_region(res->start, resource_size(res));
	return error;
}
Example #20
static int __devinit mxskbd_probe(struct platform_device *pdev)
{
	int i;
	int err = 0;
	struct resource *res;
	struct mxskbd_data *d;
	struct mxs_kbd_plat_data *plat_data;

	plat_data = (struct mxs_kbd_plat_data *)pdev->dev.platform_data;
	if (plat_data == NULL)
		return -ENODEV;

#ifdef CONFIG_HAS_WAKELOCK
	wake_lock_init(&key_wake_lock, WAKE_LOCK_SUSPEND, "mxs-keypad");
#endif

	/* Create and register the input driver. */
	d = mxskbd_data_alloc(pdev, plat_data->keypair,
			plat_data->keypair_offset);
	if (!d) {
		dev_err(&pdev->dev, "Cannot allocate driver structures\n");
		err = -ENOMEM;
		goto err_out;
	}

	_devdata = d;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_out;
	}
	d->base = (unsigned int)IO_ADDRESS(res->start);
	for (i = 0; i < MAX_CH; i++)
		d->chan[i] = plat_data->channel[i];
	d->btn_irq1 = platform_get_irq(pdev, 0);

	platform_set_drvdata(pdev, d);

	if (d->btn_irq1 > 0) {
		err = request_irq(d->btn_irq1, mxskbd_irq_handler,
				IRQF_DISABLED, pdev->name, pdev);
		if (err) {
			dev_err(&pdev->dev, "Cannot request keypad IRQ\n");
			goto err_free_dev;
		}
	}

	/* Register the input device */
	err = input_register_device(GET_INPUT_DEV(d));
	if (err)
		goto err_free_dev;

	/* these two have to be set after registering the input device */
	d->input->rep[REP_DELAY] = delay1;
	d->input->rep[REP_PERIOD] = delay2;

	for (i = 0; i < MAX_CH; i++)
		hw_lradc_use_channel(d->chan[i]);
	mxskbd_hwinit(pdev);

#ifdef ENABLE_BACKLIGHT_GPIO_CONTROL
	init_timer(&_bl_timer);
	_bl_timer.function = _keypad_bl_timer_handler;
#endif

	return 0;

err_free_dev:
	if (d->btn_irq1 > 0)
		free_irq(d->btn_irq1, pdev);

	mxskbd_data_free(d);
err_out:
#ifdef CONFIG_HAS_WAKELOCK
		wake_lock_destroy(&key_wake_lock);
#endif
	return err;
}
Example #21
/**
 * usb_hcd_omap_probe - initialize OMAP-based HCDs
 * Context: !in_interrupt()
 *
 * Allocates basic resources for this USB host controller, and
 * then invokes the start() method for the HCD associated with it
 * through the hotplug entry's driver_data.
 */
static int usb_hcd_omap_probe (const struct hc_driver *driver,
			  struct platform_device *pdev)
{
	int retval, irq;
	struct usb_hcd *hcd = NULL;
	struct ohci_hcd *ohci;

	if (pdev->num_resources != 2) {
		printk(KERN_ERR "hcd probe: invalid num_resources: %i\n",
		       pdev->num_resources);
		return -ENODEV;
	}

	if (pdev->resource[0].flags != IORESOURCE_MEM
			|| pdev->resource[1].flags != IORESOURCE_IRQ) {
		printk(KERN_ERR "hcd probe: invalid resource type\n");
		return -ENODEV;
	}

#ifndef CONFIG_ARCH_OMAP34XX
	usb_host_ck = clk_get(0, "usb_hhc_ck");
	if (IS_ERR(usb_host_ck))
		return PTR_ERR(usb_host_ck);

	if (!cpu_is_omap15xx())
		usb_dc_ck = clk_get(&pdev->dev, "usb_dc_ck");
	else
		usb_dc_ck = clk_get(&pdev->dev, "lb_ck");

	if (IS_ERR(usb_dc_ck)) {
		clk_put(usb_host_ck);
		return PTR_ERR(usb_dc_ck);
	}
#endif


	hcd = usb_create_hcd (driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		retval = -ENOMEM;
		goto err0;
	}
#if defined(CONFIG_ARCH_OMAP34XX)
	clk_enable(clk_get(NULL, "usbhost_ick"));
	clk_enable(clk_get(NULL, "usbtll_ick"));
	clk_enable(clk_get(NULL, "usbtll_fck"));
	clk_enable(clk_get(NULL, "usbhost_120m_fck"));
	clk_enable(clk_get(NULL, "usbhost_48m_fck"));
#endif
	hcd->rsrc_start = pdev->resource[0].start;
	hcd->rsrc_len = pdev->resource[0].end - pdev->resource[0].start + 1;

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
		dev_dbg(&pdev->dev, "request_mem_region failed\n");
		retval = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "can't ioremap OHCI HCD\n");
		retval = -ENOMEM;
		goto err2;
	}

	ohci = hcd_to_ohci(hcd);
	ohci_hcd_init(ohci);

	host_initialized = 0;
	host_enabled = 1;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		retval = -ENXIO;
		goto err3;
	}
	retval = usb_add_hcd(hcd, irq, IRQF_DISABLED);
	if (retval)
		goto err3;

	host_initialized = 1;

	if (!host_enabled)
		omap_ohci_clock_power(0);

	return 0;
err3:
	iounmap(hcd->regs);
err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
	usb_put_hcd(hcd);
err0:

#ifndef CONFIG_ARCH_OMAP34XX
	clk_put(usb_dc_ck);
	clk_put(usb_host_ck);
#endif
	return retval;
}
Example #22
static int tb10x_gpio_probe(struct platform_device *pdev)
{
    struct tb10x_gpio *tb10x_gpio;
    struct resource *mem;
    struct device_node *dn = pdev->dev.of_node;
    int ret = -EBUSY;
    u32 ngpio;

    if (!dn)
        return -EINVAL;

    if (of_property_read_u32(dn, "abilis,ngpio", &ngpio))
        return -EINVAL;

    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!mem) {
        dev_err(&pdev->dev, "No memory resource defined.\n");
        return -EINVAL;
    }

    tb10x_gpio = devm_kzalloc(&pdev->dev, sizeof(*tb10x_gpio), GFP_KERNEL);
    if (tb10x_gpio == NULL)
        return -ENOMEM;

    spin_lock_init(&tb10x_gpio->spinlock);

    tb10x_gpio->base = devm_ioremap_resource(&pdev->dev, mem);
    if (IS_ERR(tb10x_gpio->base))
        return PTR_ERR(tb10x_gpio->base);

    tb10x_gpio->gc.label		= of_node_full_name(dn);
    tb10x_gpio->gc.dev		= &pdev->dev;
    tb10x_gpio->gc.owner		= THIS_MODULE;
    tb10x_gpio->gc.direction_input	= tb10x_gpio_direction_in;
    tb10x_gpio->gc.get		= tb10x_gpio_get;
    tb10x_gpio->gc.direction_output	= tb10x_gpio_direction_out;
    tb10x_gpio->gc.set		= tb10x_gpio_set;
    tb10x_gpio->gc.request		= tb10x_gpio_request;
    tb10x_gpio->gc.free		= tb10x_gpio_free;
    tb10x_gpio->gc.base		= -1;
    tb10x_gpio->gc.ngpio		= ngpio;
    tb10x_gpio->gc.can_sleep	= false;


    ret = gpiochip_add(&tb10x_gpio->gc);
    if (ret < 0) {
        dev_err(&pdev->dev, "Could not add gpiochip.\n");
        goto fail_gpiochip_registration;
    }

    platform_set_drvdata(pdev, tb10x_gpio);

    if (of_find_property(dn, "interrupt-controller", NULL)) {
        struct irq_chip_generic *gc;

        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
            dev_err(&pdev->dev, "No interrupt specified.\n");
            goto fail_get_irq;
        }

        tb10x_gpio->gc.to_irq	= tb10x_gpio_to_irq;
        tb10x_gpio->irq		= ret;

        ret = devm_request_irq(&pdev->dev, ret, tb10x_gpio_irq_cascade,
                               IRQF_TRIGGER_NONE | IRQF_SHARED,
                               dev_name(&pdev->dev), tb10x_gpio);
        if (ret != 0)
            goto fail_request_irq;

        tb10x_gpio->domain = irq_domain_add_linear(dn,
                             tb10x_gpio->gc.ngpio,
                             &irq_generic_chip_ops, NULL);
        if (!tb10x_gpio->domain) {
            ret = -ENOMEM;
            goto fail_irq_domain;
        }

        ret = irq_alloc_domain_generic_chips(tb10x_gpio->domain,
                                             tb10x_gpio->gc.ngpio, 1, tb10x_gpio->gc.label,
                                             handle_edge_irq, IRQ_NOREQUEST, IRQ_NOPROBE,
                                             IRQ_GC_INIT_MASK_CACHE);
        if (ret)
            goto fail_irq_domain;

        gc = tb10x_gpio->domain->gc->gc[0];
        gc->reg_base                         = tb10x_gpio->base;
        gc->chip_types[0].type               = IRQ_TYPE_EDGE_BOTH;
        gc->chip_types[0].chip.irq_ack       = irq_gc_ack_set_bit;
        gc->chip_types[0].chip.irq_mask      = irq_gc_mask_clr_bit;
        gc->chip_types[0].chip.irq_unmask    = irq_gc_mask_set_bit;
        gc->chip_types[0].chip.irq_set_type  = tb10x_gpio_irq_set_type;
        gc->chip_types[0].regs.ack           = OFFSET_TO_REG_CHANGE;
        gc->chip_types[0].regs.mask          = OFFSET_TO_REG_INT_EN;
    }

    return 0;

fail_irq_domain:
fail_request_irq:
fail_get_irq:
    gpiochip_remove(&tb10x_gpio->gc);
fail_gpiochip_registration:
fail_ioremap:
    return ret;
}
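The gc.to_irq hook assigned above has to translate a GPIO offset into a Linux IRQ number through the linear domain created in this probe. A plausible sketch, assuming the same structure layout (the real callback may differ):

static int tb10x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
    struct tb10x_gpio *tb10x_gpio = container_of(chip, struct tb10x_gpio, gc);

    return irq_create_mapping(tb10x_gpio->domain, offset);
}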
Example #23
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	mutex_init(&pdata->xpcs_mutex);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = (!pdata->adev || acpi_disabled) ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

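	/* Mirror the device IRQ, register base and MAC address into the net_device */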
	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

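	/* Enable 2-tuple (IP) and 4-tuple (TCP/UDP over IPv4) RSS hashing */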
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
Exemple #24
0
static int spear_adc_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct spear_adc_state *st;
	struct iio_dev *indio_dev = NULL;
	int ret = -ENODEV;
	int irq;

	indio_dev = devm_iio_device_alloc(dev, sizeof(struct spear_adc_state));
	if (!indio_dev) {
		dev_err(dev, "failed allocating iio device\n");
		return -ENOMEM;
	}

	st = iio_priv(indio_dev);
	st->np = np;

	/*
	 * SPEAr600 has a different register layout than other SPEAr SoC's
	 * (e.g. SPEAr3xx). Let's provide two register base addresses
	 * to support multi-arch kernels.
	 */
	st->adc_base_spear6xx = of_iomap(np, 0);
	if (!st->adc_base_spear6xx) {
		dev_err(dev, "failed mapping memory\n");
		return -ENOMEM;
	}
	st->adc_base_spear3xx =
		(struct adc_regs_spear3xx __iomem *)st->adc_base_spear6xx;

	st->clk = clk_get(dev, NULL);
	if (IS_ERR(st->clk)) {
		dev_err(dev, "failed getting clock\n");
		goto errout1;
	}

	ret = clk_prepare_enable(st->clk);
	if (ret) {
		dev_err(dev, "failed enabling clock\n");
		goto errout2;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "failed getting interrupt resource\n");
		ret = -EINVAL;
		goto errout3;
	}

	ret = devm_request_irq(dev, irq, spear_adc_isr, 0, SPEAR_ADC_MOD_NAME,
			       st);
	if (ret < 0) {
		dev_err(dev, "failed requesting interrupt\n");
		goto errout3;
	}

	if (of_property_read_u32(np, "sampling-frequency",
				 &st->sampling_freq)) {
		dev_err(dev, "sampling-frequency missing in DT\n");
		ret = -EINVAL;
		goto errout3;
	}

	/*
	 * Optional avg_samples defaults to 0, resulting in single data
	 * conversion
	 */
	of_property_read_u32(np, "average-samples", &st->avg_samples);

	/*
	 * Optional vref_external defaults to 0, resulting in internal vref
	 * selection
	 */
	of_property_read_u32(np, "vref-external", &st->vref_external);

	spear_adc_configure(st);

	platform_set_drvdata(pdev, indio_dev);

	init_completion(&st->completion);

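	/* Describe the ADC to the IIO core before registering it */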
	indio_dev->name = SPEAR_ADC_MOD_NAME;
	indio_dev->dev.parent = dev;
	indio_dev->info = &spear_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = spear_adc_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(spear_adc_iio_channels);

	ret = iio_device_register(indio_dev);
	if (ret)
		goto errout3;

	dev_info(dev, "SPEAR ADC driver loaded, IRQ %d\n", irq);

	return 0;

errout3:
	clk_disable_unprepare(st->clk);
errout2:
	clk_put(st->clk);
errout1:
	iounmap(st->adc_base_spear6xx);
	return ret;
}
Exemple #25
0
static int __devinit dm355evm_keys_probe(struct platform_device *pdev)
{
	struct dm355evm_keys	*keys;
	struct input_dev	*input;
	int			status;
	int			i;

	/* allocate instance struct and input dev */
	keys = kzalloc(sizeof *keys, GFP_KERNEL);
	input = input_allocate_device();
	if (!keys || !input) {
		status = -ENOMEM;
		goto fail1;
	}

	keys->dev = &pdev->dev;
	keys->input = input;

	/* set up "threaded IRQ handler" */
	status = platform_get_irq(pdev, 0);
	if (status < 0)
		goto fail1;
	keys->irq = status;

	input_set_drvdata(input, keys);

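	/* identify the device to user space */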
	input->name = "DM355 EVM Controls";
	input->phys = "dm355evm/input0";
	input->dev.parent = &pdev->dev;

	input->id.bustype = BUS_I2C;
	input->id.product = 0x0355;
	input->id.version = dm355evm_msp_read(DM355EVM_MSP_FIRMREV);

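	/* advertise a key event for each entry in the key table */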
	input->evbit[0] = BIT(EV_KEY);
	for (i = 0; i < ARRAY_SIZE(dm355evm_keys); i++)
		__set_bit(dm355evm_keys[i].keycode, input->keybit);

	input->setkeycode = dm355evm_setkeycode;
	input->getkeycode = dm355evm_getkeycode;

	/* REVISIT:  flush the event queue? */

	status = request_threaded_irq(keys->irq,
			dm355evm_keys_hardirq, dm355evm_keys_irq,
			IRQF_TRIGGER_FALLING,
			dev_name(&pdev->dev), keys);
	if (status < 0)
		goto fail1;

	/* register */
	status = input_register_device(input);
	if (status < 0)
		goto fail2;

	platform_set_drvdata(pdev, keys);

	return 0;

fail2:
	free_irq(keys->irq, keys);
fail1:
	input_free_device(input);
	kfree(keys);
	dev_err(&pdev->dev, "can't register, err %d\n", status);

	return status;
}
Exemple #26
0
static int __devinit sdhci_s3c_probe(struct platform_device *pdev)
{
	struct s3c_sdhci_platdata *pdata;
	struct sdhci_s3c_drv_data *drv_data;
	struct device *dev = &pdev->dev;
	struct sdhci_host *host;
	struct sdhci_s3c *sc;
	struct resource *res;
	int ret, irq, ptr, clks;

	if (!pdev->dev.platform_data) {
		dev_err(dev, "no device data specified\n");
		return -ENOENT;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no irq specified\n");
		return irq;
	}

	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));
	if (IS_ERR(host)) {
		dev_err(dev, "sdhci_alloc_host() failed\n");
		return PTR_ERR(host);
	}

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata) {
		ret = -ENOMEM;
		goto err_io_clk;
	}
	memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata));

	drv_data = sdhci_s3c_get_driver_data(pdev);
	sc = sdhci_priv(host);

	sc->host = host;
	sc->pdev = pdev;
	sc->pdata = pdata;
	sc->ext_cd_gpio = -1; /* invalid gpio number */

	platform_set_drvdata(pdev, host);

	sc->clk_io = clk_get(dev, "hsmmc");
	if (IS_ERR(sc->clk_io)) {
		dev_err(dev, "failed to get io clock\n");
		ret = PTR_ERR(sc->clk_io);
		goto err_io_clk;
	}

	/* enable the local io clock and keep it running for the moment. */
	clk_enable(sc->clk_io);

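	/* look up and enable the available bus clocks (mmc_busclk.0 .. mmc_busclk.N) */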
	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		struct clk *clk;
		char name[14];

		snprintf(name, 14, "mmc_busclk.%d", ptr);
		clk = clk_get(dev, name);
		if (IS_ERR(clk))
			continue;

		clks++;
		sc->clk_bus[ptr] = clk;

		/*
		 * save current clock index to know which clock bus
		 * is used later in overriding functions.
		 */
		sc->cur_clk = ptr;

		clk_enable(clk);

		dev_info(dev, "clock source %d: %s (%ld Hz)\n",
			 ptr, name, clk_get_rate(clk));
	}

	if (clks == 0) {
		dev_err(dev, "failed to find any bus clocks\n");
		ret = -ENOENT;
		goto err_no_busclks;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->ioaddr = devm_request_and_ioremap(&pdev->dev, res);
	if (!host->ioaddr) {
		dev_err(dev, "failed to map registers\n");
		ret = -ENXIO;
		goto err_req_regs;
	}

	/* Ensure we have minimal gpio selected CMD/CLK/Detect */
	if (pdata->cfg_gpio)
		pdata->cfg_gpio(pdev, pdata->max_width);

	host->hw_name = "samsung-hsmmc";
	host->ops = &sdhci_s3c_ops;
	host->quirks = 0;
	host->irq = irq;

	/* Setup quirks for the controller */
	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;
	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT;
	if (drv_data)
		host->quirks |= drv_data->sdhci_quirks;

#ifndef CONFIG_MMC_SDHCI_S3C_DMA

	/* we currently see overruns on errors, so disable the SDMA
	 * support as well. */
	host->quirks |= SDHCI_QUIRK_BROKEN_DMA;

#endif /* CONFIG_MMC_SDHCI_S3C_DMA */

	/* It seems we do not get an DATA transfer complete on non-busy
	 * transfers, not sure if this is a problem with this specific
	 * SDHCI block, or a missing configuration that needs to be set. */
	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ;

	/* This host supports the Auto CMD12 */
	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;

	/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */
	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC;

	if (pdata->cd_type == S3C_SDHCI_CD_NONE ||
	    pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;

	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)
		host->mmc->caps = MMC_CAP_NONREMOVABLE;

	switch (pdata->max_width) {
	case 8:
		host->mmc->caps |= MMC_CAP_8_BIT_DATA;
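		/* fall through - an 8-bit host also supports 4-bit transfers */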
	case 4:
		host->mmc->caps |= MMC_CAP_4_BIT_DATA;
		break;
	}

	if (pdata->pm_caps)
		host->mmc->pm_caps |= pdata->pm_caps;

	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |
			 SDHCI_QUIRK_32BIT_DMA_SIZE);

	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */
	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK;

	/*
	 * If controller does not have internal clock divider,
	 * we can use overriding functions instead of default.
	 */
	if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
	}

	/* It supports additional host capabilities if needed */
	if (pdata->host_caps)
		host->mmc->caps |= pdata->host_caps;

	if (pdata->host_caps2)
		host->mmc->caps2 |= pdata->host_caps2;

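	/* enable runtime PM with a 50 ms autosuspend delay before adding the host */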
	pm_runtime_enable(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_suspend_ignore_children(&pdev->dev, 1);

	ret = sdhci_add_host(host);
	if (ret) {
		dev_err(dev, "sdhci_add_host() failed\n");
		pm_runtime_forbid(&pdev->dev);
		pm_runtime_get_noresume(&pdev->dev);
		goto err_req_regs;
	}

	/* The following two methods of card detection might call
	   sdhci_s3c_notify_change() immediately, so they can be called
	   only after sdhci_add_host(). Setup errors are ignored. */
	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init)
		pdata->ext_cd_init(&sdhci_s3c_notify_change);
	if (pdata->cd_type == S3C_SDHCI_CD_GPIO &&
	    gpio_is_valid(pdata->ext_cd_gpio))
		sdhci_s3c_setup_card_detect_gpio(sc);

	return 0;

 err_req_regs:
	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) {
		if (sc->clk_bus[ptr]) {
			clk_disable(sc->clk_bus[ptr]);
			clk_put(sc->clk_bus[ptr]);
		}
	}

 err_no_busclks:
	clk_disable(sc->clk_io);
	clk_put(sc->clk_io);

 err_io_clk:
	sdhci_free_host(host);

	return ret;
}
Exemple #27
0
static int xhci_plat_probe(struct platform_device *pdev)
{
	const struct hc_driver	*driver;
	struct xhci_hcd		*xhci;
	struct resource         *res;
	struct usb_hcd		*hcd;
	int			ret;
	int			irq;

	if (usb_disabled())
		return -ENODEV;

	driver = &xhci_plat_xhci_driver;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd)
		return -ENOMEM;

	hcd_to_bus(hcd)->skip_resume = true;
	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				driver->description)) {
		dev_dbg(&pdev->dev, "controller already in use\n");
		ret = -EBUSY;
		goto put_hcd;
	}

	hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_dbg(&pdev->dev, "error mapping memory\n");
		ret = -EFAULT;
		goto release_mem_region;
	}

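	/* Mark the device active and enable runtime PM before adding the primary HCD */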
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret)
		goto unmap_registers;

	
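	/* The USB 2.0 root hub HCD is now stored as the platform device's driver data */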
	hcd = dev_get_drvdata(&pdev->dev);
	xhci = hcd_to_xhci(hcd);
	xhci->shared_hcd = usb_create_shared_hcd(driver, &pdev->dev,
			dev_name(&pdev->dev), hcd);
	if (!xhci->shared_hcd) {
		ret = -ENOMEM;
		goto dealloc_usb2_hcd;
	}

	hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
	*((struct xhci_hcd **) xhci->shared_hcd->hcd_priv) = xhci;

	ret = usb_add_hcd(xhci->shared_hcd, irq, IRQF_SHARED);
	if (ret)
		goto put_usb3_hcd;

	phy = usb_get_transceiver();
	
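	/* Hand the primary (USB 2.0) bus to the OTG transceiver when one is present */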
	if (phy && phy->otg && !(phy->flags & ENABLE_SECONDARY_PHY)) {
		dev_dbg(&pdev->dev, "%s otg support available\n", __func__);
		ret = otg_set_host(phy->otg, &hcd->self);
		if (ret) {
			dev_err(&pdev->dev, "%s otg_set_host failed\n",
				__func__);
			usb_put_transceiver(phy);
			goto put_usb3_hcd;
		}
	} else {
		pm_runtime_no_callbacks(&pdev->dev);
	}

	return 0;

put_usb3_hcd:
	usb_put_hcd(xhci->shared_hcd);

dealloc_usb2_hcd:
	usb_remove_hcd(hcd);

unmap_registers:
	iounmap(hcd->regs);

release_mem_region:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);

put_hcd:
	usb_put_hcd(hcd);

	return ret;
}
Exemple #28
0
static int __devinit bfin_rotary_probe(struct platform_device *pdev)
{
    struct bfin_rotary_platform_data *pdata = pdev->dev.platform_data;
    struct bfin_rot *rotary;
    struct input_dev *input;
    int error;


    if ((pdata->rotary_up_key && !pdata->rotary_down_key) ||
            (!pdata->rotary_up_key && pdata->rotary_down_key)) {
        return -EINVAL;
    }

    error = peripheral_request_list(per_cnt, dev_name(&pdev->dev));
    if (error) {
        dev_err(&pdev->dev, "requesting peripherals failed\n");
        return error;
    }

    rotary = kzalloc(sizeof(struct bfin_rot), GFP_KERNEL);
    input = input_allocate_device();
    if (!rotary || !input) {
        error = -ENOMEM;
        goto out1;
    }

    rotary->input = input;

    rotary->up_key = pdata->rotary_up_key;
    rotary->down_key = pdata->rotary_down_key;
    rotary->button_key = pdata->rotary_button_key;
    rotary->rel_code = pdata->rotary_rel_code;

    error = rotary->irq = platform_get_irq(pdev, 0);
    if (error < 0)
        goto out1;

    input->name = pdev->name;
    input->phys = "bfin-rotary/input0";
    input->dev.parent = &pdev->dev;

    input_set_drvdata(input, rotary);

    input->id.bustype = BUS_HOST;
    input->id.vendor = 0x0001;
    input->id.product = 0x0001;
    input->id.version = 0x0100;

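    /* Report key events for up/down rotation, or relative events if no keys are mapped */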
    if (rotary->up_key) {
        __set_bit(EV_KEY, input->evbit);
        __set_bit(rotary->up_key, input->keybit);
        __set_bit(rotary->down_key, input->keybit);
    } else {
        __set_bit(EV_REL, input->evbit);
        __set_bit(rotary->rel_code, input->relbit);
    }

    if (rotary->button_key) {
        __set_bit(EV_KEY, input->evbit);
        __set_bit(rotary->button_key, input->keybit);
    }

    error = request_irq(rotary->irq, bfin_rotary_isr,
                        0, dev_name(&pdev->dev), pdev);
    if (error) {
        dev_err(&pdev->dev,
                "unable to claim irq %d; error %d\n",
                rotary->irq, error);
        goto out1;
    }

    error = input_register_device(input);
    if (error) {
        dev_err(&pdev->dev,
                "unable to register input device (%d)\n", error);
        goto out2;
    }

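    /* Program the counter: button (zero-marker) IRQ, debounce, operating mode, then enable counting */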
    if (pdata->rotary_button_key)
        bfin_write_CNT_IMASK(CZMIE);

    if (pdata->mode & ROT_DEBE)
        bfin_write_CNT_DEBOUNCE(pdata->debounce & DPRESCALE);

    if (pdata->mode)
        bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() |
                              (pdata->mode & ~CNTE));

    bfin_write_CNT_IMASK(bfin_read_CNT_IMASK() | UCIE | DCIE);
    bfin_write_CNT_CONFIG(bfin_read_CNT_CONFIG() | CNTE);

    platform_set_drvdata(pdev, rotary);
    device_init_wakeup(&pdev->dev, 1);

    return 0;

out2:
    free_irq(rotary->irq, pdev);
out1:
    input_free_device(input);
    kfree(rotary);
    peripheral_free_list(per_cnt);

    return error;
}
Exemple #29
0
static int ehci_octeon_drv_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct resource *res_mem;
	int irq;
	int ret;

	if (usb_disabled())
		return -ENODEV;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "No irq assigned\n");
		return -ENODEV;
	}

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem == NULL) {
		dev_err(&pdev->dev, "No register space assigned\n");
		return -ENODEV;
	}

	/*
	 * We can DMA from anywhere. But the descriptors must be in
	 * the lower 4GB.
	 */
	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
	pdev->dev.dma_mask = &ehci_octeon_dma_mask;

	hcd = usb_create_hcd(&ehci_octeon_hc_driver, &pdev->dev, "octeon");
	if (!hcd)
		return -ENOMEM;

	hcd->rsrc_start = res_mem->start;
	hcd->rsrc_len = resource_size(res_mem);

	if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
				OCTEON_EHCI_HCD_NAME)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		ret = -EBUSY;
		goto err1;
	}

	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto err2;
	}

	ehci_octeon_start();

	ehci = hcd_to_ehci(hcd);

	/* Octeon EHCI matches CPU endianness. */
#ifdef __BIG_ENDIAN
	ehci->big_endian_mmio = 1;
#endif

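	/* Locate the capability and operational register sets */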
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);

	ret = usb_add_hcd(hcd, irq, IRQF_DISABLED | IRQF_SHARED);
	if (ret) {
		dev_dbg(&pdev->dev, "failed to add hcd with err %d\n", ret);
		goto err3;
	}

	platform_set_drvdata(pdev, hcd);

	/* root ports should always stay powered */
	ehci_port_power(ehci, 1);

	return 0;
err3:
	ehci_octeon_stop();

	iounmap(hcd->regs);
err2:
	release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err1:
	usb_put_hcd(hcd);
	return ret;
}
static int __devinit twl6030_usb_probe(struct platform_device *pdev)
{
	struct twl6030_usb	*twl;
	int			status, err;
	struct twl4030_usb_data *pdata;
	struct device *dev = &pdev->dev;
	pdata = dev->platform_data;

	twl = kzalloc(sizeof *twl, GFP_KERNEL);
	if (!twl)
		return -ENOMEM;

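	/* Record the IRQs and platform data, and fill in the OTG transceiver callbacks */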
	twl->dev		= &pdev->dev;
	twl->irq1		= platform_get_irq(pdev, 0);
	twl->irq2		= platform_get_irq(pdev, 1);
	twl->features		= pdata->features;
	twl->otg.dev		= twl->dev;
	twl->otg.label		= "twl6030";
	twl->otg.set_host	= twl6030_set_host;
	twl->otg.set_peripheral	= twl6030_set_peripheral;
	twl->otg.set_vbus	= twl6030_set_vbus;
	twl->otg.set_hz_mode	= twl6030_set_hz_mode;
	twl->otg.init		= twl6030_phy_init;
	twl->otg.set_power    = twl6030_set_power;
	twl->otg.shutdown	= twl6030_phy_shutdown;
	twl->otg.set_suspend	= twl6030_phy_suspend;
	twl->otg.start_srp	= twl6030_start_srp;
	twl->otg.state		= OTG_STATE_UNDEFINED;
	plugin_stat = PLUGIN_DEVICE_NONE;

	/* init spinlock for workqueue */
	spin_lock_init(&twl->lock);

	wake_lock_init(&twl_lock, WAKE_LOCK_SUSPEND, "twl_wake_lock");
	err = twl6030_usb_ldo_init(twl);
	if (err) {
		dev_err(&pdev->dev, "ldo init failed\n");
		kfree(twl);
		return err;
	}
	otg_set_transceiver(&twl->otg);

	platform_set_drvdata(pdev, twl);
	if (device_create_file(&pdev->dev, &dev_attr_vbus))
		dev_warn(&pdev->dev, "could not create sysfs file\n");

	ATOMIC_INIT_NOTIFIER_HEAD(&twl->otg.notifier);

	INIT_WORK(&twl->set_vbus_work, otg_set_vbus_work);

	twl->vbus_enable = false;
	twl->irq_enabled = true;
	status = request_threaded_irq(twl->irq1, NULL, twl6030_usbotg_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
			"twl6030_usb", twl);
	if (status < 0) {
		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
			twl->irq1, status);
		device_remove_file(twl->dev, &dev_attr_vbus);
		kfree(twl);
		return status;
	}

	status = request_threaded_irq(twl->irq2, NULL, twl6030_usb_irq,
			IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
			"twl6030_usb", twl);
	if (status < 0) {
		dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
			twl->irq2, status);
		free_irq(twl->irq1, twl);
		device_remove_file(twl->dev, &dev_attr_vbus);
		kfree(twl);
		return status;
	}

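	/* Perform the initial PHY setup and enable the transceiver interrupts */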
	twl->asleep = 0;
	twl->is_phy_suspended = true;
	pdata->phy_init(dev);
	twl6030_phy_suspend(&twl->otg, 0);
	twl6030_enable_irq(&twl->otg);
	dev_info(&pdev->dev, "Initialized TWL6030 USB module\n");

	return 0;
}