static void __devinit sdhci_pltfm_runtime_pm_init(struct device *device)
{
	struct sdio_dev *dev =
		platform_get_drvdata(to_platform_device(device));

	if (!sdhci_pltfm_rpm_enabled(dev))
		return;

	pm_runtime_irq_safe(device);
	pm_runtime_enable(device);

	if (dev->devtype == SDIO_DEV_TYPE_WIFI)
		pm_runtime_set_autosuspend_delay(device,
				KONA_MMC_WIFI_AUTOSUSPEND_DELAY);
	else
		pm_runtime_set_autosuspend_delay(device,
				KONA_MMC_AUTOSUSPEND_DELAY);

	pm_runtime_use_autosuspend(device);
}
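For context (this helper is not part of the listing above): a driver that enables runtime PM like this would normally pair it with a teardown on removal. A minimal sketch, reusing the same hypothetical sdio_dev helpers, might look like:
static void sdhci_pltfm_runtime_pm_exit(struct device *device)
{
	struct sdio_dev *dev =
		platform_get_drvdata(to_platform_device(device));

	if (!sdhci_pltfm_rpm_enabled(dev))
		return;

	/* Undo the autosuspend setup and disable runtime PM again */
	pm_runtime_dont_use_autosuspend(device);
	pm_runtime_disable(device);
}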
Example #2
static int sh_cmt_probe(struct platform_device *pdev)
{
	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_cmt_setup(p, pdev);
	if (ret) {
		kfree(p);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (cfg->clockevent_rating || cfg->clocksource_rating)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
Example #3
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}
	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
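A hedged illustration, not taken from the drivers shown: both timer probes call pm_runtime_irq_safe() because the clockevent/clocksource start and stop paths run in atomic context, so they must be able to use the runtime PM get/put helpers without sleeping, roughly like this:
/* Hypothetical helpers, for illustration only */
static void sh_timer_hw_enable(struct platform_device *pdev)
{
	/* Does not sleep here because pm_runtime_irq_safe() was set */
	pm_runtime_get_sync(&pdev->dev);
	/* ... program the timer registers ... */
}

static void sh_timer_hw_disable(struct platform_device *pdev)
{
	/* ... stop the timer ... */
	pm_runtime_put(&pdev->dev);
}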
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port	*up = NULL;
	struct resource		*mem, *irq, *dma_tx, *dma_rx;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	struct omap_device *od;
	int ret = -ENOSPC;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!request_mem_region(mem->start, (mem->end - mem->start) + 1,
				     pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!dma_rx) {
		ret = -EINVAL;
		goto err;
	}

	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!dma_tx) {
		ret = -EINVAL;
		goto err;
	}

	up = kzalloc(sizeof(*up), GFP_KERNEL);
	if (up == NULL) {
		ret = -ENOMEM;
		goto do_release_region;
	}
	sprintf(up->name, "OMAP UART%d", pdev->id);
	up->pdev = pdev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;
	up->port.line = pdev->id;

	up->port.mapbase = mem->start;
	up->port.membase = ioremap(mem->start, (mem->end - mem->start) + 1);

	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err1;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	up->uart_dma.uart_base = mem->start;
	up->errata = omap_up_info->errata;
	up->enable_wakeup = omap_up_info->enable_wakeup;
	up->wer = omap_up_info->wer;
	up->chk_wakeup = omap_up_info->chk_wakeup;
	up->wake_peer = omap_up_info->wake_peer;
	up->rts_mux_driver_control = omap_up_info->rts_mux_driver_control;
	up->rts_pullup_in_suspend = 0;
	up->wer_restore = 0;

	if (omap_up_info->use_dma) {
		up->uart_dma.uart_dma_tx = dma_tx->start;
		up->uart_dma.uart_dma_rx = dma_rx->start;
		up->use_dma = 1;
		up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
		up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
		up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
		spin_lock_init(&(up->uart_dma.tx_lock));
		spin_lock_init(&(up->uart_dma.rx_lock));
		up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
		up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
	}

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->auto_sus_timeout);

	if (device_may_wakeup(&pdev->dev))
		pm_runtime_enable(&pdev->dev);

	pm_runtime_irq_safe(&pdev->dev);
	if (omap_up_info->console_uart) {
		od = to_omap_device(up->pdev);
		omap_hwmod_idle(od->hwmods[0]);
		serial_omap_port_enable(up);
		serial_omap_port_disable(up);
	}

	ui[pdev->id] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err1;

	dev_set_drvdata(&pdev->dev, up);
	platform_set_drvdata(pdev, up);

	return 0;
err:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
err1:
	kfree(up);
do_release_region:
	release_mem_region(mem->start, (mem->end - mem->start) + 1);
	return ret;
}
Example #5
static int serial_omap_probe(struct platform_device *pdev)
{
	struct omap_uart_port_info *omap_up_info = dev_get_platdata(&pdev->dev);
	struct uart_omap_port *up;
	struct resource *mem;
	void __iomem *base;
	int uartirq = 0;
	int wakeirq = 0;
	int ret;

	/* The optional wakeirq may be specified in the board dts file */
	if (pdev->dev.of_node) {
		uartirq = irq_of_parse_and_map(pdev->dev.of_node, 0);
		if (!uartirq)
			return -EPROBE_DEFER;
		wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
		omap_up_info = of_get_uart_port_info(&pdev->dev);
		pdev->dev.platform_data = omap_up_info;
	} else {
		uartirq = platform_get_irq(pdev, 0);
		if (uartirq < 0)
			return -EPROBE_DEFER;
	}

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(base))
		return PTR_ERR(base);

	up->dev = &pdev->dev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = uartirq;
	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		ret = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		ret = pdev->id;

	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
			ret);
		goto err_port_line;
	}
	up->port.line = ret;

	if (up->port.line >= OMAP_MAX_HSUART_PORTS) {
		dev_err(&pdev->dev, "uart ID %d >  MAX %d.\n", up->port.line,
			OMAP_MAX_HSUART_PORTS);
		ret = -ENXIO;
		goto err_port_line;
	}

	up->wakeirq = wakeirq;
	if (!up->wakeirq)
		dev_info(up->port.dev, "no wakeirq for uart%d\n",
			 up->port.line);

	ret = serial_omap_probe_rs485(up, pdev->dev.of_node);
	if (ret < 0)
		goto err_rs485;

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = base;
	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	up->port.rs485_config = serial_omap_config_rs485;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	platform_set_drvdata(pdev, up);
	if (omap_up_info->autosuspend_timeout == 0)
		omap_up_info->autosuspend_timeout = -1;

	device_init_wakeup(up->dev, true);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(up);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_qos_remove_request(&up->pm_qos_request);
	device_init_wakeup(up->dev, false);
err_rs485:
err_port_line:
	return ret;
}
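For reference, a minimal sketch (assumed, not copied from the driver) of how the hot paths typically bracket register access once pm_runtime_irq_safe() and autosuspend are configured as above:
/* Hypothetical helpers, for illustration only */
static inline void serial_omap_pm_get(struct uart_omap_port *up)
{
	/* Safe even in atomic context because pm_runtime_irq_safe() is set */
	pm_runtime_get_sync(up->dev);
}

static inline void serial_omap_pm_put(struct uart_omap_port *up)
{
	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
}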
Example #6
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
    struct hsi_platform_data *pdata = pd->dev.platform_data;
    struct hsi_dev *hsi_ctrl;
    u32 revision;
    int err;

    dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

    dev_dbg(&pd->dev, "The platform device probed is an %s\n",
            hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

    if (!pdata) {
        dev_err(&pd->dev, "No platform_data found on hsi device\n");
        return -ENXIO;
    }

    hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
    if (hsi_ctrl == NULL) {
        dev_err(&pd->dev, "Could not allocate memory for"
                " struct hsi_dev\n");
        return -ENOMEM;
    }

    platform_set_drvdata(pd, hsi_ctrl);
    err = hsi_controller_init(hsi_ctrl, pd);
    if (err < 0) {
        dev_err(&pd->dev, "Could not initialize hsi controller:"
                " %d\n", err);
        goto rollback1;
    }
    /* Wakeup dependency was disabled for HSI <-> MPU PM_L3INIT_HSI_WKDEP */
#if 0
    omap_writel(0x141, 0x4A307338);
#endif
    pm_runtime_enable(hsi_ctrl->dev);
    pm_runtime_irq_safe(hsi_ctrl->dev);
    hsi_clocks_enable(hsi_ctrl->dev, __func__);

    /* Non critical SW Reset */
    err = hsi_softreset(hsi_ctrl);
    if (err < 0)
        goto rollback2;

    hsi_set_pm_force_hsi_on(hsi_ctrl);

    /* Configure HSI ports */
    hsi_set_ports_default(hsi_ctrl, pd);

    /* Gather info from registers for the driver.(REVISION) */
    revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
    if (hsi_driver_device_is_hsi(pd))
        dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
                 revision);
    else
        dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
                 (revision & HSI_SSI_REV_MAJOR) >> 4,
                 (revision & HSI_SSI_REV_MINOR));

    err = hsi_debug_add_ctrl(hsi_ctrl);
    if (err < 0) {
        dev_err(&pd->dev,
                "Could not add hsi controller to debugfs: %d\n", err);
        goto rollback2;
    }

    err = register_hsi_devices(hsi_ctrl);
    if (err < 0) {
        dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
        goto rollback3;
    }

    /* Allow HSI to wake up the platform */
    device_init_wakeup(hsi_ctrl->dev, true);

    /* Set the HSI FCLK to default. */
    if (!pdata->device_scale) {
        dev_err(&pd->dev, "%s: No platform device_scale function\n",
                __func__);
        err = -ENXIO;
        goto rollback3;
    }
    err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
                              pdata->default_hsi_fclk);
    if (err == -EBUSY) {
        dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
                 "Will retry on next open\n",
                 pdata->default_hsi_fclk);
    } else if (err) {
        dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
                __func__, err, pdata->default_hsi_fclk);
        goto rollback3;
    } else {
        hsi_ctrl->clock_rate = pdata->default_hsi_fclk;
    }

    /* From here no need for HSI HW access */
    hsi_clocks_disable(hsi_ctrl->dev, __func__);

    return 0;

rollback3:
    hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
    hsi_controller_exit(hsi_ctrl);

    /* From here no need for HSI HW access */
    hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
    kfree(hsi_ctrl);
    return err;
}
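The hsi_clocks_enable()/hsi_clocks_disable() calls above are assumed here to be thin wrappers around runtime PM (which is also why pm_runtime_irq_safe() is set on the controller device); a rough sketch of what such wrappers might look like:
int hsi_clocks_enable(struct device *dev, const char *caller)
{
	dev_dbg(dev, "%s: enable requested by %s\n", __func__, caller);
	return pm_runtime_get_sync(dev);
}

void hsi_clocks_disable(struct device *dev, const char *caller)
{
	dev_dbg(dev, "%s: disable requested by %s\n", __func__, caller);
	pm_runtime_put_sync(dev);
}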
struct lego_port_device
*ev3_output_port_register(struct ev3_output_port_platform_data *pdata,
			  struct device *parent)
{
	struct ev3_output_port_data *data;
	struct pwm_device *pwm;
	int err;

	if (WARN(!pdata, "Platform data is required."))
		return ERR_PTR(-EINVAL);

	data = kzalloc(sizeof(struct ev3_output_port_data), GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	data->id = pdata->id;
	data->analog = get_legoev3_analog();
	if (IS_ERR(data->analog)) {
		dev_err(parent, "Could not get legoev3-analog device.\n");
		err = PTR_ERR(data->analog);
		goto err_request_legoev3_analog;
	}

	data->gpio[GPIO_PIN1].gpio	= pdata->pin1_gpio;
	data->gpio[GPIO_PIN1].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN1].label	= "pin1";

	data->gpio[GPIO_PIN2].gpio	= pdata->pin2_gpio;
	data->gpio[GPIO_PIN2].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN2].label	= "pin2";

	data->gpio[GPIO_PIN5].gpio	= pdata->pin5_gpio;
	data->gpio[GPIO_PIN5].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN5].label	= "pin5";

	data->gpio[GPIO_PIN5_INT].gpio	= pdata->pin5_int_gpio;
	data->gpio[GPIO_PIN5_INT].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN5_INT].label	= "pin5_tacho";

	data->gpio[GPIO_PIN6_DIR].gpio	= pdata->pin6_dir_gpio;
	data->gpio[GPIO_PIN6_DIR].flags	= GPIOF_IN;
	data->gpio[GPIO_PIN6_DIR].label	= "pin6";

	err = gpio_request_array(data->gpio, ARRAY_SIZE(data->gpio));
	if (err) {
		dev_err(parent, "Requesting GPIOs failed.\n");
		goto err_gpio_request_array;
	}

	data->out_port.name = ev3_output_port_type.name;
	snprintf(data->out_port.port_name, LEGO_PORT_NAME_SIZE, "out%c",
		 data->id + 'A');
	pwm = pwm_get(NULL, data->out_port.port_name);
	if (IS_ERR(pwm)) {
		dev_err(parent, "Could not get pwm! (%ld)\n", PTR_ERR(pwm));
		err = PTR_ERR(pwm);
		goto err_pwm_get;
	}

	err = pwm_config(pwm, 0, NSEC_PER_SEC / 10000);
	if (err) {
		dev_err(parent,
			"Failed to set pwm duty percent and frequency! (%d)\n",
			err);
		goto err_pwm_config;
	}

	err = pwm_enable(pwm);
	if (err) {
		dev_err(parent, "Failed to start pwm! (%d)\n", err);
		goto err_pwm_start;
	}
	/* This lets us set the pwm duty cycle in an atomic context */
	pm_runtime_irq_safe(pwm->chip->dev);
	data->pwm = pwm;

	data->out_port.num_modes = NUM_EV3_OUTPUT_PORT_MODE;
	data->out_port.mode_info = legoev3_output_port_mode_info;
	data->out_port.set_mode = ev3_output_port_set_mode;
	data->out_port.set_device = ev3_output_port_set_device;
	data->out_port.get_status = ev3_output_port_get_status;
	data->out_port.dc_motor_ops = &ev3_output_port_motor_ops;
	data->out_port.context = data;
	err = lego_port_register(&data->out_port, &ev3_output_port_type, parent);
	if (err) {
		dev_err(parent, "Failed to register lego_port_device. (%d)\n",
			err);
		goto err_lego_port_register;
	}

	INIT_WORK(&data->change_uevent_work, ev3_output_port_change_uevent_work);
	INIT_WORK(&data->work, NULL);

	data->con_state = CON_STATE_INIT;

	hrtimer_init(&data->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	data->timer.function = ev3_output_port_timer_callback;
	hrtimer_start(&data->timer, ktime_set(0, OUTPUT_PORT_POLL_NS),
		      HRTIMER_MODE_REL);

	return &data->out_port;

err_lego_port_register:
	pwm_disable(pwm);
err_pwm_start:
err_pwm_config:
	pwm_put(pwm);
err_pwm_get:
	gpio_free_array(data->gpio, ARRAY_SIZE(data->gpio));
err_gpio_request_array:
	put_legoev3_analog(data->analog);
err_request_legoev3_analog:
	kfree(data);

	return ERR_PTR(err);
}
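To illustrate the pm_runtime_irq_safe(pwm->chip->dev) call above: with the PWM chip's device marked IRQ-safe, a duty-cycle update can bracket runtime PM get/put from atomic context. The helper below is hypothetical and assumes the underlying pwm_config() implementation itself does not sleep:
static void ev3_output_port_set_duty_cycle(struct ev3_output_port_data *data,
					   int duty_ns)
{
	/* pm_runtime_get_sync() will not sleep: the chip's device is irq-safe */
	pm_runtime_get_sync(data->pwm->chip->dev);
	pwm_config(data->pwm, duty_ns, NSEC_PER_SEC / 10000);
	pm_runtime_put(data->pwm->chip->dev);
}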
Example #8
static int __devinit omap_wdt_probe(struct platform_device *pdev)
{
	struct resource *res, *mem, *res_irq;
	struct omap_wdt_dev *wdev;
	int ret;
#ifdef CONFIG_OMAP_WATCHDOG_CONTROL
	int i;
#endif

	/* reserve static register mappings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_get_resource;
	}

	if (omap_wdt_dev) {
		ret = -EBUSY;
		goto err_busy;
	}

	mem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!mem) {
		ret = -EBUSY;
		goto err_busy;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
	if (!wdev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	wdev->omap_wdt_users = 0;
	wdev->mem = mem;
	wdev->dev = &pdev->dev;

	wdev->base = ioremap(res->start, resource_size(res));
	if (!wdev->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	if (res_irq) {
		ret = request_irq(res_irq->start, omap_wdt_interrupt, 0,
				  dev_name(&pdev->dev), wdev);

		if (ret)
			goto err_irq;

		wdev->irq = res_irq->start;
	}

	platform_set_drvdata(pdev, wdev);

	pm_runtime_enable(wdev->dev);
	pm_runtime_irq_safe(wdev->dev);
	pm_runtime_get_sync(wdev->dev);

	omap_wdt_disable(wdev);
	omap_wdt_adjust_timeout(timer_margin);

	wdev->omap_wdt_miscdev.parent = &pdev->dev;
	wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
	wdev->omap_wdt_miscdev.name = "watchdog";
	wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;

	ret = misc_register(&(wdev->omap_wdt_miscdev));
	if (ret)
		goto err_misc;

	pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
		__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
		timer_margin);

	pm_runtime_put_sync(wdev->dev);

	omap_wdt_dev = pdev;

/*	if (kernelpet && wdev->irq) {
		wdev->nb.notifier_call = omap_wdt_nb_func;
		atomic_notifier_chain_register(&touch_watchdog_notifier_head,
			&wdev->nb);
		omap_wdt_setup(wdev);
	} */

#ifdef CONFIG_OMAP_WATCHDOG_CONTROL
	for (i = 0; i < ARRAY_SIZE(wdt_attributes); i++) {
		ret = device_create_file(&pdev->dev, wdt_attributes[i]);
		if (ret != 0) {
			printk(KERN_ERR "Failed to create attr %d: %d\n",
				i, ret);
		}
	}

	if (!is_wdt_enabled) {
		disable_wdt(wdev);
		printk(KERN_WARNING "Disable omap watchdog\n");
	}
#endif
	return 0;

err_misc:
	platform_set_drvdata(pdev, NULL);

	if (wdev->irq)
		free_irq(wdev->irq, wdev);

err_irq:
	iounmap(wdev->base);

err_ioremap:
	wdev->base = NULL;
	kfree(wdev);

err_kzalloc:
	release_mem_region(res->start, resource_size(res));

err_busy:
err_get_resource:

	return ret;
}
/*
 * Check if the die sensor is cooling down. If it is still above t_hot
 * since the last throttle, throttle it again. The OMAP junction
 * temperature can stay in an unacceptable range for a long time, so the
 * idea is to check, after a t_hot->throttle event, whether the system
 * really came back below t_hot; if not, re-throttle, and keep doing so
 * until the temperature is below the t_hot range.
 */
static void throttle_delayed_work_fn(struct work_struct *work)
{
	int curr;
	struct omap_temp_sensor *temp_sensor =
				container_of(work, struct omap_temp_sensor,
					     throttle_work.work);
	curr = omap_read_current_temp(temp_sensor);

#ifdef CONFIG_OMAP_TEMP_CONTROL
	if (curr >= temp_limit || curr < 0) {
#else
	if (curr >= BGAP_THRESHOLD_T_HOT || curr < 0) {
#endif
		pr_warn("%s: OMAP temp read %d exceeds the threshold\n",
			__func__, curr);
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	} else {
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
	}
}

static irqreturn_t omap_tshut_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;

	/* Need to handle thermal mgmt in bootloader
	 * to avoid restart again at kernel level
	 */
	if (temp_sensor->is_efuse_valid) {
		pr_emerg("%s: Thermal shutdown reached rebooting device\n",
			__func__);
		kernel_restart(NULL);
	} else {
		pr_err("%s:Invalid EFUSE, Non-trimmed BGAP\n", __func__);
	}

	return IRQ_HANDLED;
}

static irqreturn_t omap_talert_irq_handler(int irq, void *data)
{
	struct omap_temp_sensor *temp_sensor = (struct omap_temp_sensor *)data;
	int t_hot, t_cold, temp_offset;

	t_hot = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_HOT_FLAG_MASK;
	t_cold = omap_temp_sensor_readl(temp_sensor, BGAP_STATUS_OFFSET)
	    & OMAP4_COLD_FLAG_MASK;
	temp_offset = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	if (t_hot) {
		omap_thermal_throttle();
		schedule_delayed_work(&temp_sensor->throttle_work,
			msecs_to_jiffies(THROTTLE_DELAY_MS));
		temp_offset &= ~(OMAP4_MASK_HOT_MASK);
		temp_offset |= OMAP4_MASK_COLD_MASK;
	} else if (t_cold) {
		cancel_delayed_work_sync(&temp_sensor->throttle_work);
		omap_thermal_unthrottle();
		temp_offset &= ~(OMAP4_MASK_COLD_MASK);
		temp_offset |= OMAP4_MASK_HOT_MASK;
	}

	omap_temp_sensor_writel(temp_sensor, temp_offset, BGAP_CTRL_OFFSET);

	return IRQ_HANDLED;
}

static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0, val;

	if (!pdata) {
		dev_err(dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "%s:no mem resource\n", __func__);
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
	if (temp_sensor->irq < 0) {
		dev_err(dev, "%s:Cannot get thermal alert irq\n",
			__func__);
		ret = -EINVAL;
		goto get_irq_err;
	}

	ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
		"thermal_shutdown");
	if (ret) {
		dev_err(dev, "%s: Could not get tshut_gpio\n",
			__func__);
		goto tshut_gpio_req_err;
	}

	temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
	if (temp_sensor->tshut_irq < 0) {
		dev_err(dev, "%s:Cannot get thermal shutdown irq\n",
			__func__);
		ret = -EINVAL;
		goto get_tshut_irq_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	/*
	 * Check if the efuse has a non-zero value. If not, it is an
	 * untrimmed sample and the temperatures may not be accurate.
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
			OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		ret = -EINVAL;
		goto clk_get_err;
	}

	/* Init delayed work for throttle decision */
	INIT_DELAYED_WORK(&temp_sensor->throttle_work,
			  throttle_delayed_work_fn);

	platform_set_drvdata(pdev, temp_sensor);

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	omap_enable_continuous_mode(temp_sensor);
	omap_configure_temp_sensor_thresholds(temp_sensor);
	/* 1 ms */
	omap_configure_temp_sensor_counter(temp_sensor, 1);

	/* Wait till the first conversion is done; wait for at least 1 ms */
	mdelay(2);

	/* Read the temperature once due to hw issue*/
	omap_read_current_temp(temp_sensor);

	/* Set 2 seconds time as default counter */
	omap_configure_temp_sensor_counter(temp_sensor,
						temp_sensor->clk_rate * 2);
	ret = request_threaded_irq(temp_sensor->irq, NULL,
			omap_talert_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"temp_sensor", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed.\n");
		goto req_irq_err;
	}

	ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
			omap_tshut_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"tshut", (void *)temp_sensor);
	if (ret) {
		dev_err(dev, "Request threaded irq failed for TSHUT.\n");
		goto tshut_irq_req_err;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sysfs_create_err;
	}

	/* unmask the T_COLD and unmask T_HOT at init */
	val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	val |= OMAP4_MASK_COLD_MASK;
	val |= OMAP4_MASK_HOT_MASK;
	omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);

	dev_info(dev, "%s probed", pdata->name);

	temp_sensor_pm = temp_sensor;

#ifdef CONFIG_OMAP_TEMP_CONTROL
	ctrl_sensor = temp_sensor;
	tempcontrol_registerlimit(temp_limit);
#endif

	return 0;

sysfs_create_err:
	free_irq(temp_sensor->tshut_irq, temp_sensor);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
tshut_irq_req_err:
	free_irq(temp_sensor->irq, temp_sensor);
req_irq_err:
	platform_set_drvdata(pdev, NULL);
	omap_temp_sensor_disable(temp_sensor);
sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(dev);
get_tshut_irq_err:
	gpio_free(OMAP_TSHUT_GPIO);
tshut_gpio_req_err:
get_irq_err:
plat_res_err:
	kfree(temp_sensor);
	return ret;
}

static int __devexit omap_temp_sensor_remove(struct platform_device *pdev)
{
	struct omap_temp_sensor *temp_sensor = platform_get_drvdata(pdev);

	sysfs_remove_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	cancel_delayed_work_sync(&temp_sensor->throttle_work);
	omap_temp_sensor_disable(temp_sensor);
	clk_put(temp_sensor->clock);
	platform_set_drvdata(pdev, NULL);
	if (temp_sensor->irq)
		free_irq(temp_sensor->irq, temp_sensor);
	if (temp_sensor->tshut_irq)
		free_irq(temp_sensor->tshut_irq, temp_sensor);
	kfree(temp_sensor);

	return 0;
}

#ifdef CONFIG_PM
static void omap_temp_sensor_save_ctxt(struct omap_temp_sensor *temp_sensor)
{
	temp_sensor_context.temp_sensor_ctrl =
	    omap_temp_sensor_readl(temp_sensor, TEMP_SENSOR_CTRL_OFFSET);
	temp_sensor_context.bg_ctrl =
	    omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	temp_sensor_context.bg_counter =
	    omap_temp_sensor_readl(temp_sensor, BGAP_COUNTER_OFFSET);
	temp_sensor_context.bg_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_THRESHOLD_OFFSET);
	temp_sensor_context.temp_sensor_tshut_threshold =
	    omap_temp_sensor_readl(temp_sensor, BGAP_TSHUT_OFFSET);
}
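The save function above presumably has a matching restore used on the resume path; a sketch of that counterpart, writing the saved values back in the same order, might look like:
static void omap_temp_sensor_restore_ctxt(struct omap_temp_sensor *temp_sensor)
{
	omap_temp_sensor_writel(temp_sensor,
				temp_sensor_context.temp_sensor_ctrl,
				TEMP_SENSOR_CTRL_OFFSET);
	omap_temp_sensor_writel(temp_sensor,
				temp_sensor_context.bg_ctrl,
				BGAP_CTRL_OFFSET);
	omap_temp_sensor_writel(temp_sensor,
				temp_sensor_context.bg_counter,
				BGAP_COUNTER_OFFSET);
	omap_temp_sensor_writel(temp_sensor,
				temp_sensor_context.bg_threshold,
				BGAP_THRESHOLD_OFFSET);
	omap_temp_sensor_writel(temp_sensor,
				temp_sensor_context.temp_sensor_tshut_threshold,
				BGAP_TSHUT_OFFSET);
}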
static long omap_wdt_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	struct omap_wdt_dev *wdev;
	int new_margin;
	static const struct watchdog_info ident = {
		.identity = "OMAP Watchdog",
		.options = WDIOF_SETTIMEOUT,
		.firmware_version = 0,
	};

	wdev = file->private_data;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user((struct watchdog_info __user *)arg, &ident,
				sizeof(ident));
	case WDIOC_GETSTATUS:
		return put_user(0, (int __user *)arg);
	case WDIOC_GETBOOTSTATUS:
		if (cpu_is_omap16xx())
			return put_user(__raw_readw(ARM_SYSST),
					(int __user *)arg);
		if (cpu_is_omap24xx())
			return put_user(omap_prcm_get_reset_sources(),
					(int __user *)arg);
	case WDIOC_KEEPALIVE:
		spin_lock(&wdt_lock);
		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		return 0;
	case WDIOC_SETTIMEOUT:
		if (get_user(new_margin, (int __user *)arg))
			return -EFAULT;
		omap_wdt_adjust_timeout(new_margin);

		spin_lock(&wdt_lock);
		omap_wdt_disable(wdev);
		omap_wdt_set_timeout(wdev);
		omap_wdt_enable(wdev);

		omap_wdt_ping(wdev);
		spin_unlock(&wdt_lock);
		/* Fall through */
	case WDIOC_GETTIMEOUT:
		return put_user(timer_margin, (int __user *)arg);
	default:
		return -ENOTTY;
	}
}

static const struct file_operations omap_wdt_fops = {
	.owner = THIS_MODULE,
	.write = omap_wdt_write,
	.unlocked_ioctl = omap_wdt_ioctl,
	.open = omap_wdt_open,
	.release = omap_wdt_release,
	.llseek = no_llseek,
};

static int omap_wdt_nb_func(struct notifier_block *nb, unsigned long val,
	void *v)
{
	struct omap_wdt_dev *wdev = container_of(nb, struct omap_wdt_dev, nb);
	omap_wdt_ping(wdev);

	return NOTIFY_OK;
}

static int __devinit omap_wdt_probe(struct platform_device *pdev)
{
	struct resource *res, *mem, *res_irq;
	struct omap_wdt_dev *wdev;
	int ret;

	/* reserve static register mappings */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		ret = -ENOENT;
		goto err_get_resource;
	}

	if (omap_wdt_dev) {
		ret = -EBUSY;
		goto err_busy;
	}

	mem = request_mem_region(res->start, resource_size(res), pdev->name);
	if (!mem) {
		ret = -EBUSY;
		goto err_busy;
	}

	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);

	wdev = kzalloc(sizeof(struct omap_wdt_dev), GFP_KERNEL);
	if (!wdev) {
		ret = -ENOMEM;
		goto err_kzalloc;
	}

	wdev->omap_wdt_users = 0;
	wdev->mem = mem;
	wdev->dev = &pdev->dev;

	wdev->base = ioremap(res->start, resource_size(res));
	if (!wdev->base) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	if (res_irq) {
		ret = request_irq(res_irq->start, omap_wdt_interrupt, 0,
				  dev_name(&pdev->dev), wdev);

		if (ret)
			goto err_irq;

		wdev->irq = res_irq->start;
	}

	platform_set_drvdata(pdev, wdev);

	/*
	 * Note: PM runtime functions must be used only while the watchdog
	 * driver is enabling/disabling the watchdog. Dynamic handling of the
	 * watchdog timer clock on OMAP4/5 (as provided by the runtime PM API)
	 * will cause a system failure.
	 */
	pm_runtime_enable(wdev->dev);
	pm_runtime_irq_safe(wdev->dev);
	pm_runtime_get_sync(wdev->dev);

	omap_wdt_disable(wdev);
	omap_wdt_adjust_timeout(timer_margin);

	wdev->omap_wdt_miscdev.parent = &pdev->dev;
	wdev->omap_wdt_miscdev.minor = WATCHDOG_MINOR;
	wdev->omap_wdt_miscdev.name = "watchdog";
	wdev->omap_wdt_miscdev.fops = &omap_wdt_fops;

	ret = misc_register(&(wdev->omap_wdt_miscdev));
	if (ret)
		goto err_misc;

	pr_info("OMAP Watchdog Timer Rev 0x%02x: initial timeout %d sec\n",
		__raw_readl(wdev->base + OMAP_WATCHDOG_REV) & 0xFF,
		timer_margin);

	omap_wdt_dev = pdev;

	if (kernelpet && wdev->irq) {
		wdev->nb.notifier_call = omap_wdt_nb_func;
		atomic_notifier_chain_register(&touch_watchdog_notifier_head,
			&wdev->nb);
		return omap_wdt_setup(wdev);
	}

	pm_runtime_put_sync(wdev->dev);
	return 0;

err_misc:
	pm_runtime_put_sync(wdev->dev);
	platform_set_drvdata(pdev, NULL);

	if (wdev->irq)
		free_irq(wdev->irq, wdev);

err_irq:
	iounmap(wdev->base);

err_ioremap:
	wdev->base = NULL;
	kfree(wdev);

err_kzalloc:
	release_mem_region(res->start, resource_size(res));

err_busy:
err_get_resource:

	return ret;
}

static void omap_wdt_shutdown(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		omap_wdt_disable(wdev);
		pm_runtime_put_sync(wdev->dev);
	}
}

static int __devexit omap_wdt_remove(struct platform_device *pdev)
{
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;

	misc_deregister(&(wdev->omap_wdt_miscdev));
	release_mem_region(res->start, resource_size(res));
	platform_set_drvdata(pdev, NULL);

	if (wdev->irq)
		free_irq(wdev->irq, wdev);

	if (kernelpet && wdev->irq)
		atomic_notifier_chain_unregister(&touch_watchdog_notifier_head,
			&wdev->nb);

	iounmap(wdev->base);

	kfree(wdev);
	omap_wdt_dev = NULL;

	return 0;
}

#ifdef	CONFIG_PM

/* REVISIT ... not clear this is the best way to handle system suspend; and
 * it's very inappropriate for selective device suspend (e.g. suspending this
 * through sysfs rather than by stopping the watchdog daemon).  Also, this
 * may not play well enough with NOWAYOUT...
 */

static int omap_wdt_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		omap_wdt_disable(wdev);
		pm_runtime_put_sync_suspend(wdev->dev);
	}

	return 0;
}

static int omap_wdt_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);

	if (wdev->omap_wdt_users) {
		pm_runtime_get_sync(wdev->dev);
		omap_wdt_enable(wdev);
		omap_wdt_ping(wdev);
	}

	return 0;
}

#else
#define	omap_wdt_suspend	NULL
#define	omap_wdt_resume		NULL
#endif

static const struct dev_pm_ops omap_wdt_pm_ops = {
	.suspend_noirq	= omap_wdt_suspend,
	.resume_noirq	= omap_wdt_resume,
};

static struct platform_driver omap_wdt_driver = {
	.probe		= omap_wdt_probe,
	.remove		= __devexit_p(omap_wdt_remove),
	.shutdown	= omap_wdt_shutdown,
	.driver		= {
		.owner	= THIS_MODULE,
		.name	= "omap_wdt",
		.pm	= &omap_wdt_pm_ops,
	},
};

static int __init omap_wdt_init(void)
{
	spin_lock_init(&wdt_lock);
	return platform_driver_register(&omap_wdt_driver);
}
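For completeness, the standard counterpart (not shown in the listing) unregisters the driver on module exit:
static void __exit omap_wdt_exit(void)
{
	platform_driver_unregister(&omap_wdt_driver);
}

module_init(omap_wdt_init);
module_exit(omap_wdt_exit);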
/* HSI Platform Device probing & hsi_device registration */
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	/* Check if mandatory board functions are populated */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "Missing platform device_scale function\n");
		return -ENOSYS;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	hsi_ctrl->hsi_fclk_req = pdata->default_hsi_fclk;
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		/*
		 * PM framework init is a late_initcall, so it may not yet be
		 * initialized; be prepared to retry later on open.
		 */
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
			 "Will retry on next open\n", pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
				__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->hsi_fclk_current = pdata->default_hsi_fclk;
	}
	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}
Example #12
static int omap8250_probe(struct platform_device *pdev)
{
	struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	struct resource *irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	struct omap8250_priv *priv;
	struct uart_8250_port up;
	int ret;
	void __iomem *membase;

	if (!regs || !irq) {
		dev_err(&pdev->dev, "missing registers or irq\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	membase = devm_ioremap_nocache(&pdev->dev, regs->start,
				       resource_size(regs));
	if (!membase)
		return -ENODEV;

	memset(&up, 0, sizeof(up));
	up.port.dev = &pdev->dev;
	up.port.mapbase = regs->start;
	up.port.membase = membase;
	up.port.irq = irq->start;
	/*
	 * It claims to be 16C750 compatible, however it is a little different.
	 * It has EFR and has no FCR7_64byte bit. The AFE (which it claims to
	 * have) is enabled via EFR instead of MCR. The type is set to 8250
	 * here just to get things going. UNKNOWN does not work for a few
	 * reasons and we don't need our own type since we don't use 8250's
	 * set_termios() or pm callback.
	 */
	up.port.type = PORT_8250;
	up.port.iotype = UPIO_MEM;
	up.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SOFT_FLOW |
		UPF_HARD_FLOW;
	up.port.private_data = priv;

	up.port.regshift = 2;
	up.port.fifosize = 64;
	up.tx_loadsz = 64;
	up.capabilities = UART_CAP_FIFO;
#ifdef CONFIG_PM_RUNTIME
	/*
	 * PM_RUNTIME is mostly transparent. However, to do it right we need a
	 * TX empty interrupt before we can put the device into auto idle. So
	 * if PM_RUNTIME is not enabled we don't add that flag and can spare
	 * that one extra interrupt in the TX path.
	 */
	up.capabilities |= UART_CAP_RPM;
#endif
	up.port.set_termios = omap_8250_set_termios;
	up.port.pm = omap_8250_pm;
	up.port.startup = omap_8250_startup;
	up.port.shutdown = omap_8250_shutdown;
	up.port.throttle = omap_8250_throttle;
	up.port.unthrottle = omap_8250_unthrottle;

	if (pdev->dev.of_node) {
		ret = of_alias_get_id(pdev->dev.of_node, "serial");

		of_property_read_u32(pdev->dev.of_node, "clock-frequency",
				     &up.port.uartclk);
		priv->wakeirq = irq_of_parse_and_map(pdev->dev.of_node, 1);
	} else {
		ret = pdev->id;
	}
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id\n");
		return ret;
	}
	up.port.line = ret;

	if (!up.port.uartclk) {
		up.port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev,
			 "No clock speed specified: using default: %d\n",
			 DEFAULT_CLK_SPEED);
	}

	priv->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	priv->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&priv->pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
			   priv->latency);
	INIT_WORK(&priv->qos_work, omap8250_uart_qos_work);

	device_init_wakeup(&pdev->dev, true);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, -1);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(&up, priv);
#ifdef CONFIG_SERIAL_8250_DMA
	if (pdev->dev.of_node) {
		/*
		 * Oh DMA support. If there are no DMA properties in the DT then
		 * we will fall back to a generic DMA channel which does not
		 * really work here. To ensure that we do not get a generic DMA
		 * channel assigned, we have the the_no_dma_filter_fn() here.
		 * To avoid "failed to request DMA" messages we check for DMA
		 * properties in DT.
		 */
		ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
		if (ret == 2) {
			up.dma = &priv->omap8250_dma;
			up.port.handle_irq = omap_8250_dma_handle_irq;
			priv->omap8250_dma.fn = the_no_dma_filter_fn;
			priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
			priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
			priv->omap8250_dma.rx_size = RX_TRIGGER;
			priv->omap8250_dma.rxconf.src_maxburst = RX_TRIGGER;
			priv->omap8250_dma.txconf.dst_maxburst = TX_TRIGGER;

			if (of_machine_is_compatible("ti,am33xx"))
				priv->habit |= OMAP_DMA_TX_KICK;
		}
	}
#endif
	ret = serial8250_register_8250_port(&up);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register 8250 port\n");
		goto err;
	}
	priv->line = ret;
	platform_set_drvdata(pdev, priv);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_put_autosuspend(&pdev->dev);
	return 0;
err:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}
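A hedged sketch (not copied from the source) of the matching remove() path, undoing the probe steps above roughly in reverse:
static int omap8250_remove(struct platform_device *pdev)
{
	struct omap8250_priv *priv = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);
	serial8250_unregister_port(priv->line);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	pm_qos_remove_request(&priv->pm_qos_request);
	device_init_wakeup(&pdev->dev, false);
	return 0;
}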
Example #13
static int __devinit serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port	*up;
	struct resource		*mem, *irq;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	int ret;

	if (pdev->dev.of_node)
		omap_up_info = of_get_uart_port_info(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		ret = gpio_request(omap_up_info->DTR_gpio, "omap-serial");
		if (ret < 0)
			return ret;
		ret = gpio_direction_output(omap_up_info->DTR_gpio,
					    omap_up_info->DTR_inverted);
		if (ret < 0)
			return ret;
	}

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	if (gpio_is_valid(omap_up_info->DTR_gpio) &&
	    omap_up_info->DTR_present) {
		up->DTR_gpio = omap_up_info->DTR_gpio;
		up->DTR_inverted = omap_up_info->DTR_inverted;
	} else
		up->DTR_gpio = -EINVAL;
	up->DTR_active = 0;

	up->dev = &pdev->dev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
								up->port.line);
		ret = -ENODEV;
		goto err_port_line;
	}

	up->pins = devm_pinctrl_get_select_default(&pdev->dev);
	if (IS_ERR(up->pins)) {
		dev_warn(&pdev->dev, "did not get pins for uart%i error: %li\n",
			 up->port.line, PTR_ERR(up->pins));
		up->pins = NULL;
	}

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
						resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev, "No clock speed specified: using default:"
						"%d\n", DEFAULT_CLK_SPEED);
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	platform_set_drvdata(pdev, up);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	omap_serial_fill_features_erratas(up);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_mark_last_busy(up->dev);
	pm_runtime_put_autosuspend(up->dev);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_ioremap:
err_port_line:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
	return ret;
}
static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0;

	if (!pdata) {
		dev_err(dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(dev, "%s:no mem resource\n", __func__);
		dump_stack();
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	/*
	 * Check if the efuse has a non-zero value. If not, it is an
	 * untrimmed sample and the temperatures may not be accurate.
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
			OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		ret = -EINVAL;
		goto clk_get_err;
	}
	platform_set_drvdata(pdev, temp_sensor);

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	omap_enable_continuous_mode(temp_sensor);

	/* Wait till the first conversion is done; wait for at least 1 ms */
	mdelay(2);

	/* Read the temperature once due to hw issue*/
	omap_read_current_temp(temp_sensor);

	/* Initialize Polling work queue*/
	INIT_DELAYED_WORK(&temp_sensor->temp_poll_work,
			  temp_poll_work_fn);
	schedule_delayed_work(&temp_sensor->temp_poll_work,
			msecs_to_jiffies(POLL_DELAY_MS));

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sensor_enable_err;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_section1_group);
	if (ret)
		dev_err(&pdev->dev, "could not create section sysfs files\n");
	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_section2_group);
	if (ret)
		dev_err(&pdev->dev, "could not create section sysfs files\n");
	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_section3_group);
	if (ret)
		dev_err(&pdev->dev, "could not create section sysfs files\n");
	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_section4_group);
	if (ret)
		dev_err(&pdev->dev, "could not create section sysfs files\n");
	ret = sysfs_create_group(&pdev->dev.kobj, &omap_temp_section5_group);
	if (ret)
		dev_err(&pdev->dev, "could not create section sysfs files\n");

	dev_info(dev, "%s probed", pdata->name);

	temp_sensor_pm = temp_sensor;

	return 0;

sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(dev);
plat_res_err:
	kfree(temp_sensor);
	return ret;
}
static int __devinit omap_temp_sensor_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct omap_temp_sensor_pdata *pdata = pdev->dev.platform_data;
	struct omap_temp_sensor *temp_sensor;
	struct resource *mem;
	int ret = 0, val;

	if (!pdata) {
		dev_err(&pdev->dev, "%s: platform data missing\n", __func__);
		return -EINVAL;
	}

	temp_sensor = kzalloc(sizeof(struct omap_temp_sensor), GFP_KERNEL);
	if (!temp_sensor)
		return -ENOMEM;

	spin_lock_init(&temp_sensor->lock);
	mutex_init(&temp_sensor->sensor_mutex);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "%s:no mem resource\n", __func__);
		ret = -EINVAL;
		goto plat_res_err;
	}

	temp_sensor->irq = platform_get_irq_byname(pdev, "thermal_alert");
	if (temp_sensor->irq < 0) {
		dev_err(&pdev->dev, "%s:Cannot get thermal alert irq\n",
			__func__);
		ret = -EINVAL;
		goto get_irq_err;
	}

	ret = gpio_request_one(OMAP_TSHUT_GPIO, GPIOF_DIR_IN,
		"thermal_shutdown");
	if (ret) {
		dev_err(&pdev->dev, "%s: Could not get tshut_gpio\n",
			__func__);
		goto tshut_gpio_req_err;
	}

	temp_sensor->tshut_irq = gpio_to_irq(OMAP_TSHUT_GPIO);
	if (temp_sensor->tshut_irq < 0) {
		dev_err(&pdev->dev, "%s:Cannot get thermal shutdown irq\n",
			__func__);
		ret = -EINVAL;
		goto get_tshut_irq_err;
	}

	temp_sensor->phy_base = pdata->offset;
	temp_sensor->pdev = pdev;
	temp_sensor->dev = dev;

	pm_runtime_enable(dev);
	pm_runtime_irq_safe(dev);

	kobject_uevent(&pdev->dev.kobj, KOBJ_ADD);
	platform_set_drvdata(pdev, temp_sensor);

	/*
	 * Check if the efuse has a non-zero value. If not, it is an
	 * untrimmed sample and the temperatures may not be accurate.
	 */
	if (omap_readl(OMAP4_CTRL_MODULE_CORE +
			OMAP4_CTRL_MODULE_CORE_STD_FUSE_OPP_BGAP))
		temp_sensor->is_efuse_valid = 1;

	temp_sensor->clock = clk_get(&temp_sensor->pdev->dev, "fck");
	if (IS_ERR(temp_sensor->clock)) {
		ret = PTR_ERR(temp_sensor->clock);
		pr_err("%s:Unable to get fclk: %d\n", __func__, ret);
		ret = -EINVAL;
		goto clk_get_err;
	}

	ret = omap_temp_sensor_enable(temp_sensor);
	if (ret) {
		dev_err(&pdev->dev, "%s:Cannot enable temp sensor\n", __func__);
		goto sensor_enable_err;
	}

	temp_sensor->therm_fw = kzalloc(sizeof(struct thermal_dev), GFP_KERNEL);
	if (temp_sensor->therm_fw) {
		temp_sensor->therm_fw->name = "omap_ondie_sensor";
		temp_sensor->therm_fw->domain_name = "cpu";
		temp_sensor->therm_fw->dev = temp_sensor->dev;
		temp_sensor->therm_fw->dev_ops = &omap_sensor_ops;
		thermal_sensor_dev_register(temp_sensor->therm_fw);
	} else {
		dev_err(&pdev->dev, "%s:Cannot alloc memory for thermal fw\n",
			__func__);
		ret = -ENOMEM;
		goto therm_fw_alloc_err;
	}

	omap_enable_continuous_mode(temp_sensor, 1);
	omap_configure_temp_sensor_thresholds(temp_sensor);
	/* 1 ms */
	omap_configure_temp_sensor_counter(temp_sensor, 1);

	/* Wait till the first conversion is done; wait for at least 1 ms */
	mdelay(2);

	/* Read the temperature once due to hw issue*/
	omap_report_temp(temp_sensor->therm_fw);

	/* Set 250 milli-seconds time as default counter */
	omap_configure_temp_sensor_counter(temp_sensor,
					temp_sensor->clk_rate * 250 / 1000);
	ret = sysfs_create_group(&pdev->dev.kobj,
				 &omap_temp_sensor_group);
	if (ret) {
		dev_err(&pdev->dev, "could not create sysfs files\n");
		goto sysfs_create_err;
	}

	ret = request_threaded_irq(temp_sensor->irq, NULL,
			omap_talert_irq_handler,
			IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
			"temp_sensor", (void *)temp_sensor);
	if (ret) {
		dev_err(&pdev->dev, "Request threaded irq failed.\n");
		goto req_irq_err;
	}

	ret = request_threaded_irq(temp_sensor->tshut_irq, NULL,
			omap_tshut_irq_handler,
			IRQF_TRIGGER_RISING | IRQF_ONESHOT,
			"tshut", (void *)temp_sensor);
	if (ret) {
		dev_err(&pdev->dev, "Request threaded irq failed for TSHUT.\n");
		goto tshut_irq_req_err;
	}

	/* unmask the T_COLD and unmask T_HOT at init */
	val = omap_temp_sensor_readl(temp_sensor, BGAP_CTRL_OFFSET);
	val |= OMAP4_MASK_COLD_MASK;
	val |= OMAP4_MASK_HOT_MASK;
	omap_temp_sensor_writel(temp_sensor, val, BGAP_CTRL_OFFSET);

	dev_info(&pdev->dev, "%s : '%s'\n", temp_sensor->therm_fw->name,
			pdata->name);

	temp_sensor_pm = temp_sensor;
	return 0;

tshut_irq_req_err:
	free_irq(temp_sensor->irq, temp_sensor);
req_irq_err:
	kobject_uevent(&temp_sensor->dev->kobj, KOBJ_REMOVE);
	sysfs_remove_group(&temp_sensor->dev->kobj, &omap_temp_sensor_group);
sysfs_create_err:
	thermal_sensor_dev_unregister(temp_sensor->therm_fw);
	kfree(temp_sensor->therm_fw);
	if (temp_sensor->clock)
		clk_put(temp_sensor->clock);
	platform_set_drvdata(pdev, NULL);
therm_fw_alloc_err:
	omap_temp_sensor_disable(temp_sensor);
sensor_enable_err:
	clk_put(temp_sensor->clock);
clk_get_err:
	pm_runtime_disable(&pdev->dev);
get_tshut_irq_err:
	gpio_free(OMAP_TSHUT_GPIO);
tshut_gpio_req_err:
get_irq_err:
plat_res_err:
	mutex_destroy(&temp_sensor->sensor_mutex);
	kfree(temp_sensor);
	return ret;
}
static int serial_omap_probe(struct platform_device *pdev)
{
	struct uart_omap_port	*up;
	struct resource		*mem, *irq, *dma_tx, *dma_rx;
	struct omap_uart_port_info *omap_up_info = pdev->dev.platform_data;
	int ret = -ENOSPC;

	if (pdev->dev.of_node)
		omap_up_info = of_get_uart_port_info(&pdev->dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -ENODEV;
	}

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
				pdev->dev.driver->name)) {
		dev_err(&pdev->dev, "memory region already claimed\n");
		return -EBUSY;
	}

	dma_rx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
	if (!dma_rx)
		return -ENXIO;

	dma_tx = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
	if (!dma_tx)
		return -ENXIO;

	up = devm_kzalloc(&pdev->dev, sizeof(*up), GFP_KERNEL);
	if (!up)
		return -ENOMEM;

	up->pdev = pdev;
	up->port.dev = &pdev->dev;
	up->port.type = PORT_OMAP;
	up->port.iotype = UPIO_MEM;
	up->port.irq = irq->start;

	up->port.regshift = 2;
	up->port.fifosize = 64;
	up->port.ops = &serial_omap_pops;

	if (pdev->dev.of_node)
		up->port.line = of_alias_get_id(pdev->dev.of_node, "serial");
	else
		up->port.line = pdev->id;

	if (up->port.line < 0) {
		dev_err(&pdev->dev, "failed to get alias/pdev id, errno %d\n",
								up->port.line);
		ret = -ENODEV;
		goto err_port_line;
	}

	sprintf(up->name, "OMAP UART%d", up->port.line);
	up->port.mapbase = mem->start;
	up->port.membase = devm_ioremap(&pdev->dev, mem->start,
						resource_size(mem));
	if (!up->port.membase) {
		dev_err(&pdev->dev, "can't ioremap UART\n");
		ret = -ENOMEM;
		goto err_ioremap;
	}

	up->port.flags = omap_up_info->flags;
	up->port.uartclk = omap_up_info->uartclk;
	if (!up->port.uartclk) {
		up->port.uartclk = DEFAULT_CLK_SPEED;
		dev_warn(&pdev->dev, "No clock speed specified: using default:"
						"%d\n", DEFAULT_CLK_SPEED);
	}
	up->uart_dma.uart_base = mem->start;
	up->errata = omap_up_info->errata;

	if (omap_up_info->dma_enabled) {
		up->uart_dma.uart_dma_tx = dma_tx->start;
		up->uart_dma.uart_dma_rx = dma_rx->start;
		up->use_dma = 1;
		up->uart_dma.rx_buf_size = omap_up_info->dma_rx_buf_size;
		up->uart_dma.rx_timeout = omap_up_info->dma_rx_timeout;
		up->uart_dma.rx_poll_rate = omap_up_info->dma_rx_poll_rate;
		spin_lock_init(&(up->uart_dma.tx_lock));
		spin_lock_init(&(up->uart_dma.rx_lock));
		up->uart_dma.tx_dma_channel = OMAP_UART_DMA_CH_FREE;
		up->uart_dma.rx_dma_channel = OMAP_UART_DMA_CH_FREE;
	}

	up->latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	up->calc_latency = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
	pm_qos_add_request(&up->pm_qos_request,
		PM_QOS_CPU_DMA_LATENCY, up->latency);
	serial_omap_uart_wq = create_singlethread_workqueue(up->name);
	INIT_WORK(&up->qos_work, serial_omap_uart_qos_work);

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev,
			omap_up_info->autosuspend_timeout);

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	ui[up->port.line] = up;
	serial_omap_add_console_port(up);

	ret = uart_add_one_port(&serial_omap_reg, &up->port);
	if (ret != 0)
		goto err_add_port;

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, up);
	return 0;

err_add_port:
	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_ioremap:
err_port_line:
	dev_err(&pdev->dev, "[UART%d]: failure [%s]: %d\n",
				pdev->id, __func__, ret);
	return ret;
}
Example #17
static int of_dra7_atl_clk_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct dra7_atl_clock_info *cinfo;
	int i;
	int ret = 0;

	if (!node)
		return -ENODEV;

	cinfo = devm_kzalloc(&pdev->dev, sizeof(*cinfo), GFP_KERNEL);
	if (!cinfo)
		return -ENOMEM;

	cinfo->iobase = of_iomap(node, 0);
	cinfo->dev = &pdev->dev;
	pm_runtime_enable(cinfo->dev);
	pm_runtime_irq_safe(cinfo->dev);

	pm_runtime_get_sync(cinfo->dev);
	atl_write(cinfo, DRA7_ATL_PCLKMUX_REG(0), DRA7_ATL_PCLKMUX);

	for (i = 0; i < DRA7_ATL_INSTANCES; i++) {
		struct device_node *cfg_node;
		char prop[5];
		struct dra7_atl_desc *cdesc;
		struct of_phandle_args clkspec;
		struct clk *clk;
		int rc;

		rc = of_parse_phandle_with_args(node, "ti,provided-clocks",
						NULL, i, &clkspec);

		if (rc) {
			pr_err("%s: failed to lookup atl clock %d\n", __func__,
			       i);
			return -EINVAL;
		}

		clk = of_clk_get_from_provider(&clkspec);

		cdesc = to_atl_desc(__clk_get_hw(clk));
		cdesc->cinfo = cinfo;
		cdesc->id = i;

		/* Get configuration for the ATL instances */
		snprintf(prop, sizeof(prop), "atl%u", i);
		cfg_node = of_find_node_by_name(node, prop);
		if (cfg_node) {
			ret = of_property_read_u32(cfg_node, "bws",
						   &cdesc->bws);
			ret |= of_property_read_u32(cfg_node, "aws",
						    &cdesc->aws);
			if (!ret) {
				cdesc->valid = true;
				atl_write(cinfo, DRA7_ATL_BWSMUX_REG(i),
					  cdesc->bws);
				atl_write(cinfo, DRA7_ATL_AWSMUX_REG(i),
					  cdesc->aws);
			}
		}

		cdesc->probed = true;
		/*
		 * Enable the clock if it has been asked prior to loading the
		 * hw driver
		 */
		if (cdesc->enabled)
			atl_clk_enable(__clk_get_hw(clk));
	}
	pm_runtime_put_sync(cinfo->dev);

	return ret;
}
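The atl_write() helper used above is not shown in this listing; it is assumed here to be a simple MMIO accessor on the ioremapped base, along these lines:
static inline void atl_write(struct dra7_atl_clock_info *cinfo, u32 reg,
			     u32 val)
{
	writel(val, cinfo->iobase + reg);
}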
Example #18
static int ecap_probe(struct platform_device *pdev)
{
	struct ecap_pwm *ep = NULL;
	struct resource *r;
	int ret = 0;
	int val;
	char con_id[PWM_CON_ID_STRING_LENGTH] = "epwmss";
	struct pwmss_platform_data *pdata = (&pdev->dev)->platform_data;

	ep = kzalloc(sizeof(*ep), GFP_KERNEL);

	if (!ep) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	ep->version = pdata->version;

	if (ep->version == PWM_VERSION_1) {
		/* Build "epwmss<id>_fck"; sprintf()ing con_id into itself is undefined */
		snprintf(con_id, sizeof(con_id), "epwmss%d_fck", pdev->id);
		ep->clk = clk_get(&pdev->dev, con_id);
	} else
		ep->clk = clk_get(&pdev->dev, "ecap");

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	ep->dev = &pdev->dev;
	if (IS_ERR(ep->clk)) {
		ret = PTR_ERR(ep->clk);
		goto err_clk_get;
	}

	if (ep->version == PWM_VERSION_1) {
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

		if (!r) {
			dev_err(&pdev->dev, "no memory resource defined\n");
			ret = -ENOMEM;
			goto err_get_resource;
		}

		ep->config_mem_base = ioremap(r->start, resource_size(r));

		if (!ep->config_mem_base) {

			dev_err(&pdev->dev, "failed to ioremap() registers\n");
			ret = -ENOMEM;
			goto err_get_resource;
		}

		pm_runtime_get_sync(ep->dev);
		val = readw(ep->config_mem_base + PWMSS_CLKCONFIG);
		val |= BIT(ECAP_CLK_EN);
		writew(val, ep->config_mem_base + PWMSS_CLKCONFIG);
		pm_runtime_put_sync(ep->dev);
	}

	spin_lock_init(&ep->lock);
	ep->ops.config = ecap_pwm_config;
	ep->ops.request = ecap_pwm_request;
	ep->ops.freq_transition_notifier_cb = ecap_frequency_transition_cb;

	if (ep->version == PWM_VERSION_1)
		r = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	else
		r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!r) {
		dev_err(&pdev->dev, "no memory resource defined\n");
		ret = -ENODEV;
		goto err_request_mem;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		ret = -EBUSY;
		goto err_request_mem;
	}

	ep->mmio_base = ioremap(r->start, resource_size(r));
	if (!ep->mmio_base) {
		dev_err(&pdev->dev, "failed to ioremap() registers\n");
		ret = -ENODEV;
		goto err_ioremap;
	}

	ep->pwm.ops = &ep->ops;
	pwm_set_drvdata(&ep->pwm, ep);
	ret = pwm_register(&ep->pwm, &pdev->dev, -1);
	platform_set_drvdata(pdev, ep);
	/* Inverse the polarity of PWM wave */
	if (pdata->chan_attrib[0].inverse_pol) {
		ep->pwm.active_high = 1;
		ecap_pwm_set_polarity(&ep->pwm, 1);
	}
	return 0;

err_ioremap:
	release_mem_region(r->start, resource_size(r));
err_request_mem:
	if (ep->version == PWM_VERSION_1) {
		iounmap(ep->config_mem_base);
		ep->config_mem_base = NULL;
	}
err_get_resource:
	clk_put(ep->clk);
	pm_runtime_disable(&pdev->dev);
err_clk_get:
	kfree(ep);
	return ret;
}
Example #19
static int __init hsi_platform_device_probe(struct platform_device *pd)
{
	struct hsi_platform_data *pdata = dev_get_platdata(&pd->dev);
	struct hsi_dev *hsi_ctrl;
	u32 revision;
	int err;

	dev_dbg(&pd->dev, "HSI DRIVER : hsi_platform_device_probe\n");

	dev_dbg(&pd->dev, "The platform device probed is an %s\n",
		hsi_driver_device_is_hsi(pd) ? "HSI" : "SSI");

	if (!pdata) {
		dev_err(&pd->dev, "No platform_data found on hsi device\n");
		return -ENXIO;
	}

	/* Check if mandatory board functions are populated */
	if (!pdata->device_scale) {
		dev_err(&pd->dev, "Missing platform device_scale function\n");
		return -ENOSYS;
	}

	hsi_ctrl = kzalloc(sizeof(*hsi_ctrl), GFP_KERNEL);
	if (hsi_ctrl == NULL) {
		dev_err(&pd->dev, "Could not allocate memory for"
			" struct hsi_dev\n");
		return -ENOMEM;
	}
//mo2haewoon.you => [START]
#if defined(HSI_SEND_ATCOMMAND_TO_CAWAKE)
	INIT_WORK(&hsi_ctrl->ifx_work, ifx_check_handle_work);
	hsi_ctrl->ifx_wq = create_singlethread_workqueue("ifx_wq");
	if (!hsi_ctrl->ifx_wq)
		printk("Failed to setup workqueue - ifx_wq\n");
#endif
//mo2haewoon.you => [END]

	platform_set_drvdata(pd, hsi_ctrl);
	err = hsi_controller_init(hsi_ctrl, pd);
	if (err < 0) {
		dev_err(&pd->dev, "Could not initialize hsi controller:"
			" %d\n", err);
		goto rollback1;
	}

	pm_runtime_enable(hsi_ctrl->dev);
	pm_runtime_irq_safe(hsi_ctrl->dev);
	hsi_clocks_enable(hsi_ctrl->dev, __func__);

	/* Non critical SW Reset */
	err = hsi_softreset(hsi_ctrl);
	if (err < 0)
		goto rollback2;

	hsi_set_pm_force_hsi_on(hsi_ctrl);

	/* Configure HSI ports */
	hsi_set_ports_default(hsi_ctrl, pd);

	/* Gather info from registers for the driver.(REVISION) */
	revision = hsi_inl(hsi_ctrl->base, HSI_SYS_REVISION_REG);
	if (hsi_driver_device_is_hsi(pd))
		dev_info(hsi_ctrl->dev, "HSI Hardware REVISION 0x%x\n",
			 revision);
	else
		dev_info(hsi_ctrl->dev, "SSI Hardware REVISION %d.%d\n",
			 (revision & HSI_SSI_REV_MAJOR) >> 4,
			 (revision & HSI_SSI_REV_MINOR));

	err = hsi_debug_add_ctrl(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev,
			"Could not add hsi controller to debugfs: %d\n", err);
		goto rollback2;
	}

	err = register_hsi_devices(hsi_ctrl);
	if (err < 0) {
		dev_err(&pd->dev, "Could not register hsi_devices: %d\n", err);
		goto rollback3;
	}

//mo2haewoon.you => [START]
#if defined(HSI_SEND_ATCOMMAND_TO_CAWAKE)
	/* Set the ts_gpio pin mux */
	err = gpio_request(OMAP_SEND, "gpio_122");
	gpio_direction_input(OMAP_SEND);
	irq_num_122 = gpio_to_irq(OMAP_SEND);

	err = request_irq(irq_num_122, ifx_check_handle_srdy_irq,
			  IRQF_TRIGGER_RISING, HSI_MODULENAME, hsi_ctrl);
	if (err < 0)
		pr_err("Modem-wait-check: couldn't request gpio interrupt 122\n");
#endif
//mo2haewoon.you => [END]

	/* Allow HSI to wake up the platform */
	device_init_wakeup(hsi_ctrl->dev, true);

	/* Set the HSI FCLK to default. */
	hsi_ctrl->hsi_fclk_req = pdata->default_hsi_fclk;
	err = pdata->device_scale(hsi_ctrl->dev, hsi_ctrl->dev,
				  pdata->default_hsi_fclk);
	if (err == -EBUSY) {
		/*
		 * PM framework init is a late_initcall, so it may not yet be
		 * initialized; be prepared to retry later on open.
		 */
		dev_warn(&pd->dev, "Cannot set HSI FClk to default value: %ld. "
			 "Will retry on next open\n", pdata->default_hsi_fclk);
	} else if (err) {
		dev_err(&pd->dev, "%s: Error %d setting HSI FClk to %ld.\n",
				__func__, err, pdata->default_hsi_fclk);
		goto rollback3;
	} else {
		hsi_ctrl->hsi_fclk_current = pdata->default_hsi_fclk;
	}
	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

// LGE_CHANGE [MIPI-HSI] [email protected] [START]
#if defined(CONFIG_MACH_LGE_COSMOPOLITAN)
	/* Set IMC CP core dump */
	IFX_CP_CRASH_DUMP_INIT();
#endif

#if defined(CONFIG_MACH_LGE_COSMO)
	/* Notify active/sleep status of AP to CP*/
	ifx_init_modem_send();
#endif
// LGE_CHANGE [MIPI-HSI] [email protected] [END]

	return 0;

rollback3:
	hsi_debug_remove_ctrl(hsi_ctrl);
rollback2:
	hsi_controller_exit(hsi_ctrl);

	/* From here no need for HSI HW access */
	hsi_clocks_disable(hsi_ctrl->dev, __func__);

rollback1:
	kfree(hsi_ctrl);
	return err;
}