Example 1
static void serial_omap_rx_timeout(unsigned long uart_no)
{
	struct uart_omap_port *up = ui[uart_no - 1];
	unsigned int curr_dma_pos;
	curr_dma_pos = omap_readl(OMAP34XX_DMA4_BASE +
			OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
	if ((curr_dma_pos == up->uart_dma.prev_rx_dma_pos) || (curr_dma_pos == 0)) {
		/*
		 * If no RX transfer has happened for 10 seconds, stop
		 * the DMA; otherwise just restart the timer. See whether
		 * the 10 second window can be improved.
		 */
		if (jiffies_to_msecs(jiffies - isr8250_activity) < 10000)
			mod_timer(&up->uart_dma.rx_timer, jiffies +
				usecs_to_jiffies(up->uart_dma.rx_timeout));
		else {
			del_timer(&up->uart_dma.rx_timer);
			serial_omap_stop_rxdma(up);
			up->ier |= UART_IER_RDI;
			serial_out(up, UART_IER, up->ier);
		}

		return;
	} else {
		unsigned int curr_transmitted_size = curr_dma_pos -
					up->uart_dma.prev_rx_dma_pos;
		up->port.icount.rx += curr_transmitted_size;
		tty_insert_flip_string(up->port.state->port.tty,
				up->uart_dma.rx_buf +
				(up->uart_dma.prev_rx_dma_pos -
				 up->uart_dma.rx_buf_dma_phys),
				curr_transmitted_size);
		queue_work(omap_serial_workqueue, &up->tty_work);
		up->uart_dma.prev_rx_dma_pos = curr_dma_pos;
		if (up->uart_dma.rx_buf_size + up->uart_dma.rx_buf_dma_phys == curr_dma_pos) {
			serial_omap_start_rxdma(up);
		} else
			mod_timer(&up->uart_dma.rx_timer,
				  jiffies + usecs_to_jiffies(up->uart_dma.rx_timeout));
		isr8250_activity = jiffies;
	}
}
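Every example in this collection funnels a microsecond count through usecs_to_jiffies() before arming a timer or delayed work, so its rounding behavior is worth pinning down: the conversion rounds up to whole scheduler ticks, so a nonzero delay never collapses to zero jiffies. Below is a minimal standalone model of that rounding, assuming a configuration where USEC_PER_SEC is an exact multiple of HZ (true for the common choices 100, 250 and 1000); the kernel's real implementation also clamps very large values, which is omitted here.

/* Standalone model of usecs_to_jiffies() rounding, for intuition only. */
#include <stdio.h>

#define HZ		100		/* assumed tick rate */
#define USEC_PER_SEC	1000000UL

static unsigned long model_usecs_to_jiffies(unsigned long usecs)
{
	unsigned long usec_per_jiffy = USEC_PER_SEC / HZ;

	/* Round up so a nonzero delay never becomes zero jiffies. */
	return (usecs + usec_per_jiffy - 1) / usec_per_jiffy;
}

int main(void)
{
	printf("%lu\n", model_usecs_to_jiffies(1));	/* 1 */
	printf("%lu\n", model_usecs_to_jiffies(10000));	/* 1 */
	printf("%lu\n", model_usecs_to_jiffies(10500));	/* 2 */
	return 0;
}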
Example 2
static irqreturn_t tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);

	if (status & INTSTAT_RISE0) {
		pr_info("Throttling interrupt occured!!!!\n");
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
		queue_delayed_work_on(0, tmu_monitor_wq,
				&info->polling, usecs_to_jiffies(500 * 1000));
	} else if (status & INTSTAT_RISE1) {
		pr_info("Warning interrupt occured!!!!\n");
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_WARNING;
		queue_delayed_work_on(0, tmu_monitor_wq,
				&info->polling, usecs_to_jiffies(500 * 1000));
	} else if (status & INTSTAT_RISE2) {
		pr_info("Tripping interrupt occured!!!!\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE2, info->tmu_base + INTCLEAR);
		tmu_tripped_cb();
	} else {
		pr_err("%s: TMU interrupt error\n", __func__);
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
Example 3
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example 4
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(pcpu->timer_rate);
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (pcpu->timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(pcpu->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
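The comment above imposes a locking contract on callers of cpufreq_interactive_timer_start(), but no caller is shown. A minimal sketch of a compliant caller, derived only from that comment: the helper name restart_governor_timer is hypothetical, and enable_sem is assumed to be the rw_semaphore the comment refers to.

/* Hypothetical caller honoring the documented contract: hold the
 * enable_sem write semaphore and deactivate both timers before
 * calling cpufreq_interactive_timer_start().
 */
static void restart_governor_timer(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);

	down_write(&pcpu->enable_sem);
	del_timer_sync(&pcpu->cpu_timer);
	del_timer_sync(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer_start(cpu);
	up_write(&pcpu->enable_sem);
}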
Example 5
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(pcpu->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (pcpu->timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(pcpu->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example 6
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu, int time_override)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long flags;
	unsigned long expires;
	if (time_override)
		expires = jiffies + time_override;
	else
		expires = jiffies + usecs_to_jiffies(timer_rate);

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example 7
/* Returns 1 if packet caused rmnet to wakeup, 0 otherwise. */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	int ret = 0;
	ktime_t now;
	if (p->timeout_us == 0) /* Check if disabled */
		return 0;

	/* Start timer on a wakeup packet */
	if (p->active_countdown == 0) {
		ret = 1;
		now = ktime_get_real();
		p->last_packet = now;
		if (in_suspend)
			queue_delayed_work(rmnet_wq, &p->work,
					usecs_to_jiffies(p->timeout_us));
		else
			queue_delayed_work(rmnet_wq, &p->work,
					usecs_to_jiffies(POLL_DELAY));
	}

	if (in_suspend)
		p->active_countdown++;
	else
		p->active_countdown = p->timeout_us / POLL_DELAY;

	return ret;
}
Example 8
static void set_temperature_params(struct s5p_tmu_info *info)
{
#ifdef CONFIG_TMU_DEBUG
	struct s5p_platform_tmu *data = info->dev->platform_data;

	if (tmu_test_on) {
		/* In tmu_test mode, override the temperature_params
		 * values with the test input data.
		 */
		data->ts.stop_1st_throttle = in.stop_1st_throttle;
		data->ts.start_1st_throttle = in.start_1st_throttle;
		data->ts.stop_2nd_throttle = in.stop_2nd_throttle;
		data->ts.start_2nd_throttle = in.start_2nd_throttle;
		data->ts.start_tripping = in.start_tripping;
		data->ts.start_emergency = in.start_emergency;
		data->ts.stop_mem_throttle = in.start_mem_throttle - 5;
		data->ts.start_mem_throttle = in.start_mem_throttle;
	}
	if (tmu_limit_on) {
		info->cpufreq_level_1st_throttle = freq_limit_1st_throttle;
		info->cpufreq_level_2nd_throttle = freq_limit_2nd_throttle;
	}
	if (set_sampling_rate) {
		info->sampling_rate =
			usecs_to_jiffies(set_sampling_rate * 1000);
		info->monitor_period =
			usecs_to_jiffies(set_sampling_rate * 10 * 1000);
	}
#endif
	print_temperature_params(info);
}
Example 9
/*
 * si4713_send_command - sends a command to si4713 and waits for its response
 * @sdev: si4713_device structure for the device we are communicating
 * @command: command id
 * @args: command arguments we are sending (up to 7)
 * @argn: actual size of @args
 * @response: buffer to place the expected response from the device (up to 15)
 * @respn: actual size of @response
 * @usecs: amount of time to wait before reading the response (in usecs)
 */
static int si4713_send_command(struct si4713_device *sdev, const u8 command,
				const u8 args[], const int argn,
				u8 response[], const int respn, const int usecs)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
	unsigned long until_jiffies;
	u8 data1[MAX_ARGS + 1];
	int err;

	if (!client->adapter)
		return -ENODEV;

	/* First send the command and its arguments */
	data1[0] = command;
	memcpy(data1 + 1, args, argn);
	DBG_BUFFER(&sdev->sd, "Parameters", data1, argn + 1);

	err = i2c_master_send(client, data1, argn + 1);
	if (err != argn + 1) {
		v4l2_err(&sdev->sd, "Error while sending command 0x%02x\n",
			command);
		return err < 0 ? err : -EIO;
	}

	until_jiffies = jiffies + usecs_to_jiffies(usecs) + 1;

	/* Wait response from interrupt */
	if (client->irq) {
		if (!wait_for_completion_timeout(&sdev->work,
				usecs_to_jiffies(usecs) + 1))
			v4l2_warn(&sdev->sd,
				"(%s) Device took too much time to answer.\n",
				__func__);
	}

	do {
		err = i2c_master_recv(client, response, respn);
		if (err != respn) {
			v4l2_err(&sdev->sd,
				"Error %d while reading response for command 0x%02x\n",
				err, command);
			return err < 0 ? err : -EIO;
		}

		DBG_BUFFER(&sdev->sd, "Response", response, respn);
		if (!check_command_failed(response[0]))
			return 0;

		if (client->irq)
			return -EBUSY;
		if (usecs <= 1000)
			usleep_range(usecs, 1000);
		else
			usleep_range(1000, 2000);
	} while (time_is_after_jiffies(until_jiffies));

	return -EBUSY;
}
Example 10
void hpios_delay_micro_seconds(u32 num_micro_sec)
{
	if ((usecs_to_jiffies(num_micro_sec) > 1) && !in_interrupt()) {
		/* MUST NOT SCHEDULE IN INTERRUPT CONTEXT! */
		schedule_timeout_uninterruptible(usecs_to_jiffies
			(num_micro_sec));
	} else if (num_micro_sec <= 2000)
		udelay(num_micro_sec);
	else
		mdelay(num_micro_sec / 1000);
}
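hpios_delay_micro_seconds() picks its delay primitive by context and duration: sleep when the delay spans more than one jiffy and scheduling is allowed, busy-wait otherwise. A rough userspace analogue of the same policy, unrelated to the HPI driver and with an illustrative 10 ms threshold:

/* Userspace sketch of the same policy: yield to the scheduler for
 * long delays, spin on a monotonic clock for short ones.
 */
#define _POSIX_C_SOURCE 199309L
#include <time.h>

static void delay_usecs(unsigned int usecs)
{
	if (usecs >= 10000) {		/* long: let the scheduler run */
		struct timespec req = {
			.tv_sec  = usecs / 1000000,
			.tv_nsec = (usecs % 1000000) * 1000L,
		};
		nanosleep(&req, NULL);
	} else {			/* short: busy-wait */
		struct timespec start, now;

		clock_gettime(CLOCK_MONOTONIC, &start);
		do {
			clock_gettime(CLOCK_MONOTONIC, &now);
		} while ((now.tv_sec - start.tv_sec) * 1000000L +
			 (now.tv_nsec - start.tv_nsec) / 1000L < usecs);
	}
}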
Example 11
static int __init timeriomem_rng_probe(struct platform_device *pdev)
{
	struct resource *res, *mem;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENOENT;

	mem = request_mem_region(res->start, res->end - res->start + 1,
				 pdev->name);
	if (mem == NULL)
		return -EBUSY;

	dev_set_drvdata(&pdev->dev, mem);

	timeriomem_rng_data = pdev->dev.platform_data;

	timeriomem_rng_data->address = ioremap(res->start,
						res->end - res->start + 1);
	if (!timeriomem_rng_data->address) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	if (timeriomem_rng_data->period != 0
		&& usecs_to_jiffies(timeriomem_rng_data->period) > 0) {
		timeriomem_rng_timer.expires = jiffies;

		timeriomem_rng_ops.priv = usecs_to_jiffies(
						timeriomem_rng_data->period);
	}
	timeriomem_rng_data->present = 1;

	ret = hwrng_register(&timeriomem_rng_ops);
	if (ret)
		goto err_register;

	dev_info(&pdev->dev, "32bits from 0x%p @ %dus\n",
			timeriomem_rng_data->address,
			timeriomem_rng_data->period);

	return 0;

err_register:
	dev_err(&pdev->dev, "problem registering\n");
	iounmap(timeriomem_rng_data->address);
err_ioremap:
	release_resource(mem);

	return ret;
}
Example 12
void HpiOs_DelayMicroSeconds(
	uint32_t dwNumMicroSec
)
{
	if ((usecs_to_jiffies(dwNumMicroSec) > 1) && !in_interrupt()) {
		/* MUST NOT SCHEDULE IN INTERRUPT CONTEXT! */
		schedule_timeout_uninterruptible(usecs_to_jiffies
			(dwNumMicroSec));
	} else if (dwNumMicroSec <= 2000)
		udelay(dwNumMicroSec);
	else
		mdelay(dwNumMicroSec / 1000);
}
Example 13
/**
 * update_sampling_rate - update sampling rate effective immediately if needed.
 * @new_rate: new sampling rate
 *
 * If the new rate is smaller than the old, simply updating the stored
 * sampling_rate might not be appropriate. For example, if the original
 * sampling_rate was 1 second and the requested new sampling rate is 10 ms
 * because the user needs an immediate reaction from the ondemand governor,
 * but is not sure whether a higher frequency will be required, then the
 * governor may change the sampling rate too late, up to 1 second later.
 * Thus, if we are reducing the sampling rate, we need to make the new
 * value effective immediately.
 */
static void update_sampling_rate(struct dbs_data *dbs_data,
		unsigned int new_rate)
{
	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
	int cpu;

	od_tuners->sampling_rate = new_rate = max(new_rate,
			dbs_data->min_sampling_rate);

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct cpufreq_policy *policy;
		struct od_cpu_dbs_info_s *dbs_info;
		unsigned long next_sampling, appointed_at;

		policy = cpufreq_cpu_get(cpu);
		if (!policy)
			continue;
		if (policy->governor != &cpufreq_gov_ondemand) {
			cpufreq_cpu_put(policy);
			continue;
		}
		dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
		cpufreq_cpu_put(policy);

		mutex_lock(&dbs_info->cdbs.timer_mutex);

		if (!delayed_work_pending(&dbs_info->cdbs.work)) {
			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			continue;
		}

		next_sampling = jiffies + usecs_to_jiffies(new_rate);
		appointed_at = dbs_info->cdbs.work.timer.expires;

		if (time_before(next_sampling, appointed_at)) {

			mutex_unlock(&dbs_info->cdbs.timer_mutex);
			cancel_delayed_work_sync(&dbs_info->cdbs.work);
			mutex_lock(&dbs_info->cdbs.timer_mutex);

			gov_queue_work(dbs_data, dbs_info->cdbs.cur_policy,
					usecs_to_jiffies(new_rate), true);

		}
		mutex_unlock(&dbs_info->cdbs.timer_mutex);
	}
	put_online_cpus();
}
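The requeue decision hinges on time_before(), which stays correct even when the jiffies counter wraps: it compares through a signed difference rather than the raw unsigned values. A standalone model of that comparison (the kernel macro adds type checking on top of this):

/* Standalone model of the kernel's time_before(): the signed-
 * difference trick remains correct across unsigned wraparound.
 */
#include <stdio.h>

static int model_time_before(unsigned long a, unsigned long b)
{
	return (long)(a - b) < 0;
}

int main(void)
{
	unsigned long next_sampling = 100;	/* jiffies + new period */
	unsigned long appointed_at  = 1000;	/* old timer expiry */

	/* The earlier deadline wins and the work is requeued sooner. */
	printf("%d\n", model_time_before(next_sampling, appointed_at)); /* 1 */

	/* Still correct when the counter has wrapped past zero. */
	printf("%d\n", model_time_before(-5UL, 10));	/* 1 */
	return 0;
}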
Example 14
static irqreturn_t ncp373_overcur_irq(int irq, void *data)
{
	struct ncp373_internal *dev = pncp373_internal;
	int ret;

	pr_info("%s: irq=%d received /FLG interrupt\n", __func__, irq);

	disable_irq_nosync(irq);

	if (unlikely(!dev)) {
		pr_err("%s: device %s is not probed yet.\n", __func__,
		       NCP373_DRIVER_NAME);
		return IRQ_HANDLED;
	}

	if (unlikely(OC_DET_READY != atomic_cmpxchg(&dev->oc_det_state,
				OC_DET_READY, OC_DET_START))) {
		pr_info("%s: expected=%s, but now=%s,"
			" mode_disable or remove may have begun.\n",
			__func__, ncp373_oc_det_state_to_string(OC_DET_READY),
			ncp373_oc_det_state_to_string(
				atomic_read(&dev->oc_det_state)));
		return IRQ_HANDLED;
	}

	ret = schedule_delayed_work(&dev->oc_delay_work,
			usecs_to_jiffies(atomic_read(&dev->oc_delay_time)));
	if (unlikely(ret < 0)) {
		pr_info("%s: failed to start the timer, notify"
			" the overcurrent immediately.\n", __func__);
		ncp373_oc_notify(dev);
	}

	return IRQ_HANDLED;
}
Example 15
static int wl1271_boot_soft_reset(struct wl1271 *wl)
{
	unsigned long timeout;
	u32 boot_data;

	/* perform soft reset */
	wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);

	/* SOFT_RESET is self clearing */
	timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
	while (1) {
		boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET);
		wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
		if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
			break;

		if (time_after(jiffies, timeout)) {
			/* 1.2 check pWhalBus->uSelfClearTime if the
			 * timeout was reached */
			wl1271_error("soft reset timeout");
			return -1;
		}

		udelay(SOFT_RESET_STALL_TIME);
	}

	/* disable Rx/Tx */
	wl1271_reg_write32(wl, ENABLE, 0x0);

	/* disable auto calibration on start */
	wl1271_reg_write32(wl, SPARE_A2, 0xffff);

	return 0;
}
Example 16
static int  __cpuinit msm8625_release_secondary(unsigned int cpu)
{
	void __iomem *base_ptr;
	int value = 0;
	unsigned long timeout;

	timeout = jiffies + usecs_to_jiffies(20);
	while (time_before(jiffies, timeout)) {
		value = __raw_readl(MSM_CFG_CTL_BASE + cpu_data[cpu].offset);
		if ((value & MSM_CORE_STATUS_MSK) == MSM_CORE_STATUS_MSK)
			break;
		udelay(1);
	}

	if (!value) {
		pr_err("Core %u cannot be brought out of Reset!!!\n", cpu);
		return -ENODEV;
	}

	base_ptr = ioremap_nocache(CORE_RESET_BASE + cpu_data[cpu].reset_off,
				   SZ_4);
	if (!base_ptr)
		return -ENODEV;

	__raw_writel(0x0, base_ptr);
	mb();

	cpu_data[cpu].reset_core_base = base_ptr;

	return 0;
}
Example 17
static int iadc_do_conversion(struct iadc_chip *iadc, int chan, u16 *data)
{
	unsigned int wait;
	int ret;

	ret = iadc_configure(iadc, chan);
	if (ret < 0)
		goto exit;

	wait = BIT(IADC_DEF_AVG_SAMPLES) * IADC_CONV_TIME_MIN_US * 2;

	if (iadc->poll_eoc) {
		ret = iadc_poll_wait_eoc(iadc, wait);
	} else {
		ret = wait_for_completion_timeout(&iadc->complete,
			usecs_to_jiffies(wait));
		if (!ret)
			ret = -ETIMEDOUT;
		else
			/* double check conversion status */
			ret = iadc_poll_wait_eoc(iadc, IADC_CONV_TIME_MIN_US);
	}

	if (!ret)
		ret = iadc_read_result(iadc, data);
exit:
	iadc_set_state(iadc, false);
	if (ret < 0)
		dev_err(iadc->dev, "conversion failed\n");

	return ret;
}
Example 18
static void cpufreq_interactive_timer_resched(unsigned long cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires;
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	del_timer(&pcpu->cpu_timer);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);

	if (timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example 19
/*
 * Set the DDR, AHB to 24MHz.
 * This mode will be activated only when none of the modules that
 * need a higher DDR or AHB frequency are active.
 */
int set_low_bus_freq(void)
{
	if (busfreq_suspended)
		return 0;

	if (!bus_freq_scaling_initialized || !bus_freq_scaling_is_active)
		return 0;

	/*
	 * Check to see if we need to go from
	 * low bus freq mode to audio bus freq mode.
	 * If so, the change needs to be done immediately.
	 */
	if (audio_bus_count && (low_bus_freq_mode || ultra_low_bus_freq_mode))
		reduce_bus_freq();
	else
		/*
		 * Don't lower the frequency immediately. Instead
		 * schedule delayed work and drop the freq if
		 * the conditions still remain the same.
		 */
		schedule_delayed_work(&low_bus_freq_handler,
					usecs_to_jiffies(3000000));
	return 0;
}
Example 20
static int msm_ulpi_write(struct msm_hcd *mhcd, u32 val, u32 reg)
{
	struct usb_hcd *hcd = mhcd_to_hcd(mhcd);
	unsigned long timeout;

	/* initiate write operation */
	writel_relaxed(ULPI_RUN | ULPI_WRITE |
	       ULPI_ADDR(reg) | ULPI_DATA(val),
	       USB_ULPI_VIEWPORT);

	/* wait for completion */
	timeout = jiffies + usecs_to_jiffies(ULPI_IO_TIMEOUT_USECS);
	while (readl_relaxed(USB_ULPI_VIEWPORT) & ULPI_RUN) {
		if (time_after(jiffies, timeout)) {
			dev_err(mhcd->dev, "msm_ulpi_write: timeout\n");
			dev_err(mhcd->dev, "PORTSC: %08x USBCMD: %08x\n",
				readl_relaxed(USB_PORTSC),
				readl_relaxed(USB_USBCMD));
			return -ETIMEDOUT;
		}
		udelay(1);
	}

	return 0;
}
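msm_ulpi_write() shows the canonical busy-poll shape used throughout these examples: compute an absolute jiffies deadline once with usecs_to_jiffies(), poll the hardware, and bail out with -ETIMEDOUT when time_after() says the deadline has passed. Distilled into a hypothetical generic helper (not part of any driver; the done callback and its signature are invented for illustration):

/* Hypothetical helper distilling the poll-with-deadline pattern:
 * absolute deadline computed once, small fixed back-off per poll.
 */
static int poll_until(bool (*done)(void *ctx), void *ctx,
		      unsigned int timeout_us)
{
	unsigned long timeout = jiffies + usecs_to_jiffies(timeout_us);

	while (!done(ctx)) {
		if (time_after(jiffies, timeout))
			return -ETIMEDOUT;
		udelay(1);
	}

	return 0;
}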
Example 21
/*
 * @westwood_pkts_acked
 * Called after processing a group of packets,
 * but all Westwood needs is the last sample of srtt.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
	struct westwood *w = inet_csk_ca(sk);

	if (rtt > 0)
		w->rtt = usecs_to_jiffies(rtt);
}
Example 22
static void serial_omap_start_rxdma(struct uart_omap_port *up)
{
#ifdef CONFIG_OMAP3_PM
	/* Disallow OCP bus idle. UART TX irqs are not seen during
	 * bus idle. An alternative is to set a kernel timer at the
	 * fifo drain rate.
	 */
	unsigned int tmp;
	tmp = (serial_in(up, UART_OMAP_SYSC) & 0x7) | (1 << 3);
	serial_out(up, UART_OMAP_SYSC, tmp); /* no-idle */
#endif
	if (up->uart_dma.rx_dma_channel == 0xFF) {
		omap_request_dma(uart_dma_rx[up->pdev->id - 1], "UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
					OMAP_DMA_AMODE_CONSTANT,
					UART_BASE(up->pdev->id - 1), 0, 0);
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
					OMAP_DMA_AMODE_POST_INC,
					up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
					OMAP_DMA_DATA_TYPE_S8,
					up->uart_dma.rx_buf_size, 1,
					OMAP_DMA_SYNC_ELEMENT,
					uart_dma_rx[up->pdev->id - 1], 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;
	omap_writel(0, OMAP34XX_DMA4_BASE +
		OMAP_DMA4_CDAC(up->uart_dma.rx_dma_channel));
	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
			usecs_to_jiffies(up->uart_dma.rx_timeout));
	up->uart_dma.rx_dma_state = 1;
}
Example 23
/*
 * si4713_wait_stc - Waits STC interrupt and clears status bits. Useful
 *		     for TX_TUNE_POWER, TX_TUNE_FREQ and TX_TUNE_MEAS
 * @sdev: si4713_device structure for the device we are communicating
 * @usecs: timeout to wait for STC interrupt signal
 */
static int si4713_wait_stc(struct si4713_device *sdev, const int usecs)
{
	int err;
	u8 resp[SI4713_GET_STATUS_NRESP];

	/* Wait response from STC interrupt */
	if (!wait_for_completion_timeout(&sdev->work,
			usecs_to_jiffies(usecs) + 1))
		v4l2_warn(&sdev->sd,
			"%s: device took too much time to answer (%d usec).\n",
				__func__, usecs);

	/* Clear status bits */
	err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
					NULL, 0,
					resp, ARRAY_SIZE(resp),
					DEFAULT_TIMEOUT);

	if (err < 0)
		goto exit;

	v4l2_dbg(1, debug, &sdev->sd,
			"%s: status bits: 0x%02x\n", __func__, resp[0]);

	if (!(resp[0] & SI4713_STC_INT))
		err = -EIO;

exit:
	return err;
}
Example 24
static void usleep(unsigned int usecs)
{
	unsigned long timeout = usecs_to_jiffies(usecs);

	while (timeout)
		timeout = schedule_timeout_interruptible(timeout);
}
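schedule_timeout_interruptible() returns the number of jiffies still remaining when the sleep is cut short by a signal, so the loop above simply re-sleeps for the remainder until the full delay has elapsed. The userspace counterpart of the same resume-on-interruption idiom, using nanosleep(), which reports the unslept time through its second argument:

/* Userspace analogue: on EINTR, nanosleep() stores the remaining
 * time in its second argument, so retrying with it completes the
 * originally requested delay.
 */
#define _POSIX_C_SOURCE 199309L
#include <errno.h>
#include <time.h>

static void sleep_usecs(unsigned int usecs)
{
	struct timespec req = {
		.tv_sec  = usecs / 1000000,
		.tv_nsec = (usecs % 1000000) * 1000L,
	};

	while (nanosleep(&req, &req) == -1 && errno == EINTR)
		;	/* keep sleeping for the remainder */
}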
Example 25
/* probe for an external PHY via MDIO; return PHY address */
int mii_probe(unsigned long base_addr)
{
	volatile EmacRegisters *emac = (volatile EmacRegisters *)base_addr;
	int i, j;

	for (i = 0; i < 32; i++) {
		emac->config |= EMAC_EXT_PHY;
		emac->mdioFreq = EMAC_MII_PRE_EN | EMAC_MDC;
		udelay(10);
		emac->mdioData = MDIO_RD | (i << MDIO_PMD_SHIFT) |
			(MII_BMSR << MDIO_REG_SHIFT);
		for (j = 0; j < 500; j++) {
#if defined(CONFIG_MIPS_BCM_NDVD)
			schedule_timeout_uninterruptible(usecs_to_jiffies(10));
#else
			udelay(10);
#endif
			if (emac->intStatus & EMAC_MDIO_INT) {
				uint16_t data = emac->mdioData & 0xffff;

				emac->intStatus |= EMAC_MDIO_INT;
				if (data != 0x0000 && data != 0xffff)
					return i;	/* found something */
				break;
			}
		}
	}
	return BCMEMAC_NO_PHY_ID;
}
Example 26
static irqreturn_t tmu_irq(int irq, void *id)
{
	struct tmu_info *info = id;
	unsigned int status;

	disable_irq_nosync(irq);

	status = __raw_readl(info->tmu_base + INTSTAT);
	/* When multiple interrupts are pending, interrupts raised by
	 * higher temperatures are serviced with priority.
	 */
	if (status & INTSTAT_RISE1) {
		dev_info(info->dev, "Tripping interrupt\n");
		info->tmu_state = TMU_STATUS_TRIPPED;
		__raw_writel(INTCLEAR_RISE1, info->tmu_base + INTCLEAR);
	} else if (status & INTSTAT_RISE0) {
		dev_info(info->dev, "Throttling interrupt\n");
		__raw_writel(INTCLEAR_RISE0, info->tmu_base + INTCLEAR);
		info->tmu_state = TMU_STATUS_THROTTLED;
	} else {
		dev_err(info->dev, "%s: TMU interrupt error. INTSTAT : %x\n", __func__, status);
		__raw_writel(status, info->tmu_base + INTCLEAR);
		return IRQ_HANDLED;
	}

	queue_delayed_work_on(0, tmu_monitor_wq, &info->polling,
			usecs_to_jiffies(0));

	return IRQ_HANDLED;
}
Example 27
static int serial_omap_start_rxdma(struct uart_omap_port *up)
{
	int ret = 0;

	if (up->uart_dma.rx_dma_channel == -1) {
		pm_runtime_get_sync(&up->pdev->dev);
		ret = omap_request_dma(up->uart_dma.uart_dma_rx,
				"UART Rx DMA",
				(void *)uart_rx_dma_callback, up,
				&(up->uart_dma.rx_dma_channel));
		if (ret < 0)
			return ret;

		omap_set_dma_src_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_CONSTANT,
				up->uart_dma.uart_base, 0, 0);
		omap_set_dma_dest_params(up->uart_dma.rx_dma_channel, 0,
				OMAP_DMA_AMODE_POST_INC,
				up->uart_dma.rx_buf_dma_phys, 0, 0);
		omap_set_dma_transfer_params(up->uart_dma.rx_dma_channel,
				OMAP_DMA_DATA_TYPE_S8,
				up->uart_dma.rx_buf_size, 1,
				OMAP_DMA_SYNC_ELEMENT,
				up->uart_dma.uart_dma_rx, 0);
	}
	up->uart_dma.prev_rx_dma_pos = up->uart_dma.rx_buf_dma_phys;

	omap_start_dma(up->uart_dma.rx_dma_channel);
	mod_timer(&up->uart_dma.rx_timer, jiffies +
				usecs_to_jiffies(up->uart_dma.rx_poll_rate));
	up->uart_dma.rx_dma_used = true;
	return ret;
}
Example 28
static void cpufreq_hardlimit_resume(struct power_suspend * h)
{
	current_screen_state = CPUFREQ_HARDLIMIT_SCREEN_ON;

	if(wakeup_kick_delay == CPUFREQ_HARDLIMIT_WAKEUP_KICK_DISABLED) {
		#ifdef CPUFREQ_HARDLIMIT_DEBUG
		pr_info("[HARDLIMIT] resume (no wakeup kick) : old_min = %u / old_max = %u / new_min = %u / new_max = %u \n",
				current_limit_min,
				current_limit_max,
				hardlimit_min_screen_on,
				hardlimit_max_screen_on
			);
		#endif
		wakeup_kick_active = CPUFREQ_HARDLIMIT_WAKEUP_KICK_INACTIVE;
	} else {
		#ifdef CPUFREQ_HARDLIMIT_DEBUG
		pr_info("[HARDLIMIT] resume (with wakeup kick) : old_min = %u / old_max = %u / new_min = %u / new_max = %u \n",
				current_limit_min,
				current_limit_max,
				wakeup_kick_freq,
				max(hardlimit_max_screen_on, min(hardlimit_max_screen_on, wakeup_kick_freq))
			);
		#endif
		wakeup_kick_active = CPUFREQ_HARDLIMIT_WAKEUP_KICK_ACTIVE;
		/* Schedule delayed work to restore stock scaling min after wakeup kick delay */
		schedule_delayed_work(&stop_wakeup_kick_work,
				usecs_to_jiffies(wakeup_kick_delay * 1000));
	}
	reapply_hard_limits();
}
Example 29
/* Round to starting jiffy of next evaluation window */
static u64 round_to_nw_start(u64 jif)
{
	unsigned long step = usecs_to_jiffies(timer_rate);

	do_div(jif, step);
	return (jif + 1) * step;
}
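round_to_nw_start() aligns a jiffies value to the start of the next evaluation window: do_div() leaves the quotient in jif, and (jif + 1) * step advances to the following boundary, so even a value already on a boundary moves one full window ahead. The same arithmetic in isolation, with an illustrative 20-tick window:

/* Standalone model of round_to_nw_start(): divide away the
 * sub-window remainder, then step to the next boundary.
 */
#include <stdio.h>

static unsigned long long model_round_to_nw_start(unsigned long long jif,
						  unsigned long step)
{
	return (jif / step + 1) * step;
}

int main(void)
{
	printf("%llu\n", model_round_to_nw_start(105, 20));	/* 120 */
	printf("%llu\n", model_round_to_nw_start(119, 20));	/* 120 */
	printf("%llu\n", model_round_to_nw_start(100, 20));	/* 120 */
	return 0;
}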
Example 30
static int vivid_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
				   u32 signal_free_time, struct cec_msg *msg)
{
	struct vivid_dev *dev = adap->priv;
	struct vivid_cec_work *cw = kzalloc(sizeof(*cw), GFP_KERNEL);
	long delta_jiffies = 0;

	if (cw == NULL)
		return -ENOMEM;
	cw->dev = dev;
	cw->adap = adap;
	cw->usecs = CEC_FREE_TIME_TO_USEC(signal_free_time) +
		    msg->len * USECS_PER_BYTE;
	cw->msg = *msg;

	spin_lock(&dev->cec_slock);
	list_add(&cw->list, &dev->cec_work_list);
	if (dev->cec_xfer_time_jiffies == 0) {
		INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_done_worker);
		dev->cec_xfer_start_jiffies = jiffies;
		dev->cec_xfer_time_jiffies = usecs_to_jiffies(cw->usecs);
		delta_jiffies = dev->cec_xfer_time_jiffies;
	} else {
		INIT_DELAYED_WORK(&cw->work, vivid_cec_xfer_try_worker);
		delta_jiffies = dev->cec_xfer_start_jiffies +
			dev->cec_xfer_time_jiffies - jiffies;
	}
	spin_unlock(&dev->cec_slock);
	schedule_delayed_work(&cw->work, delta_jiffies < 0 ? 0 : delta_jiffies);
	return 0;
}