Example #1
void set_mt65xx_mon_mode(MonitorMode mode)
{
    ktime_t kt;

    pr_info("set_mt65xx_mon_mode (mode = %d)\n", (int)mode);

    mutex_lock(&mt65xx_mon_mutex);

    if ((mode != MODE_SCHED_SWITCH) && (mode != MODE_PERIODIC) && (mode != MODE_MANUAL_TRACER)) {
        /* invalid mode: drop the mutex before returning */
        mutex_unlock(&mt65xx_mon_mutex);
        return;
    }

    monitor_mode = mode;
    if (monitor_mode == MODE_PERIODIC) {
        if (timer_initialized == 0) {
            hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
            timer.function = timer_isr;
            kt = ktime_set(0, mon_period_ns);
            hrtimer_start(&timer, kt, HRTIMER_MODE_REL);
            timer_initialized++;
        } else {
            hrtimer_restart(&timer);
        }
    } else if ((monitor_mode == MODE_SCHED_SWITCH) || (monitor_mode == MODE_MANUAL_TRACER)) {
        if (timer_initialized > 0)
            hrtimer_cancel(&timer);
    }

    mutex_unlock(&mt65xx_mon_mutex);

}
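The MODE_PERIODIC branch above arms the timer once and relies on the callback to keep it running. For reference, a periodic callback such as timer_isr would typically look like the sketch below; mtk_mon_sample() is a hypothetical placeholder for the actual sampling work, not code from this driver.

static enum hrtimer_restart timer_isr(struct hrtimer *t)
{
    /* do the periodic monitoring work (hypothetical helper) */
    mtk_mon_sample();

    /* push the expiry forward by one period and let the hrtimer
     * core re-queue the timer from within the callback */
    hrtimer_forward_now(t, ns_to_ktime(mon_period_ns));
    return HRTIMER_RESTART;
}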
Example #2
/*
 * Helper function which calls the hrtimer callback from
 * tasklet/softirq context
 */
static void __tasklet_hrtimer_trampoline(unsigned long data)
{
	struct tasklet_hrtimer *ttimer = (void *)data;
	enum hrtimer_restart restart;

	restart = ttimer->function(&ttimer->timer);
	if (restart != HRTIMER_NORESTART)
		hrtimer_restart(&ttimer->timer);
}
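For context, a user of this trampoline registers its callback with tasklet_hrtimer_init() and arms the timer with tasklet_hrtimer_start(); the trampoline then runs the callback in tasklet/softirq context and re-arms the timer whenever the callback returns something other than HRTIMER_NORESTART. A minimal sketch against the tasklet_hrtimer API found in older kernels; my_timer and my_callback are illustrative names.

#include <linux/interrupt.h>
#include <linux/ktime.h>

static struct tasklet_hrtimer my_timer;		/* illustrative */

/* runs from tasklet/softirq context via the trampoline above */
static enum hrtimer_restart my_callback(struct hrtimer *t)
{
	return HRTIMER_NORESTART;	/* one-shot; HRTIMER_RESTART would re-arm */
}

static void my_setup(void)
{
	tasklet_hrtimer_init(&my_timer, my_callback,
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	tasklet_hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
}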
Example #3
/*
 * called from hardirq (IPI) context
 */
static void hrtick_start(void *arg)
{
	struct rq *rq = arg;

	raw_spin_lock(&rq->lock);
	hrtimer_restart(&rq->hrtick_timer);
	rq->hrtick_csd_pending = 0;
	raw_spin_unlock(&rq->lock);
}
Example #4
static int timeriomem_rng_read(struct hwrng *hwrng, void *data,
				size_t max, bool wait)
{
	struct timeriomem_rng_private *priv =
		container_of(hwrng, struct timeriomem_rng_private, rng_ops);
	int retval = 0;
	int period_us = ktime_to_us(priv->period);

	/*
	 * The RNG provides 32-bits per read.  Ensure there is enough space for
	 * at minimum one read.
	 */
	if (max < sizeof(u32))
		return 0;

	/*
	 * There may not have been enough time for new data to be generated
	 * since the last request.  If the caller doesn't want to wait, let them
	 * bail out.  Otherwise, wait for the completion.  If the new data has
	 * already been generated, the completion should already be available.
	 */
	if (!wait && !priv->present)
		return 0;

	wait_for_completion(&priv->completion);

	do {
		/*
		 * After the first read, all additional reads will need to wait
		 * for the RNG to generate new data.  Since the period can have
		 * a wide range of values (1us to 1s have been observed), allow
		 * for 1% tolerance in the sleep time rather than a fixed value.
		 */
		if (retval > 0)
			usleep_range(period_us,
					period_us + max(1, period_us / 100));

		*(u32 *)data = readl(priv->io_base);
		retval += sizeof(u32);
		data += sizeof(u32);
		max -= sizeof(u32);
	} while (wait && max > sizeof(u32));

	/*
	 * Block any new callers until the RNG has had time to generate new
	 * data.
	 */
	priv->present = 0;
	reinit_completion(&priv->completion);
	hrtimer_forward_now(&priv->timer, priv->period);
	hrtimer_restart(&priv->timer);

	return retval;
}
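The read path above blocks on priv->completion, which the driver's hrtimer callback is expected to signal once the sampling period has elapsed. A sketch of that counterpart, reconstructed from the usage here rather than quoted verbatim from the driver:

static enum hrtimer_restart timeriomem_rng_trigger(struct hrtimer *timer)
{
	struct timeriomem_rng_private *priv =
		container_of(timer, struct timeriomem_rng_private, timer);

	/* enough time has passed: mark fresh data as available and wake
	 * any reader blocked in wait_for_completion() above */
	priv->present = 1;
	complete(&priv->completion);

	return HRTIMER_NORESTART;
}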
Example #5
static void schedule_next_timer(struct k_itimer *timr)
{
	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += hrtimer_forward(&timr->it.real.timer,
					    timr->it.real.interval);
	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(&timr->it.real.timer);
}
Example #6
/*
 * Called to set the hrtick timer state.
 *
 * called with rq->lock held and irqs disabled
 */
void hrtick_start(struct rq *rq, int delay)
{
	struct hrtimer *timer = &rq->hrtick_timer;
	ktime_t time = ktime_add_ns(timer->base->get_time(), delay);

	hrtimer_set_expires(timer, time);

	if (rq == this_rq()) {
		hrtimer_restart(timer);
	} else if (!rq->hrtick_csd_pending) {
		smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
		rq->hrtick_csd_pending = 1;
	}
}
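Examples #3 and #6 are two halves of one mechanism: when the runqueue belongs to another CPU, the arming function (Example #6) fires a single-function IPI via smp_call_function_single(), and the hardirq handler (Example #3) restarts the timer on that CPU. The csd is wired up once at init time; a rough sketch of that setup (function and field names vary between kernel versions and are assumptions here):

static void hrtick_rq_init(struct rq *rq)
{
	rq->hrtick_csd_pending = 0;

	/* make smp_call_function_single() invoke the hardirq handler
	 * from Example #3 with this runqueue as its argument */
	rq->hrtick_csd.flags = 0;
	rq->hrtick_csd.func = hrtick_start;
	rq->hrtick_csd.info = rq;

	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	rq->hrtick_timer.function = hrtick;	/* the scheduler's expiry callback */
}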
Example #7
ssize_t timerfd_do_read(struct timerfd_ctx* ctx)
{
  ssize_t ticks = ctx->ticks;

  if (ctx->expired && ctx->interval.tv64)
  {
    ticks += hrtimer_forward_now(&ctx->timer, ctx->interval) - 1;
    hrtimer_restart(&ctx->timer);
  }

  ctx->expired = 0;
  ctx->ticks = 0;

  return ticks;
}
Example #8
static void schedule_next_timer(struct k_itimer *timr)
{
	struct hrtimer *timer = &timr->it.real.timer;

	if (timr->it.real.interval.tv64 == 0)
		return;

	timr->it_overrun += (unsigned int) hrtimer_forward(timer,
						timer->base->get_time(),
						timr->it.real.interval);

	timr->it_overrun_last = timr->it_overrun;
	timr->it_overrun = -1;
	++timr->it_requeue_pending;
	hrtimer_restart(timer);
}
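Both versions of schedule_next_timer() re-arm an already expired posix timer from process context: hrtimer_forward() moves the expiry past "now" in whole interval steps and returns how many periods were skipped (accumulated as overruns), after which hrtimer_restart() enqueues the timer again. When the re-arming happens inside the expiry callback itself, the usual idiom needs no hrtimer_restart() at all; a minimal sketch with illustrative names:

static enum hrtimer_restart my_periodic_cb(struct hrtimer *t)
{
	const ktime_t period = ms_to_ktime(10);	/* hypothetical fixed period */

	/* the return value counts how many periods were missed if the
	 * callback ran late; it is ignored in this sketch */
	hrtimer_forward_now(t, period);

	/* HRTIMER_RESTART makes the hrtimer core re-queue the timer,
	 * so no explicit hrtimer_restart() call is needed here */
	return HRTIMER_RESTART;
}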
Example #9
static void mt65xx_mon_trace_start(struct trace_array *tr)
{
    int ret;
    ret = register_monitor(&mtk_mon, monitor_mode);

    if (ret != 0) {
        pr_info("MTK Monitor Register Fail\n");
        return;
    }
    pr_info("MTK Monitor Register OK\n");
    mtk_mon->init();

    mt65xx_mon_stopped = 0;
    if ((monitor_mode == MODE_PERIODIC) && (timer_initialized > 0))
        hrtimer_restart(&timer);
    mtk_mon->enable();
}
Example #10
STATIC int balong_ade_overlay_commit(struct ade_compose_data_type    *ade_pri_data, void __user *p)
{
    struct overlay_compose_info comp_info;
    struct balong_fb_data_type *balongfd = NULL;
    int ret = 0;
    u32 struct_len = sizeof(struct ovly_hnd_info) * ADE_OVERLAY_MAX_LAYERS;
#if ADE_SYNC_SUPPORT
    int fenceId = 0;
    unsigned long flags;
#endif

    BUG_ON(ade_pri_data == NULL);

    balongfb_logi_display_debugfs("balong_ade_overlay_commit enter succ ! \n");

    if (copy_from_user(&comp_info, p, sizeof(comp_info))) {
        balongfb_loge("copy from user failed!\n");
        return -EFAULT;
    }

    balongfd = (struct balong_fb_data_type *)platform_get_drvdata(ade_pri_data->parent);
    BUG_ON(balongfd == NULL);
    if ((!balongfd->frame_updated) && lcd_pwr_status.panel_power_on)
    {

        balongfd->frame_updated = 1;
        lcd_pwr_status.lcd_dcm_pwr_status |= BIT(2);
        do_gettimeofday(&lcd_pwr_status.tvl_set_frame);
        time_to_tm(lcd_pwr_status.tvl_set_frame.tv_sec, 0, &lcd_pwr_status.tm_set_frame);
    }
    down(&balong_fb_blank_sem);
    if (!balongfd->ade_core_power_on) {
        up(&balong_fb_blank_sem);
        balongfb_logi("ade_core_power_on is false !\n");
        return -EPERM;
    }


    if (ADE_TRUE == comp_info.is_finished) {
        spin_lock_irqsave(&balongfd->refresh_lock, flags);
        balongfd->refresh++;
        spin_unlock_irqrestore(&balongfd->refresh_lock, flags);
        balongfd->timeline_max++;
        //release the reserved buffer of fb
        if((true == ade_pri_data->fb_reserved_flag) && (ade_pri_data->frame_count)) {
            balong_ion_free_mem_to_buddy();
            ade_pri_data->fb_reserved_flag = false;
        }

        if (PANEL_MIPI_VIDEO == ade_pri_data->lcd_type) {
            spin_lock_irqsave(&(balongfd->vsync_info.spin_lock), flags);
            set_LDI_INT_MASK_bit(balongfd->ade_base, LDI_ISR_FRAME_END_INT);
            balongfd->vsync_info.vsync_ctrl_expire_count = 0;
            spin_unlock_irqrestore(&(balongfd->vsync_info.spin_lock), flags);

            if (balongfd->vsync_info.vsync_ctrl_disabled_set) {
                if (balongfd->ldi_irq) {
                    enable_irq(balongfd->ldi_irq);
                    balongfd->vsync_info.vsync_ctrl_disabled_set = false;
                }
            }
        }

#ifndef PC_UT_TEST_ON
#ifdef CONFIG_TRACING
        trace_dot(SF, "7", 0);
#endif
#endif

#if ADE_DEBUG_LOG_ENABLE
        g_debug_frame_number = comp_info.frame_number;
#endif
    }

    ret = ade_overlay_commit(ade_pri_data, &comp_info);

    if (ADE_TRUE == comp_info.is_finished) {
        if (ret == 0) {
            set_LDI_INT_MASK_bit(ade_pri_data->ade_base, LDI_ISR_UNDER_FLOW_INT);
#if PARTIAL_UPDATE
            if ((PANEL_MIPI_CMD == ade_pri_data->lcd_type) && (true == balongfd->dirty_update)) {
                balongfb_set_display_region(balongfd);
                balongfd->dirty_update = false;
            }
#endif
            if (PANEL_MIPI_VIDEO == ade_pri_data->lcd_type) {
                if ((ade_pri_data->overlay_ctl.comp_info.compose_mode != OVERLAY_COMP_TYPE_ONLINE)
                    || (balongfd->vpu_power_on)){
                     /* ade_core_rate is default value (360M) */
                     balongfd->ade_set_core_rate = balongfd->ade_core_rate;
                }

                if (balongfd->last_ade_core_rate != balongfd->ade_set_core_rate) {
                    if (clk_set_rate(balongfd->ade_clk, balongfd->ade_set_core_rate) != 0) {
                          balongfb_loge("clk_set_rate ade_core_rate error \n");
                    }
                }

                balongfd->last_ade_core_rate = balongfd->ade_set_core_rate;

            }

            set_LDI_CTRL_ldi_en(ade_pri_data->ade_base, ADE_ENABLE);
            balongfd->ade_ldi_on = true;
            if (PANEL_MIPI_CMD == ade_pri_data->lcd_type) {
                set_LDI_INT_MASK_bit(balongfd->ade_base, LDI_ISR_DSI_TE0_PIN_INT);

                /* enable fake vsync timer */
                if (balongfd->frc_state != BALONG_FB_FRC_IDLE_PLAYING) {
                    balongfd->use_cmd_vsync = (balongfb_frc_get_fps(balongfd) < BALONG_FB_FRC_NORMAL_FPS ? true : false);
                }

                /* report vsync with timer */
                if (balongfd->use_cmd_vsync) {
                    hrtimer_restart(&balongfd->cmd_vsync_hrtimer);
                } else {
                    hrtimer_cancel(&balongfd->cmd_vsync_hrtimer);
                }
            }

#ifndef PC_UT_TEST_ON
#ifdef CONFIG_TRACING
            trace_dot(SF, "8", 0);
#endif
#endif

#if ADE_SYNC_SUPPORT
            /* In online/hybrid mode, ADE must create release fenceFd.
             * In offline mode, don't create release fenceFd
             * because ADE will read HAL's offline buffer instead of layer's buffer.
             */
            /*
               spin_lock_irqsave(&balongfd->refresh_lock, flags);
               balongfd->refresh++;
               spin_unlock_irqrestore(&balongfd->refresh_lock, flags);
               balongfd->timeline_max++;
             */
            if ((OVERLAY_COMP_TYPE_ONLINE == comp_info.compose_mode)
                    || (OVERLAY_COMP_TYPE_HYBRID == comp_info.compose_mode)) {

                fenceId = balong_ade_overlay_fence_create(balongfd->timeline, "ADE", balongfd->timeline_max);
                if (fenceId < 0) {
                    balongfb_loge("ADE failed to create fence!\n");
                }
                comp_info.release_fence = fenceId;
                if (copy_to_user((struct overlay_compose_info __user*)p, &comp_info, sizeof(struct overlay_compose_info))
                        && (fenceId >= 0)) {
                    fenceId = -EFAULT;
                    balongfb_loge("ADE failed to copy fence to user!\n");
                    put_unused_fd(comp_info.release_fence);

                    up(&balong_fb_blank_sem);
                    return fenceId;
                }
            }
#endif /* ADE_SYNC_SUPPORT */
            ade_overlay_handle_unlock(balongfd);
            memcpy(balongfd->locked_hnd, balongfd->locking_hnd, struct_len);
            memset(balongfd->locking_hnd, 0, struct_len);
        } else {
            ade_overlay_handle_unlock(balongfd);
            memcpy(balongfd->locked_hnd, balongfd->locking_hnd, struct_len);
            memset(balongfd->locking_hnd, 0, struct_len);
            up(&balong_fb_blank_sem);
            return ret;
        }
    }

    up(&balong_fb_blank_sem);

    balongfb_logi_display_debugfs("balong_ade_overlay_commit exit succ ! \n");
    return 0;
}
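The command-mode branch above arms cmd_vsync_hrtimer so that userspace keeps receiving vsync events even when the panel's TE signal is not used. The matching callback is not part of this snippet; a plausible sketch, where balong_fb_report_vsync() and the 60 Hz period are assumptions rather than code from the driver:

static enum hrtimer_restart cmd_vsync_timer_func(struct hrtimer *timer)
{
    struct balong_fb_data_type *balongfd =
        container_of(timer, struct balong_fb_data_type, cmd_vsync_hrtimer);

    /* report a software-generated vsync to the upper layers
     * (assumed helper; the real driver may hook this differently) */
    balong_fb_report_vsync(balongfd);

    /* re-arm for the next frame, assuming a nominal 60 Hz refresh */
    hrtimer_forward_now(timer, ns_to_ktime(NSEC_PER_SEC / 60));
    return HRTIMER_RESTART;
}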