/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu,
	int time_override)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long flags;
	unsigned long expires;
	if (time_override)
		expires = jiffies + time_override;
	else
		expires = jiffies + usecs_to_jiffies(tunables->timer_rate);

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
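The header comment states a locking contract without showing it. A minimal caller sketch, assuming the interactive governor's usual per-CPU enable_sem rw_semaphore and timer fields (illustrative only, not necessarily the governor's actual call site):

	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);

	down_write(&pcpu->enable_sem);		/* exclude the timer handlers */
	del_timer_sync(&pcpu->cpu_timer);	/* both timers must be inactive */
	del_timer_sync(&pcpu->cpu_slack_timer);
	cpufreq_interactive_timer_start(tunables, cpu, 0);	/* 0: use tunables->timer_rate */
	up_write(&pcpu->enable_sem);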
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	unsigned long flags;
	u64 now = ktime_to_us(ktime_get());

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 &&
	    (pcpu->target_freq > pcpu->policy->min ||
		(pcpu->target_freq == pcpu->policy->min &&
		 now < boostpulse_endtime))) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
static void cpufreq_interactive_timer_resched(unsigned long cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp, io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = round_to_nw_start(pcpu->last_evaluated_jiffy);
	del_timer(&pcpu->cpu_timer);
	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		del_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example #4
void dp_bfd_timer_echo_send_callback(unsigned long mydis)
{
    dp_bfd_session_s *session;     
    session = dp_bfd_find_session_simple(mydis);
    if(NULL == session)
    {
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_echo_send_callback session not found\n");
        return;
    }
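    /* Assumption implied by the unlock at the end of this callback: when a
     * session is found, dp_bfd_find_session_simple() returns with the matching
     * g_session_cask[].session_lock held for writing. */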
    
    session->flag &= (~DP_BFD_ECHOSEND_TIMER_ON);
    dp_bfd_send_msg(session, DP_BFD_SEND_ECHO_PACKET);  
    
    if(!(session->flag & DP_BFD_ECHOSEND_TIMER_ON))
    {
        session->echo_send_timer.expires = jiffies + session->echo_send_interval;
        session->echo_send_timer.data = mydis;
        session->echo_send_timer.function = dp_bfd_timer_echo_send_callback;
        add_timer_on(&session->echo_send_timer, 1);
        session->flag |= DP_BFD_ECHOSEND_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_echo_send_callback add echo_send_timer\n");
    }
    else
    {
        mod_timer(&session->echo_send_timer, jiffies + session->echo_send_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_echo_send_callback mod echo_send_timer\n");
    }
    
    write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
}
Example #5
static void timelimit_expire(unsigned long timeout_seconds)
{
	char msg[128];
	int msglen = 127;

	if (timeout_seconds) {
		if (timeout_seconds >= 60)
			snprintf(msg, msglen,
				 "Uptime: kernel validity duration has %d %s remaining\n",
				 (int) timeout_seconds / 60, "minute(s)");
		else
			snprintf(msg, msglen,
				 "Uptime: kernel validity duration has %d %s remaining\n",
				 (int) timeout_seconds, "seconds");

		printk(KERN_CRIT "%s", msg);

		timelimit_timer.expires = jiffies + timeout_seconds * HZ;
		timelimit_timer.data = 0;
		add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
	} else {
		printk(KERN_CRIT "Uptime: Kernel validity timeout has expired\n");
#ifdef CONFIG_UPTIME_LIMIT_KERNEL_REBOOT
		uptime_expiration_action = uptime_reboot;
		wake_up_process(uptime_worker_task);
#endif
	}
}
Example #6
static void gpu_dvfs_timer_control(bool enable)
{
	unsigned long flags;
	struct kbase_device *kbdev = pkbdev;
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;

	DVFS_ASSERT(platform);

	if (!platform->dvfs_status) {
		GPU_LOG(DVFS_ERROR, DUMMY, 0u, 0u, "%s: DVFS is disabled\n", __func__);
		return;
	}

	if (kbdev->pm.metrics.timer_active && !enable) {
#if !defined(SLSI_SUBSTITUTE)
		hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
		del_timer(&kbdev->pm.metrics.tlist);
#endif /* SLSI_SUBSTITUTE */
	} else if (!kbdev->pm.metrics.timer_active && enable) {
#if !defined(SLSI_SUBSTITUTE)
		hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
		kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
		add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif /* SLSI_SUBSTITUTE */
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
	}

	spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
	kbdev->pm.metrics.timer_active = enable;
	spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
}
Example #7
static void pet_watchdog_timer_fn(unsigned long data)
{
/*	pr_err("%s kicking...\n", __func__); */
	writel(watchdog_reset * TPS, S3C2410_WTCNT);
	pet_watchdog_timer.expires += watchdog_pet * HZ;
	add_timer_on(&pet_watchdog_timer, 0);
}
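The callback re-arms itself by advancing expires, so its function/data fields must be set up before the timer is first added; a plausible (assumed) one-time initialization, done before the watchdog_start() in the next example arms the timer:

	setup_timer(&pet_watchdog_timer, pet_watchdog_timer_fn, 0);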
Example #8
static void watchdog_start(void)
{
	unsigned int val;
	unsigned long flags;

	spin_lock_irqsave(&wdt_lock, flags);

	/* set to PCLK / 256 / 128 */
	val = S3C2410_WTCON_DIV128;
	val |= S3C2410_WTCON_PRESCALE(255);
	writel(val, S3C2410_WTCON);

	/* program initial count */
	writel(watchdog_reset * TPS, S3C2410_WTCNT);
	writel(watchdog_reset * TPS, S3C2410_WTDAT);

	/* start timer */
	val |= S3C2410_WTCON_RSTEN | S3C2410_WTCON_ENABLE;
	writel(val, S3C2410_WTCON);
	spin_unlock_irqrestore(&wdt_lock, flags);

	/* make sure we're ready to pet the dog */
#if defined(PET_BY_WORKQUEUE)
	queue_delayed_work_on(0, watchdog_wq, &watchdog_work,
			      watchdog_pet * HZ);
#elif defined(PET_BY_DIRECT_TIMER)
	pet_watchdog_timer.expires = jiffies + watchdog_pet * HZ;
	add_timer_on(&pet_watchdog_timer, 0);
#else
	hrtimer_start(&watchdog_timer,
		      ktime_set(watchdog_pet, 0), HRTIMER_MODE_REL);
#endif
}
Example #9
/* The cyclic handshake timer should be deleted here; add the send timer and the receive timer */
u32 dp_bfd_down_to_up(dp_bfd_packet_s *packet, dp_bfd_session_s *session)
{
    session->packet.your_discriminator = packet->my_discriminator;
    session->remote_dmti = packet->min_tx_interval;
    session->remote_rmri = packet->min_rx_interval;
    session->remote_dm = packet->detect_mult;

    if(DP_BFD_SET == packet->a)
    {
        dp_bfd_auth_seq_deal(session);
        /* Recompute the digest if the authentication type is MD5 or SHA1 */
        session->flag |= DP_BFD_FILL_AUTH;
    }
    
    /* Respond to the handshake packet immediately */
    dp_bfd_send_msg(session, DP_BFD_SEND_CONTROL_PACKET);
    
    /* Update the BFD packet send timer */
    session->send_interval = (HZ)*DP_BFD_GET_MAX(session->packet.min_tx_interval, packet->min_rx_interval)/1000/1000;
    if(session->bfd_cyc_switch)
    {
        session->bfd_cyc_switch = 0;
    }
    
    if(!(session->flag & DP_BFD_SEND_TIMER_ON))
    {
        session->send_timer.expires = jiffies + session->send_interval;
        session->send_timer.data = (unsigned long)(session->packet.my_discriminator);
        session->send_timer.function = dp_bfd_timer_send_callback;
        add_timer_on(&session->send_timer, 1);
        session->flag |= DP_BFD_SEND_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_down_to_up send_timer\n");
    }
    else
    {
        mod_timer(&session->send_timer, jiffies + session->send_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_down_to_up mod send_timer\n");
    }
                    
    /* Add the BFD packet receive timer */
    dp_bfd_timer_receive_add(session, packet);
    
    if(session->packet.min_echo_rx_interval != 0)
    {
       dp_bfd_echo_enable(session);  
    }
    
    /* Send a message to user space */
    session->state = UP;
    session->fail = 0;
    dp_bfd_send_event(session, EVENT_BFD_DOWN_TO_UP, BFD_EVENT_CRUCIAL);
    
    return NF_DROP;    
}
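A note on the interval arithmetic used above and in the helpers below: BFD carries its tx/rx intervals in microseconds (per RFC 5880), so (HZ) * max(tx, rx) / 1000 / 1000 converts microseconds to jiffies. For example, with HZ = 100 and a negotiated interval of 500,000 us, the result is 100 * 500000 / 1000 / 1000 = 50 jiffies (500 ms). The integer division also means that, at HZ = 100, any interval below 10,000 us truncates to 0 jiffies and the timer fires on the next tick.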
Example #10
/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies + usecs_to_jiffies(pcpu->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (pcpu->timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(pcpu->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
Example #11
/*
 * Schedule the timer to fire immediately. This is a helper function for
 * atomic notification from scheduler so that CPU load can be re-evaluated
 * immediately. Since we are just re-evaluating previous window's load, we
 * should not push back slack timer.
 */
static void cpufreq_interactive_timer_resched_now(unsigned long cpu)
{
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);

	spin_lock_irqsave(&pcpu->load_lock, flags);
	del_timer(&pcpu->cpu_timer);
	pcpu->cpu_timer.expires = jiffies;
	add_timer_on(&pcpu->cpu_timer, cpu);
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
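The comment above describes an atomic notification path from the scheduler; a minimal sketch of such a caller (the hook name is hypothetical, not part of the governor):

/* Hypothetical scheduler-side hook, called in atomic context on the local CPU. */
static void cpufreq_interactive_load_change(void)
{
	cpufreq_interactive_timer_resched_now(smp_processor_id());
}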
Example #12
void dp_bfd_timer_send_callback(unsigned long mydis)
{    
    dp_bfd_session_s *session;
   
    session = dp_bfd_find_session_simple(mydis);
    if(NULL == session)
    {
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_send_callback session not found\n");
        return;
    }
    
    session->flag &= (~DP_BFD_SEND_TIMER_ON);
    if(session->bfd_cyc_switch)
    {
        session->bfd_cyc_handshake_time++;
        if(session->bfd_cyc_handshake_time >= BFD_CYC_HANDSHAKE_TIMEOUT)
        {
            session->bfd_cyc_switch = 0;
            session->fail = 1;
            dp_bfd_send_event(session, EVENT_BFD_CYC_HANDSHAKE_TIMEOUT, BFD_EVENT_CRUCIAL);
        }
    }
        
    if(!(session->flag & DP_BFD_SEND_TIMER_ON))
    {
        session->send_timer.expires = jiffies + session->send_interval;
        session->send_timer.data = mydis;
        session->send_timer.function = dp_bfd_timer_send_callback;
        add_timer_on(&session->send_timer, 1);
        session->flag |= DP_BFD_SEND_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_send_callback add send_timer\n");
    }
    else
    {
        mod_timer(&session->send_timer, jiffies + session->send_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_send_callback mod send_timer\n");
    }

    dp_bfd_send_msg(session, DP_BFD_SEND_CONTROL_PACKET);
    write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
}
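Example #13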
static int gpu_dvfs_on_off(struct kbase_device *kbdev, bool enable)
{
	unsigned long flags;
	struct exynos_context *platform = (struct exynos_context *)kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	if (enable && !platform->dvfs_status) {
		platform->dvfs_status = true;
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, platform->cur_clock);
		gpu_dvfs_handler_init(kbdev);
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
	} else if (!enable && platform->dvfs_status) {
		platform->dvfs_status = false;
		gpu_dvfs_handler_deinit(kbdev);
		gpu_control_state_set(kbdev, GPU_CONTROL_CHANGE_CLK_VOL, MALI_DVFS_BL_CONFIG_FREQ);
		if (kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
	} else {
		GPU_LOG(DVFS_WARNING, "impossible state to change dvfs status (current: %d, request: %d)\n",
				platform->dvfs_status, enable);
		return -1;
	}
	return 0;
}
Example #14
inline void dp_bfd_timer_handshake_add(dp_bfd_session_s *session)
{
    if(!(session->flag & DP_BFD_HANDSHAKE_TIMER_ON))
    {
        session->handshake_timer.expires= jiffies + (HZ)*HAND_SHAKE_TIME/1000;
        session->handshake_timer.data = (unsigned long)(session->packet.my_discriminator);
        session->handshake_timer.function = dp_bfd_timer_handshake_callback;
        add_timer_on(&session->handshake_timer, 1);
        session->flag |= DP_BFD_HANDSHAKE_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_handshake_add add handshake_timer\n");
    }
    else
    {
        mod_timer(&session->handshake_timer, jiffies + (HZ)*HAND_SHAKE_TIME/1000);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_handshake_add mod handshake_timer\n");
    }
}
Example #15
static int timeout_enable(int cpu)
{
	int err = 0;
	int warning_limit;

	/*
	 * Create an uptime worker thread. This thread is required since the
	 * safe version of kernel restart cannot be called from a
	 * non-interruptible context, which means we cannot call it directly
	 * from a timer callback. So we arrange for the timer expiration to
	 * wake up a thread, which performs the action.
	 */
	uptime_worker_task = kthread_create(uptime_worker,
					    (void *)(unsigned long)cpu,
					    "uptime_worker/%d", cpu);
	if (IS_ERR(uptime_worker_task)) {
		printk(KERN_ERR "Uptime: task for cpu %i failed\n", cpu);
		err = PTR_ERR(uptime_worker_task);
		goto out;
	}
	/* bind to cpu0 to avoid migration and hot plug nastiness */
	kthread_bind(uptime_worker_task, cpu);
	wake_up_process(uptime_worker_task);

	/* Create the timer that will wake the uptime thread at expiration */
	init_timer(&timelimit_timer);
	timelimit_timer.function = timelimit_expire;
	/*
	 * Fire two timers. One warning timeout and the final timer
	 * which will carry out the expiration action. The warning timer will
	 * expire at the minimum of half the original time or ten minutes.
	 */
	warning_limit = MIN(UPTIME_LIMIT_IN_SECONDS/2, TEN_MINUTES_IN_SECONDS);
	timelimit_timer.expires = jiffies + warning_limit * HZ;
	timelimit_timer.data = UPTIME_LIMIT_IN_SECONDS - warning_limit;

	add_timer_on(&timelimit_timer, cpumask_first(cpu_online_mask));
out:
	return err;
}
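Worked example of the two-stage timeout: if UPTIME_LIMIT_IN_SECONDS were 14400 (4 hours), warning_limit = MIN(7200, 600) = 600, so the timer first fires 10 minutes after boot with data = 13800; timelimit_expire() then prints the "230 minute(s) remaining" warning and re-arms itself for the remaining 13800 seconds, and the second expiry (data == 0) carries out the expiration action.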
Example #16
inline void dp_bfd_timer_cyc_handshake_add(dp_bfd_session_s *session)
{    
    session->send_interval = 1*HZ;
    if(!(session->flag & DP_BFD_SEND_TIMER_ON))
    {
        session->send_timer.expires = jiffies + session->send_interval;
        session->send_timer.function = dp_bfd_timer_send_callback;
        session->send_timer.data = session->packet.my_discriminator;
        add_timer_on(&session->send_timer, 1);
        session->flag |= DP_BFD_SEND_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_cyc_handshake_add send_timer *********send interval : %d\n", session->send_interval);
    }
    else
    {
        mod_timer(&session->send_timer, jiffies + session->send_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_cyc_handshake mod send_timer *********send interval : %d\n", session->send_interval);
    }
}
Example #17
inline void dp_bfd_timer_send_add(dp_bfd_session_s *session, dp_bfd_packet_s *packet)
{            
    session->send_interval = (HZ)*DP_BFD_GET_MAX(session->packet.min_tx_interval, packet->min_rx_interval)/1000/1000;
    if(!(session->flag & DP_BFD_SEND_TIMER_ON))
    {
        session->send_timer.expires = jiffies + session->send_interval;
        session->send_timer.data = (unsigned long)(session->packet.my_discriminator);
        session->send_timer.function = dp_bfd_timer_send_callback;
        add_timer_on(&session->send_timer, 1);
        session->flag |= DP_BFD_SEND_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_send_add send_timer **********send interval : %d\n", session->send_interval);
    }
    else
    {
        mod_timer(&session->send_timer, jiffies + session->send_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_send_add mod send_timer **********send interval : %d\n", session->send_interval);
    }
}
Example #18
/* Error injection interface */
static ssize_t mce_write(struct file *filp, const char __user *ubuf,
			 size_t usize, loff_t *off)
{
	struct delayed_mce *dm;
	struct mce m;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	/*
	 * There are some cases where real MSR reads could slip
	 * through.
	 */
	if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
		return -EIO;

	if ((unsigned long)usize > sizeof(struct mce))
		usize = sizeof(struct mce);
	if (copy_from_user(&m, ubuf, usize))
		return -EFAULT;

	if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
		return -EINVAL;

	dm = kmalloc(sizeof(struct delayed_mce), GFP_KERNEL);
	if (!dm)
		return -ENOMEM;

	/*
	 * Need to give user space some time to set everything up,
	 * so do it a jiffie or two later everywhere.
	 * Should we use a hrtimer here for better synchronization?
	 */
	memcpy(&dm->m, &m, sizeof(struct mce));
	setup_timer(&dm->timer, raise_mce, (unsigned long)dm);
	dm->timer.expires = jiffies + 2;
	add_timer_on(&dm->timer, m.extcpu);
	return usize;
}
Example #19
inline void dp_bfd_timer_echo_receive_add(dp_bfd_session_s *session)
{
    session->echo_receive_interval = (HZ)*session->packet.min_echo_rx_interval/1000/1000;

    if(!(session->flag & DP_BFD_ECHORECV_TIMER_ON))
    {
        session->echo_receive_timer.expires = jiffies + session->echo_receive_interval;
        session->echo_receive_timer.data = (unsigned long)(session->packet.my_discriminator);
        session->echo_receive_timer.function = dp_bfd_timer_echo_receive_callback;
        add_timer_on(&session->echo_receive_timer, 1);
        session->flag |= DP_BFD_ECHORECV_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_echo_receive_add echo_receive_timer\n");
    }
    else
    {
        mod_timer(&session->echo_receive_timer, jiffies + session->echo_receive_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_echo_receive_add mod echo_receive_timer\n");
    }
}
Example #20
inline void dp_bfd_timer_receive_add(dp_bfd_session_s *session, dp_bfd_packet_s *packet)
{
    session->receive_interval = (HZ)*(packet->detect_mult)*(DP_BFD_GET_MAX(packet->min_tx_interval, session->packet.min_rx_interval)/1000)/1000;

    if(!(session->flag & DP_BFD_RECEIVE_TIMER_ON))
    {
        session->receive_timer.expires = jiffies + session->receive_interval;
        session->receive_timer.data = (unsigned long)(session->packet.my_discriminator);
        session->receive_timer.function = dp_bfd_timer_receive_callback;
        add_timer_on(&session->receive_timer, 1);
        session->flag |= DP_BFD_RECEIVE_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_receive_add receive_timer   @@@@@@@@@@@@session->receive_interval=%d\n", session->receive_interval);
    }
    else
    {
        mod_timer(&session->receive_timer, jiffies + session->receive_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_timer_receive_add mod receive_timer   @@@@@@@@@@@@session->receive_interval=%d\n", session->receive_interval);
    }
}
Example #21
u32 dp_bfd_up_to_up(dp_bfd_packet_s *packet, dp_bfd_session_s *session)
{   
    if(!(session->flag & DP_BFD_RECEIVE_TIMER_ON))
    {
        session->receive_timer.data = session->packet.my_discriminator;
        session->receive_timer.function = dp_bfd_timer_receive_callback;
        session->receive_timer.expires = jiffies + session->receive_interval;
        add_timer_on(&session->receive_timer, 1);
        session->flag |= DP_BFD_RECEIVE_TIMER_ON;
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_up_to_up add receive_timer *********receive_interval=%d\n", session->receive_interval);
    }
    else
    {
        mod_timer(&session->receive_timer, jiffies + session->receive_interval);
        
        if(dp_bfd_debug & DEBUG_BFD_TIMER)
            printk("dp_bfd_up_to_up mod receive_timer *********receive_interval=%d\n", session->receive_interval);
    }
    
    if(DP_BFD_SET == packet->p)
    {
        session->send_interval = (HZ)*DP_BFD_GET_MAX(session->packet.min_tx_interval, packet->min_rx_interval)/1000/1000;
        session->receive_interval = (HZ)*(packet->detect_mult)*(DP_BFD_GET_MAX(packet->min_tx_interval, session->packet.min_rx_interval)/1000)/1000;
        session->packet.f = DP_BFD_SET;

        if(DP_BFD_SET == session->packet.a)
        {
            /* Recompute the digest if the authentication type is MD5 or SHA1 */
            session->flag |= DP_BFD_FILL_AUTH;
        }
        
        dp_bfd_send_msg(session, DP_BFD_SEND_CONTROL_PACKET);
        
        if(session->packet.f == DP_BFD_SET)
        {
            session->packet.f = DP_BFD_UNSET;
            if(DP_BFD_SET == session->packet.a)
            {
                /* Recompute the digest if the authentication type is MD5 or SHA1 */
                session->flag |= DP_BFD_FILL_AUTH;
            }
        }
        
        return NF_DROP;
    }
    
    if(DP_BFD_SET == packet->f)
    {
        if(session->packet.min_tx_interval > session->dmti_old)
        {
            session->send_interval = (HZ)*DP_BFD_GET_MAX(session->packet.min_tx_interval, packet->min_rx_interval)/1000/1000;
        }
        
        if(session->packet.min_rx_interval < session->rmri_old)
        {
            session->receive_interval = (HZ)*(packet->detect_mult)*(DP_BFD_GET_MAX(packet->min_tx_interval, session->packet.min_rx_interval)/1000)/1000;
        }
        /* Using new interval to adjust timers */
        
        return NF_DROP;
    }
    
    return NF_DROP;
}
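Example #22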
int gpu_control_state_set(struct kbase_device *kbdev, gpu_control_state state, int param)
{
	int ret = 0, voltage;
#ifdef CONFIG_MALI_MIDGARD_DVFS
	unsigned long flags;
#endif /* CONFIG_MALI_MIDGARD_DVFS */
	struct exynos_context *platform = (struct exynos_context *) kbdev->platform_context;
	if (!platform)
		return -ENODEV;

	mutex_lock(&platform->gpu_clock_lock);
	switch (state) {
	case GPU_CONTROL_CLOCK_ON:
		ret = gpu_clock_on(platform);
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_ON_POST:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (!kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = true;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_start(&kbdev->pm.metrics.timer, HR_TIMER_DELAY_MSEC(platform->polling_speed), HRTIMER_MODE_REL);
#else
			kbdev->pm.metrics.tlist.expires = jiffies + msecs_to_jiffies(platform->polling_speed);
			add_timer_on(&kbdev->pm.metrics.tlist, 0);
#endif
		}
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, 0);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
#ifdef GPU_EARLY_CLK_GATING
	case GPU_CONTROL_CLOCK_OFF_POST:
#else
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (platform->dvfs_status && kbdev->pm.metrics.timer_active) {
			spin_lock_irqsave(&kbdev->pm.metrics.lock, flags);
			kbdev->pm.metrics.timer_active = false;
			spin_unlock_irqrestore(&kbdev->pm.metrics.lock, flags);
#if !defined(SLSI_SUBSTITUTE)
			hrtimer_cancel(&kbdev->pm.metrics.timer);
#else
			del_timer(&kbdev->pm.metrics.tlist);
#endif
		}
		gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_RESET);
		gpu_dvfs_handler_control(kbdev, GPU_HANDLER_UPDATE_TIME_IN_STATE, platform->cur_clock);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
#ifdef GPU_EARLY_CLK_GATING
		break;
	case GPU_CONTROL_CLOCK_OFF:
#endif /* GPU_EARLY_CLK_GATING*/
		ret = gpu_clock_off(platform);
		break;
	case GPU_CONTROL_CHANGE_CLK_VOL:
		ret = gpu_set_clk_vol(kbdev, param, gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_VOLTAGE, param));
#ifdef CONFIG_MALI_MIDGARD_DVFS
		if (ret == 0) {
			ret = gpu_dvfs_handler_control(kbdev, GPU_HANDLER_DVFS_GET_LEVEL, platform->cur_clock);
			if (ret >= 0) {
				spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
				platform->step = ret;
				spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
			} else {
				GPU_LOG(DVFS_ERROR, "Invalid dvfs level returned [%d]\n", GPU_CONTROL_CHANGE_CLK_VOL);
			}
		}
		if (gpu_pm_qos_command(platform, GPU_CONTROL_PM_QOS_SET) < -1)
			GPU_LOG(DVFS_ERROR, "failed to set the PM_QOS\n");
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_PREPARE_ON:
#ifdef CONFIG_MALI_MIDGARD_DVFS
		spin_lock_irqsave(&platform->gpu_dvfs_spinlock, flags);
		if (platform->dvfs_status && platform->wakeup_lock)
			platform->cur_clock = MALI_DVFS_START_FREQ;

		if (platform->min_lock > 0)
			platform->cur_clock = MAX(platform->min_lock, platform->cur_clock);
		if (platform->max_lock > 0)
			platform->cur_clock = MIN(platform->max_lock, platform->cur_clock);

		platform->down_requirement = platform->table[platform->step].stay_count;
		spin_unlock_irqrestore(&platform->gpu_dvfs_spinlock, flags);
#endif /* CONFIG_MALI_MIDGARD_DVFS */
		break;
	case GPU_CONTROL_IS_POWER_ON:
		ret = gpu_is_power_on();
		break;
	case GPU_CONTROL_SET_MARGIN:
		voltage = MAX(platform->table[platform->step].voltage + platform->voltage_margin, COLD_MINIMUM_VOL);
		gpu_set_voltage(platform, voltage);
		GPU_LOG(DVFS_DEBUG, "we set the voltage: %d\n", voltage);
		break;
	default:
		mutex_unlock(&platform->gpu_clock_lock);
		return -1;
	}
	mutex_unlock(&platform->gpu_clock_lock);

	return ret;
}
Example #23
unsigned int
dp_bfd_deal(struct sk_buff *pskb)
{
	struct udphdr _hdr, *hdr;
    dp_bfd_packet_s _packet, *packet;
    dp_bfd_auth_s _auth_packet, *auth_packet;
    dp_bfd_echo_packet_s _echo_packet, *echo_packet;
    dp_bfd_session_s *session;
    u32 (*dp_bfd_state_trans_callback)(dp_bfd_packet_s *, dp_bfd_session_s *) = NULL;
    u32 ret;
    u8 old_state;

	u32 dataoff = (u32)(((pskb)->nh.raw - (pskb)->data) + (pskb)->nh.iph->ihl*4);
    
	if (IPPROTO_UDP != (pskb)->nh.iph->protocol)
	{
		return NF_ACCEPT;
	}
    
	hdr = skb_header_pointer(pskb, (s32)dataoff, sizeof(_hdr), &_hdr);
	if ((NULL == hdr)||((DP_BFD_PORT != hdr->dest)&&(DP_BFD_ECHO_PORT != hdr->dest)))
	{
    	return NF_ACCEPT;
	}

    /* deal with echo packet */
	if(DP_BFD_PORT != hdr->dest)
	{
	    return NF_ACCEPT;
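	    /* NOTE: this early return makes the echo-handling code below unreachable */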
	    /* rebound the packet */
	    if(DP_BFD_ECHO_PORT == hdr->dest)
        {
            dp_bfd_echo_packet_rebound(pskb);
            return NF_DROP;
        }

        /* add echo receive timer */
        if(DP_BFD_ECHO_PORT == hdr->source)
        {
            /* Extract the echo packet */
            echo_packet = skb_header_pointer(pskb, (s32)(dataoff + sizeof(_hdr)), sizeof(_echo_packet), &_echo_packet);
            if ( (NULL == echo_packet)||(0 == g_session_total) )
            {
                return NF_DROP;
            }
            session = dp_bfd_find_session_simple(echo_packet->my_discriminator);
            if(NULL == session)
            {
                return NF_DROP;
            }
            
            session->echo_receive_interval = (HZ)*session->packet.min_echo_rx_interval/1000/1000;
            if(!(session->flag & DP_BFD_ECHORECV_TIMER_ON))
            {
                session->echo_receive_timer.expires = jiffies + session->echo_receive_interval;
                session->echo_receive_timer.data = (unsigned long)(session->packet.my_discriminator);
                session->echo_receive_timer.function = dp_bfd_timer_echo_receive_callback;
                add_timer_on(&session->echo_receive_timer, 1);
                session->flag |= DP_BFD_ECHORECV_TIMER_ON;
                
                if(dp_bfd_debug & DEBUG_BFD_TIMER)
                    printk("dp_bfd_deal echo_receive_timer\n");
            }
            else
            {
                mod_timer(&session->echo_receive_timer, jiffies + session->echo_receive_interval);
                
                if(dp_bfd_debug & DEBUG_BFD_TIMER)
                    printk("dp_bfd_deal mod echo_receive_timer\n");
            }
            write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
            return NF_DROP;
        }
		return NF_ACCEPT;
	}

    if(hdr->source < 49152)
    {
        return NF_ACCEPT;
    }
    
    /* BFD packet processing */
    if(0 == g_session_total)
    {
        return NF_DROP;
    }
	packet = skb_header_pointer(pskb, (s32)(dataoff + sizeof(_hdr)), sizeof(_packet), &_packet);
    if ( (NULL == packet)||(NF_DROP == dp_bfd_rcv_packet_deal(packet)) )
    {
        return NF_DROP;
    }
    
    /* If session != NULL, the interface must already be enabled */
    session = dp_bfd_find_session(packet, (pskb)->nh.iph->saddr, (pskb)->nh.iph->daddr);
    if(NULL == session)
    {
        return NF_DROP;
    }

    if (ADMIN_DOWN == session->packet.sta)
    {
        write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
        return NF_DROP;
    }
    
    if(session->packet.a != packet->a)
    {
        write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
        return NF_DROP;
    }
    
    if(DP_BFD_SET == session->packet.a)
    {       
        /* Extract the authentication data packet */
        auth_packet = skb_header_pointer(pskb, (s32)(dataoff + sizeof(_hdr) + sizeof(_packet)),  packet->length - sizeof(dp_bfd_packet_s), &_auth_packet);         
        if ( (NULL == auth_packet)||(ERROR_FAIL == dp_bfd_auth_deal(packet, auth_packet, session)) )
        {
            if(dp_bfd_debug & DEBUG_BFD_AUTH)
                printk("********************auth failed*********************\n");
            write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
            return NF_DROP;
        }
    }

    if ( (DP_BFD_UNSET == session->packet.p)&&(DP_BFD_SET == packet->f) )
    {
        write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
        return NF_DROP;
    }

    if ( (DP_BFD_SET == session->packet.p)&&(DP_BFD_SET == packet->f) )
    {
        session->packet.p = DP_BFD_UNSET;
        if (DP_BFD_SET == session->packet.a)
        {
            session->flag |= DP_BFD_FILL_AUTH;
        }
    }

    old_state = session->packet.sta;
    dp_bfd_state_trans_callback = g_bfd_state_trans[session->packet.sta][packet->sta].func;
    session->packet.sta = g_bfd_state_trans[session->packet.sta][packet->sta].state;

    if ( (INIT <= session->packet.sta)&&(NO_DIAGNOSTIC != session->packet.diag) )
    {
        session->packet.diag = NO_DIAGNOSTIC;
        if (session->packet.a)
        {
            session->flag |= DP_BFD_FILL_AUTH;
        }
    }
    
    if((old_state != session->packet.sta)&&(session->packet.a))
    {
        session->flag |= DP_BFD_FILL_AUTH;
    }
    
    if(NULL != dp_bfd_state_trans_callback)
    {
        ret = dp_bfd_state_trans_callback(packet, session);
        write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
        return ret;
    }

    write_unlock_bh(&g_session_cask[session->packet.my_discriminator%256].session_lock);
    return NF_DROP;
}
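dp_bfd_deal() has the body of a netfilter hook but not the hook prototype itself. On the 2.6-era kernels this code targets (note the skb->nh accessors), it would presumably be attached through a thin wrapper; a sketch under that assumption (hook point, priority, and wrapper name are guesses, not taken from the source):

static unsigned int dp_bfd_nf_hook(unsigned int hooknum, struct sk_buff **pskb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	return dp_bfd_deal(*pskb);
}

static struct nf_hook_ops dp_bfd_ops = {
	.hook     = dp_bfd_nf_hook,
	.pf       = PF_INET,
	.hooknum  = NF_IP_LOCAL_IN,	/* assumed: inspect locally delivered packets */
	.priority = NF_IP_PRI_FIRST,
};

/* nf_register_hook(&dp_bfd_ops) at module init, nf_unregister_hook(&dp_bfd_ops) on exit. */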