Example #1
// TODO: print values to proc file
int log_timing(int type)
{
    s64 ns_time, ns_diff;
    struct timeval current_time;

    /* helpers used below:
     *   s64 timeval_to_ns(const struct timeval *tv);
     *   void do_gettimeofday(struct timeval *tv);
     */

    do_gettimeofday(&current_time);
    ns_time = timeval_to_ns(&current_time);
    if (type != TIMING_TX_START && type != TIMING_RX_START) {
        /* an end event: accumulate the time since the matching start event */
        if (type <= (TIMING_NUM_ELEMENTS - 1) / 2)
            ns_diff = ns_time - __timing_array[TIMING_TX_START];
        else
            ns_diff = ns_time - __timing_array[TIMING_RX_START];

        __timing_array[type] += ns_diff;
    } else {
        /* a start event: record the timestamp and count the packet */
        __timing_array[type] = ns_time;

        if (type == TIMING_TX_START)
            __timing_array[TIMING_TX_PACKETS] += 1;
        else if (type == TIMING_RX_START)
            __timing_array[TIMING_RX_PACKETS] += 1;
    }

    return 0;
}
Example #2
/* return the current time in nanoseconds */
static int64_t time_to_ns(void)
{
    struct timeval tv;

    do_gettimeofday(&tv);
    
    return timeval_to_ns(&tv);
}
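For reference, timeval_to_ns() itself is only a widening multiply-and-add; in kernels of this era it lives in include/linux/time.h as a static inline, roughly as in the sketch below (a paraphrase, not copied from a specific tree):

/* Sketch of the conversion the examples on this page rely on: both
 * fields are widened to s64 and scaled to nanoseconds
 * (NSEC_PER_SEC == 1000000000L, NSEC_PER_USEC == 1000L).
 */
static inline s64 timeval_to_ns(const struct timeval *tv)
{
	return ((s64) tv->tv_sec * NSEC_PER_SEC) +
		tv->tv_usec * NSEC_PER_USEC;
}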
Example #3
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	s64 elapsed_centisecs64;
	int centisecs;
	int k;
	int kps;

	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	printk("%s %d kbytes in %d.%02d seconds (%d.%02d MB/s)\n", msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}
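To check the arithmetic with illustrative numbers: for nr_pages = 6400 with PAGE_SIZE = 4096 and an elapsed time of 2.5 s, elapsed_centisecs64 = 2500000000 ns / 10000000 = 250, k = 6400 * 4 = 25600 kbytes, and kps = 25600 * 100 / 250 = 10240, so the function prints "25600 kbytes in 2.50 seconds (10.24 MB/s)".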
Example #4
static void set_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
			   const struct itimerval *const value,
			   struct itimerval *const ovalue)
{
	cputime_t cval, nval, cinterval, ninterval;
	s64 ns_ninterval, ns_nval;
	u32 error, incr_error;
	struct cpu_itimer *it = &tsk->signal->it[clock_id];

	nval = timeval_to_cputime(&value->it_value);
	ns_nval = timeval_to_ns(&value->it_value);
	ninterval = timeval_to_cputime(&value->it_interval);
	ns_ninterval = timeval_to_ns(&value->it_interval);

	error = cputime_sub_ns(nval, ns_nval);
	incr_error = cputime_sub_ns(ninterval, ns_ninterval);

	spin_lock_irq(&tsk->sighand->siglock);

	cval = it->expires;
	cinterval = it->incr;
	if (!cputime_eq(cval, cputime_zero) ||
	    !cputime_eq(nval, cputime_zero)) {
		if (cputime_gt(nval, cputime_zero))
			nval = cputime_add(nval, cputime_one_jiffy);
		set_process_cpu_timer(tsk, clock_id, &nval, &cval);
	}
	it->expires = nval;
	it->incr = ninterval;
	it->error = error;
	it->incr_error = incr_error;
	trace_itimer_state(clock_id == CPUCLOCK_VIRT ?
			   ITIMER_VIRTUAL : ITIMER_PROF, value, nval);

	spin_unlock_irq(&tsk->sighand->siglock);

	if (ovalue) {
		cputime_to_timeval(cval, &ovalue->it_value);
		cputime_to_timeval(cinterval, &ovalue->it_interval);
	}
}
Example #5
void get_dilated_time(struct task_struct *task, struct timeval *tv)
{
	s64 temp_past_physical_time;

	do_gettimeofday(tv);

	if (task->virt_start_time != 0) {
		s64 now;
		s32 rem;
		s64 real_running_time;
		s64 dilated_running_time;

		if (task->group_leader != task) { /* use the virtual time of the group leader */
			task = task->group_leader;
		}
		now = timeval_to_ns(tv);
		//spin_lock(&task->dialation_lock);
		if (task->freeze_time == 0)
			real_running_time = now - task->virt_start_time;
		else
			real_running_time = task->freeze_time - task->virt_start_time;
		//real_running_time = now - task->virt_start_time;
		//if (task->freeze_time != 0)
		//	temp_past_physical_time = task->past_physical_time + (now - task->freeze_time);
		//else
		temp_past_physical_time = task->past_physical_time;

		if (task->dilation_factor > 0) {
			dilated_running_time = div_s64_rem((real_running_time - temp_past_physical_time) * 1000,
							   task->dilation_factor, &rem) + task->past_virtual_time;
		} else if (task->dilation_factor < 0) {
			dilated_running_time = div_s64_rem((real_running_time - temp_past_physical_time) * (-task->dilation_factor),
							   1000, &rem) + task->past_virtual_time;
		} else {
			dilated_running_time = (real_running_time - temp_past_physical_time) + task->past_virtual_time;
		}
		now = dilated_running_time + task->virt_start_time;
		
		if (task->freeze_time == 0) {
			task->past_physical_time = real_running_time;
			task->past_virtual_time = dilated_running_time;
		}
		//spin_unlock(&task->dialation_lock);
		*tv = ns_to_timeval(now);
	}

	return;

}
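Assuming dilation_factor encodes a time-dilation factor scaled by 1000 (an inference from the arithmetic above, not documented in this snippet): with dilation_factor = 2000, 10 s of real running time becomes 10 s * 1000 / 2000 = 5 s of virtual time; with dilation_factor = -2000 the scaling inverts, 10 s * 2000 / 1000 = 20 s; and a factor of 0 passes time through unscaled.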
Example #6
/**
 * swsusp_show_speed - Print time elapsed between two events during hibernation.
 * @start: Starting event.
 * @stop: Final event.
 * @nr_pages: Number of memory pages processed between @start and @stop.
 * @msg: Additional diagnostic message to print.
 */
void swsusp_show_speed(struct timeval *start, struct timeval *stop,
			unsigned nr_pages, char *msg)
{
	u64 elapsed_centisecs64;
	unsigned int centisecs;
	unsigned int k;
	unsigned int kps;

	elapsed_centisecs64 = timeval_to_ns(stop) - timeval_to_ns(start);
	/*
	 * If "(s64)elapsed_centisecs64 < 0", it will print long elapsed time,
	 * it is obvious enough for what went wrong.
	 */
	do_div(elapsed_centisecs64, NSEC_PER_SEC / 100);
	centisecs = elapsed_centisecs64;
	if (centisecs == 0)
		centisecs = 1;	/* avoid div-by-zero */
	k = nr_pages * (PAGE_SIZE / 1024);
	kps = (k * 100) / centisecs;
	printk(KERN_INFO "PM: %s %u kbytes in %u.%02u seconds (%u.%02u MB/s)\n",
			msg, k,
			centisecs / 100, centisecs % 100,
			kps / 1000, (kps % 1000) / 10);
}
Example #7
static void
do_p2m(unsigned long (*conv)(unsigned long),
       const char *msg, const char *prefix,
       unsigned long start_gpfn, unsigned long end_gpfn, unsigned long stride)
{
	struct timeval before_tv;
	struct timeval after_tv;
	unsigned long gpfn;
	unsigned long mfn;
	unsigned long count;
	s64 nsec;

	count = 0;
	do_gettimeofday(&before_tv);
	for (gpfn = start_gpfn; gpfn < end_gpfn; gpfn += stride) {
		mfn = (*conv)(gpfn);
		count++;
	}
	do_gettimeofday(&after_tv);
	nsec = timeval_to_ns(&after_tv) - timeval_to_ns(&before_tv);
	printk("%s stride %4ld %s: %9ld / %6ld = %5ld nsec\n",
	       msg, stride, prefix,
	       nsec, count, nsec/count);
}
Example #8
static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
{
	const struct v4l2_buffer *b = pb;
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vb2_queue *q = vb->vb2_queue;

	if (q->is_output) {
		/*
		 * For output buffers copy the timestamp if needed,
		 * and the timecode field and flag if needed.
		 */
		if (q->copy_timestamp)
			vb->timestamp = timeval_to_ns(&b->timestamp);
		vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
			vbuf->timecode = b->timecode;
	}
}
Example #9
static int check_format(char *string)
{
	char *str;

	if (string[0] != '+') {
		printf("invalid format string, use '+%%N'\n");
		return -1;
	}
	if ((str = get_next_key(string)) != NULL) {
		switch(str[0]) {
		case 'N': {
			struct timeval tv;
			time64_t ns;

			ktime_get_timeval(&tv);
			ns = timeval_to_ns(&tv);
			printf("%lld\n", ns % MAX_NANOSECONDS);
			break;
		}
		default:
			break;
		}
	}
	return 0;
}
Example #10
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_msecs / 1000, elapsed_msecs % 1000,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_suspend(NULL);
#endif
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_msecs > 1000)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
#ifdef CONFIG_MSM_WATCHDOG
			msm_watchdog_resume(NULL);
#endif
		}
	} else {
Example #11
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_csecs / 100, elapsed_csecs % 100,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (!wakeup && !freezer_should_skip(p) &&
			    p != current && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #12
static int try_to_freeze_tasks(int freeze_user_space)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	s64 elapsed_csecs64;
	unsigned int elapsed_csecs;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (p->state == TASK_TRACED && frozen(p->parent)) {
				cancel_freezing(p);
				continue;
			}

			if (!freeze_task(p, freeze_user_space))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
				"(%d tasks refusing to freeze):\n",
				elapsed_csecs / 100, elapsed_csecs % 100, todo);
		show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p))
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #13
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;
#ifdef CONFIG_SHSYS_CUST
	struct timespec tu;
#endif

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!sig_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!sig_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
#ifdef CONFIG_SHSYS_CUST
		tu.tv_sec = 0;
		tu.tv_nsec = 10000000;
		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
#else
		msleep(10);
#endif
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}
		thaw_workqueues();

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
				elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #14
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = true;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #15
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted (%s)\n",
			       user_only ? "user space " : "tasks ",
			       q ? q->comm : "NONE");
		} else {
Example #16
static inline long long timeval_subtract(struct timeval *a, struct timeval *b)
{
	return timeval_to_ns(a) - timeval_to_ns(b);
}
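The helper above condenses the measurement idiom that recurs throughout these examples: sample a struct timeval before and after the work, convert both to nanoseconds, and subtract. A minimal self-contained sketch of that pattern (work() is a placeholder, not an API from the code above):

struct timeval before, after;
s64 elapsed_ns;

do_gettimeofday(&before);
work();                 /* placeholder for whatever is being timed */
do_gettimeofday(&after);
elapsed_ns = timeval_to_ns(&after) - timeval_to_ns(&before);

/* Caveat: do_gettimeofday() reads wall-clock time, so an NTP or
 * settimeofday() step during the measurement can make elapsed_ns
 * negative or huge; a monotonic source such as ktime_get() avoids that.
 */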
Example #17
void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
{
	int under_perform_boundary_value = 0;
	int over_perform_boundary_value = 0;
	int current_fps = 0;
	int current_gpu_util = 0;
	bool clock_changed = false;
	u32 window_render_fps;
#if CLOCK_TUNING_TIME_DEBUG
	struct timeval start;
	struct timeval stop;
	unsigned int elapse_time;
	do_gettimeofday(&start);
#endif

	if (NULL == gpu_clk) {
		MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. \n"));
		return;
	}

	window_render_fps = calculate_window_render_fps(time_period);

	current_fps = window_render_fps;
	current_gpu_util = data->utilization_gpu;

	/* Get the specific under_perform_boundary_value and over_perform_boundary_value */
	if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
	} else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
	} else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
	} else {
		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
	}

	MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
	MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d,  over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
	MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d,  pressure render fps = %d \n", current_fps, window_render_fps));

	/* Get current clock value */
	cur_clk_step = mali_gpu_get_freq();

	/* Consider offscreen */
	if (0 == current_fps) {
		/* GP or PP under perform, need to give full power */
		if (current_gpu_util > over_perform_boundary_value) {
			if (cur_clk_step != gpu_clk->num_of_steps - 1) {
				clock_changed = true;
				clock_step = gpu_clk->num_of_steps - 1;
			}
		}

		/* If GPU is idle, use lowest power */
		if (0 == current_gpu_util) {
			if (cur_clk_step != 0) {
				clock_changed = true;
				clock_step = 0;
			}
		}

		goto real_setting;
	}

	/* 2. Calculate target clock if the GPU clock can be tuned */
	if (-1 != cur_clk_step) {
		int target_clk_mhz = -1;
		mali_bool pick_clock_up = MALI_TRUE;

		if (current_gpu_util > under_perform_boundary_value) {
			/* when under perform, need to consider the fps part */
			target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
			pick_clock_up = MALI_TRUE;
		} else if (current_gpu_util < over_perform_boundary_value) {
			/* when over-performing, fps need not be considered; the system doesn't need to reach the desired fps */
			target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
			pick_clock_up = MALI_FALSE;
		}

		if (-1 != target_clk_mhz) {
			clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
		}
	}

real_setting:
	if (clock_changed) {
		mali_gpu_set_freq(clock_step);

		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
					      MALI_PROFILING_EVENT_CHANNEL_GPU |
					      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
					      gpu_clk->item[clock_step].clock,
					      gpu_clk->item[clock_step].vol / 1000,
					      0, 0, 0);
	}

#if CLOCK_TUNING_TIME_DEBUG
	do_gettimeofday(&stop);

	elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
	MALI_DEBUG_PRINT(2, ("Using ARM power policy: elapsed time = %u ns\n", elapse_time));
#endif
}
Example #18
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
#ifdef CONFIG_SEC_PM_DEBUG
	struct task_struct *q;
#endif
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p)) {
				todo++;
#ifdef CONFIG_SEC_PM_DEBUG
				q = p;
#endif
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

#ifdef CONFIG_SEC_PM_DEBUG
		if (wakeup) {
			printk(KERN_ERR "Freezing of %s aborted (%d) (%s)\n",
					user_only ? "user space " : "tasks ",
					q ? q->pid : 0, q ? q->comm : "NONE");
		}
#endif

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #19
static int gaole_test_thread(void *data)
{
	int i,j,k;
	
	static struct timeval start_time;
	static struct timeval stop_time;
	static signed long long elapsed_time;

	struct sched_param param = {.sched_priority = 90};

	/*set the thread as a real time thread, and its priority is 90*/
	sched_setscheduler(current, SCHED_RR, &param);
	
	for (i = 0; i < 16; i++) {
		data_send_ptr[i] = kmalloc(SEND_BUF_LEN, GFP_KERNEL);
		if (data_send_ptr[i] == NULL) {
			SDIOTRAN_ERR("kmalloc send buf%d err!!!", i);
			return -1;
		}

		SDIOTRAN_DEBUG("kmalloc send buf%d ok!!!", i);

		/* fill the buffer with a repeating 0..255 pattern offset by i */
		for (j = 0, k = 0; j < SEND_BUF_LEN; j++, k++)
			*(data_send_ptr[i] + j) = ((k + i) <= 255 ? (k + i) : (k = 0, k + i));
	}
	//set_blklen(1); 


	while (1) {
		for (i = 0; i < 16; i++) {
			do_gettimeofday(&start_time);

			for (j = 0; j < 100; j++)
				test_sdio_send_func(i, data_send_ptr[i]);

			do_gettimeofday(&stop_time);
			elapsed_time = timeval_to_ns(&stop_time) - timeval_to_ns(&start_time);
			SDIOTRAN_ERR("Chn %d test elapsed_time is %lld!!!", i, elapsed_time);
			elapsed_time = 0;
		}
		SDIOTRAN_ERR("sdio test write finish!!!");
		break;
	}
	
	return 0;
}

void gaole_creat_test(void)
{
	struct task_struct * task;

	task = kthread_create(gaole_test_thread, NULL, "GaoleTest");
	if (!IS_ERR(task))
		wake_up_process(task);
}


#endif /* SDIO_TEST_CASE7 */

#if defined(SDIO_TEST_CASE8)

#define TEST_BUF_LEN 8192 //SDIO_TEST_CASE8

char *data_test_ptr = NULL;

static int gaole_test_count = 0;

static struct timeval start_time;
static struct timeval stop_time;
static signed long long elapsed_time;

static int last_chn = 0;
bool stop_test = 0;

void test_sdio_recv_func(void)
{
	int i;	
	int read_chn = 0;
	int datalen = 0;



	//SDIOTRAN_ERR("\nap recv: active_chn is 0x%x!!!\n",active_chn);

	if (last_chn != read_chn) {
		last_chn = read_chn;
		gaole_test_count = 0;
	}
	gaole_test_count++;

	//SDIOTRAN_ERR("\nap recv: read_datalen is 0x%x!!!\n",read_datalen);

	//for(i=0;i<TEST_BUF_LEN;i++)
	//	data_test_ptr[i] = 0;

	sdio_dev_read(read_chn,data_test_ptr,&datalen);

	if(1 == gaole_test_count)
		do_gettimeofday(&start_time);	

	
	if(100 == gaole_test_count)
	{
		do_gettimeofday(&stop_time);
		elapsed_time = timeval_to_ns(&stop_time) - timeval_to_ns(&start_time);
		SDIOTRAN_ERR("Chn %d test elapsed_time is %lld!!!",read_chn,elapsed_time);

		elapsed_time = 0;
		if(15 == read_chn)
			stop_test = 1;
	}
	/*
	SDIOTRAN_ERR("\ncase7:ap recv data: %d,%d,%d,%d  %d,%d,%d,%d  %d,%d,%d,%d!!!\n",\
		data_test_ptr[0],\
		data_test_ptr[1],\
		data_test_ptr[2],\
		data_test_ptr[3], \
		data_test_ptr[101], \
		data_test_ptr[102], \
		data_test_ptr[103], \
		data_test_ptr[104], \
		data_test_ptr[251], \
		data_test_ptr[252], \
		data_test_ptr[253], \
		data_test_ptr[254] \
		);*/
}
Example #20
static int gaole_test_thread(void *data)
{
	int i,j;

	struct sched_param param = {.sched_priority = 90};

	/*set the thread as a real time thread, and its priority is 90*/
	sched_setscheduler(current, SCHED_RR, &param);

	data_test_ptr = kmalloc(TEST_BUF_LEN, GFP_KERNEL);
	if (data_test_ptr == NULL) {
		SDIOTRAN_ERR("kmalloc send buf err!!!");
		return -1;
	}

	SDIOTRAN_DEBUG("kmalloc send buf ok!!!");

	/* fill the buffer with a repeating 0..255 pattern */
	for (i = 0, j = 0; i < TEST_BUF_LEN; i++, j++)
		data_test_ptr[i] = (j <= 255 ? j : (j = 0, j));

	test_sdio_send_func();


	while (1) {
		test_sdio_recv_func();

		if (stop_test == 1)
			break;
	}
	
	return 0;
}

void gaole_creat_test(void)
{
	struct task_struct * task;

	task = kthread_create(gaole_test_thread, NULL, "GaoleTest");
	if (!IS_ERR(task))
		wake_up_process(task);
}


#endif /* SDIO_TEST_CASE8 */


#if defined(SDIO_TEST_CASE9)

#define TEST_BUF_LEN 8192 //SDIO_TEST_CASE9

#define TEST_SEND_CHN 0


char *data_test_ptr = NULL;


int active_chn = 0;
int read_datalen = 0;
static int gaole_test_count = 0;

static struct timeval start_time;
static struct timeval stop_time;
static signed long long elapsed_time;

static int last_chn = 0;

void case9_workq_handler(void)
{
	int i;
	
	active_chn = 0;
	read_datalen = 0;

	active_chn = sdio_dev_get_read_chn();
	//SDIOTRAN_ERR("\nap recv: active_chn is 0x%x!!!\n",active_chn);
	
	if(-1 == active_chn)
		return;

	if (last_chn != active_chn) {
		last_chn = active_chn;
		gaole_test_count = 0;
	}
	gaole_test_count++;
	
	

	//SDIOTRAN_ERR("\nap recv: read_datalen is 0x%x!!!\n",read_datalen);

	//for(i=0;i<TEST_BUF_LEN;i++)
	//	data_test_ptr[i] = 0;

	sdio_dev_read(active_chn,data_test_ptr,&read_datalen);

	if(2 == gaole_test_count)
		do_gettimeofday(&start_time);	

	
	if(101 == gaole_test_count)
	{
		do_gettimeofday(&stop_time);
		elapsed_time = timeval_to_ns(&stop_time) - timeval_to_ns(&start_time);
		SDIOTRAN_ERR("Chn %d test elapsed_time is %lld!!!",active_chn,elapsed_time);
		elapsed_time = 0;
	}
	/*
	SDIOTRAN_ERR("\ncase7:ap recv data: %d,%d,%d,%d  %d,%d,%d,%d  %d,%d,%d,%d!!!\n",\
		data_test_ptr[0],\
		data_test_ptr[1],\
		data_test_ptr[2],\
		data_test_ptr[3], \
		data_test_ptr[101], \
		data_test_ptr[102], \
		data_test_ptr[103], \
		data_test_ptr[104], \
		data_test_ptr[251], \
		data_test_ptr[252], \
		data_test_ptr[253], \
		data_test_ptr[254] \
		);*/
}
Example #21
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	do {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's
			 * scheduler lock after setting TIF_FREEZE, it's
			 * guaranteed that either we see TASK_RUNNING or
			 * try_to_stop() after schedule() in ptrace/signal
			 * stop sees TIF_FREEZE.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		yield();			/* Yield is okay here */
		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (time_after(jiffies, end_time))
			break;
	} while (todo);

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
				"(%d tasks refusing to freeze):\n",
				wakeup ? "aborted" : "failed",
				elapsed_csecs / 100, elapsed_csecs % 100, todo);
		if (!wakeup)
			show_state();
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
							elapsed_csecs > 100)
				printk(KERN_ERR " %s\n", p->comm);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #22
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	bool wakeup = false;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       user_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks %s after %d.%02d seconds "
			       "(%d tasks refusing to freeze, wq_busy=%d):\n",
			       wakeup ? "aborted" : "failed",
			       elapsed_csecs / 100, elapsed_csecs % 100,
			       todo - wq_busy, wq_busy);
		}

		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p) &&
				    elapsed_csecs > 100)
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #23
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;
	char suspend_abort[MAX_SUSPEND_ABORT_LEN];

	do_gettimeofday(&start);

	end_time = jiffies + msecs_to_jiffies(freeze_timeout_msecs);

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			if (!freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
#ifndef CONFIG_UML
			pm_get_active_wakeup_sources(suspend_abort,
				MAX_SUSPEND_ABORT_LEN);
#endif
			log_suspend_abort_reason(suspend_abort);
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (wakeup) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks aborted after %d.%03d seconds",
		       elapsed_msecs / 1000, elapsed_msecs % 1000);
	} else if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks failed after %d.%03d seconds"
		       " (%d tasks refusing to freeze, wq_busy=%d):\n",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p != current && !freezer_should_skip(p)
			    && freezing(p) && !frozen(p))
				sched_show_task(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #24
/**
 * drm_calc_vbltimestamp_from_scanoutpos - helper routine for kms
 * drivers. Implements calculation of exact vblank timestamps from
 * given drm_display_mode timings and current video scanout position
 * of a crtc. This can be called from within get_vblank_timestamp()
 * implementation of a kms driver to implement the actual timestamping.
 *
 * Should return timestamps conforming to the OML_sync_control OpenML
 * extension specification. The timestamp corresponds to the end of
 * the vblank interval, aka start of scanout of topmost-leftmost display
 * pixel in the following video frame.
 *
 * Requires support for optional dev->driver->get_scanout_position()
 * in kms driver, plus a bit of setup code to provide a drm_display_mode
 * that corresponds to the true scanout timing.
 *
 * The current implementation only handles standard video modes. It
 * returns as no operation if a doublescan or interlaced video mode is
 * active. Higher level code is expected to handle this.
 *
 * @dev: DRM device.
 * @crtc: Which crtc's vblank timestamp to retrieve.
 * @max_error: Desired maximum allowable error in timestamps (nanosecs).
 *             On return contains true maximum error of timestamp.
 * @vblank_time: Pointer to struct timeval which should receive the timestamp.
 * @flags: Flags to pass to driver:
 *         0 = Default.
 *         DRM_CALLED_FROM_VBLIRQ = If function is called from vbl irq handler.
 * @refcrtc: drm_crtc* of crtc which defines scanout timing.
 *
 * Returns negative value on error, failure or if not supported in current
 * video mode:
 *
 * -EINVAL   - Invalid crtc.
 * -EAGAIN   - Temporary unavailable, e.g., called before initial modeset.
 * -ENOTSUPP - Function not supported in current display mode.
 * -EIO      - Failed, e.g., due to failed scanout position query.
 *
 * Returns or'ed positive status flags on success:
 *
 * DRM_VBLANKTIME_SCANOUTPOS_METHOD - Signal this method used for timestamping.
 * DRM_VBLANKTIME_INVBL - Timestamp taken while scanout was in vblank interval.
 *
 */
int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
					  int *max_error,
					  struct timeval *vblank_time,
					  unsigned flags,
					  struct drm_crtc *refcrtc)
{
	struct timeval stime, etime;
#ifdef notyet
	struct timeval mono_time_offset;
#endif
	struct drm_display_mode *mode;
	int vbl_status, vtotal, vdisplay;
	int vpos, hpos, i;
	s64 framedur_ns, linedur_ns, pixeldur_ns, delta_ns, duration_ns;
	bool invbl;

	if (crtc < 0 || crtc >= dev->num_crtcs) {
		DRM_ERROR("Invalid crtc %d\n", crtc);
		return -EINVAL;
	}

	/* Scanout position query not supported? Should not happen. */
	if (!dev->driver->get_scanout_position) {
		DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
		return -EIO;
	}

	mode = &refcrtc->hwmode;
	vtotal = mode->crtc_vtotal;
	vdisplay = mode->crtc_vdisplay;

	/* Durations of frames, lines, pixels in nanoseconds. */
	framedur_ns = refcrtc->framedur_ns;
	linedur_ns  = refcrtc->linedur_ns;
	pixeldur_ns = refcrtc->pixeldur_ns;

	/* If mode timing undefined, just return as no-op:
	 * Happens during initial modesetting of a crtc.
	 */
	if (vtotal <= 0 || vdisplay <= 0 || framedur_ns == 0) {
		DRM_DEBUG("crtc %d: Noop due to uninitialized mode.\n", crtc);
		return -EAGAIN;
	}

	/* Get current scanout position with system timestamp.
	 * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times
	 * if single query takes longer than max_error nanoseconds.
	 *
	 * This guarantees a tight bound on maximum error if
	 * code gets preempted or delayed for some reason.
	 */
	for (i = 0; i < DRM_TIMESTAMP_MAXRETRIES; i++) {
		/* Disable preemption to make it very likely to
		 * succeed in the first iteration even on PREEMPT_RT kernel.
		 */
#ifdef notyet
		preempt_disable();
#endif

		/* Get system timestamp before query. */
		getmicrouptime(&stime);

		/* Get vertical and horizontal scanout pos. vpos, hpos. */
		vbl_status = dev->driver->get_scanout_position(dev, crtc, &vpos, &hpos);

		/* Get system timestamp after query. */
		getmicrouptime(&etime);
#ifdef notyet
		if (!drm_timestamp_monotonic)
			mono_time_offset = ktime_get_monotonic_offset();

		preempt_enable();
#endif

		/* Return as no-op if scanout query unsupported or failed. */
		if (!(vbl_status & DRM_SCANOUTPOS_VALID)) {
			DRM_DEBUG("crtc %d : scanoutpos query failed [%d].\n",
				  crtc, vbl_status);
			return -EIO;
		}

		duration_ns = timeval_to_ns(&etime) - timeval_to_ns(&stime);

		/* Accept result with <  max_error nsecs timing uncertainty. */
		if (duration_ns <= (s64) *max_error)
			break;
	}

	/* Noisy system timing? */
	if (i == DRM_TIMESTAMP_MAXRETRIES) {
		DRM_DEBUG("crtc %d: Noisy timestamp %d us > %d us [%d reps].\n",
			  crtc, (int) duration_ns/1000, *max_error/1000, i);
	}

	/* Return upper bound of timestamp precision error. */
	*max_error = (int) duration_ns;

	/* Check if in vblank area:
	 * vpos is >=0 in video scanout area, but negative
	 * within vblank area, counting down the number of lines until
	 * start of scanout.
	 */
	invbl = vbl_status & DRM_SCANOUTPOS_INVBL;

	/* Convert scanout position into elapsed time at raw_time query
	 * since start of scanout at first display scanline. delta_ns
	 * can be negative if start of scanout hasn't happened yet.
	 */
	delta_ns = (s64) vpos * linedur_ns + (s64) hpos * pixeldur_ns;

	/* Is vpos outside nominal vblank area, but less than
	 * 1/100 of a frame height away from start of vblank?
	 * If so, assume this isn't a massively delayed vblank
	 * interrupt, but a vblank interrupt that fired a few
	 * microseconds before true start of vblank. Compensate
	 * by adding a full frame duration to the final timestamp.
	 * Happens, e.g., on ATI R500, R600.
	 *
	 * We only do this if DRM_CALLED_FROM_VBLIRQ.
	 */
	if ((flags & DRM_CALLED_FROM_VBLIRQ) && !invbl &&
	    ((vdisplay - vpos) < vtotal / 100)) {
		delta_ns = delta_ns - framedur_ns;

		/* Signal this correction as "applied". */
		vbl_status |= 0x8;
	}

#ifdef notyet
	if (!drm_timestamp_monotonic)
		etime = ktime_sub(etime, mono_time_offset);
#endif

	/* Subtract time delta from raw timestamp to get final
	 * vblank_time timestamp for end of vblank.
	 */
	*vblank_time = ns_to_timeval(timeval_to_ns(&etime) - delta_ns);

	DPRINTF("crtc %d : v %d p(%d,%d)@ %lld.%ld -> %lld.%ld [e %d us, %d rep]\n",
		  crtc, (int)vbl_status, hpos, vpos,
		  (long long)etime.tv_sec, (long)etime.tv_usec,
		  (long long)vblank_time->tv_sec, (long)vblank_time->tv_usec,
		  (int)duration_ns/1000, i);

	vbl_status = DRM_VBLANKTIME_SCANOUTPOS_METHOD;
	if (invbl)
		vbl_status |= DRM_VBLANKTIME_INVBL;

	return vbl_status;
}
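To make the delta_ns correction concrete, take an assumed 1920x1080@60 mode with htotal = 2200, vtotal = 1125 and a 148.5 MHz pixel clock (illustrative numbers, not taken from the code above): pixeldur_ns is about 6.7 ns, linedur_ns about 2200 * 6.7 ≈ 14815 ns, and framedur_ns about 1125 * 14815 ≈ 16.7 ms. If the scanout query reports vpos = -5 and hpos = 0 (five lines before scanout starts), then delta_ns ≈ -74075 ns, and since the routine returns etime minus delta_ns, the reported timestamp lands about 74 us after the query time, at the predicted end of vblank.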
Example #25
static int try_to_freeze_tasks(bool user_only)
{
	struct task_struct *g, *p;
	struct task_struct *t = NULL;
	unsigned long end_time;
	unsigned int todo;
	bool wq_busy = false;
	struct timeval start, end;
	u64 elapsed_msecs64;
	unsigned int elapsed_msecs;
	bool wakeup = false;
	int sleep_usecs = USEC_PER_MSEC;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;

	if (!user_only)
		freeze_workqueues_begin();

	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (p == current || !freeze_task(p))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 *
			 * Because freeze_task() goes through p's scheduler lock, it's
			 * guaranteed that TASK_STOPPED/TRACED -> TASK_RUNNING
			 * transition can't race with task state testing here.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p)) {
				todo++;
				t = p;
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);

		if (!user_only) {
			wq_busy = freeze_workqueues_busy();
			todo += wq_busy;
		}

		if (!todo || time_after(jiffies, end_time))
			break;

		if (pm_wakeup_pending()) {
			wakeup = true;
			break;
		}

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.  Start with an initial
		 * 1 ms sleep followed by exponential backoff until 8 ms.
		 */
		usleep_range(sleep_usecs / 2, sleep_usecs);
		if (sleep_usecs < 8 * USEC_PER_MSEC)
			sleep_usecs *= 2;
	}

	do_gettimeofday(&end);
	elapsed_msecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_msecs64, NSEC_PER_MSEC);
	elapsed_msecs = elapsed_msecs64;

	if (todo) {
		printk("\n");
		printk(KERN_ERR "Freezing of tasks %s after %d.%03d seconds "
		       "(%d tasks refusing to freeze, wq_busy=%d):\n",
		       wakeup ? "aborted" : "failed",
		       elapsed_msecs / 1000, elapsed_msecs % 1000,
		       todo - wq_busy, wq_busy);
		
		if (!wakeup) {
			read_lock(&tasklist_lock);
			do_each_thread(g, p) {
				if (p != current && !freezer_should_skip(p)
				    && freezing(p) && !frozen(p))
					sched_show_task(p);
			} while_each_thread(g, p);
			read_unlock(&tasklist_lock);
		}
	} else {
Example #26
/* Updates pid controller internal values, returns the controlling parameters
 * for the current packet. Those parameters are used to calculate the dynamic
 * sampling limit
 *
 * @param now arrival time of new packet
 * @param deviation current deviation of ring buffer target fill level
 * @param pid [out] new controller values, unweighted
 */
void cctrack_controller_new_packet(struct cctrack_per_ring_data *this_ring_data,
		struct timeval now, s32 deviation,
		struct cctrack_pid_controller *pid)
{
	s64 now_ns; /* now in nanoseconds */
	s64 prev_ns; /* previous packet arrival in nanoseconds */
	s64 delta_t_ns; /* delta time nanoseconds since last packet arrival */
	s32 delta_t_msec;

	s64 integral;

	struct cctrack_pid_controller_data *pid_controller_data =
			&this_ring_data->pid_controller_data;

	/*
	 * Note where we divide, to keep the numerical error small:
	 * intermediate values are in msec, but results are in sec.
	 */

	now_ns = timeval_to_ns(&now);
	cctrack_assert(now_ns > 0);

	spin_lock(&pid_controller_data->lock);

	prev_ns = timeval_to_ns(&pid_controller_data->previous_t);
	cctrack_assert(prev_ns > 0);

	/* now >= prev or they differ at most in only a second due to
	 * reordering */
	cctrack_assert(now_ns >= prev_ns ||
			(prev_ns - now_ns > 0 && prev_ns - now_ns < NSEC_PER_SEC));

	delta_t_ns = now_ns - prev_ns;
	if (now_ns < prev_ns)
		delta_t_ns *= -1; /* see previous assertion */

	cctrack_assert(delta_t_ns >= 0);

#ifdef DEBUG
	if(!((delta_t_ns/NSEC_PER_SEC) < 3600)) {
		printk("(delta_t_ns/NSEC_PER_SEC) < 3600");
		printk("delta_t_ns: %lld, delta_t_ms:%lld delta_t_s:%lld\n",
				delta_t_ns, delta_t_ns/NSEC_PER_MSEC, delta_t_ns/NSEC_PER_SEC);
	}
	/* assertion failed */
	if(!(now_ns >= prev_ns ||
			(prev_ns - now_ns > 0 && prev_ns - now_ns < NSEC_PER_SEC))){
		printk("now_ns prev_ns: %lld %lld delta_t_ns: %lld\n",
				now_ns, prev_ns, delta_t_ns);
	}
#endif
	cctrack_assert((delta_t_ns/NSEC_PER_SEC) < 3600); /* one packet per hour*/
	cctrack_assert(delta_t_ns >= 0);

	delta_t_msec = (s32)(delta_t_ns/NSEC_PER_MSEC);
	cctrack_assert((s64)delta_t_msec == delta_t_ns/NSEC_PER_MSEC);
	cctrack_assert(delta_t_msec/MSEC_PER_SEC < 3600); /* one packet per hour*/
	cctrack_assert(delta_t_msec >= 0);

	if (delta_t_msec == 0)
		delta_t_msec = 1; /* discretization, no div by 0 */
	cctrack_assert(delta_t_msec > 0);

	/* proportional */
	pid->proportional = deviation;

	/* integral */
	// integral is in seconds to keep number small
	integral = (s64)((s64)delta_t_msec * (s64)deviation);
	cctrack_assert(!(deviation < 0) || integral < (s64)0);
	cctrack_assert(!(deviation > 0) || integral > (s64)0);
	integral = integral / MSEC_PER_SEC;
	integral += pid_controller_data->integral;

	//INT_MIN <= integral <= INT_MAX;
	if(integral <= (s64)INT_MIN) integral = (s64)INT_MIN;
	if((s64)INT_MAX <= integral) integral = (s64)INT_MAX;
	cctrack_assert(integral >= (s64)INT_MIN);
	cctrack_assert((s64)INT_MAX >= integral);
	cctrack_assert((s32)integral == integral);

	pid_controller_data->integral = integral;
	pid->integral = (s32)integral;

	/* differential */
	// differential is in seconds to be more accurate
	pid->derivate = ((deviation - pid_controller_data->previous_deviation) *
			 MSEC_PER_SEC) / delta_t_msec;

	pid_controller_data->previous_t = now;
	pid_controller_data->previous_deviation = deviation;

	spin_unlock(&pid_controller_data->lock);
}
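For a feel for the controller's numbers (illustrative values): with delta_t_msec = 10 and deviation = -500, the integral gains (10 * -500) / 1000 = -5 per packet (the division by MSEC_PER_SEC keeps the accumulator in deviation-seconds, as the comments note), and if the previous deviation was -400, the derivative term is (-500 - (-400)) * 1000 / 10 = -10000 deviation units per second.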
Example #27
static int try_to_freeze_tasks(bool sig_only)
{
	struct task_struct *g, *p;
	unsigned long end_time;
	unsigned int todo;
	struct timeval start, end;
	u64 elapsed_csecs64;
	unsigned int elapsed_csecs;
	unsigned int wakeup = 0;

	do_gettimeofday(&start);

	end_time = jiffies + TIMEOUT;
	while (true) {
		todo = 0;
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			if (frozen(p) || !freezeable(p))
				continue;

			if (!freeze_task(p, sig_only))
				continue;

			/*
			 * Now that we've done set_freeze_flag, don't
			 * perturb a task in TASK_STOPPED or TASK_TRACED.
			 * It is "frozen enough".  If the task does wake
			 * up, it will immediately call try_to_freeze.
			 */
			if (!task_is_stopped_or_traced(p) &&
			    !freezer_should_skip(p))
				todo++;
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
		if (todo && has_wake_lock(WAKE_LOCK_SUSPEND)) {
			wakeup = 1;
			break;
		}
		if (!todo || time_after(jiffies, end_time))
			break;

		/*
		 * We need to retry, but first give the freezing tasks some
		 * time to enter the refrigerator.
		 */
		msleep(10);
	}

	do_gettimeofday(&end);
	elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
	do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
	elapsed_csecs = elapsed_csecs64;

	if (todo) {
		/* This does not unfreeze processes that are already frozen
		 * (we have slightly ugly calling convention in that respect,
		 * and caller must call thaw_processes() if something fails),
		 * but it cleans up leftover PF_FREEZE requests.
		 */
		if (wakeup) {
			printk("\n");
			printk(KERN_ERR "Freezing of %s aborted\n",
			       sig_only ? "user space " : "tasks ");
		} else {
			printk("\n");
			printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
			       "(%d tasks refusing to freeze):\n",
			       elapsed_csecs / 100, elapsed_csecs % 100, todo);
		}
		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			task_lock(p);
			if (freezing(p) && !freezer_should_skip(p) &&
				elapsed_csecs > 100)
				sched_show_task(p);
			cancel_freezing(p);
			task_unlock(p);
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	} else {
Example #28
/*
 * Disable vblank irq's on crtc, make sure that last vblank count
 * of hardware and corresponding consistent software vblank counter
 * are preserved, even if there are any spurious vblank irq's after
 * disable.
 */
static void vblank_disable_and_save(struct drm_device *dev, int crtc)
{
	u32 vblcount;
	s64 diff_ns;
	int vblrc;
	struct timeval tvblank;
	int count = DRM_TIMESTAMP_MAXRETRIES;

	/* Prevent vblank irq processing while disabling vblank irqs,
	 * so no updates of timestamps or count can happen after we've
	 * disabled. Needed to prevent races in case of delayed irq's.
	 */
	mtx_enter(&dev->vblank_time_lock);

	dev->driver->disable_vblank(dev, crtc);
	dev->vblank_enabled[crtc] = 0;

	/* No further vblank irq's will be processed after
	 * this point. Get current hardware vblank count and
	 * vblank timestamp, repeat until they are consistent.
	 *
	 * FIXME: There is still a race condition here and in
	 * drm_update_vblank_count() which can cause off-by-one
	 * reinitialization of software vblank counter. If gpu
	 * vblank counter doesn't increment exactly at the leading
	 * edge of a vblank interval, then we can lose 1 count if
	 * we happen to execute between start of vblank and the
	 * delayed gpu counter increment.
	 */
	do {
		dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
		vblrc = drm_get_last_vbltimestamp(dev, crtc, &tvblank, 0);
	} while (dev->last_vblank[crtc] != dev->driver->get_vblank_counter(dev, crtc) && (--count) && vblrc);

	if (!count)
		vblrc = 0;

	/* Compute time difference to stored timestamp of last vblank
	 * as updated by last invocation of drm_handle_vblank() in vblank irq.
	 */
	vblcount = atomic_read(&dev->_vblank_count[crtc]);
	diff_ns = timeval_to_ns(&tvblank) -
		  timeval_to_ns(&vblanktimestamp(dev, crtc, vblcount));

	/* If there is at least 1 msec difference between the last stored
	 * timestamp and tvblank, then we are currently executing our
	 * disable inside a new vblank interval, the tvblank timestamp
	 * corresponds to this new vblank interval and the irq handler
	 * for this vblank didn't run yet and won't run due to our disable.
	 * Therefore we need to do the job of drm_handle_vblank() and
	 * increment the vblank counter by one to account for this vblank.
	 *
	 * Skip this step if there isn't any high precision timestamp
	 * available. In that case we can't account for this and just
	 * hope for the best.
	 */
	if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) {
		atomic_inc(&dev->_vblank_count[crtc]);
		smp_mb__after_atomic_inc();
	}

	/* Invalidate all timestamps while vblank irq's are off. */
	clear_vblank_timestamps(dev, crtc);

	mtx_leave(&dev->vblank_time_lock);
}
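The abs64(diff_ns) > 1000000 test works because consecutive vblanks are at least a frame apart (about 16.7 ms at 60 Hz), far above 1 ms, while a redundant timestamp for the vblank already counted differs from the stored one only by query jitter, on the order of microseconds; so the 1 ms threshold cleanly separates "tvblank belongs to a new, uncounted vblank interval" from "tvblank is the interval drm_handle_vblank() already accounted for".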