Example #1
static jack_nframes_t 
dummy_driver_wait (dummy_driver_t *driver, int extra_fd, int *status,
		   float *delayed_usecs)
{
	jack_nframes_t nframes = driver->period_size;
	struct timespec now;

	*status = 0;
	/* this driver doesn't work so well if we report a delay */
	*delayed_usecs = 0;		/* lie about it */

	clock_gettime(CLOCK_REALTIME, &now);
	
	if (cmp_lt_ts(driver->next_wakeup, now)) {
		if (driver->next_wakeup.tv_sec == 0) {
			/* first time through */
			clock_gettime(CLOCK_REALTIME, &driver->next_wakeup);
		} else if ((ts_to_nsec(now) - ts_to_nsec(driver->next_wakeup))/1000LL
			    > (PRETEND_BUFFER_SIZE * 1000000LL
			       / driver->sample_rate)) {
			/* xrun */
			jack_error("**** dummy: xrun of %ju usec",
				(uintmax_t)(ts_to_nsec(now) - ts_to_nsec(driver->next_wakeup))/1000LL);
			nframes = 0;
			driver->next_wakeup.tv_sec = 0;
		} else {
			/* late, but handled by our "buffer"; try to
			 * get back on track */
		}
		driver->next_wakeup = add_ts(driver->next_wakeup, driver->wait_time);
	} else {
		if (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &driver->next_wakeup, NULL)) {
			jack_error("error while sleeping");
			*status = -1;
		} else {
			clock_gettime(CLOCK_REALTIME, &now);
			// guaranteed to sleep long enough for this to be correct
			*delayed_usecs = (ts_to_nsec(now) - ts_to_nsec(driver->next_wakeup));
			*delayed_usecs /= 1000.0;
		}
		driver->next_wakeup = add_ts(driver->next_wakeup, driver->wait_time);
	}

	driver->last_wait_ust = driver->engine->get_microseconds ();
	driver->engine->transport_cycle_start (driver->engine,
					       driver->last_wait_ust);

	return nframes;
}
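
Example #1 is the process-cycle wait of JACK's no-hardware "dummy" driver: it sleeps until the next absolute wakeup time, reports the measured delay, and declares an xrun when the wakeup is more than one pretend buffer late. The timespec helpers it calls (cmp_lt_ts, ts_to_nsec, add_ts) are defined elsewhere in the driver; the sketch below shows one plausible set of definitions, assuming for illustration that both next_wakeup and wait_time are plain struct timespec values. The driver's real helpers may differ.

/* Hypothetical helper sketches -- inferred from the call sites above,
 * not the driver's actual definitions. */
#include <stdint.h>
#include <time.h>

/* nonzero if a falls strictly before b */
static inline int cmp_lt_ts (struct timespec a, struct timespec b)
{
	return (a.tv_sec < b.tv_sec)
		|| (a.tv_sec == b.tv_sec && a.tv_nsec < b.tv_nsec);
}

/* total nanoseconds represented by ts */
static inline int64_t ts_to_nsec (struct timespec ts)
{
	return (int64_t) ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* a + b, with the nanosecond field normalized back below one second */
static inline struct timespec add_ts (struct timespec a, struct timespec b)
{
	struct timespec sum;

	sum.tv_sec = a.tv_sec + b.tv_sec;
	sum.tv_nsec = a.tv_nsec + b.tv_nsec;
	if (sum.tv_nsec >= 1000000000L) {
		sum.tv_sec++;
		sum.tv_nsec -= 1000000000L;
	}
	return sum;
}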
Example #2
File: gmua.c Project: longqzh/chronnOS
/* Walk the CPU's task list and check the schedule for feasibility */
struct rt_info* create_feasible_schedule(int cpu_id)
{
	struct rt_info *it, *best_dead, *head, *best_ivd, *last_ivd;
	struct cpu_info *cur_cpu = NULL;
	struct timespec exec_ts;
	int removed;

	cur_cpu = get_cpu_state(cpu_id);
	head = cur_cpu->head;
	best_dead = head;

	if(!head)
		goto out;

	/* thread every task on the per-CPU list onto a second list
	 * (LIST_CPUIVD) ordered by value density, tracking its head */
	best_ivd = head;
	it = task_list_entry(head->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
	do {
		if(insert_on_list(it, best_ivd, LIST_CPUIVD, SORT_KEY_GVD, 0))
			best_ivd = it;
		it = task_list_entry(it->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
	} while(it != head);

	/* tail of the value-density list: the first task to drop if the
	 * schedule turns out to be infeasible */
	last_ivd = task_list_entry(best_ivd->task_list[LIST_CPUIVD].prev, LIST_CPUIVD);

	/* feasibility check: starting from the schedule head, accumulate each
	 * task's remaining execution time; if any task's deadline falls before
	 * its projected completion, drop the tail of the value-density list
	 * from the CPU task list and start over */
	do {
		removed = 0;
		it = best_dead;
		exec_ts = current_kernel_time();

		do {
			/* projected completion time of 'it' */
			add_ts(&exec_ts, &(it->left), &exec_ts);
			if(earlier_deadline(&(it->deadline), &exec_ts)) {
				list_remove(last_ivd, LIST_CPUTSK);
				if(last_ivd == best_dead) {
					best_dead = task_list_entry(last_ivd->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
				}
				last_ivd = task_list_entry(last_ivd->task_list[LIST_CPUIVD].prev, LIST_CPUIVD);
				removed = 1;
			}
			it = task_list_entry(it->task_list[LIST_CPUTSK].next, LIST_CPUTSK);
		} while(removed == 0 && it != best_dead);
	} while(last_ivd != best_ivd && removed == 1);

out:
	cur_cpu->best_dead = best_dead;

	return best_dead;
}
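
In this second example, add_ts takes a kernel-style three-pointer form (accumulating it->left into exec_ts), and earlier_deadline compares two struct timespec values through pointers. Below is a rough sketch of both, inferred purely from the call sites above rather than from the actual chronnOS sources, which may define them differently.

/* Illustrative sketches only -- inferred from how the helpers are
 * called in create_feasible_schedule(). */
#include <linux/time.h>

/* *res = *t1 + *t2, carrying nanoseconds into seconds */
static inline void add_ts(struct timespec *t1, struct timespec *t2,
			  struct timespec *res)
{
	res->tv_sec = t1->tv_sec + t2->tv_sec;
	res->tv_nsec = t1->tv_nsec + t2->tv_nsec;
	if (res->tv_nsec >= NSEC_PER_SEC) {
		res->tv_sec++;
		res->tv_nsec -= NSEC_PER_SEC;
	}
}

/* nonzero if deadline *d falls strictly before time *t */
static inline int earlier_deadline(struct timespec *d, struct timespec *t)
{
	return (d->tv_sec < t->tv_sec) ||
	       (d->tv_sec == t->tv_sec && d->tv_nsec < t->tv_nsec);
}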