Example no. 1
void *helper(void *d) 
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}

	if (pi_cv_enabled) {
		ftrace_write(marker_fd, "Adding helper() thread: pid %d prio 93\n", my_pid);
		pthread_cond_helpers_add(&count_threshold_cv, my_pid);
		ftrace_write(marker_fd, "helper(): helps on cv %p\n", &count_threshold_cv);
	}

	sleep(1);
	ftrace_write(marker_fd, "Starting helper(): pid %d prio 93\n", my_pid);
	
	pthread_mutex_lock(&count_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(3000000L);
	busywait(&twait);
	
	/* Then block on an rt_mutex */
	ftrace_write(marker_fd, "helper() blocks on rt_mutex %p\n", &rt_mutex);
	pthread_mutex_lock(&rt_mutex);
	twait = usec_to_timespec(3000000L);
	busywait(&twait);
	pthread_mutex_unlock(&rt_mutex);
	
	ftrace_write(marker_fd, "helper() signals on cv %p\n", &count_threshold_cv);
	pthread_cond_broadcast(&count_threshold_cv);
	ftrace_write(marker_fd, "helper(): just sent signal.\n");
	ftrace_write(marker_fd, "helper(): pid %d, unlocking mutex\n", my_pid);
	pthread_mutex_unlock(&count_mutex);

	if (pi_cv_enabled) {
		pthread_cond_helpers_del(&count_threshold_cv, my_pid);
		ftrace_write(marker_fd, "helper(): stop helping on cv %p\n", &count_threshold_cv);
		ftrace_write(marker_fd, "Removing helper() thread: pid %d prio 93\n", my_pid);
	}
	pthread_exit(NULL);
}
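Note: the helpers usec_to_timespec() and busywait() used throughout these examples are not shown in the excerpts. A minimal sketch of what they might look like, assuming busywait() spins for a relative duration on CLOCK_MONOTONIC (some later examples instead pass an absolute deadline computed with timespec_add() against CLOCK_THREAD_CPUTIME_ID, so the real helpers may differ):

#include <time.h>

/* Hypothetical helper: convert microseconds to a struct timespec. */
static struct timespec usec_to_timespec(long usec)
{
	struct timespec ts = {
		.tv_sec  = usec / 1000000L,
		.tv_nsec = (usec % 1000000L) * 1000L,
	};
	return ts;
}

/* Hypothetical helper: spin until the requested duration has elapsed. */
static void busywait(const struct timespec *duration)
{
	struct timespec now, end;

	clock_gettime(CLOCK_MONOTONIC, &now);
	end.tv_sec  = now.tv_sec + duration->tv_sec;
	end.tv_nsec = now.tv_nsec + duration->tv_nsec;
	if (end.tv_nsec >= 1000000000L) {
		end.tv_sec++;
		end.tv_nsec -= 1000000000L;
	}
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
	} while (now.tv_sec < end.tv_sec ||
		 (now.tv_sec == end.tv_sec && now.tv_nsec < end.tv_nsec));
}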
Example no. 2
static char read_code(void) {
	unsigned char scancode = 0;
	int i;

	/*
	 * Shift successive samples of the data line (bit 5 of DATA) into the
	 * scancode, one bit per iteration; busywait() provides the delay
	 * between samples.  Note that the loop runs nine times (i = 0..8):
	 * the ninth sample is shifted past the 8-bit scancode and discarded.
	 */
	for (i = 0; i <= 8; i++) {
		busywait();
		scancode |= ((DATA >> 5) & 0x01) << i;
	}

	/* One more delay before returning the assembled scancode. */
	busywait();

	return scancode;
}
Example no. 3
void *waiter(void *d) 
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();
	
	sleep(1);

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 95;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	ftrace_write(marker_fd, "Starting waiter(): pid %d prio 95\n", my_pid);
	twait = usec_to_timespec(500000L);
	busywait(&twait);
	
	/*
	Lock mutex and wait for signal.  Note that the pthread_cond_wait routine
	will automatically and atomically unlock mutex while it waits. 
	*/
	pthread_mutex_lock(&count_mutex);
	ftrace_write(marker_fd, "waiter(): pid %d. Going into wait...\n", my_pid);
	ftrace_write(marker_fd, "waiter(): waits on cv %p\n", &count_threshold_cv);
	pthread_cond_wait(&count_threshold_cv, &count_mutex);
	ftrace_write(marker_fd, "waiter(): wakes on cv %p\n", &count_threshold_cv);
	/* "Consume" the item... */
	ftrace_write(marker_fd, "waiter(): pid %d Condition signal received.\n", my_pid);
	ftrace_write(marker_fd, "waiter(): pid %d Consuming an item...\n", my_pid);
	twait = usec_to_timespec(2000000L);
	busywait(&twait);
	
	ftrace_write(marker_fd, "waiter(): pid %ld Unlocking mutex.\n", my_pid);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Example no. 4
void *watch_count(void *t) 
{
	int ret;
	long my_id = (long)t;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 95;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	printf("Starting watch_count(): thread %ld prio 95\n", my_id);
	twait = usec_to_timespec(500000L);
	busywait(&twait);
	
	/*
	Lock mutex and wait for signal.  Note that the pthread_cond_wait routine
	will automatically and atomically unlock mutex while it waits. 
	*/
	pthread_mutex_lock(&count_mutex);
	printf("watch_count(): thread %ld Count= %d. Going into wait...\n", my_id,count);
	pthread_cond_wait(&count_threshold_cv, &count_mutex);
	/* "Consume" the item... */
	printf("watch_count(): thread %ld Condition signal received. Count= %d\n", my_id,count);
	printf("watch_count(): thread %ld Consuming an item...\n", my_id,count);
	twait = usec_to_timespec(2000000L);
	busywait(&twait);
	count -= 1;
	printf("watch_count(): thread %ld count now = %d.\n", my_id, count);
	
	printf("watch_count(): thread %ld Unlocking mutex.\n", my_id);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Example no. 5
void *annoyer(void *t)
{
	int ret;
	long my_id = (long)t;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}
	
	param.sched_priority = 94;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	printf("Starting annoyer(): thread %ld prio 94\n", my_id);

	printf("annoyer thread should preempt inc_count for 5sec\n");

	twait = usec_to_timespec(5000000L);
	busywait(&twait);

	printf("annoyer thread dies... inc_count can resume\n");
	pthread_exit(NULL);
}
Example no. 6
void *annoyer(void *d)
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}
	
	param.sched_priority = 94;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	ftrace_write(marker_fd, "Starting annoyer(): pid %d prio 94\n", my_pid);

	ftrace_write(marker_fd, "annoyer(): should preempt inc_count for 5sec\n");

	twait = usec_to_timespec(5000000L);
	ftrace_write(marker_fd, "annoyer(): starts running...\n");
	busywait(&twait);

	ftrace_write(marker_fd, "annoyer(): dies...\n");
	pthread_exit(NULL);
}
Example no. 7
void *annoyer(void *d)
{
	int ret;
	long id = (long) d;
	struct timespec twait, now;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	pids[id] = my_pid;
	

	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n"); 
			exit(EXIT_FAILURE);
		}
	}
	
	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}

	/**
	 * Give the other threads some time to warm up.
	 */
	sleep(2);

	if (global_args.ftrace)
		ftrace_write(marker_fd, "Starting annoyer(): prio 93\n");

	while(1) {
		/* 300ms */
		twait = usec_to_timespec(300000L);
		if (global_args.ftrace)
			ftrace_write(marker_fd,
				     "[annoyer %d] starts running...\n",
				     my_pid);
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			ftrace_write(marker_fd,
				     "[annoyer %d] sleeps.\n",
				     my_pid);
		sleep(1);
	}
	pthread_exit(NULL);
}
Example no. 8
void
systick_isr(void)
{
  for (;;) {
    flash_led(0x01);
    flash_led(0x02);
    busywait(800);
  }
}
Example no. 9
/* Main routine, called by U-Boot */
int main()
{
	leds_init();

	/* Endless loop */
	while (1)
	{
		yellow_led(1);
		serial_write("Hallo Welt\r\n");
		yellow_led(0);
		busywait();
	}
}
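In the two bare-metal snippets above (Examples no. 8 and 9) busywait() is presumably a crude delay loop rather than a clock-based wait; the two call sites even disagree on the signature (a count in one, no argument in the other). A sketch of the counted form, purely as an assumption:

/* Hypothetical bare-metal delay: burn roughly 'count' loop iterations.
 * The volatile qualifier keeps the compiler from optimising the loop away;
 * the real time per iteration depends on the CPU and is not calibrated here. */
static void busywait(volatile unsigned long count)
{
	while (count--)
		;	/* do nothing, just consume cycles */
}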
Example no. 10
void *inc_count(void *t) 
{
	int ret;
	long my_id = (long)t;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	printf("Starting inc_count(): thread %ld prio 93\n", my_id);
	
	pthread_mutex_lock(&count_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(6000000L);
	busywait(&twait);
	count++;
	
	printf("inc_count(): thread %ld, count = %d\n",
	       my_id, count);
	pthread_cond_signal(&count_threshold_cv);
	printf("Just sent signal.\n");
	printf("inc_count(): thread %ld, count = %d, unlocking mutex\n", 
	       my_id, count);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Example no. 11
/**
 *  Asynchronous communication test between two ranks (Ranks 0 and 1).
 *  In principle, Rank 0 simultaneously (a) sends a message to rank 1,
 *  and (b) "busy waits" for a specified amount of time (in seconds).
 */
double
async_comm_test (const double t_delay, const int rank, int* msgbuf, const int len)
{
  const int MSG_TAG = 1000; /* Arbitrary message tag number */
  double t_start = MPI_Wtime ();
  if (rank == 0) {
/*    MPI_Request req;
    MPI_Status stat;
    MPI_Isend (msgbuf, len, MPI_INT, 1, MSG_TAG, MPI_COMM_WORLD, &req);
    busywait (t_delay);
    MPI_Wait (&req, &stat);*/
    /* Converting to blocking version of MPI send */
    #pragma omp task default(none) shared(msgbuf, ompi_mpi_comm_world, ompi_mpi_int)
    MPI_Send (msgbuf, len, MPI_INT, 1, MSG_TAG, MPI_COMM_WORLD);
    busywait (t_delay);
    #pragma omp taskwait
  } else { /* rank == 1 */
    MPI_Status stat;
    MPI_Recv (msgbuf, len, MPI_INT, 0, MSG_TAG, MPI_COMM_WORLD, &stat);
  }
  return MPI_Wtime () - t_start;
}
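In this MPI test busywait() takes a delay in seconds, so a plausible definition, assuming it simply spins on the same MPI_Wtime() timer the caller already uses (an assumption, not code from the original test):

#include <mpi.h>

/* Hypothetical helper: keep the CPU busy for t_delay seconds of wall-clock
 * time, so that any overlap with the outstanding send becomes visible. */
static void busywait(const double t_delay)
{
	const double t_end = MPI_Wtime() + t_delay;

	while (MPI_Wtime() < t_end)
		;	/* spin */
}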
Example no. 12
/*
 * qio()
 *	Queue an operation for the BG thread
 */
static void
qio(struct buf *b, uint op)
{
	struct qio *q;
	uint next;

	/*
	 * This buffer is busy until op complete
	 */
	ASSERT_DEBUG(!BUSY(b), "qio: busy");
	b->b_flags |= B_BUSY;

	/*
	 * Get next ring element
	 */
	next = qnext;
	q = &qios[next];
	if (++next >= NQIO) {
		next = 0;
	}
	qnext = next;

	/*
	 * Wait for it to be ready
	 */
	busywait(&q->q_op);

	/*
	 * Fill it in
	 */
	q->q_buf = b;
	q->q_op = op;

	/*
	 * Release BG to do its thing
	 */
	mutex_thread(bg_pid);
}
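qio() relies on a small ring of request slots shared with the background thread; their declarations are not part of the excerpt. Based only on the fields used above, they presumably look roughly like the sketch below (names, types and the NQIO value are assumptions), with busywait(&q->q_op) apparently spinning until the slot's q_op drops back to zero, i.e. until the background thread has finished the previous request queued in that slot:

typedef unsigned int uint;	/* presumably already typedef'd in the original code base */

#define NQIO 16			/* assumed ring size */

struct qio {
	struct buf *q_buf;	/* buffer the queued operation applies to */
	uint q_op;		/* requested operation; 0 means the slot is free */
};

static struct qio qios[NQIO];	/* ring of pending request slots */
static uint qnext;		/* index of the next slot to hand out */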
Example no. 13
void *rt_owner(void *d) 
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();
	
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n"); 
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 92;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}

	ftrace_write(marker_fd, "Starting rt_owner(): pid %d prio 92\n", my_pid);
	
	pthread_mutex_lock(&rt_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(6000000L);
	busywait(&twait);
	
	ftrace_write(marker_fd, "rt_owner(): pid %d, unlocking mutex\n", my_pid);
	pthread_mutex_unlock(&rt_mutex);

	pthread_exit(NULL);
}
Example no. 14
bool otherVsShortAndOverdue() {
	pid_t pid;
	struct sched_param_ex sp;
	int status;
	pid = fork();
	if(pid != 0) {
		// Parent: makes child a SHORT
		sp.sched_priority = 0;
		sp.requested_time = 100;
		sp.num_cooloff = 1;
		sched_setscheduler(pid, SCHED_SHORT, (struct sched_param *)&sp);
		printf("2\n");
		busywait(20); // father runs for at least 10ms, should be enough time for son to wake up and take his place
		printf("4\n");
		busywait(300); // this is probably large enough for father to change to expired but he still needs to run since son is overdue
		printf("5\n");
		usleep(120000); //father sleeps for more than 100ms so son should become SHORT again by the time father wakes up
		printf("8\n"); //son has become overdue again, we switched to father
		usleep(110000); //father sleeps for 110ms
		printf("10\n"); //son is overdue because he has no cooloffs so father should run
	}
	else {
		// Child, a SHORT.
		while (sched_getscheduler(getpid()) != SCHED_SHORT) {}
		printf("1\n"); //son is SHORT and should run first, before OTHER father
		usleep(10000); //puts child to sleep for 10ms, should be enough time to context_switch to father
		printf("3\n");
		busywait(110); // son runs for at least 100ms, should be enough time for son to become overdue and father needs to take his place
		printf("6\n");
		busywait(120); 
		printf("7\n"); //son is now regular SHORT again , we should not context switch back to father
		busywait(120); //son becomes overdue here
		printf("9\n"); //father sleeps so son should run next
		busywait(110); //son doesn't become SHORT during this since we had only 1 cooloff, and father wakes up here
		printf("11\n");
		exit(0);
	}
	waitpid(pid, &status, 0);
	return true;
}
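struct sched_param_ex and SCHED_SHORT are not standard Linux interfaces; this test targets a custom scheduling class. From the fields the test sets, the parameter block presumably looks roughly like the sketch below (a guess, not the real header):

/* Hypothetical layout of the extended parameter block used above; only the
 * three fields the test actually assigns are known from the excerpt. */
struct sched_param_ex {
	int sched_priority;	/* as in struct sched_param */
	int requested_time;	/* requested SHORT runtime, apparently in ms */
	int num_cooloff;	/* number of cool-off periods granted */
};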
Example no. 15
void *t_1(void *thread_params) {
	struct sched_param2 dl_params;
	struct timespec t_next, t_period, t_start, t_stop, ran_for, t_crit;
	long tid = gettid();
	int retval, i;
	cpu_set_t mask;
	__u64 crit, run1, runtime, deadline, period;

	/*
	 * t_1 should go in budget overflow while in critical section
	 */
	run1 = 8U * NSEC_PER_MSEC;
	crit = 12U * NSEC_PER_MSEC;
	runtime =  run1 + crit + (8U * NSEC_PER_MSEC);
	deadline = 40U * NSEC_PER_MSEC;
	period = deadline;
	t_period = nsec_to_timespec(&period);
	t_crit = nsec_to_timespec(&crit);

	signal(SIGHUP, sighandler);
	signal(SIGINT, sighandler);
	signal(SIGQUIT, sighandler);

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	retval = sched_setaffinity(0, sizeof(mask), &mask);
	if (retval) {
		fprintf(stderr, "WARNING: could not set task affinity\n");
		exit(-1);
	}

	memset(&dl_params, 0, sizeof(dl_params));
	dl_params.sched_priority = 0;
	dl_params.sched_runtime = runtime;
	dl_params.sched_deadline = deadline;
	dl_params.sched_period = period;
	ftrace_write(marker_fd, "[thread %ld (t_1)]: setting rt=%llums dl=%llums\n", tid,
	       runtime/NSEC_PER_MSEC,
	       deadline/NSEC_PER_MSEC);
	retval = sched_setscheduler2(0, SCHED_DEADLINE, &dl_params);
	if (retval) {
		fprintf(stderr, "WARNING: could not set SCHED_DEADLINE"
				" policy!\n");
		exit(-1);
	}

	clock_gettime(CLOCK_MONOTONIC, &t_next);
	for (i = 0; i < NRUN; i++) {
		ftrace_write(marker_fd, "[t_1] run starts\n");
		clock_gettime(CLOCK_MONOTONIC, &t_start);
		ftrace_write(marker_fd, "[t_1] exec for %lluns\n", run1);
		busywait(run1);
		ftrace_write(marker_fd, "[t_1] locks mutex\n");
		pthread_mutex_lock(&my_mutex);
		ftrace_write(marker_fd, "[t_1] exec for %lluns\n", crit);
		busywait(crit);
		ftrace_write(marker_fd, "[t_1] unlocks mutex\n");
		pthread_mutex_unlock(&my_mutex);
		clock_gettime(CLOCK_MONOTONIC, &t_stop);
		t_next = timespec_add(&t_next, &t_period);
		ran_for = timespec_sub(&t_stop, &t_start);
		printf("[thread %ld]: run %d for %lluus\n",
			tid,
			i,
			timespec_to_usec(&ran_for));
		ftrace_write(marker_fd, "[t_1] run ends\n");
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t_next, NULL);
	}

	retval = sched_setscheduler2(0, SCHED_OTHER, &dl_params);
	if (retval) {
		fprintf(stderr, "WARNING: could not set SCHED_OTHER"
				"policy!\n");
		exit(-1);
	}
	pthread_exit(NULL);
}
Example no. 16
void *producer(void *d)
{
	int ret;
	struct sched_param param;
	long id = (long) d;
	long wait;
	int item = id;
	buffer_t *b = &buffer;
	struct timespec twait, now;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	pids[id] = my_pid;
	
	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n"); 
			exit(EXIT_FAILURE);
		}
	}
	
	param.sched_priority = 92;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}

	if (global_args.pi_cv_enabled) {
		if (global_args.ftrace)
			ftrace_write(marker_fd, "Adding helper thread: pid %d,"
				     " prio 92\n", my_pid);
		pthread_cond_helpers_add(&buffer.more, my_pid);
		if (global_args.ftrace)
			ftrace_write(marker_fd, "[prod %d] helps on cv %p\n",
				     my_pid, &buffer.more);
	}

	while(!shutdown) {
		pthread_mutex_lock(&b->mutex);

		while (b->occupied >= BSIZE)
			pthread_cond_wait(&b->less, &b->mutex);

		assert(b->occupied < BSIZE);

		b->buf[b->nextin++] = item;
		wait = rand_wait() * 1000;
		twait = usec_to_timespec(wait);
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			ftrace_write(marker_fd, "[prod %d] executed for %d usec"
				     " and produced %d\n", my_pid, wait, item);

		b->nextin %= BSIZE;
		b->occupied++;

		/*
		 * now: either b->occupied < BSIZE and b->nextin is the index
		 * of the next empty slot in the buffer, or
		 * b->occupied == BSIZE and b->nextin is the index of the
		 * next (occupied) slot that will be emptied by a consumer
		 * (such as b->nextin == b->nextout)
		 */
	
		pthread_cond_signal(&b->more);
	
		pthread_mutex_unlock(&b->mutex);
		sleep(1);
	}

	if (global_args.pi_cv_enabled) {
		pthread_cond_helpers_del(&buffer.more, my_pid);
		if (global_args.ftrace) {
			ftrace_write(marker_fd, "[prod %d] stop helping"
				     " on cv %p\n", my_pid, &buffer.more);
			ftrace_write(marker_fd, "Removing helper thread:"
				     " pid %d, prio 92\n", my_pid);
		}
	}

	pthread_exit(NULL);
}
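The producer above and the consumer in the next example share a bounded buffer whose definition is not included. A plausible sketch, inferred from the fields they access (the BSIZE value and the exact layout are assumptions):

#include <pthread.h>

#define BSIZE 8				/* assumed buffer capacity */

typedef struct {
	int buf[BSIZE];			/* the items themselves */
	int occupied;			/* number of filled slots */
	int nextin;			/* index of the next free slot */
	int nextout;			/* index of the next item to consume */
	pthread_mutex_t mutex;		/* protects all of the above */
	pthread_cond_t more;		/* signalled when an item is added */
	pthread_cond_t less;		/* signalled when an item is removed */
} buffer_t;

static buffer_t buffer;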
Example no. 17
void *consumer(void *d)
{
	int ret;
	struct sched_param param;
	long id = (long) d;
	long wait;
	int item;
	buffer_t *b = &buffer;
	struct timespec twait, now;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	pids[id] = my_pid;
	

	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n"); 
			exit(EXIT_FAILURE);
		}
	}
	
	param.sched_priority = 94;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}
	
	/**
	 * Give producers some time to set up.
	 */
	sleep(1);

	while(!shutdown) {
		pthread_mutex_lock(&b->mutex);
		while(b->occupied <= 0) {
			if (global_args.ftrace)
				ftrace_write(marker_fd, "[cons %d] waits\n",
					     my_pid);
			pthread_cond_wait(&b->more, &b->mutex);
		}
	
		assert(b->occupied > 0);
	
		item = b->buf[b->nextout++];
		wait = rand_wait() * 1000;
		twait = usec_to_timespec(wait);
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			ftrace_write(marker_fd, "[cons %d] executed for %d usec"
				     " and consumed %d\n", my_pid, wait, item);

		b->nextout %= BSIZE;
		b->occupied--;
	
		/*
		 * now: either b->occupied > 0 and b->nextout is the index
		 * of the next occupied slot in the buffer, or
		 * b->occupied == 0 and b->nextout is the index of the next
		 * (empty) slot that will be filled by a producer (such as
		 * b->nextout == b->nextin)
		 */
	
		pthread_cond_signal(&b->less);
		pthread_mutex_unlock(&b->mutex);
	}

	pthread_exit(NULL);
}
Example no. 18
void * busypthread(void *msec)
{
    busywait(msec);
    pthread_exit(NULL);
}
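busypthread() forwards its start-routine argument straight to busywait(), so the caller must pass whatever that particular busywait() variant expects (the parameter name suggests a millisecond count). A hedged usage sketch, assuming the argument is a pointer to a long holding milliseconds:

#include <pthread.h>

/* Hypothetical caller: run one worker thread that busy-waits for ~100 ms,
 * assuming this busywait() variant dereferences its argument as a long
 * number of milliseconds. */
static int spawn_busy_thread(void)
{
	pthread_t tid;
	static long msec = 100;

	if (pthread_create(&tid, NULL, busypthread, &msec) != 0)
		return -1;
	return pthread_join(tid, NULL);
}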
Example no. 19
boolean uClock_EnableClock(uClockIdType eClockId)
{
    boolean bRetVal = TRUE;
    uint32 nTimeout = 150;
    
 //   if(uClockDriverCtxt.bInitialized == FALSE)
 //   {
 //     qurt_mutex_init(&uClockDriverCtxt.Mutex);
 //     uClockDriverCtxt.bInitialized = TRUE;
 //   }
    
   if(eClockId < CLOCK_TOTAL_CLOCK_ENUMS)
   {
  //    qurt_mutex_lock(&uClockDriverCtxt.Mutex);

      if(uClockDriverCtxt.anClockReferences[eClockId] == 0)
      {
         switch(eClockId)
         {
            case CLOCK_GCC_BLSP1_QUP1_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP1_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_QUP2_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP2_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_QUP3_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP3_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_QUP4_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP4_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_QUP5_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP5_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_QUP6_APPS_CLK:
               HWIO_OUTF(GCC_BLSP1_QUP6_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP1_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP1_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP2_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP2_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP3_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP3_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP4_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP4_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP5_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP5_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP2_QUP6_APPS_CLK:
               HWIO_OUTF(GCC_BLSP2_QUP6_I2C_APPS_CBCR, CLK_ENABLE, 0x1);
               break;

            case CLOCK_GCC_BLSP1_AHB_CLK:
               HWIO_OUTF(GCC_SPARE_CLOCK_BRANCH_ENA_VOTE, BLSP1_AHB_CLK_ENA, 0x1);
               break;

            case CLOCK_GCC_BLSP2_AHB_CLK:
               HWIO_OUTF(GCC_SPARE_CLOCK_BRANCH_ENA_VOTE, BLSP2_AHB_CLK_ENA, 0x1);
               break;

            default:
               bRetVal = FALSE;
               break;
         }
         
         while(uClock_IsOn(eClockId) != TRUE)
         {
           if(nTimeout == 0)
           {
             break;
           }
           busywait(2);
           nTimeout--;
         }

      }
      
      /*
       * Increase the reference count on this clock.
       */
      if(nTimeout != 0)
      {
        uClockDriverCtxt.anClockReferences[eClockId]++;
        bRetVal = TRUE;
      }
      else
      {
        bRetVal = FALSE;
      }
        
  //    qurt_mutex_unlock(&uClockDriverCtxt.Mutex);
      
      return(bRetVal);
   }
   
   return(FALSE);

} /* uClock_EnableClock */