Ejemplo n.º 1
0
/*
 * helper: SCHED_FIFO prio-93 thread, pinned to CPU 0.
 *
 * Grabs count_mutex, busy-works 3s (simulating filling a queue), then
 * blocks on rt_mutex, busy-works 3s more, releases rt_mutex, broadcasts
 * count_threshold_cv and finally releases count_mutex.  When
 * pi_cv_enabled is set, it registers itself as a PI helper on the cv
 * for the duration and deregisters at the end.
 *
 * @d: unused.  Always terminates via pthread_exit(NULL).
 */
void *helper(void *d)
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	/* Pin to CPU 0 so all test threads contend on a single core. */
	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		/* message previously misnamed the failing call */
		printf("sched_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	if (pi_cv_enabled) {
		ftrace_write(marker_fd, "Adding helper() thread: pid %d prio 93\n", my_pid);
		pthread_cond_helpers_add(&count_threshold_cv, my_pid);
		ftrace_write(marker_fd, "helper(): helps on cv %p\n", &count_threshold_cv);
	}

	/* Let the other test threads start before generating load. */
	sleep(1);
	ftrace_write(marker_fd, "Starting helper(): pid %d prio 93\n", my_pid);

	pthread_mutex_lock(&count_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(3000000L);
	busywait(&twait);

	/* Then block on an rt_mutex */
	ftrace_write(marker_fd, "helper() blocks on rt_mutex %p\n", &rt_mutex);
	pthread_mutex_lock(&rt_mutex);
	twait = usec_to_timespec(3000000L);
	busywait(&twait);
	pthread_mutex_unlock(&rt_mutex);

	ftrace_write(marker_fd, "helper() signals on cv %p\n", &count_threshold_cv);
	pthread_cond_broadcast(&count_threshold_cv);
	ftrace_write(marker_fd, "helper(): just sent signal.\n");
	ftrace_write(marker_fd, "helper(): pid %d, unlocking mutex\n", my_pid);
	pthread_mutex_unlock(&count_mutex);

	if (pi_cv_enabled) {
		pthread_cond_helpers_del(&count_threshold_cv, my_pid);
		ftrace_write(marker_fd, "helper(): stop helping on cv %p\n", &count_threshold_cv);
		ftrace_write(marker_fd, "Removing helper() thread: pid %d prio 93\n", my_pid);
	}
	pthread_exit(NULL);
}
Ejemplo n.º 2
0
int set_sched_dl(long period, long exec)
{	
	pid_t pid;
	struct sched_attr dl_attr;
	struct timespec dl_period, dl_exec;
	int ret;
	unsigned int flags = 0;

	pid = getpid();
	dl_period = usec_to_timespec(period);
	dl_exec = usec_to_timespec(exec);
	dl_attr.size = sizeof(dl_attr);
	dl_attr.sched_flags = 0;
	dl_attr.sched_policy = SCHED_DEADLINE;
	dl_attr.sched_runtime = timespec_to_nsec(&dl_exec) + (timespec_to_nsec(&dl_exec) / 100) * 5;
	dl_attr.sched_deadline = timespec_to_nsec(&dl_period);
	dl_attr.sched_period = timespec_to_nsec(&dl_period);


	ret = sched_setattr(pid, &dl_attr, flags);
	if (ret != 0)
	{
		perror("sched_setattr");
	}

	return ret;

}
Ejemplo n.º 3
0
/*
 * waiter: SCHED_FIFO prio-95 thread, pinned to CPU 0.
 *
 * Busy-works briefly, then takes count_mutex and waits on
 * count_threshold_cv.  On wake-up it "consumes an item" by
 * busy-working 2s while still holding the mutex, then unlocks.
 *
 * @d: unused.  Always terminates via pthread_exit(NULL).
 */
void *waiter(void *d)
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	/* Give the other threads a head start. */
	sleep(1);

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 95;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}
	ftrace_write(marker_fd, "Starting waiter(): pid %d prio 95\n", my_pid);
	twait = usec_to_timespec(500000L);
	busywait(&twait);

	/*
	Lock mutex and wait for signal.  Note that the pthread_cond_wait routine
	will automatically and atomically unlock mutex while it waits.
	NOTE(review): there is no predicate re-check loop here, so a spurious
	wakeup would be treated as a real signal — intentional for this test,
	but not a pattern to copy into production code.
	*/
	pthread_mutex_lock(&count_mutex);
	ftrace_write(marker_fd, "waiter(): pid %d. Going into wait...\n", my_pid);
	ftrace_write(marker_fd, "waiter(): waits on cv %p\n", &count_threshold_cv);
	pthread_cond_wait(&count_threshold_cv, &count_mutex);
	ftrace_write(marker_fd, "waiter(): wakes on cv %p\n", &count_threshold_cv);
	/* "Consume" the item... */
	ftrace_write(marker_fd, "waiter(): pid %d Condition signal received.\n", my_pid);
	ftrace_write(marker_fd, "waiter(): pid %d Consuming an item...\n", my_pid);
	twait = usec_to_timespec(2000000L);
	busywait(&twait);

	/* %d (not %ld): my_pid is a pid_t, matching every other message. */
	ftrace_write(marker_fd, "waiter(): pid %d Unlocking mutex.\n", my_pid);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Ejemplo n.º 4
0
/*
 * watch_count: SCHED_FIFO prio-95 consumer thread, pinned to CPU 0.
 *
 * Waits on count_threshold_cv under count_mutex, then "consumes" an
 * item (2s busywait) and decrements the shared count.
 *
 * @t: thread id, smuggled in as a long.  Terminates via pthread_exit.
 */
void *watch_count(void *t)
{
	int ret;
	long my_id = (long)t;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 95;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}
	printf("Starting watch_count(): thread %ld prio 95\n", my_id);
	twait = usec_to_timespec(500000L);
	busywait(&twait);

	/*
	Lock mutex and wait for signal.  Note that the pthread_cond_wait routine
	will automatically and atomically unlock mutex while it waits.
	*/
	pthread_mutex_lock(&count_mutex);
	printf("watch_count(): thread %ld Count= %d. Going into wait...\n", my_id,count);
	pthread_cond_wait(&count_threshold_cv, &count_mutex);
	/* "Consume" the item... */
	printf("watch_count(): thread %ld Condition signal received. Count= %d\n", my_id,count);
	/* dropped a stray `count` argument that had no conversion specifier */
	printf("watch_count(): thread %ld Consuming an item...\n", my_id);
	twait = usec_to_timespec(2000000L);
	busywait(&twait);
	count -= 1;
	printf("watch_count(): thread %ld count now = %d.\n", my_id, count);

	printf("watch_count(): thread %ld Unlocking mutex.\n", my_id);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Ejemplo n.º 5
0
/*
 * annoyer: SCHED_FIFO prio-94 thread pinned to CPU 0.
 * Burns the CPU for 5 seconds, thereby preempting the lower-priority
 * inc_count thread, then exits.
 *
 * @t: thread id, passed as a long.
 */
void *annoyer(void *t)
{
	long my_id = (long)t;
	struct timespec spin;
	struct sched_param sp;
	cpu_set_t cpus;
	int rc;

	/* Restrict to CPU 0 so the priority conflict actually happens. */
	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);
	rc = sched_setaffinity(0, sizeof(cpus), &cpus);
	if (rc != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	/* Become a real-time FIFO thread at priority 94. */
	sp.sched_priority = 94;
	rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
	if (rc != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	printf("Starting annoyer(): thread %ld prio 94\n", my_id);
	printf("annoyer thread should preempt inc_count for 5sec\n");

	spin = usec_to_timespec(5000000L);
	busywait(&spin);

	printf("annoyer thread dies... inc_count can resume\n");
	pthread_exit(NULL);
}
Ejemplo n.º 6
0
/*
 * annoyer: SCHED_FIFO prio-94 thread pinned to CPU 0.
 * Burns the CPU for 5 seconds to preempt the lower-priority inc_count
 * thread, logging progress through the ftrace marker file.
 *
 * @d: unused.
 */
void *annoyer(void *d)
{
	struct timespec spin;
	struct sched_param sp;
	cpu_set_t cpus;
	pid_t me = gettid();
	int rc;

	/* Restrict to CPU 0 so the priority conflict actually happens. */
	CPU_ZERO(&cpus);
	CPU_SET(0, &cpus);
	rc = sched_setaffinity(0, sizeof(cpus), &cpus);
	if (rc != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	/* Become a real-time FIFO thread at priority 94. */
	sp.sched_priority = 94;
	rc = pthread_setschedparam(pthread_self(), SCHED_FIFO, &sp);
	if (rc != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	ftrace_write(marker_fd, "Starting annoyer(): pid %d prio 94\n", me);
	ftrace_write(marker_fd, "annoyer(): should preempt inc_count for 5sec\n");

	spin = usec_to_timespec(5000000L);
	ftrace_write(marker_fd, "annoyer(): starts running...\n");
	busywait(&spin);

	ftrace_write(marker_fd, "annoyer(): dies...\n");
	pthread_exit(NULL);
}
Ejemplo n.º 7
0
/*
 * annoyer: periodic SCHED_FIFO prio-93 disturbance thread.
 *
 * Records its pid in the global pids[] table, optionally pins itself to
 * CPU 0, then loops forever: busy-work for 300ms of *thread CPU time*,
 * sleep 1s, repeat.  Used to inject periodic interference into the
 * producer/consumer test.
 *
 * @d: thread index into pids[], passed as a long.
 */
void *annoyer(void *d)
{
	int ret;
	long id = (long) d;
	struct timespec twait, now;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	/* Publish our kernel tid so main/other threads can find us. */
	pids[id] = my_pid;
	

	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n"); 
			exit(EXIT_FAILURE);
		}
	}
	
	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(), 
				    SCHED_FIFO, 
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n"); 
		exit(EXIT_FAILURE);
	}

	/**
	 * Give other some time to warm up.
	 */
	sleep(2);

	if (global_args.ftrace)
		ftrace_write(marker_fd, "Starting annoyer(): prio 93\n");

	while(1) {
		/* 300ms */
		twait = usec_to_timespec(300000L);
		if (global_args.ftrace)
			ftrace_write(marker_fd,
				     "[annoyer %d] starts running...\n",
				     my_pid);
		/* Busywait until we have consumed 300ms of CPU time:
		 * the deadline is current thread-CPU clock + 300ms, so
		 * preemption stretches the wall-clock duration but not
		 * the amount of interference generated. */
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			ftrace_write(marker_fd,
				     "[annoyer %d] sleeps.\n",
				     my_pid);
		sleep(1);
	}
	/* Not reached: the loop above never exits. */
	pthread_exit(NULL);
}
Ejemplo n.º 8
0
/*
 * inc_count: SCHED_FIFO prio-93 producer thread, pinned to CPU 0.
 *
 * Takes count_mutex, busy-works 6s (simulating filling the queue),
 * increments the shared count, signals count_threshold_cv and unlocks.
 *
 * @t: thread id, passed as a long.  Terminates via pthread_exit.
 */
void *inc_count(void *t)
{
	int ret;
	long my_id = (long)t;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 93;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}
	printf("Starting inc_count(): thread %ld prio 93\n", my_id);

	pthread_mutex_lock(&count_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(6000000L);
	busywait(&twait);
	count++;

	printf("inc_count(): thread %ld, count = %d\n",
	       my_id, count);
	pthread_cond_signal(&count_threshold_cv);
	printf("Just sent signal.\n");
	printf("inc_count(): thread %ld, count = %d, unlocking mutex\n",
	       my_id, count);
	pthread_mutex_unlock(&count_mutex);

	pthread_exit(NULL);
}
Ejemplo n.º 9
0
/*
 * rt_owner: SCHED_FIFO prio-92 thread, pinned to CPU 0.
 *
 * Acquires rt_mutex, busy-works for 6s while holding it, then unlocks.
 * Its purpose is to own the rt_mutex that helper() later blocks on.
 *
 * @d: unused.  Terminates via pthread_exit(NULL).
 */
void *rt_owner(void *d)
{
	int ret;
	struct timespec twait;
	struct sched_param param;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	ret = sched_setaffinity(0, sizeof(mask), &mask);
	if (ret != 0) {
		printf("pthread_setaffinity failed\n");
		exit(EXIT_FAILURE);
	}

	param.sched_priority = 92;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	ftrace_write(marker_fd, "Starting rt_owner(): pid %d prio 92\n", my_pid);

	pthread_mutex_lock(&rt_mutex);

	/* Do some work (e.g., fill up the queue) */
	twait = usec_to_timespec(6000000L);
	busywait(&twait);

	ftrace_write(marker_fd, "rt_owner(): pid %d, unlocking mutex\n", my_pid);
	pthread_mutex_unlock(&rt_mutex);

	pthread_exit(NULL);
}
Ejemplo n.º 10
0
/*
 * producer: SCHED_FIFO prio-92 bounded-buffer producer.
 *
 * Repeatedly (until the global shutdown flag is raised): locks the
 * buffer mutex, waits on buffer.less while the buffer is full, writes
 * one item, busy-works a random amount of thread CPU time to simulate
 * production cost, signals buffer.more, unlocks, sleeps 1s.  When
 * PI-cv support is enabled it registers as a helper on buffer.more.
 *
 * @d: thread index into pids[], passed as a long (also used as the item
 *     value produced).  Terminates via pthread_exit(NULL).
 */
void *producer(void *d)
{
	int ret;
	struct sched_param param;
	long id = (long) d;
	long wait;
	int item = id;
	buffer_t *b = &buffer;
	struct timespec twait, now;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	pids[id] = my_pid;

	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n");
			exit(EXIT_FAILURE);
		}
	}

	param.sched_priority = 92;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	if (global_args.pi_cv_enabled) {
		if (global_args.ftrace)
			ftrace_write(marker_fd, "Adding helper thread: pid %d,"
				     " prio 92\n", my_pid);
		pthread_cond_helpers_add(&buffer.more, my_pid);
		if (global_args.ftrace)
			ftrace_write(marker_fd, "[prod %d] helps on cv %p\n",
				     my_pid, &buffer.more);
	}

	while(!shutdown) {
		pthread_mutex_lock(&b->mutex);

		while (b->occupied >= BSIZE)
			pthread_cond_wait(&b->less, &b->mutex);

		assert(b->occupied < BSIZE);

		b->buf[b->nextin++] = item;
		/* rand_wait() is in msec; convert to usec of CPU time. */
		wait = rand_wait() * 1000;
		twait = usec_to_timespec(wait);
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			/* %ld: wait is a long */
			ftrace_write(marker_fd, "[prod %d] executed for %ld usec"
				     " and produced %d\n", my_pid, wait, item);

		b->nextin %= BSIZE;
		b->occupied++;

		/*
		 * now: either b->occupied < BSIZE and b->nextin is the index
		 * of the next empty slot in the buffer, or
		 * b->occupied == BSIZE and b->nextin is the index of the
		 * next (occupied) slot that will be emptied by a consumer
		 * (such as b->nextin == b->nextout)
		 */

		pthread_cond_signal(&b->more);

		pthread_mutex_unlock(&b->mutex);
		sleep(1);
	}

	if (global_args.pi_cv_enabled) {
		pthread_cond_helpers_del(&buffer.more, my_pid);
		if (global_args.ftrace) {
			ftrace_write(marker_fd, "[prod %d] stop helping"
				     " on cv %p\n", my_pid, &buffer.more);
			ftrace_write(marker_fd, "Removing helper thread:"
				     " pid %d, prio 92\n", my_pid);
		}
	}

	pthread_exit(NULL);
}
Ejemplo n.º 11
0
/*
 * consumer: SCHED_FIFO prio-94 bounded-buffer consumer.
 *
 * Repeatedly (until the global shutdown flag is raised): locks the
 * buffer mutex, waits on buffer.more while the buffer is empty, takes
 * one item, busy-works a random amount of thread CPU time to simulate
 * consumption cost, signals buffer.less and unlocks.
 *
 * @d: thread index into pids[], passed as a long.
 *     Terminates via pthread_exit(NULL).
 */
void *consumer(void *d)
{
	int ret;
	struct sched_param param;
	long id = (long) d;
	long wait;
	int item;
	buffer_t *b = &buffer;
	struct timespec twait, now;
	cpu_set_t mask;
	pid_t my_pid = gettid();

	pids[id] = my_pid;

	if (global_args.affinity) {
		CPU_ZERO(&mask);
		CPU_SET(0, &mask);
		ret = sched_setaffinity(0, sizeof(mask), &mask);
		if (ret != 0) {
			printf("pthread_setaffinity failed\n");
			exit(EXIT_FAILURE);
		}
	}

	param.sched_priority = 94;
	ret = pthread_setschedparam(pthread_self(),
				    SCHED_FIFO,
				    &param);
	if (ret != 0) {
		printf("pthread_setschedparam failed\n");
		exit(EXIT_FAILURE);
	}

	/**
	 * Give producers some time to set up.
	 */
	sleep(1);

	while(!shutdown) {
		pthread_mutex_lock(&b->mutex);
		while(b->occupied <= 0) {
			if (global_args.ftrace)
				ftrace_write(marker_fd, "[cons %d] waits\n",
					     my_pid);
			pthread_cond_wait(&b->more, &b->mutex);
		}

		assert(b->occupied > 0);

		item = b->buf[b->nextout++];
		/* rand_wait() is in msec; convert to usec of CPU time. */
		wait = rand_wait() * 1000;
		twait = usec_to_timespec(wait);
		clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
		twait = timespec_add(&now, &twait);
		busywait(&twait);
		if (global_args.ftrace)
			/* %ld: wait is a long */
			ftrace_write(marker_fd, "[cons %d] executed for %ld usec"
				     " and consumed %d\n", my_pid, wait, item);

		b->nextout %= BSIZE;
		b->occupied--;

		/*
		 * now: either b->occupied > 0 and b->nextout is the index
		 * of the next occupied slot in the buffer, or
		 * b->occupied == 0 and b->nextout is the index of the next
		 * (empty) slot that will be filled by a producer (such as
		 * b->nextout == b->nextin)
		 */

		pthread_cond_signal(&b->less);
		pthread_mutex_unlock(&b->mutex);
	}

	pthread_exit(NULL);
}
Ejemplo n.º 12
0
/*
 * parse_thread_args - parse one command-line thread description of the
 * form "period:exec[:policy[:cpuset[:prio[:deadline]]]]" into tdata.
 * @arg:        the colon-separated description (not modified)
 * @idx:        thread index, used for the default name "t<idx>"
 * @tdata:      output thread descriptor
 * @def_policy: scheduling policy used when none is given
 *
 * Period and exec time (usec) are mandatory; deadline defaults to the
 * period.  Exits via usage()/exit() on any invalid field.
 */
void
parse_thread_args(char *arg, int idx, thread_data_t *tdata, policy_t def_policy)
{
	char *str = strdup(arg);
	char *token;
	long period, exec, dline;
	char tmp[256];
	int i = 0;
	int cpu;
	dline = 0;

	token = strtok(str, ":");
	tdata->name = malloc(sizeof(char) * 5);
	tdata->ind = idx;
	/* default name for command line threads; buffer holds "t999"+NUL.
	 * Size must be 5 (was 1, which truncated the name to ""). */
	snprintf(tdata->name, 5, "t%d", tdata->ind);
	tdata->sched_prio = DEFAULT_THREAD_PRIORITY;
	tdata->sched_policy = def_policy;
	tdata->cpuset = NULL;
	tdata->cpuset_str = NULL;

	while ( token != NULL)
	{
		switch(i) {
		case 0:
			period = strtol(token, NULL, 10);
			if (period <= 0 )
				usage("Cannot set negative period.",
				      EXIT_INV_COMMANDLINE);
			tdata->period = usec_to_timespec(period);
			i++;
			break;

		case 1:
			exec = strtol(token,NULL, 10);
			//TODO: add support for max_et somehow
			if (exec > period)
				usage("Exec time cannot be greater than"
				      " period.", EXIT_INV_COMMANDLINE);
			if (exec <= 0 )
				usage("Cannot set negative exec time",
				      EXIT_INV_COMMANDLINE);
			tdata->min_et = usec_to_timespec(exec);
			tdata->max_et = usec_to_timespec(exec);
			i++;
			break;

		case 2:
#ifdef AQUOSA
			if (strcmp(token,"q") == 0)
				tdata->sched_policy = aquosa;
			else
#endif
#ifdef DLSCHED
			if (strcmp(token,"d") == 0)
				tdata->sched_policy = deadline;
			else
#endif
			if (strcmp(token,"f") == 0)
				tdata->sched_policy = fifo;
			else if (strcmp(token,"r") == 0)
				tdata->sched_policy = rr ;
			else if (strcmp(token,"o") == 0)
				tdata->sched_policy = other;
			else {
				snprintf(tmp, 256,
					"Invalid scheduling policy %s in %s",
					token, arg);
				usage(tmp, EXIT_INV_COMMANDLINE);
			}
			policy_to_string(tdata->sched_policy,
					 tdata->sched_policy_descr);

			i++;
			break;
		case 3:
			if (strcmp(token, "-") == 0)
				tdata->cpuset = NULL;
			else {
				tdata->cpuset = malloc (sizeof(cpu_set_t));
				/* start from an empty mask: malloc'd
				 * memory is not zeroed */
				CPU_ZERO(tdata->cpuset);
				tdata->cpuset_str = strdup(token);
			}
			i++;
			break;
		case 4:
			tdata->sched_prio = strtol(token, NULL, 10);
			// do not check, will fail in pthread_setschedparam
			i++;
			break;
		case 5:
			dline = strtol(token, NULL, 10);
			if (dline < exec)
				usage("Deadline cannot be less than "
				      "execution time", EXIT_INV_COMMANDLINE);
			if (dline > period)
				usage("Deadline cannot be greater than "
				      "period", EXIT_INV_COMMANDLINE);
			if (dline <= 0 )
				usage("Cannot set negative deadline",
				      EXIT_INV_COMMANDLINE);
			tdata->deadline = usec_to_timespec(dline);
			i++;
			break;
		}
		token = strtok(NULL, ":");
	}
	if ( i < 2 ) {
		printf("Period and exec time are mandatory\n");
		exit(EXIT_INV_COMMANDLINE);
	}

	if (dline == 0)
		tdata->deadline = tdata->period;

	/* set cpu affinity mask */
	if (tdata->cpuset_str)
	{
		snprintf(tmp, 256, "%s", tdata->cpuset_str);
		token = strtok(tmp, ",");
		while (token != NULL) {
			cpu = strtol(token, NULL, 10);
			CPU_SET(cpu, tdata->cpuset);
			/* was `strtok(NULL, ",")` with the result dropped:
			 * token never advanced, so only the first CPU was
			 * set (1000 times) and the rest were ignored */
			token = strtok(NULL, ",");
		}
	} else
		tdata->cpuset_str = strdup("-");

	free(str);
}
Ejemplo n.º 13
0
/*
 * main - LTE baseband scheduling experiment driver.
 *
 * Parses command-line options (number of BSs/antennas, sample counts,
 * duration, priority, scheduler, experiment type, MCS, Lmax), sizes the
 * core pool, allocates the shared per-thread state (mutexes, condvars,
 * migration tables), installs shutdown signal handlers, then spawns one
 * timer thread, one transport thread per radio and num_cores_bs
 * processing threads per BS, and joins them all.
 *
 * Fixes vs. previous revision:
 *  - getopt() returns int; storing it in a plain char breaks on
 *    platforms where char is unsigned (the != -1 test never succeeds).
 *  - malloc'd cpu_set_t masks are now CPU_ZERO'd before CPU_SET.
 *  - the timer thread's pthread_create result is now checked like the
 *    others.
 */
int main(int argc, char** argv){

    deadline_miss_flag = (int *)malloc(100*sizeof(int));

    srand(time(NULL));
    int i,j;
    int thread_ret;
    char tmp_str[200], tmp_str_a[200];

    // options
    int node_ids[4] =  {0, 1, 2, 3};
    int num_nodes = 4;
    int node_socks[num_nodes];
    int host_id = 200;
    int num_samples = 1*1e6*1e-3; // samples in subframe = (samples/sec)*1ms
    int duration = 50; //secs
    int priority = 10;
    int sched = SCHED_FIFO;
    sprintf(exp_str, "offload");
    mcs = 20;
    int lmax  = 1500;
    int deadline = 1500;
    num_bss = 4;
    num_ants = 1; // antennas per radio
    N_P = 8;

    /* int, not char: getopt() returns int and -1 on end of options. */
    int c;
    while ((c = getopt (argc, argv, "h::M:A:L:s:d:p:S:e:D:Z:m:")) != -1) {
        switch (c) {
            case 'M':
              num_bss = atoi(optarg);
              break;

            case 'A':
              num_ants = atoi(optarg);
              break;

            case 's':
              num_samples = atoi(optarg);
              break;

            case 'd':
              duration = atoi(optarg);
              break;

            case 'p':
              priority = atoi(optarg);
              if (priority > 99){
                log_error("Unsupported priority!\n");
                exit(-1);
              }
              break;

            case 'Z':
              N_P = atoi(optarg);
              break;

            case 'S':
              switch((char)*optarg){

                  case 'R':
                    sched = SCHED_RR;
                    break;

                  case 'F':
                    sched = SCHED_FIFO;
                    break;

                  case 'O':
                    sched = SCHED_OTHER;
                    break;

                  default:
                    log_error("Unsupported scheduler!\n");
                    exit(-1);
                    break;
              }
              break;

            case 'e':
              switch((char)*optarg){

                  case 'P':
                    sprintf(exp_str, "plain");
                    break;

                  case 'O':
                    sprintf(exp_str, "offload");
                    break;

                  default:
                    log_error("Unsupported exp!\n");
                    exit(-1);
                    break;
              }
              break;

            case 'D':
              debug_trans = atoi(optarg);
              break;

            case 'm':
                mcs = atoi(optarg);
                break;

            case 'L':
                lmax = atoi(optarg);
                break;

            case 'h':
            default:
              printf("%s -h(elp) -M num_bss -A num_ants  -L lmax -s num_samples -d duration(s) -p priority(1-99) -S sched (R/F/O) -e experiment ('P'plain /'O' offload) -D transport debug(0 or 1) -m MCS\n\nExample usage: sudo ./gd_lte -M 4 -A 1 -L 2000 -s 1000 -d 10 -p 10 -S F -e P -D 1 -m 20\n",
                     argv[0]);
              exit(1);
              break;

        }
    }

    num_nodes = num_bss*num_ants;

    // calculate the number of cores to support the given radios
    num_cores_bs = ceil((double)lmax/1000);  // each bs
    proc_nthreads = num_cores_bs*num_bss; // total

    assert(num_cores_bs == 1 || num_cores_bs == 2 || num_cores_bs == 3);
    log_notice("Scheduler will run with %d cores for each of %d BSs. Total cores = %d\n",num_cores_bs, num_bss, proc_nthreads)

    /**************************************************************************/

    iqr = (short*) malloc(1*2*30720*sizeof(short)); //1=no_of_frame/1000, 2=BW/5MHz
    iqi = (short*) malloc(1*2*30720*sizeof(short));
    // configure the baseband
    configure(0, NULL, 0, iqr, iqi, mcs, num_ants);
    /**************************************************************************/

    double my_complex *buffer = (double my_complex*) malloc(num_samples*sizeof(double my_complex));
    policy_to_string(sched, tmp_str_a);
    trans_nthreads = num_nodes;

    /**************************************************************************/
    trans_threads = malloc(trans_nthreads*sizeof(pthread_t));
    gd_thread_data_t *trans_tdata, *timer_tdata;
    trans_tdata = malloc(trans_nthreads*sizeof(gd_thread_data_t));
    timer_tdata = malloc(1*sizeof(gd_thread_data_t));

    proc_threads = malloc(proc_nthreads*sizeof(pthread_t));
    gd_thread_data_t *proc_tdata;
    proc_tdata = malloc(proc_nthreads*sizeof(gd_thread_data_t));

    subframe_mutex = (pthread_mutex_t*)malloc(proc_nthreads*sizeof(pthread_mutex_t));
    state_mutex = (pthread_mutex_t*)malloc(proc_nthreads*sizeof(pthread_mutex_t));
    subframe_cond = (pthread_cond_t*)malloc(proc_nthreads*sizeof(pthread_cond_t));
    common_time = (struct timespec*)malloc((num_cores_bs)*sizeof(struct timespec));
    subframe_avail = (int *)malloc(proc_nthreads*sizeof(int));
    state = (long *)malloc(proc_nthreads*sizeof(long));
    migrate_avail = (migrate *)malloc(proc_nthreads*sizeof(migrate));
    migrate_to = (int *)malloc(proc_nthreads*sizeof(int));

    for (i=0; i<proc_nthreads; i++){
        subframe_avail[i] = 0;
        pthread_mutex_init(&subframe_mutex[i], NULL);
        pthread_mutex_init(&state_mutex[i], NULL);
        pthread_cond_init(&subframe_cond[i], NULL);
        migrate_avail[i].count = 0;
        migrate_avail[i].cur_start_id = 0;
    }

    /* install a signal handler for proper shutdown */
    signal(SIGQUIT, gd_shutdown);
    signal(SIGTERM, gd_shutdown);
    signal(SIGHUP, gd_shutdown);
    signal(SIGINT, gd_shutdown);

    running = 1;
    gd_trans_initialize(node_socks, num_nodes);
    gd_trans_trigger();

    timer_tdata->duration = duration;
    timer_tdata->sched_policy = sched;
    timer_tdata->sched_prio = priority;
    timer_tdata->deadline = usec_to_timespec(500);
    timer_tdata->period = usec_to_timespec(1000);
    timer_tdata->cpuset = malloc(sizeof(cpu_set_t));
    CPU_ZERO(timer_tdata->cpuset);   /* malloc'd memory is not zeroed */
    CPU_SET( 2, timer_tdata->cpuset);

    for(i= 0; i < trans_nthreads; i++){

        trans_tdata[i].ind = i;
        trans_tdata[i].duration = duration;
        trans_tdata[i].sched_policy = sched;
        trans_tdata[i].deadline = usec_to_timespec(500);
        trans_tdata[i].period = usec_to_timespec(1000);

        sprintf(tmp_str, "../log_static/exp%s_samp%d_trans%d_prior%d_sched%s_nbss%d_nants%d_ncores%d_Lmax%d_mcs%d.log",
            exp_str, num_samples, i, priority, tmp_str_a, num_bss, num_ants, num_cores_bs, lmax, mcs);
        trans_tdata[i].log_handler = fopen(tmp_str, "w");
        trans_tdata[i].sched_prio = priority;
        trans_tdata[i].cpuset = malloc(sizeof(cpu_set_t));
        CPU_ZERO(trans_tdata[i].cpuset);
        CPU_SET( 22 +i, trans_tdata[i].cpuset);

        trans_tdata[i].conn_desc.node_id = i;
        trans_tdata[i].conn_desc.node_sock = node_socks[i];
        trans_tdata[i].conn_desc.host_id = host_id;
        trans_tdata[i].conn_desc.num_samples = num_samples;
        trans_tdata[i].conn_desc.start_sample = 0;
        trans_tdata[i].conn_desc.buffer = buffer;
        trans_tdata[i].conn_desc.buffer_id = 1;
    }

    for(i= 0; i < proc_nthreads; i++){

        proc_tdata[i].ind = i;
        proc_tdata[i].duration = duration;
        proc_tdata[i].sched_policy = sched;
        proc_tdata[i].deadline = usec_to_timespec(deadline);
        proc_tdata[i].period = usec_to_timespec(num_cores_bs*1000);
        sprintf(tmp_str, "../log_static/exp%s_samp%d_proc%d_prior%d_sched%s_nbss%d_nants%d_ncores%d_Lmax%d_mcs%d.log",
            exp_str, num_samples, i, priority,tmp_str_a, num_bss, num_ants, num_cores_bs, lmax, mcs);

        proc_tdata[i].log_handler = fopen(tmp_str, "w");
        proc_tdata[i].sched_prio = priority;
        proc_tdata[i].cpuset = malloc(sizeof(cpu_set_t));
        CPU_ZERO(proc_tdata[i].cpuset);
        CPU_SET( 8+i, proc_tdata[i].cpuset);
    }

    struct timespec t_start;
    // starting time
    clock_gettime(CLOCK_MONOTONIC, &t_start);
    timer_tdata->main_start = t_start;
    thread_ret = pthread_create(&timer_thread, NULL, timer_main, timer_tdata);
    if (thread_ret){
        log_error("Cannot start thread");
        exit(-1);
    }

    log_notice("Starting trans threads");
    // start threads
    for(i = 0; i < trans_nthreads; i++){
        trans_tdata[i].main_start = t_start;
        thread_ret = pthread_create(&trans_threads[i], NULL, trans_main, &trans_tdata[i]);
        if (thread_ret){
            log_error("Cannot start thread");
            exit(-1);
        }
    }

    log_notice("Starting proc threads");
    for(i= 0; i < proc_nthreads; i++){
        proc_tdata[i].main_start = t_start;
        thread_ret = pthread_create(&proc_threads[i], NULL, proc_main, &proc_tdata[i]);
        if (thread_ret){
            log_error("Cannot start thread");
            exit(-1);
        }
    }

    pthread_join(timer_thread, NULL);
    for (i = 0; i < trans_nthreads; i++)
    {
        pthread_join(trans_threads[i], NULL);
    }
    for (i = 0; i < proc_nthreads; i++)
    {
        pthread_join(proc_threads[i], NULL);
    }
    return 0;
}
Ejemplo n.º 14
0
/*
 * proc_main - per-core LTE subframe processing thread with task
 * migration/offloading.
 *
 * Each period: waits until the transport threads mark the subframe
 * available (subframe_avail[id] == num_ants), then runs the FFT stage
 * (possibly offloading FFT subtasks to idle peer cores via the
 * migrate_avail[] table), runs the decode stage if enough time remains,
 * and finally advertises its own idle time in state[id] so peers can
 * offload work to it.  Per-period timing is accumulated and dumped to
 * tdata->log_handler on exit.
 *
 * @arg: gd_thread_data_t* describing this thread (index, period,
 *       duration, log handle).  Terminates via pthread_exit(NULL).
 */
void* proc_main(void* arg){

    // acquire lock, read subframes and process
    gd_thread_data_t *tdata = (gd_thread_data_t *) arg;
    int id = tdata->ind;
    thread_common(pthread_self(), tdata);
    unsigned long abs_period_start = timespec_to_usec(&tdata->main_start);
    // stagger this thread's start by one period slot per core index
    struct timespec t_offset;
    t_offset = usec_to_timespec(id*num_cores_bs*1000);
    tdata->main_start = timespec_add(&tdata->main_start, &t_offset);
    struct timespec proc_start, proc_end, t_next, t_deadline;
    gd_proc_timing_meta_t *timings;
    long duration_usec = (tdata->duration * 1e6);
    int nperiods = (int) floor(duration_usec /
            (double) timespec_to_usec(&tdata->period));

    //nperiods reduce a little to prevents trans finishing before proc; ugly fix
    nperiods-=500;

    timings = (gd_proc_timing_meta_t*) malloc ( nperiods * sizeof(gd_proc_timing_meta_t));
    gd_proc_timing_meta_t *timing;
    int period = 0;
    int deadline_miss=0;

    long time_deadline, proc_actual_time, avail_time;
    struct timespec t_temp, t_now, t_temp1;
    log_notice("Starting proc thread %d nperiods %d %lu", id, nperiods, timespec_to_usec(&t_offset));

    int bs_id = (int)(id/num_cores_bs);
    int subframe_id =  id%(num_cores_bs);
    log_notice("checking subframe mutex %d", bs_id*num_cores_bs + subframe_id);

	/*
	 * Offloading algorithm (per period):
	 *   Input: P FFT subtasks of ~tp processing time each, and M peer
	 *   cores, where core j has fc_j > 0 usec of advertised free time.
	 *     N      <- P          (subtasks not yet offloaded)
	 *     maxoff <- 0          (max subtasks offloaded to any one core)
	 *     while N > 1 and j <= M:
	 *         limoff <- floor(fc_j / tp)       (capacity of core j)
	 *         noff   <- min(N - maxoff, limoff, floor(N/2))
	 *         maxoff <- max(noff, maxoff)
	 *         offload noff subtasks to core j
	 *         N <- N - noff;  j <- j + 1
	 */

    struct timespec each =  usec_to_timespec(1000);

    while(running && (period < nperiods)){

        // wait for the transport thread to wake me
        // (subframe is ready when all num_ants antennas have delivered)
        pthread_mutex_lock(&subframe_mutex[id]);
        while (!(subframe_avail[id] == num_ants)){
                    pthread_cond_wait(&subframe_cond[id], &subframe_mutex[id]);
        }
		subframe_avail[id]=0;
        pthread_mutex_unlock(&subframe_mutex[id]);

        /****** do LTE processing *****/
        clock_gettime(CLOCK_MONOTONIC, &proc_start);
        t_next = timespec_add(&proc_start, &tdata->period);
        clock_gettime(CLOCK_MONOTONIC, &t_now);

		//check for migration opportunity
		//iterating over the processing threads and studying which ones are available for migration
		int nOffload = 0;
		int max_off = 0;
		int tasksRemain = 14;
        int cur_start_id =0;
        for (int cur = 0; cur<proc_nthreads;cur++) {

			pthread_mutex_lock(&state_mutex[cur]);
			// state[cur] holds the absolute usec until which core `cur`
			// is idle; negative remaining time means no capacity
		 	avail_time = state[cur] - timespec_to_usec(&t_now);
		 	if (avail_time<0) {
		 		avail_time = 0;
		 	}
		 	int lim_off = floor(avail_time/sub_fft_time);
		 	int noff = MIN(tasksRemain-max_off,MIN(lim_off,floor(tasksRemain/2)));
		 	max_off= MAX(max_off,noff);
		 	tasksRemain = tasksRemain-noff;
		 	if (avail_time>0) {
		 	// printf("I am offloading things: noff:%d to core:%d, maxoff:%d, limoff:%d, remain:%d\n",noff,cur,max_off,lim_off,tasksRemain);
		 	// printf("timings: avail_time[%d]:%li, state[cur]:%li, now:%li\n", cur,avail_time,state[cur],timespec_to_usec(&t_now));
			}
			// mark core `cur` busy (-1) and hand it its subtask range
			if (noff>0) {
				state[cur]=-1;
				migrate_avail[cur].count=noff;
                migrate_avail[cur].start_id = cur_start_id;
			}
            cur_start_id +=noff;
			pthread_mutex_unlock(&state_mutex[cur]);

		 }

		 // NOTE(review): nOffload is never incremented above, so the
		 // task_fft() branch always runs — confirm this is intended.
		 if (nOffload == 0){
             task_fft();
         } else {
             for (int left_iter = cur_start_id; left_iter< 14;left_iter ++) {
		 		subtask_fft(left_iter);
		 	}

        }

        clock_gettime(CLOCK_MONOTONIC, &t_now);
        // check if there is enough time to decode else kill
        if (timespec_to_usec(&t_next) - (timespec_to_usec(&t_now) + 5*decode_time[mcs]) < 0.0){
            // printf("I kill myslef\n");
        }else{
            task_decode();
        }
        clock_gettime(CLOCK_MONOTONIC, &proc_end);

        // there is time to receive migrated task
        clock_gettime(CLOCK_MONOTONIC, &t_now);
        long rem_time = timespec_to_usec(&t_next)  - timespec_to_usec(&t_now);

        if (rem_time > 50){

			// advertise idle time: peers may now offload subtasks to us
			state[id]=timespec_to_usec(&t_next);
			struct timespec t_before, t_after;
			clock_gettime(CLOCK_MONOTONIC, &t_before);

            // wait for received task or for the remaining time
            clock_gettime(CLOCK_MONOTONIC, &t_now);
			int rvd_task = 0;
            while( timespec_to_usec(&t_now) <=  timespec_to_usec(&t_next)-50 ) {
				if (migrate_avail[id].count > 0) {
					rvd_task = 1;
                    int i = 0;
                    // run the subtask range a peer assigned to us
                    for (i=migrate_avail[id].start_id; i < migrate_avail[id].start_id + migrate_avail[id].count; i++){
                        subtask_fft(i);
                    }
                    migrate_avail[id].count=0;
				}

				clock_gettime(CLOCK_MONOTONIC, &t_now);
				rem_time = timespec_to_usec(&t_next)  - timespec_to_usec(&t_now);
				if (rem_time>50) {
					state[id] = timespec_to_usec(&t_next);
				}

             }
			clock_gettime(CLOCK_MONOTONIC, &t_after);

        }
        // no longer accepting migrated work this period
        state[id] = -1;

        clock_gettime(CLOCK_MONOTONIC, &t_now);

        // record per-period timing for the log
        // NOTE(review): t_deadline is never assigned in this loop, so
        // abs_deadline/rel_deadline/miss are computed from garbage —
        // confirm against the logging consumer.
        timing = &timings[period];
        timing->ind = id;
        timing->period = period;
        timing->abs_period_time = timespec_to_usec(&t_next);
        timing->rel_period_time = timing->abs_period_time - abs_period_start;
        timing->abs_start_time = timespec_to_usec(&proc_start);
        timing->rel_start_time = timing->abs_start_time - abs_period_start;
        timing->abs_end_time = timespec_to_usec(&proc_end);
        timing->rel_end_time = timing->abs_end_time - abs_period_start;
        timing->abs_deadline = timespec_to_usec(&t_deadline);
        timing->rel_deadline = timing->abs_deadline - abs_period_start;
        timing->original_duration = 0;
        timing->actual_duration = timing->rel_end_time - timing->rel_start_time;
        timing->miss = (timing->rel_deadline - timing->rel_end_time >= 0) ? 0 : 1;
        period++;
    }

    log_notice("Writing to log ... proc thread %d", id);
    fprintf(tdata->log_handler, "#idx\t\tabs_period\t\tabs_deadline\t\tabs_start\t\tabs_end"
                   "\t\trel_period\t\trel_start\t\trel_end\t\tduration\t\tmiss\n");

    int i;
    for (i=0; i < nperiods; i++){
        proc_log_timing(tdata->log_handler, &timings[i]);
    }

    fclose(tdata->log_handler);
    log_notice("Exit proc thread %d",id);
    pthread_exit(NULL);
}
Ejemplo n.º 15
0
static void
parse_thread_data(char *name, struct json_object *obj, int idx,
		  thread_data_t *data, const rtapp_options_t *opts)
{
	/*
	 * Fill *data from the JSON thread description `obj`.
	 * Validates period/exec/deadline relations and exits with
	 * EXIT_INV_CONFIG on any inconsistency. `opts` supplies
	 * process-wide defaults (policy, lock_pages, resources).
	 */
	long exec, period, dline;
	char *policy;
	char def_policy[RTAPP_POLICY_DESCR_LENGTH];
	struct array_list *cpuset;
	struct json_object *cpuset_obj, *cpu, *resources, *locks;
	int i, cpu_idx;

	log_info(PFX "Parsing thread %s [%d]", name, idx);
	/* common and defaults */
	data->ind = idx;
	data->name = strdup(name);
	data->lock_pages = opts->lock_pages;
	data->sched_prio = DEFAULT_THREAD_PRIORITY;
	data->cpuset = NULL;
	data->cpuset_str = NULL;

	/* loop count; -1 (the default) means run forever */
	data->loop = get_int_value_from(obj, "loop", TRUE, -1);

	/* period: mandatory, must be strictly positive */
	period = get_int_value_from(obj, "period", FALSE, 0);
	if (period <= 0) {
		/* message fixed: zero is rejected too, not only negatives */
		log_critical(PIN2 "Cannot set null or negative period");
		exit(EXIT_INV_CONFIG);
	}
	data->period = usec_to_timespec(period);

	/* exec time: mandatory, 0 <= exec <= period */
	exec = get_int_value_from(obj, "exec", FALSE, 0);
	if (exec > period) {
		/* message fixed: it previously stated the opposite constraint */
		log_critical(PIN2 "Exec time cannot be greater than period");
		exit(EXIT_INV_CONFIG);
	}
	if (exec < 0) {
		log_critical(PIN2 "Cannot set negative exec time");
		exit(EXIT_INV_CONFIG);
	}
	data->min_et = usec_to_timespec(exec);
	data->max_et = usec_to_timespec(exec);

	/* deadline: optional, defaults to period; exec <= dline <= period */
	dline = get_int_value_from(obj, "deadline", TRUE, period);
	if (dline < exec) {
		log_critical(PIN2 "Deadline cannot be less than exec time");
		exit(EXIT_INV_CONFIG);
	}
	if (dline > period) {
		log_critical(PIN2 "Deadline cannot be greater than period");
		exit(EXIT_INV_CONFIG);
	}
	data->deadline = usec_to_timespec(dline);

	/* scheduling policy: optional, defaults to the global one */
	policy_to_string(opts->policy, def_policy);
	policy = get_string_value_from(obj, "policy", TRUE, def_policy);
	if (policy) {
		if (string_to_policy(policy, &data->sched_policy) != 0) {
			log_critical(PIN2 "Invalid policy %s", policy);
			exit(EXIT_INV_CONFIG);
		}
	}
	policy_to_string(data->sched_policy, data->sched_policy_descr);

	/* priority: optional */
	data->sched_prio = get_int_value_from(obj, "priority", TRUE,
					      DEFAULT_THREAD_PRIORITY);

	/* cpu affinity set: optional array of cpu indices */
	cpuset_obj = get_in_object(obj, "cpus", TRUE);
	if (cpuset_obj) {
		assure_type_is(cpuset_obj, obj, "cpus", json_type_array);
		data->cpuset_str = strdup(json_object_to_json_string(cpuset_obj));
		data->cpuset = malloc(sizeof(cpu_set_t));
		if (!data->cpuset) {
			/* CPU_ZERO/CPU_SET would write through NULL otherwise */
			log_critical(PIN2 "Cannot allocate cpu set");
			exit(EXIT_FAILURE);
		}
		cpuset = json_object_get_array(cpuset_obj);
		CPU_ZERO(data->cpuset);
		for (i=0; i < json_object_array_length(cpuset_obj); i++) {
			cpu = json_object_array_get_idx(cpuset_obj, i);
			cpu_idx = json_object_get_int(cpu);
			CPU_SET(cpu_idx, data->cpuset);
		}
	} else {
		data->cpuset_str = strdup("-");
		data->cpuset = NULL;
	}
	log_info(PIN "key: cpus %s", data->cpuset_str);

	/* resources / lock_order: optional; both needed for blockages */
	resources = get_in_object(obj, "resources", TRUE);
	locks = get_in_object(obj, "lock_order", TRUE);
	if (locks) {
		assure_type_is(locks, obj, "lock_order", json_type_array);
		log_info(PIN "key: lock_order %s", json_object_to_json_string(locks));
		if (resources) {
			assure_type_is(resources, obj, "resources",
					json_type_object);
			log_info(PIN "key: resources %s",
				  json_object_to_json_string(resources));
		}
		parse_thread_resources(opts, locks, resources, data);
	}

}
Ejemplo n.º 16
0
static void
parse_thread_resources(const rtapp_options_t *opts, struct json_object *locks,
		       struct json_object *task_resources, thread_data_t *data)
{
	/*
	 * Build data->blockages[] from the "lock_order" array `locks`,
	 * resolving per-resource usage durations from `task_resources`.
	 * Exits with EXIT_INV_CONFIG on malformed input.
	 */
	int i,j, cur_res_idx, usage_usec;
	struct json_object *res;
	int res_dur;
	/* wide enough for any int index; the old [4] silently truncated
	 * indices >= 1000 and looked up the wrong resource key */
	char res_name[16];

	rtapp_resource_access_list_t *tmp, *head, *last;
	char debug_msg[512], tmpmsg[512];

	data->nblockages = json_object_array_length(locks);
	data->blockages = malloc(sizeof(rtapp_tasks_resource_list_t) *
				 data->nblockages);
	if (!data->blockages) {
		log_critical("Cannot allocate blockages list");
		exit(EXIT_FAILURE);
	}

	for (i = 0; i< data->nblockages; i++)
	{
		res = json_object_array_get_idx(locks, i);
		if (!json_object_is_type(res, json_type_int)){
			log_critical("Invalid resource index");
			exit(EXIT_INV_CONFIG);
		}
		cur_res_idx = json_object_get_int(res);

		data->blockages[i].usage = usec_to_timespec(0);
		data->blockages[i].acl = NULL;
		serialize_acl(&data->blockages[i].acl, cur_res_idx,
				task_resources, opts->resources);

		/* guard against an empty ACL: the walks below dereference it */
		if (!data->blockages[i].acl)
			continue;

		/* since the "current" resource is returned as the first
		 * element in the list, we move it to the back  */
		tmp = data->blockages[i].acl;
		head = data->blockages[i].acl;
		do {
			last = tmp;
			tmp = tmp->next;
		} while (tmp != NULL);

		/* move first element to list end */
		if (last != head) {
			data->blockages[i].acl = head->next;
			data->blockages[i].acl->prev = NULL;
			last->next = head;
			head->next = NULL;
			head->prev = last;
		}

		/* build a space-separated list of ACL indices for logging */
		tmp = data->blockages[i].acl;
		debug_msg[0] = '\0';
		do  {
			snprintf(tmpmsg, 512, "%s %d", debug_msg, tmp->res->index);
			strncpy(debug_msg, tmpmsg, 512);
			last = tmp;
			tmp = tmp->next;
		} while (tmp != NULL);

		log_info(PIN "key: acl %s", debug_msg);

		/* per-resource usage duration (usec), 0 if not specified */
		snprintf(res_name, sizeof(res_name), "%d", cur_res_idx);
		res = get_in_object(task_resources, res_name, TRUE);
		if (!res) {
			usage_usec = 0;
			data->blockages[i].usage = usec_to_timespec(0);
		} else {
			assure_type_is(res, task_resources, res_name,
					json_type_object);
			usage_usec = get_int_value_from(res, "duration", TRUE, 0);
			data->blockages[i].usage = usec_to_timespec(usage_usec);
		}
		log_info(PIN "res %d, usage: %d acl: %s", cur_res_idx,
			 usage_usec, debug_msg);
	}
}