Example #1
void* timer_main(void* arg){

    gd_thread_data_t *tdata = (gd_thread_data_t *) arg;
    thread_common(pthread_self(), tdata);
    long duration_usec = (tdata->duration * 1e6);
    int nperiods = (int) ceil( duration_usec /
            (double) timespec_to_usec(&tdata->period));
    int period = 0;
    struct timespec t_next, t_now;
    t_next = tdata->main_start;
    int subframe_id;

    while(running && (period < nperiods)){

        subframe_id = period%(num_cores_bs);
        t_next = timespec_add(&t_next, &tdata->period);
        clock_gettime(CLOCK_MONOTONIC, &t_now);
        common_time[subframe_id] = t_now;
        common_time_ref = t_now;
        common_time_next = t_next;

        if (timespec_lower(&t_now, &t_next)){
            // sleep for remaining time
            clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t_next, NULL);
        }else{
            printf("timer thread overran its period\n");
        }

        period++;
    }
    running = 0;

    pthread_exit(NULL);
}
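
Nearly all of the examples in this collection lean on a small family of timespec helpers (timespec_to_usec, usec_to_timespec, timespec_add, timespec_lower) whose definitions are not shown. Their exact signatures and return types vary between the projects; the following is a minimal sketch only, assuming the microsecond variants work with plain integer microsecond counts as the call sites suggest.

#include <time.h>

/* convert a timespec to whole microseconds (rounded to nearest) */
static unsigned long timespec_to_usec(struct timespec *ts)
{
	return ts->tv_sec * 1000000UL + (ts->tv_nsec + 500) / 1000;
}

/* build a timespec from a microsecond count */
static struct timespec usec_to_timespec(unsigned long usec)
{
	struct timespec ts;

	ts.tv_sec = usec / 1000000UL;
	ts.tv_nsec = (usec % 1000000UL) * 1000;
	return ts;
}

/* component-wise sum with carry from nanoseconds into seconds */
static struct timespec timespec_add(struct timespec *t1, struct timespec *t2)
{
	struct timespec ts;

	ts.tv_sec = t1->tv_sec + t2->tv_sec;
	ts.tv_nsec = t1->tv_nsec + t2->tv_nsec;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	return ts;
}

/* non-zero when *what is strictly earlier than *than */
static int timespec_lower(struct timespec *what, struct timespec *than)
{
	if (what->tv_sec < than->tv_sec)
		return 1;
	if (what->tv_sec == than->tv_sec && what->tv_nsec < than->tv_nsec)
		return 1;
	return 0;
}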
Example #2
void lock(int ind, ...) {
  int resource_id;
  unsigned int loops, i;
  double accumulator=0.25;
  //struct timespec t_start, now, t_exec, t_totexec;
  struct timespec *t_spec;
  va_list argp;
  va_start(argp, ind);
  t_spec = va_arg(argp, struct timespec*);
  resource_id = va_arg(argp, int);
  va_end(argp);
  //clock_gettime(CLOCK_THREAD_CPUTIME_ID, &t_start);
  loops = timespec_to_usec(t_spec);
#ifdef TRACE_BEGINS_LOCK
  log_ftrace(ft_data.marker_fd, "[%d] begins lock", ind+1);
#endif
  pthread_mutex_lock(&opts.resources[resource_id].mtx);
#ifdef TRACE_LOCK_ACQUIRED
  log_ftrace(ft_data.marker_fd, "[%d] lock acquired", ind+1);
#endif
  // clock_gettime(CLOCK_THREAD_CPUTIME_ID, &now);
  for (i = 0; i < loops; i++) {
    accumulator += 0.5;
    accumulator -= floor(accumulator);
  }
  // counter = (++counter) * i;
  // t_exec = timespec_add(&now, t_spec);
  // busywait(&t_exec);
  pthread_mutex_unlock(&opts.resources[resource_id].mtx);
}
Example #3
void memory (int ind, ...) {
  int memory_used, loops, i;
  double *accumulator;
  struct timespec *t_spec;
  va_list argp;
  va_start(argp, ind);
  t_spec = va_arg(argp, struct timespec*);
  memory_used = va_arg(argp, int);
  va_end(argp); 
  loops = timespec_to_usec(t_spec);
  // use calloc so the buffer starts zeroed: the loop reads each slot before writing it
  accumulator = calloc(memory_used, sizeof(double));
  if (accumulator == NULL)
    return;
  for (i = 0; i < loops; i++) {
    accumulator[i%memory_used] += 0.5;
    accumulator[i%memory_used] -= floor(accumulator[i%memory_used]);
  }
  free(accumulator);
}
Example #4
void shared (int ind, ...) {
  int loops, i;
  struct timespec *t_spec;
  va_list argp;
  va_start(argp, ind);
  t_spec = va_arg(argp, struct timespec*);
  va_end(argp); 
  loops = timespec_to_usec(t_spec);
  if (opts.nshared != 0) {
	  pthread_mutex_lock(&opts.buffermtx);
	  for (i = 0; i < loops; i++) {
	    opts.shared[i%opts.nshared] += 0.5;
	    opts.shared[i%opts.nshared] -= floor(opts.shared[i%opts.nshared]);
	  }
	  pthread_mutex_unlock(&opts.buffermtx);
  }
}
Example #5
void compute (int ind, ...) {
  //  unsigned int loops, i, counter = 0;
  unsigned int loops, i;
  double accumulator=0.25;
  struct timespec *t_spec;
  va_list argp;
  va_start(argp, ind);
  t_spec = va_arg(argp, struct timespec*);
  va_end(argp);
  loops = timespec_to_usec(t_spec);
#ifdef TRACE_BEGINS_COMPUTE
  log_ftrace(ft_data.marker_fd, "[%d] begins compute", ind+1);
#endif
  for (i = 0; i < loops; i++) {
    accumulator += 0.5;
    accumulator -= floor(accumulator);
  }
}
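
compute() and the other phase functions above take the requested work duration as a struct timespec passed through varargs and convert it to roughly one busy-loop iteration per microsecond. A hypothetical caller might look like this; the 5 ms and 2 ms figures and run_phases_example itself are purely illustrative and not part of the original source.

/* illustrative driver: burn about 5 ms of CPU, then hold resource 0 for about 2 ms */
void run_phases_example(int ind)
{
	struct timespec t_compute = usec_to_timespec(5000);
	struct timespec t_lock = usec_to_timespec(2000);

	compute(ind, &t_compute);   /* plain busy loop */
	lock(ind, &t_lock, 0);      /* busy loop while holding opts.resources[0].mtx */
}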
Example #6
static inline guint64
pka_resolution_apply (PkaResolution    res, /* IN */
                      struct timespec *ts)  /* IN */
{
	guint64 usec = 0;

	timespec_to_usec(ts, &usec);

	switch (res) {
	CASE(PKA_RESOLUTION_USEC);
		RETURN(usec);
	CASE(PKA_RESOLUTION_MSEC);
		RETURN(usec / G_GUINT64_CONSTANT(1000));
	CASE(PKA_RESOLUTION_SECOND);
		RETURN(usec / G_USEC_PER_SEC);
	CASE(PKA_RESOLUTION_MINUTE);
		RETURN(usec / ((guint64)(60 * G_USEC_PER_SEC)));
	CASE(PKA_RESOLUTION_HOUR);
		RETURN(usec / (((guint64)3600 * G_USEC_PER_SEC)));
	default:
		g_assert_not_reached();
		return usec;
	}
}
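
Note that this project uses a different timespec_to_usec shape than the other examples: the result is written through an out-parameter rather than returned. A minimal sketch of what such a variant might look like (the exact rounding behaviour is an assumption):

#include <time.h>
#include <glib.h>

/* out-parameter variant: store the microsecond count in *usec */
static void timespec_to_usec(const struct timespec *ts, guint64 *usec)
{
	*usec = ((guint64) ts->tv_sec * G_USEC_PER_SEC) + (ts->tv_nsec / 1000);
}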
Example #7
static gboolean
pka_encoder_real_encode_manifest (PkaManifest  *manifest, /* IN */
                                  guint8      **data,     /* OUT */
                                  gsize        *data_len) /* OUT */
{
	EggBuffer *buf, *mbuf, *ebuf;
	struct timespec ts;
	guint64 t;
	const guint8 *tbuf;
	gsize tlen;
	gint rows;
	gint i;

	g_return_val_if_fail(manifest != NULL, FALSE);
	g_return_val_if_fail(data != NULL, FALSE);
	g_return_val_if_fail(data_len != NULL, FALSE);

	ENTRY;
	buf = egg_buffer_new();

	/*
	 * Field 1: Timestamp.  Currently encoded in microseconds.  We should
	 *   determine what we want to do long-term.
	 */
	pka_manifest_get_timespec(manifest, &ts);
	timespec_to_usec(&ts, &t);
	egg_buffer_write_tag(buf, 1, EGG_BUFFER_UINT64);
	egg_buffer_write_uint64(buf, t);

	/*
	 * Desired sample resolution.  This allows us to save considerable
	 * width in the relative-timestamp per sample.
	 */
	egg_buffer_write_tag(buf, 2, EGG_BUFFER_ENUM);
	egg_buffer_write_uint(buf, pka_manifest_get_resolution(manifest));

	/*
	 * Source index offset within the channel.
	 */
	egg_buffer_write_tag(buf, 3, EGG_BUFFER_UINT);
	egg_buffer_write_uint(buf, pka_manifest_get_source_id(manifest));

	/*
	 * Create a new buffer for the repeated data series.
	 */
	ebuf = egg_buffer_new();

	/*
	 * Write the manifest data description.  This is a set of embedded
	 * messages within the message.
	 */
	rows = pka_manifest_get_n_rows(manifest);
	for (i = 1; i <= rows; i++) {
		mbuf = egg_buffer_new();

		/*
		 * Write the row identifier.
		 */
		egg_buffer_write_tag(mbuf, 1, EGG_BUFFER_UINT);
		egg_buffer_write_uint(mbuf, i);

		/*
		 * Write the row type.
		 */
		egg_buffer_write_tag(mbuf, 2, EGG_BUFFER_ENUM);
		egg_buffer_write_uint(mbuf, pka_manifest_get_row_type(manifest, i));

		/*
		 * Write the row name.
		 */
		egg_buffer_write_tag(mbuf, 3, EGG_BUFFER_STRING);
		egg_buffer_write_string(mbuf, pka_manifest_get_row_name(manifest, i));

		/*
		 * Embed the message as a data blob.
		 */
		egg_buffer_get_buffer(mbuf, &tbuf, &tlen);
		egg_buffer_write_data(ebuf, tbuf, tlen);

		egg_buffer_unref(mbuf);
	}

	/*
	 * Add the repeated message length and data.
	 */
	egg_buffer_get_buffer(ebuf, &tbuf, &tlen);
	egg_buffer_write_tag(buf, 4, EGG_BUFFER_REPEATED);
	egg_buffer_write_data(buf, tbuf, tlen);
	egg_buffer_unref(ebuf);

	/*
	 * Copy the buffer to the destination.
	 */
	egg_buffer_get_buffer(buf, &tbuf, &tlen);
	*data = g_malloc(tlen);
	*data_len = tlen;
	memcpy(*data, tbuf, tlen);

	egg_buffer_unref(buf);
	RETURN(TRUE);
}
Example #8
int main(int argc, char **argv)
{
	static const struct option options[] = {
		{ "depth", 1, NULL, 'd' },
		{ "help", 0, NULL, 'h' },
		{ "regenerate", 0, NULL, 'r' },
		{ "subdivisions", 1, NULL, 's' },
		{ "transform", 0, NULL, 't' },
		{ "version", 0, NULL, 'V' },
		{ NULL, 0, NULL, 0 },
	};
	struct framebuffer *display;
	struct framebuffer *source;
	struct pipeline *pipeline;
	unsigned long depth = 24;
	bool regenerate = false;
	float duration, texels;
	unsigned int frames;
	uint64_t start, end;
	struct timespec ts;
	struct gles *gles;
	int opt;

	while ((opt = getopt_long(argc, argv, "d:hrs:tV", options, NULL)) != -1) {
		switch (opt) {
		case 'd':
			depth = strtoul(optarg, NULL, 10);
			if (!depth) {
				fprintf(stderr, "invalid depth: %s\n", optarg);
				return 1;
			}
			break;

		case 'h':
			usage(stdout, argv[0]);
			return 0;

		case 'r':
			regenerate = true;
			break;

		case 's':
			subdivisions = strtoul(optarg, NULL, 10);
			break;

		case 't':
			transform = true;
			break;

		case 'V':
			printf("%s %s\n", argv[0], PACKAGE_VERSION);
			return 0;

		default:
			fprintf(stderr, "invalid option: '%c'\n", opt);
			return 1;
		}
	}

	if (optind >= argc) {
		usage(stderr, argv[0]);
		return 1;
	}

	gles = gles_new(depth, regenerate);
	if (!gles) {
		fprintf(stderr, "gles_new() failed\n");
		return 1;
	}

	display = display_framebuffer_new(gles->width, gles->height);
	if (!display) {
		fprintf(stderr, "display_framebuffer_new() failed\n");
		return 1;
	}

	source = framebuffer_new(gles->width, gles->height);
	if (!source) {
		fprintf(stderr, "failed to create framebuffer\n");
		return 1;
	}

	pipeline = create_pipeline(gles, argc - optind, &argv[optind],
				   regenerate, source);
	if (!pipeline) {
		fprintf(stderr, "failed to create pipeline\n");
		return 1;
	}

	texels = gles->width * gles->height * FRAME_COUNT;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	start = timespec_to_usec(&ts);

	for (frames = 0; frames < FRAME_COUNT; frames++)
		pipeline_render(pipeline);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	end = timespec_to_usec(&ts);

	pipeline_free(pipeline);
	framebuffer_free(source);
	display_framebuffer_free(display);
	gles_free(gles);

	duration = (end - start) / 1000000.0f;
	printf("Rendered %d frames in %fs\n", FRAME_COUNT, duration);
	printf("Average fps was %.02f\n", FRAME_COUNT / duration);
	printf("MTexels/s: %f\n", (texels / 1000000.0f) / duration);

	return 0;
}
Example #9
void* proc_main(void* arg){

    // acquire lock, read subframes and process
    gd_thread_data_t *tdata = (gd_thread_data_t *) arg;
    int id = tdata->ind;
    thread_common(pthread_self(), tdata);
    unsigned long abs_period_start = timespec_to_usec(&tdata->main_start);
    struct timespec t_offset;
    t_offset = usec_to_timespec(id*num_cores_bs*1000);
    tdata->main_start = timespec_add(&tdata->main_start, &t_offset);
    struct timespec proc_start, proc_end, t_next, t_deadline;
    gd_proc_timing_meta_t *timings;
    long duration_usec = (tdata->duration * 1e6);
    int nperiods = (int) floor(duration_usec /
            (double) timespec_to_usec(&tdata->period));

    // reduce nperiods a little to prevent trans from finishing before proc; ugly fix
    nperiods-=500;

    timings = (gd_proc_timing_meta_t*) malloc ( nperiods * sizeof(gd_proc_timing_meta_t));
    gd_proc_timing_meta_t *timing;
    int period = 0;
    int deadline_miss=0;

    long time_deadline, proc_actual_time, avail_time;
    struct timespec t_temp, t_now, t_temp1;
    log_notice("Starting proc thread %d nperiods %d %lu", id, nperiods, timespec_to_usec(&t_offset));

    int bs_id = (int)(id/num_cores_bs);
    int subframe_id =  id%(num_cores_bs);
    log_notice("checking subframe mutex %d", bs_id*num_cores_bs + subframe_id);

	//  1: Input: P subtasks, each subtask has tp proc. time
	//  2: Input: M cores, each core j has fcj > 0 of free time
	//  3: N <- P           (# of left subtasks, i.e. not offloaded)
	//  4: maxoff <- 0      (max # of offloaded subtasks per core)
	//  5: while N > 1 and j <= M do
	//  6:   limoff <- floor(fcj / tp)   (# of subtasks that can be offloaded)
	//  7:   noff <- min(N - maxoff, limoff, floor(N / 2))
	//  8:   maxoff <- max(noff, maxoff)
	//  9:   offload noff subtasks to the j-th core
	// 10:   N <- N - noff
	// 11:   j <- j + 1
	// 12: end while
	// (a standalone sketch of this allocation loop follows after this function)

    struct timespec each =  usec_to_timespec(1000);

    while(running && (period < nperiods)){



        // wait for the transport thread to wake me
		// printf("what's value of subframe_avail[id]:%d\n",subframe_avail[id]);
        pthread_mutex_lock(&subframe_mutex[id]);
        while (!(subframe_avail[id] == num_ants)){
                    pthread_cond_wait(&subframe_cond[id], &subframe_mutex[id]);
        }
		subframe_avail[id]=0;
        pthread_mutex_unlock(&subframe_mutex[id]);


        /****** do LTE processing *****/
        clock_gettime(CLOCK_MONOTONIC, &proc_start);
        t_next = timespec_add(&proc_start, &tdata->period);
        // deadline for this period, used for the timing log below
        t_deadline = timespec_add(&proc_start, &tdata->deadline);
        clock_gettime(CLOCK_MONOTONIC, &t_now);

		//check for migration opportunity
		//iterating over the processing threads and studying which ones are available for migration
		int nOffload = 0;
		int max_off = 0;
		int tasksRemain = 14;
        int cur_start_id =0;
        for (int cur = 0; cur<proc_nthreads;cur++) {

			pthread_mutex_lock(&state_mutex[cur]);
		 	avail_time = state[cur] - timespec_to_usec(&t_now);
		 	if (avail_time<0) {
		 		avail_time = 0;
		 	}
		 	int lim_off = floor(avail_time/sub_fft_time);
		 	int noff = MIN(tasksRemain-max_off,MIN(lim_off,floor(tasksRemain/2)));
		 	max_off= MAX(max_off,noff);
		 	tasksRemain = tasksRemain-noff;
		 	if (avail_time>0) {
		 	// printf("I am offloading things: noff:%d to core:%d, maxoff:%d, limoff:%d, remain:%d\n",noff,cur,max_off,lim_off,tasksRemain);
		 	// printf("timings: avail_time[%d]:%li, state[cur]:%li, now:%li\n", cur,avail_time,state[cur],timespec_to_usec(&t_now));
			}
			// //update state[cur] to be -1
			if (noff>0) {
				state[cur]=-1;
				migrate_avail[cur].count=noff;
                migrate_avail[cur].start_id = cur_start_id;
			}
            cur_start_id +=noff;
			pthread_mutex_unlock(&state_mutex[cur]);

		 }


		 // total number of subtasks handed off to other cores in the loop above
		 nOffload = cur_start_id;
		 if (nOffload == 0){
             task_fft();
         } else {
             for (int left_iter = cur_start_id; left_iter< 14;left_iter ++) {
		 		subtask_fft(left_iter);
		 	}

        }

        clock_gettime(CLOCK_MONOTONIC, &t_now);
        // check whether there is enough time left to decode; if not, skip this period
        if (timespec_to_usec(&t_next) < timespec_to_usec(&t_now) + 5*decode_time[mcs]){
            // not enough slack before the next period: skip decoding
        }else{
            task_decode();
        }
        clock_gettime(CLOCK_MONOTONIC, &proc_end);


        // there is time to receive migrated task
        clock_gettime(CLOCK_MONOTONIC, &t_now);
        long rem_time = timespec_to_usec(&t_next)  - timespec_to_usec(&t_now);
     //   printf("remtime[%d] is:%li\n",id, rem_time);

        if (rem_time > 50){

			state[id]=timespec_to_usec(&t_next);
			struct timespec t_before, t_after;
			clock_gettime(CLOCK_MONOTONIC, &t_before);

            // wait for received task or for the remaining time
            clock_gettime(CLOCK_MONOTONIC, &t_now);
			int rvd_task = 0;
            while( timespec_to_usec(&t_now) <=  timespec_to_usec(&t_next)-50 ) {
				if (migrate_avail[id].count > 0) {
					rvd_task = 1;
                    int i = 0;
                    for (i=migrate_avail[id].start_id; i < migrate_avail[id].start_id + migrate_avail[id].count; i++){
                        subtask_fft(i);
                    }
                    migrate_avail[id].count=0;
				}

				clock_gettime(CLOCK_MONOTONIC, &t_now);
				rem_time = timespec_to_usec(&t_next)  - timespec_to_usec(&t_now);
				if (rem_time>50) {
					state[id] = timespec_to_usec(&t_next);
				}

             }
			clock_gettime(CLOCK_MONOTONIC, &t_after);
			// printf("remtime[%d] is:%li, slept for:%li,rcvd task:%d\n",id, rem_time,timespec_to_usec(&t_after)-timespec_to_usec(&t_before),rvd_task);

        }
        state[id] = -1;


        // task_all();
        clock_gettime(CLOCK_MONOTONIC, &t_now);

        // if (timespec_lower(&t_now, &t_next)==1){
        //     clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t_next, NULL);
        // }

        //check if result is ready
        // clock_gettime(CLOCK_MONOTONIC, &proc_end);
        /*****************************/
        // log_notice("proc thread [%d] just finished its processing", id);

        timing = &timings[period];
        timing->ind = id;
        timing->period = period;
        timing->abs_period_time = timespec_to_usec(&t_next);
        timing->rel_period_time = timing->abs_period_time - abs_period_start;
        timing->abs_start_time = timespec_to_usec(&proc_start);
        timing->rel_start_time = timing->abs_start_time - abs_period_start;
        timing->abs_end_time = timespec_to_usec(&proc_end);
        timing->rel_end_time = timing->abs_end_time - abs_period_start;
        timing->abs_deadline = timespec_to_usec(&t_deadline);
        timing->rel_deadline = timing->abs_deadline - abs_period_start;
        timing->original_duration = 0;
        timing->actual_duration = timing->rel_end_time - timing->rel_start_time;
        timing->miss = (timing->rel_deadline - timing->rel_end_time >= 0) ? 0 : 1;
        period++;
    }

    log_notice("Writing to log ... proc thread %d", id);
    fprintf(tdata->log_handler, "#idx\t\tabs_period\t\tabs_deadline\t\tabs_start\t\tabs_end"
                   "\t\trel_period\t\trel_start\t\trel_end\t\tduration\t\tmiss\n");

    int i;
    for (i=0; i < nperiods; i++){
        proc_log_timing(tdata->log_handler, &timings[i]);
    }

    fclose(tdata->log_handler);
    log_notice("Exit proc thread %d",id);
    pthread_exit(NULL);
}
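
The migration logic in proc_main interleaves the offloading algorithm from the comment block with mutexes and per-core state. Stripped of that bookkeeping, the allocation loop (steps 5-12 of the pseudocode) can be sketched as below; plan_offload, free_usec, subtask_usec and n_subtasks are hypothetical stand-ins for the per-core free time, per-subtask cost and subtask count, not names from the source.

/*
 * Decide how many subtasks to offload to each core.
 * Returns the number of subtasks kept locally; offloaded[j] receives
 * the count assigned to core j.
 */
static int plan_offload(const long free_usec[], int ncores,
                        long subtask_usec, int n_subtasks, int offloaded[])
{
	int remaining = n_subtasks;   /* N      */
	int max_off = 0;              /* maxoff */
	int j;

	for (j = 0; j < ncores && remaining > 1; j++) {
		int lim_off = free_usec[j] > 0 ? (int)(free_usec[j] / subtask_usec) : 0;
		int noff = remaining - max_off;

		if (noff > lim_off)
			noff = lim_off;
		if (noff > remaining / 2)
			noff = remaining / 2;
		if (noff < 0)
			noff = 0;

		offloaded[j] = noff;
		if (noff > max_off)
			max_off = noff;
		remaining -= noff;
	}
	for (; j < ncores; j++)
		offloaded[j] = 0;
	return remaining;
}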
Example #10
void* trans_main(void* arg){

    gd_thread_data_t *tdata = (gd_thread_data_t *) arg;
    int id = tdata->ind;

    thread_common(pthread_self(), tdata);
    unsigned long abs_period_start = timespec_to_usec(&tdata->main_start);


    gd_timing_meta_t *timings;
    long duration_usec = (tdata->duration * 1e6);
    int nperiods = (int) ceil( duration_usec /
            (double) timespec_to_usec(&tdata->period));
    timings = (gd_timing_meta_t*) malloc ( nperiods * sizeof(gd_timing_meta_t));
    gd_timing_meta_t* timing;


    struct timespec t_next, t_deadline, trans_start, trans_end, t_temp, t_now;

    t_next = tdata->main_start;
    int period = 0;
    int bs_id  = ((int)(id/num_ants));
    int subframe_id;

    while(running && (period < nperiods)){


        subframe_id = period%(num_cores_bs);

        // get current deadline and next period
        t_deadline = timespec_add(&t_next, &tdata->deadline);
        t_next = timespec_add(&t_next, &tdata->period);

        clock_gettime(CLOCK_MONOTONIC, &trans_start);
        /******* Main transport ******/
        if (debug_trans==1) {
            // dummy busy loop used when the real transport path is disabled
            volatile int j, k = 0;
            for(j=0; j <60000; j++){k=k+1;}
        } else {
            gd_trans_read(tdata->conn_desc);
        }
        /******* Main transport ******/


        pthread_mutex_lock(&subframe_mutex[bs_id*num_cores_bs + subframe_id]);
        // subframe_avail[bs_id*num_cores_bs + subframe_id] = (subframe_avail[bs_id*num_cores_bs + subframe_id]+1)%(num_ants);
        subframe_avail[bs_id*num_cores_bs + subframe_id] ++;
		// printf("subframe_avail:%d %d\n",bs_id*num_cores_bs + subframe_id,subframe_avail[bs_id*num_cores_bs + subframe_id]);


        // hanging fix -- if trans misses a proc, reset the subframe available counter
        if (subframe_avail[bs_id*num_cores_bs + subframe_id] == (num_ants+1)) {
               subframe_avail[bs_id*num_cores_bs + subframe_id] = 1;
        }

        pthread_cond_signal(&subframe_cond[bs_id*num_cores_bs + subframe_id]);
        pthread_mutex_unlock(&subframe_mutex[bs_id*num_cores_bs + subframe_id]);


        clock_gettime(CLOCK_MONOTONIC, &trans_end);
        /*****************************/

        timing = &timings[period];
        timing->ind = id;
        timing->period = period;
        timing->abs_period_time = timespec_to_usec(&t_next);
        timing->rel_period_time = timing->abs_period_time - abs_period_start;

        timing->abs_start_time = timespec_to_usec(&trans_start);
        timing->rel_start_time = timing->abs_start_time - abs_period_start;
        timing->abs_end_time = timespec_to_usec(&trans_end);
        timing->rel_end_time = timing->abs_end_time - abs_period_start;
        timing->abs_deadline = timespec_to_usec(&t_deadline);
        timing->rel_deadline = timing->abs_deadline - abs_period_start;
        timing->actual_duration = timing->rel_end_time - timing->rel_start_time;
        timing->miss = (timing->rel_deadline - timing->rel_end_time >= 0) ? 0 : 1;

        if (timing->actual_duration > 1000){
            // log_critical("Transport overload. Thread[%d] Duration= %lu us. Reduce samples or increase threads",
                // tdata->ind, timing->actual_duration);
        }

        clock_gettime(CLOCK_MONOTONIC, &t_now);

        // check if deadline was missed
        if (timespec_lower(&t_now, &t_next)){
            // sleep for remaining time
            clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t_next, NULL);
        }else{
            printf("Transport %d is too slow\n", id);
        }

        period ++;

    }
    clock_gettime(CLOCK_MONOTONIC, &t_temp);
    log_notice("Trans thread [%d] ran for %f s", id, ((float) (timespec_to_usec(&t_temp)-abs_period_start))/1e6);


    fprintf(tdata->log_handler, "#idx\t\tabs_period\t\tabs_deadline\t\tabs_start\t\tabs_end"
                   "\t\trel_period\t\trel_start\t\trel_end\t\tduration\t\tmiss\n");


    int i;
    for (i=0; i < nperiods; i++){
        log_timing(tdata->log_handler, &timings[i]);
    }
    fclose(tdata->log_handler);
    log_notice("Exit trans thread %d", id);

    running = 0;
    for (i=0;i<proc_nthreads;i++) {
            pthread_mutex_lock(&subframe_mutex[i]);
            subframe_avail[i]=-1;
            pthread_cond_signal(&subframe_cond[i]);
            pthread_mutex_unlock(&subframe_mutex[i]);
    }

    pthread_exit(NULL);
}
Example #11
void *thread_body(void *arg) {
  
  int ret;
  int nperiods;
  struct sched_param param;
  timing_point_t *timings;
  pid_t tid;
  struct sched_attr attr;
  unsigned int flags = 0;
  struct timespec t, t_next;
  timing_point_t tmp_timing;
  timing_point_t *curr_timing;
  unsigned long t_start_usec;
  int i = 0;

  thread_data_t *data = (thread_data_t*) arg;

  /* set thread affinity */
  if (data->cpuset != NULL) {
    log_notice("[%d] setting cpu affinity to CPU(s) %s",
      data->ind, data->cpuset_str);
    ret = pthread_setaffinity_np(pthread_self(),
      sizeof(cpu_set_t), data->cpuset);
    if (ret < 0) {
      errno = ret;
      perror("pthread_setaffinity_np");
      exit(EXIT_FAILURE);
    }
  }

  /* set scheduling policy and print pretty info on stdout */
  log_notice("[%d] Using %s policy:", data->ind, data->sched_policy_descr);
  switch (data->sched_policy) {
    case rr:
    case fifo:
      fprintf(data->log_handler, "# Policy : %s\n",
        (data->sched_policy == rr ? "SCHED_RR" : "SCHED_FIFO"));
      param.sched_priority = data->sched_prio;
      ret = pthread_setschedparam(pthread_self(),
        data->sched_policy, &param);
      if (ret != 0) {
        errno = ret; 
        perror("pthread_setschedparam"); 
        exit(EXIT_FAILURE);
      }

      log_notice("[%d] starting thread with period: %" PRIu64 
        ", exec: %" PRIu64 ", deadline: %" PRIu64 ", priority: %d",
        data->ind,
        timespec_to_usec(&data->period), 
        timespec_to_usec(&data->min_et),
        timespec_to_usec(&data->deadline),
        data->sched_prio
      );
      break;
    case other:
      fprintf(data->log_handler, "# Policy : SCHED_OTHER\n");
      log_notice("[%d] starting thread with period: %" PRIu64 
           ", exec: %" PRIu64 ", deadline: %" PRIu64 "", data->ind,
        timespec_to_usec(&data->period), 
        timespec_to_usec(&data->min_et),
        timespec_to_usec(&data->deadline)
      );
      data->lock_pages = 0; /* forced off for SCHED_OTHER */
      break;
    case deadline:
      fprintf(data->log_handler, "# Policy : SCHED_DEADLINE\n");
      tid = gettid();
      attr.size = sizeof(attr);
      attr.sched_flags = data->sched_flags;
      if (data->sched_flags & SCHED_FLAG_SOFT_RSV)
        fprintf(data->log_handler, "# Type : SOFT_RSV\n");
      else
        fprintf(data->log_handler, "# Type : HARD_RSV\n");
      attr.sched_policy = SCHED_DEADLINE;
      attr.sched_priority = 0;
      attr.sched_runtime = timespec_to_nsec(&data->max_et) +
        (timespec_to_nsec(&data->max_et) /100) * BUDGET_OVERP;
      attr.sched_deadline = timespec_to_nsec(&data->period);
      attr.sched_period = timespec_to_nsec(&data->period);  
      break;
    default:
      log_error("Unknown scheduling policy %d",
        data->sched_policy);
      exit(EXIT_FAILURE);
  }

  if (data->lock_pages == 1) {
    log_notice("[%d] Locking pages in memory", data->ind);
    ret = mlockall(MCL_CURRENT | MCL_FUTURE);
    if (ret < 0) {
      errno = ret;
      perror("mlockall");
      exit(EXIT_FAILURE);
    }
  }

  /* if we know the duration we can calculate how many periods we will
   * do at most, and the log to memory, instead of logging to file.
   */
  timings = NULL;
  if (data->duration > 0) {
    /* duration is in seconds, the period is in microseconds */
    nperiods = (int) ceil( (data->duration * 1e6) / 
              (double) timespec_to_usec(&data->period));
    timings = malloc ( nperiods * sizeof(timing_point_t));
  }

  fprintf(data->log_handler, "#idx\tperiod\tmin_et\tmax_et\trel_st\tstart"
           "\t\tend\t\tdeadline\tdur.\tslack\tresp_t"
           "\tBudget\tUsed Budget\n");

  if (data->ind == 0) {
    clock_gettime(CLOCK_MONOTONIC, &t_zero);
#ifdef TRACE_SETS_ZERO_TIME
    if (opts.ftrace)
      log_ftrace(ft_data.marker_fd,
           "[%d] sets zero time",
           data->ind);
#endif
  }

  pthread_barrier_wait(&threads_barrier);

  /*
   * Set the task to SCHED_DEADLINE as far as possible touching its
   * budget as little as possible for the first iteration.
   */
  if (data->sched_policy == SCHED_DEADLINE) {
    ret = sched_setattr(tid, &attr, flags);
    if (ret != 0) {
      log_critical("[%d] sched_setattr "
        "returned %d", data->ind, ret);
      errno = ret;
      perror("sched_setattr");
      exit(EXIT_FAILURE);
    }
  }

  t = t_zero;
  t_next = msec_to_timespec(1000LL);
  t_next = timespec_add(&t, &t_next);
  clock_nanosleep(CLOCK_MONOTONIC, 
    TIMER_ABSTIME, 
    &t_next,
    NULL);

  data->deadline = timespec_add(&t_next, &data->deadline);

  while (continue_running) {
    int pn;
    struct timespec t_start, t_end, t_diff, t_slack, t_resp;

    /* Thread numeration reported starts with 1 */
#ifdef TRACE_BEGINS_LOOP
    if (opts.ftrace)
      log_ftrace(ft_data.marker_fd, "[%d] begins job %d", data->ind+1, i);
#endif
    clock_gettime(CLOCK_MONOTONIC, &t_start);
    if (data->nphases == 0) {
      compute(data->ind, &data->min_et, NULL, 0);
    } else {
      for (pn = 0; pn < data->nphases; pn++) {
        log_notice("[%d] phase %d start", data->ind+1, pn);
        exec_phase(data, pn);
        log_notice("[%d] phase %d end", data->ind+1, pn);
      }
    }
    clock_gettime(CLOCK_MONOTONIC, &t_end);
    
    t_diff = timespec_sub(&t_end, &t_start);
    t_slack = timespec_sub(&data->deadline, &t_end);
    t_resp = timespec_sub(&t_end, &t_next);
    t_start_usec = timespec_to_usec(&t_start); 

    if (i < nperiods) {
      if (timings)
        curr_timing = &timings[i];
      else
        curr_timing = &tmp_timing;

      curr_timing->ind = data->ind;
      curr_timing->period = timespec_to_usec(&data->period);
      curr_timing->min_et = timespec_to_usec(&data->min_et);
      curr_timing->max_et = timespec_to_usec(&data->max_et);
      curr_timing->rel_start_time = 
        t_start_usec - timespec_to_usec(&data->main_app_start);
      curr_timing->abs_start_time = t_start_usec;
      curr_timing->end_time = timespec_to_usec(&t_end);
      curr_timing->deadline = timespec_to_usec(&data->deadline);
      curr_timing->duration = timespec_to_usec(&t_diff);
      curr_timing->slack =  timespec_to_lusec(&t_slack);
      curr_timing->resp_time =  timespec_to_usec(&t_resp);
    }
    if (!timings)
      log_timing(data->log_handler, curr_timing);

    t_next = timespec_add(&t_next, &data->period);
    data->deadline = timespec_add(&data->deadline, &data->period);
#ifdef TRACE_END_LOOP
    if (opts.ftrace)
      log_ftrace(ft_data.marker_fd, "[%d] end loop %d", data->ind, i);
#endif
    if (curr_timing->slack < 0)
      log_notice("[%d] DEADLINE MISS !!!", data->ind+1);
    i++;
  }

  free(timings);
  pthread_exit(NULL);
}
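
The SCHED_DEADLINE branch above fills a struct sched_attr and applies it with sched_setattr(), which glibc does not expose as a library call; code like this normally goes through syscall(2). A minimal sketch of the wrapper and the gettid() helper the example assumes (only needed on older glibc; the struct layout follows the sched_setattr(2) man page):

#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>
#include <linux/types.h>

struct sched_attr {
	__u32 size;              /* size of this structure */
	__u32 sched_policy;      /* e.g. SCHED_DEADLINE */
	__u64 sched_flags;
	__s32 sched_nice;        /* for SCHED_OTHER, SCHED_BATCH */
	__u32 sched_priority;    /* for SCHED_FIFO, SCHED_RR */
	__u64 sched_runtime;     /* SCHED_DEADLINE parameters, in nanoseconds */
	__u64 sched_deadline;
	__u64 sched_period;
};

static int sched_setattr(pid_t pid, const struct sched_attr *attr,
                         unsigned int flags)
{
	return syscall(SYS_sched_setattr, pid, attr, flags);
}

static pid_t gettid(void)
{
	return syscall(SYS_gettid);
}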
Example #12
void *t_1(void *thread_params) {
	struct sched_param2 dl_params;
	struct timespec t_next, t_period, t_start, t_stop, ran_for,
			t_now, t_crit, t_exec;
	long tid = gettid();
	int retval, i;
	cpu_set_t mask;
	__u64 crit, run1, runtime, deadline, period;

	/*
	 * t_1 should go in budget overflow while in critical section
	 */
	run1 = 8U * NSEC_PER_MSEC;
	crit = 12U * NSEC_PER_MSEC;
	runtime =  run1 + crit + (8U * NSEC_PER_MSEC);
	deadline = 40U * NSEC_PER_MSEC;
	period = deadline;
	t_period = nsec_to_timespec(&period);
	t_crit = nsec_to_timespec(&crit);

	signal(SIGHUP, sighandler);
	signal(SIGINT, sighandler);
	signal(SIGQUIT, sighandler);

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	retval = sched_setaffinity(0, sizeof(mask), &mask);
	if (retval) {
		fprintf(stderr, "WARNING: could not set task affinity\n");
		exit(-1);
	}

	memset(&dl_params, 0, sizeof(dl_params));
	dl_params.sched_priority = 0;
	dl_params.sched_runtime = runtime;
	dl_params.sched_deadline = deadline;
	dl_params.sched_period = period;
	ftrace_write(marker_fd, "[thread %ld (t_1)]: setting rt=%llums dl=%llums\n", tid,
	       runtime/NSEC_PER_MSEC,
	       deadline/NSEC_PER_MSEC);
	retval = sched_setscheduler2(0, SCHED_DEADLINE, &dl_params);
	if (retval) {
		fprintf(stderr, "WARNING: could not set SCHED_DEADLINE"
				" policy!\n");
		exit(-1);
	}

	clock_gettime(CLOCK_MONOTONIC, &t_next);
	for (i = 0; i < NRUN; i++) {
		ftrace_write(marker_fd, "[t_1] run starts\n");
		clock_gettime(CLOCK_MONOTONIC, &t_start);
		ftrace_write(marker_fd, "[t_1] exec for %lluns\n", run1);
		busywait(run1);
		ftrace_write(marker_fd, "[t_1] locks mutex\n");
		pthread_mutex_lock(&my_mutex);
		ftrace_write(marker_fd, "[t_1] exec for %lluns\n", crit);
		busywait(crit);
		ftrace_write(marker_fd, "[t_1] unlocks mutex\n");
		pthread_mutex_unlock(&my_mutex);
		clock_gettime(CLOCK_MONOTONIC, &t_stop);
		t_next = timespec_add(&t_next, &t_period);
		ran_for = timespec_sub(&t_stop, &t_start);
		printf("[thread %ld]: run %d for %lluus\n",
			tid,
			i,
			timespec_to_usec(&ran_for));
		ftrace_write(marker_fd, "[t_1] run ends\n");
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &t_next, NULL);
	}

	retval = sched_setscheduler2(0, SCHED_OTHER, &dl_params);
	if (retval) {
		fprintf(stderr, "WARNING: could not set SCHED_OTHER "
				"policy!\n");
		exit(-1);
	}
	return NULL;
}
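
Example #12 also relies on nsec_to_timespec and a busywait() that spins for a given number of nanoseconds; neither is shown in the source. A plausible sketch, assuming busywait polls CLOCK_MONOTONIC until the requested time has elapsed:

#include <time.h>
#include <linux/types.h>

/* split a nanosecond count into a timespec */
static struct timespec nsec_to_timespec(__u64 *nsec)
{
	struct timespec ts;

	ts.tv_sec = *nsec / 1000000000ULL;
	ts.tv_nsec = *nsec % 1000000000ULL;
	return ts;
}

/* spin until roughly nsec nanoseconds have passed on CLOCK_MONOTONIC */
static void busywait(__u64 nsec)
{
	struct timespec start, now;
	long long elapsed;

	clock_gettime(CLOCK_MONOTONIC, &start);
	do {
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed = (long long)(now.tv_sec - start.tv_sec) * 1000000000LL
			+ (now.tv_nsec - start.tv_nsec);
	} while (elapsed < (long long) nsec);
}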