/*
 * Configure every pin on ports B and D as an output, then spin
 * forever in busyloop().  The return statement is unreachable in
 * normal operation; the non-zero value is kept for compatibility.
 */
int main(void)
{
    DDRB = 0xff;   /* all PORTB pins -> output */
    DDRD = 0xff;   /* all PORTD pins -> output */

    busyloop();    /* never returns under normal operation */

    return 1;
}
int main(int argc, char *argv[]){ int i; int rd[NUM_TASKS]; int ret; struct cpu_reserve_attr cpu_attr; int pid[NUM_TASKS] = {0,}; int cpuid; long long C, T, D; char R[MAX_RESOURCE_SET_NAME_LEN]; int mid, key = 1; unsigned long long tm, start_time; memset(&cpu_attr, 0, sizeof(cpu_attr)); if (rk_get_start_of_next_vcpu_period(&tm) == RK_ERROR) { printf("Error: rk_get_start_of_next_vcpu_period\n"); return -1; } // base start time: next vcpu start period + 3sec tm += 3 * NANOSEC_LL; // create three resource sets // - i=0(task lo, vcpu hi): Core 0, offset=0, C=4(1.0, 2.0, 1.0), T=1000 // - i=1(task me, vcpu me): Core 1, offset=1, C=2(1.5, 1.0, 1.0), T=1000 // - i=2(task hi, vcpu lo): Core 2, offset=1, C=4(1.0, 2.0, 1.0), T=1000 for (i = 0; i < NUM_TASKS; i++) { C = 100 * MILLISEC_TO_NANOSEC; T = 1000 * MILLISEC_TO_NANOSEC; D = T - 10 * i; if (i == 0) { start_time = tm; cpuid = 0; } if (i == 1) { start_time = tm + 1 * MILLISEC_TO_NANOSEC; cpuid = 1; } if (i == 2) { start_time = tm + 1 * MILLISEC_TO_NANOSEC; cpuid = 2; } sprintf(R, "RSET_%d", i); cpu_attr.cpunum = cpuid; cpu_attr.start_time.tv_sec = start_time / NANOSEC_LL; cpu_attr.start_time.tv_nsec = start_time % NANOSEC_LL; cpu_attr.compute_time.tv_sec=(C/NANOSEC_LL); cpu_attr.period.tv_sec=(T/NANOSEC_LL); cpu_attr.deadline.tv_sec=(D/NANOSEC_LL); cpu_attr.blocking_time.tv_sec=0; cpu_attr.compute_time.tv_nsec=(C%NANOSEC_LL); cpu_attr.period.tv_nsec=(T%NANOSEC_LL); cpu_attr.deadline.tv_nsec=(D%NANOSEC_LL); cpu_attr.blocking_time.tv_nsec=0; cpu_attr.reserve_mode.sch_mode = RSV_HARD; cpu_attr.reserve_mode.enf_mode = RSV_HARD; cpu_attr.reserve_mode.rep_mode = RSV_SOFT; rd[i] = rk_resource_set_create(R, 1, 1, CPURSV_NO_MIGRATION); rk_cpu_reserve_create(rd[i], &cpu_attr); } // create three tasks for (i = 0; i < NUM_TASKS; i++) { ret = fork(); if (ret != 0) { pid[i] = ret; continue; } rk_resource_set_attach_process(rd[i], getpid(), NULL); // wait for the cpursv to be activated rt_wait_for_next_period(); // now, cpursv is 
activated -> rt priority available mid = rk_vmpcp_intervm_mutex_open(key, MTX_CREATE); if (mid == RK_ERROR) { printf("Error: rk_vmpcp_mutex_open\n"); return -1; } rk_event_log_set(getpid()); fprintf(stderr, "%d\n", getpid()); while (1) { struct sched_param par; rt_wait_for_next_period(); if (i == 0) { busyloop(1000); if (rk_vmpcp_intervm_mutex_lock(mid) < 0) return -1; busyloop(2000); if (rk_vmpcp_intervm_mutex_unlock(mid) < 0) return -1; busyloop(1000); } if (i == 1) { busyloop(1500); if (rk_vmpcp_intervm_mutex_lock(mid) < 0) return -1; busyloop(1000); if (rk_vmpcp_intervm_mutex_unlock(mid) < 0) return -1; busyloop(1000); } if (i == 2) { busyloop(1000); if (rk_vmpcp_intervm_mutex_lock(mid) < 0) return -1; busyloop(2000); if (rk_vmpcp_intervm_mutex_unlock(mid) < 0) return -1; busyloop(1000); } } } getchar(); kill(0, 9); return 0; }
int main(int argc, char **argv) { struct sigaction act; uint64_t *val; size_t sz, pgsz; int ret, i; setlocale(LC_ALL, ""); ret = pfm_initialize(); if (ret != PFM_SUCCESS) errx(1, "Cannot initialize library: %s", pfm_strerror(ret)); pgsz = sysconf(_SC_PAGESIZE); /* * Install the signal handler (SIGIO) * need SA_SIGINFO because we need the fd * in the signal handler */ memset(&act, 0, sizeof(act)); act.sa_sigaction = sigio_handler; act.sa_flags = SA_SIGINFO; sigaction (SIGIO, &act, 0); /* * allocates fd for us */ ret = perf_setup_list_events("cycles," "instructions", &fds, &num_fds); if (ret || (num_fds == 0)) exit(1); fds[0].fd = -1; for(i=0; i < num_fds; i++) { /* want a notification for every each added to the buffer */ fds[i].hw.disabled = !i; if (!i) { fds[i].hw.wakeup_events = 1; fds[i].hw.sample_type = PERF_SAMPLE_IP|PERF_SAMPLE_READ|PERF_SAMPLE_PERIOD; fds[i].hw.sample_period = SMPL_PERIOD; /* read() returns event identification for signal handler */ fds[i].hw.read_format = PERF_FORMAT_GROUP|PERF_FORMAT_ID|PERF_FORMAT_SCALE; } fds[i].fd = perf_event_open(&fds[i].hw, 0, -1, fds[0].fd, 0); if (fds[i].fd == -1) err(1, "cannot attach event %s", fds[i].name); } sz = (3+2*num_fds)*sizeof(uint64_t); val = malloc(sz); if (!val) err(1, "cannot allocated memory"); /* * On overflow, the non lead events are stored in the sample. * However we need some key to figure the order in which they * were laid out in the buffer. The file descriptor does not * work for this. Instead, we extract a unique ID for each event. * That id will be part of the sample for each event value. * Therefore we will be able to match value to events * * PERF_FORMAT_ID: returns unique 64-bit identifier in addition * to event value. 
*/ ret = read(fds[0].fd, val, sz); if (ret == -1) err(1, "cannot read id %zu", sizeof(val)); /* * we are using PERF_FORMAT_GROUP, therefore the structure * of val is as follows: * * { u64 nr; * { u64 time_enabled; } && PERF_FORMAT_ENABLED * { u64 time_running; } && PERF_FORMAT_RUNNING * { u64 value; * { u64 id; } && PERF_FORMAT_ID * } cntr[nr]; * We are skipping the first 3 values (nr, time_enabled, time_running) * and then for each event we get a pair of values. */ for(i=0; i < num_fds; i++) { fds[i].id = val[2*i+1+3]; printf("%"PRIu64" %s\n", fds[i].id, fds[i].name); } fds[0].buf = mmap(NULL, (buffer_pages+1)*pgsz, PROT_READ|PROT_WRITE, MAP_SHARED, fds[0].fd, 0); if (fds[0].buf == MAP_FAILED) err(1, "cannot mmap buffer"); fds[0].pgmsk = (buffer_pages * pgsz) - 1; /* * setup asynchronous notification on the file descriptor */ ret = fcntl(fds[0].fd, F_SETFL, fcntl(fds[0].fd, F_GETFL, 0) | O_ASYNC); if (ret == -1) err(1, "cannot set ASYNC"); /* * necessary if we want to get the file descriptor for * which the SIGIO is sent in siginfo->si_fd. * SA_SIGINFO in itself is not enough */ ret = fcntl(fds[0].fd, F_SETSIG, SIGIO); if (ret == -1) err(1, "cannot setsig"); /* * get ownership of the descriptor */ ret = fcntl(fds[0].fd, F_SETOWN, getpid()); if (ret == -1) err(1, "cannot setown"); /* * enable the group for one period */ ret = ioctl(fds[0].fd, PERF_EVENT_IOC_REFRESH , 1); if (ret == -1) err(1, "cannot refresh"); busyloop(); ret = ioctl(fds[0].fd, PERF_EVENT_IOC_DISABLE, 1); if (ret == -1) err(1, "cannot disable"); /* * destroy our session */ for(i=0; i < num_fds; i++) close(fds[i].fd); perf_free_fds(fds, num_fds); free(val); /* free libpfm resources cleanly */ pfm_terminate(); return 0; }
int main(int argc, char *argv[]) { struct rusage usage; unsigned long ulast, udelta, slast, sdelta; int i, lc; char *msg; char msg_string[BUFSIZ]; msg = parse_opts(argc, argv, child_options, fusage); if (msg != NULL) tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg); #if (__powerpc__) || (__powerpc64__) || (__s390__) || (__s390x__) tst_brkm(TCONF, NULL, "This test is not designed for current system"); #endif setup(); if (opt_factor) factor_nr = SAFE_STRTOL(cleanup, factor_str, 0, FACTOR_MAX); tst_resm(TINFO, "Using %ld as multiply factor for max [us]time " "increment (1000+%ldus)!", factor_nr, BIAS_MAX * factor_nr); for (lc = 0; TEST_LOOPING(lc); lc++) { tst_count = 0; i = 0; SAFE_GETRUSAGE(cleanup, RUSAGE_THREAD, &usage); tst_resm(TINFO, "utime:%12luus; stime:%12luus", usage.ru_utime.tv_usec, usage.ru_stime.tv_usec); ulast = usage.ru_utime.tv_usec; slast = usage.ru_stime.tv_usec; while (i < RECORD_MAX) { SAFE_GETRUSAGE(cleanup, RUSAGE_THREAD, &usage); udelta = usage.ru_utime.tv_usec - ulast; sdelta = usage.ru_stime.tv_usec - slast; if (udelta > 0 || sdelta > 0) { i++; tst_resm(TINFO, "utime:%12luus; stime:%12luus", usage.ru_utime.tv_usec, usage.ru_stime.tv_usec); if (udelta > 1000 + (BIAS_MAX * factor_nr)) { sprintf(msg_string, "utime increased > %ldus:", 1000 + BIAS_MAX * factor_nr); tst_brkm(TFAIL, cleanup, msg_string, " delta = %luus", udelta); } if (sdelta > 1000 + (BIAS_MAX * factor_nr)) { sprintf(msg_string, "stime increased > %ldus:", 1000 + BIAS_MAX * factor_nr); tst_brkm(TFAIL, cleanup, msg_string, " delta = %luus", sdelta); } } ulast = usage.ru_utime.tv_usec; slast = usage.ru_stime.tv_usec; busyloop(100000); } } tst_resm(TPASS, "Test Passed"); cleanup(); tst_exit(); }
int main(int argc, char **argv) { pfarg_context_t ctx[1]; pfmlib_input_param_t inp; pfmlib_output_param_t outp; pfarg_reg_t pc[NUM_PMCS]; pfarg_load_t load_args; pfmlib_options_t pfmlib_options; struct sigaction act; size_t len; unsigned int i, num_counters; int ret; /* * Initialize pfm library (required before we can use it) */ if (pfm_initialize() != PFMLIB_SUCCESS) { printf("Can't initialize library\n"); exit(1); } /* * Install the signal handler (SIGIO) */ memset(&act, 0, sizeof(act)); act.sa_handler = (sig_t)sigio_handler; sigaction (SIGIO, &act, 0); /* * pass options to library (optional) */ memset(&pfmlib_options, 0, sizeof(pfmlib_options)); pfmlib_options.pfm_debug = 0; /* set to 1 for debug */ pfm_set_options(&pfmlib_options); memset(pc, 0, sizeof(pc)); memset(ctx, 0, sizeof(ctx)); memset(&load_args, 0, sizeof(load_args)); memset(&inp,0, sizeof(inp)); memset(&outp,0, sizeof(outp)); pfm_get_num_counters(&num_counters); if (pfm_get_cycle_event(&inp.pfp_events[0]) != PFMLIB_SUCCESS) fatal_error("cannot find cycle event\n"); if (pfm_get_inst_retired_event(&inp.pfp_events[1]) != PFMLIB_SUCCESS) fatal_error("cannot find inst retired event\n"); i = 2; if (i > num_counters) { i = num_counters; printf("too many events provided (max=%d events), using first %d event(s)\n", num_counters, i); } /* * set the default privilege mode for all counters: * PFM_PLM3 : user level only */ inp.pfp_dfl_plm = PFM_PLM3; /* * how many counters we use */ inp.pfp_event_count = i; /* * how many counters we use */ if (i > 1) { inp.pfp_event_count = i; pfm_get_max_event_name_len(&len); event1_name = malloc(len+1); if (event1_name == NULL) fatal_error("cannot allocate event name\n"); pfm_get_full_event_name(&inp.pfp_events[1], event1_name, len+1); } /* * let the library figure out the values for the PMCS */ if ((ret=pfm_dispatch_events(&inp, NULL, &outp, NULL)) != PFMLIB_SUCCESS) { fatal_error("Cannot configure events: %s\n", pfm_strerror(ret)); } /* * when we know we are self-monitoring 
and we have only one context, then * when we get an overflow we know where it is coming from. Therefore we can * save the call to the kernel to extract the notification message. By default, * a message is generated. The queue of messages has a limited size, therefore * it is important to clear the queue by reading the message on overflow. Failure * to do so may result in a queue full and you will lose notification messages. * * With the PFM_FL_OVFL_NO_MSG, no message will be queue, but you will still get * the signal. Similarly, the PFM_MSG_END will be generated. */ ctx[0].ctx_flags = PFM_FL_OVFL_NO_MSG; /* * now create the context for self monitoring/per-task */ if (perfmonctl(0, PFM_CREATE_CONTEXT, ctx, 1) == -1 ) { if (errno == ENOSYS) { fatal_error("Your kernel does not have performance monitoring support!\n"); } fatal_error("Can't create PFM context %s\n", strerror(errno)); } ctx_fd = ctx->ctx_fd; /* * Now prepare the argument to initialize the PMDs and PMCS. * We use pfp_pmc_count to determine the number of registers to * setup. Note that this field can be >= pfp_event_count. 
*/ for (i=0; i < outp.pfp_pmc_count; i++) { pc[i].reg_num = outp.pfp_pmcs[i].reg_num; pc[i].reg_value = outp.pfp_pmcs[i].reg_value; } for (i=0; i < inp.pfp_event_count; i++) { pd[i].reg_num = pc[i].reg_num; } /* * We want to get notified when the counter used for our first * event overflows */ pc[0].reg_flags |= PFM_REGFL_OVFL_NOTIFY; pc[0].reg_reset_pmds[0] |= 1UL << outp.pfp_pmcs[1].reg_num; /* * we arm the first counter, such that it will overflow * after SMPL_PERIOD events have been observed */ pd[0].reg_value = (~0UL) - SMPL_PERIOD + 1; pd[0].reg_long_reset = (~0UL) - SMPL_PERIOD + 1; pd[0].reg_short_reset = (~0UL) - SMPL_PERIOD + 1; /* * Now program the registers * * We don't use the save variable to indicate the number of elements passed to * the kernel because, as we said earlier, pc may contain more elements than * the number of events we specified, i.e., contains more than counting monitors. */ if (perfmonctl(ctx_fd, PFM_WRITE_PMCS, pc, outp.pfp_pmc_count) == -1) { fatal_error("perfmonctl error PFM_WRITE_PMCS errno %d\n",errno); } if (perfmonctl(ctx_fd, PFM_WRITE_PMDS, pd, inp.pfp_event_count) == -1) { fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno); } /* * we want to monitor ourself */ load_args.load_pid = getpid(); if (perfmonctl(ctx_fd, PFM_LOAD_CONTEXT, &load_args, 1) == -1) { fatal_error("perfmonctl error PFM_WRITE_PMDS errno %d\n",errno); } /* * setup asynchronous notification on the file descriptor */ ret = fcntl(ctx_fd, F_SETFL, fcntl(ctx_fd, F_GETFL, 0) | O_ASYNC); if (ret == -1) { fatal_error("cannot set ASYNC: %s\n", strerror(errno)); } /* * get ownership of the descriptor */ ret = fcntl(ctx_fd, F_SETOWN, getpid()); if (ret == -1) { fatal_error("cannot setown: %s\n", strerror(errno)); } /* * Let's roll now */ pfm_self_start(ctx_fd); busyloop(); pfm_self_stop(ctx_fd); /* * free our context */ close(ctx_fd); return 0; }