/* Parent side of the fd-passing test: open a disabled instruction
 * counter, send its file descriptor to the child over a UNIX-domain
 * socket, then generate work for the child to measure. */
void parent(int sock) {

	ssize_t bytes_sent;
	int event_fd,loop;
	struct perf_event_attr attr;

	/* Userspace-only hardware instruction counter, created disabled */
	/* so the receiving process controls when counting starts. */
	memset(&attr,0,sizeof(struct perf_event_attr));
	attr.type=PERF_TYPE_HARDWARE;
	attr.size=sizeof(struct perf_event_attr);
	attr.config=PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled=1;
	attr.exclude_kernel=1;
	attr.exclude_hv=1;

	arch_adjust_domain(&attr,quiet);

	event_fd=perf_event_open(&attr,0,-1,-1,0);
	if (event_fd<0) {
		fprintf(stderr,"Error opening leader %llx %s\n",attr.config,strerror(errno));
		test_fail(test_string);
	}

	/* Hand the perf fd across the socket (payload "1" is a dummy byte) */
	bytes_sent = sock_fd_write(sock, "1", 1, event_fd);
	if (!quiet) printf ("wrote fd %d (size %d)\n", event_fd, (int)bytes_sent);

	/* Give the child time to pick the fd up, then create work to count */
	sleep(1);
	for(loop=0;loop<20;loop++) instructions_million();
}
/* Measures per-iteration read() overhead of a perf_event instruction
 * counter: runs NUM_RUNS million-instruction loops, reading the counter
 * after each, and reports average/max/min of the per-run deltas.
 * Deltas are stored in the file-scope results[] array. */
int main(int argc, char **argv) {

	int fd;
	struct perf_event_attr pe;
	int i;
	int result;
	int read_result;
	long long counts[1],prev=0;
	long long total=0,average,max=0,min=0x7ffffffffffffffULL;

	/* Userspace-only hardware instruction counter */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_HARDWARE;
	pe.size=sizeof(struct perf_event_attr);
	pe.config=PERF_COUNT_HW_INSTRUCTIONS;
	pe.disabled=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;

	fd=perf_event_open(&pe,0,-1,-1,0);
	if (fd<0) {
		fprintf(stderr,"Error opening leader %llx %s\n",pe.config,strerror(errno));
		exit(1);
	}

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE,0);

	for(i=0;i<NUM_RUNS;i++) {
		result=instructions_million();
		/* BUGFIX: the read() return value was previously stored but
		 * never checked (and clobbered the instructions_million()
		 * result); a short read would have left counts[0] stale. */
		read_result=read(fd,&counts,sizeof(long long));
		if (read_result!=sizeof(long long)) {
			fprintf(stderr,"Error reading counter: %d\n",read_result);
			exit(1);
		}
		/* Store the per-iteration delta, not the running total */
		results[i]=counts[0]-prev;
		prev=counts[0];
	}

	ioctl(fd, PERF_EVENT_IOC_DISABLE,0);
	close(fd);

	for(i=0;i<NUM_RUNS;i++) {
		total+=results[i];
		if (results[i]>max) max=results[i];
		if (results[i]<min) min=results[i];
	}

	average=total/NUM_RUNS;

	printf("Average=%lld max=%lld min=%lld\n",average,max,min);

	(void) result;

	return 0;
}
/* Run the million-instruction workload 500 times; returns the status
 * of the final run (e.g. CODE_UNIMPLEMENTED if unsupported). */
int test_routine(void) {

	int iteration=0;
	int status=0;

	while (iteration<500) {
		status=instructions_million();
		iteration++;
	}

	return status;
}
int main(int argc, char **argv) { int retval; retval = PAPI_library_init(PAPI_VER_CURRENT); if (retval != PAPI_VER_CURRENT) { fprintf(stderr,"Error! PAPI_library_init %d\n", retval); } retval = PAPI_query_event(PAPI_TOT_INS); if (retval != PAPI_OK) { fprintf(stderr,"PAPI_TOT_INS not supported\n"); exit(1); } int i; int events[1],result; long long counts[1]; long long total=0,average,max=0,min=0x7ffffffffffffffULL; events[0]=PAPI_TOT_INS; PAPI_start_counters(events,1); for(i=0;i<NUM_RUNS;i++) { result=instructions_million(); PAPI_read_counters(counts,1); results[i]=counts[0]; } PAPI_stop_counters(counts,1); PAPI_shutdown(); for(i=0;i<NUM_RUNS;i++) { total+=results[i]; if (results[i]>max) max=results[i]; if (results[i]<min) min=results[i]; } average=total/NUM_RUNS; printf("Average=%lld max=%lld min=%lld\n",average,max,min); (void) result; return 0; }
/* Child side of the fd-passing test: repeatedly receive a perf_event
 * file descriptor over a UNIX-domain socket, measure one
 * million-instruction run with it, and print the count.
 * Loops until the socket is closed or errors (sock_fd_read <= 0). */
void child(int sock) {

	int fd;
	char buf[16];
	ssize_t size;
	long long count[1];
	int result,read_result;

	/* Give the parent time to open the event and send the fd */
	sleep(1);

	for (;;) {
		/* read fd from socket */
		size = sock_fd_read(sock, buf, sizeof(buf), &fd);
		if (size <= 0) break;
		if (!quiet) printf ("read fd of %d, size %d\n", fd, (int)size);

		if (fd != -1) {
			/* Run one measurement with the inherited counter fd */
			ioctl(fd, PERF_EVENT_IOC_RESET, 0);
			ioctl(fd, PERF_EVENT_IOC_ENABLE,0);
			result=instructions_million();
			ioctl(fd, PERF_EVENT_IOC_DISABLE,0);

			/* A full 8-byte counter value must come back */
			read_result=read(fd,&count,sizeof(long long));
			if (read_result!=sizeof(long long)) {
				fprintf(stderr,"\tImproper return from read: %d\n",read_result);
				test_fail(test_string);
			}

			if (result==CODE_UNIMPLEMENTED) {
				fprintf(stderr,"\tCode unimplemented\n");
				test_fail(test_string);
			}

			if (!quiet) {
				printf("Read %lld instructions\n",count[0]);
			}

			close(fd);
		}
	}
}
static int wait_for_attach_and_loop( int num ) { int i,result=0; // printf("BEFORE START\n"); kill( getpid(), SIGSTOP ); // printf("STARTING\n"); for(i=0;i<num;i++) { result=instructions_million(); } // sleep(5); if (result==CODE_UNIMPLEMENTED) printf("Warning, no million\n"); // printf("BEFORE STOP\n"); kill( getpid(), SIGSTOP ); // printf("EXITING\n"); return 0; }
/* Tests the PERF_EVENT_IOC_PERIOD ioctl: samples instructions with a
 * 10,000-period overflow signal; the signal handler (our_handler,
 * defined elsewhere, recording into overflow_counts[]/overflows and
 * ioctl_errors/ioctl_errno) presumably switches the period to 100,000
 * partway through — TODO confirm against the handler. Old kernels
 * applied the new period only after the next overflow ("old behavior"),
 * newer kernels apply it immediately ("new behavior"). */
int main(int argc, char** argv) {

	int fd[2],i;
	int quiet;
	int validation_errors=0;
	int old_behavior=1;
	long long diff;
	struct perf_event_attr pe;
	struct sigaction sa;

	quiet=test_quiet();

	if (!quiet) {
		printf("This tests the PERF_EVENT_IOC_PERIOD ioctl.\n\n");
	}

	/* Userspace-only sampled instruction counter, period 10k */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_HARDWARE;
	pe.config=PERF_COUNT_HW_INSTRUCTIONS;
	pe.disabled=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;
	pe.sample_period=10000;

	arch_adjust_domain(&pe, quiet);

	fd[0]=perf_event_open(&pe,0,-1,-1,0);
	if (fd[0]<0) {
		if (!quiet) fprintf(stderr,"Error opening\n");
		test_fail(test_string);
	}

	/* Set up overflow */
	/* Deliver overflow notifications as SIGRTMIN+2 to this process */
	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_sigaction = our_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction( SIGRTMIN+2, &sa, NULL) < 0) {
		printf("Error setting up signal handler\n");
	}
	fcntl(fd[0], F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
	fcntl(fd[0], F_SETSIG, SIGRTMIN+2);
	fcntl(fd[0], F_SETOWN,getpid());

	/* Arm the counter for one overflow; handler re-arms as needed */
	ioctl(fd[0], PERF_EVENT_IOC_REFRESH,1);

	instructions_million();

	ioctl(fd[0], PERF_EVENT_IOC_DISABLE,0);
	close(fd[0]);

	if (!quiet) {
		printf("Overflows:\n");
		for(i=0;i<overflows;i++) {
			printf("\t%d %lld\n",i,overflow_counts[i]);
		}
	}

	if (ioctl_errors) {
		if (!quiet) {
			if (ioctl_errno==EFAULT) {
				fprintf(stderr,"Known issue with kernels <2.6.36, PERF_IOC_PERIOD always fails due to kernel bug.\n");
			}
			else {
				fprintf(stderr,"Unknown failure with PERF_EVENT_IOC_PERIOD: %s\n",strerror(errno));
			}
		}
		test_fail(test_string);
	}

	/* validate results */
	/* should be 10k apart for 0,1,2,3,4 */
	for(i=0;i<4;i++) {
		diff=overflow_counts[i+1]-overflow_counts[i];
		/* allow +/-1000 slop around the 10,000 period */
		if ((diff>11000) || (diff<9000)) {
			if (!quiet) {
				fprintf(stderr,"Overflow %i-%i should have been 10,000, was %lld\n",i,i+1,diff);
			}
			validation_errors++;
		}
	}

	/* 4-5 should be 10k (old behavior) or 100k (new behavior) */
	/* (i is 4 here after the loop above, so the messages print 4-5) */
	diff=overflow_counts[5]-overflow_counts[4];
	if ((diff<11000) && (diff>9000)) {
		if (!quiet) {
			fprintf(stderr,"Overflow %i-%i near 10,000 (%lld), old behavior\n",i,i+1,diff);
		}
		old_behavior=1;
	}
	else if ((diff<101000) && (diff>99000)) {
		if (!quiet) {
			fprintf(stderr,"Overflow %i-%i near 100,000 (%lld), new behavior\n",i,i+1,diff);
		}
		old_behavior=0;
	}
	else {
		if (!quiet) {
			fprintf(stderr,"Overflow %i-%i %lld, unexpected\n",i,i+1,diff);
		}
		validation_errors++;
	}

	/* 5-6 and after should be 100k */
	for(i=5;i<overflows-1;i++) {
		diff=overflow_counts[i+1]-overflow_counts[i];
		if ((diff>101000) || (diff<99000)) {
			if (!quiet) {
				fprintf(stderr,"Overflow %i-%i should have been 100,000, was %lld\n",i,i+1,diff);
			}
			validation_errors++;
		}
	}

	if (validation_errors) {
		test_fail(test_string);
	}

	if (old_behavior) {
		test_yellow_old_behavior(test_string);
	}
	else {
		test_green_new_behavior(test_string);
	}

	/* FIXME: also check for case where we reset overflow on running counter? */

	return 0;
}
/* Tests PERF_SAMPLE_REGS_INTR: samples IP plus user- and
 * interrupt-time register state into an mmap ring buffer, with SIGIO
 * driving the handler (our_handler, elsewhere) that parses samples and
 * bumps count_total. Uses a precise (:pp) event; AMD needs cycles as a
 * system-wide per-cpu event for IBS, other vendors use instructions. */
int main(int argc, char **argv) {

	int ret;
	int fd;
	int mmap_pages=1+MMAP_DATA_SIZE;
	struct perf_event_attr pe;
	struct sigaction sa;
	char test_string[]="Testing PERF_SAMPLE_REGS_INTR...";

	quiet=test_quiet();

	if (!quiet) printf("This tests PERF_SAMPLE_REGS_INTR samples\n");

	/* SIGIO is raised on each ring-buffer wakeup */
	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_sigaction = our_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction( SIGIO, &sa, NULL) < 0) {
		fprintf(stderr,"Error setting up signal handler\n");
		exit(1);
	}

	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.size=sizeof(struct perf_event_attr);
	pe.sample_period=SAMPLE_FREQUENCY;
	pe.sample_type=PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER | PERF_SAMPLE_REGS_INTR;
	global_sample_type=pe.sample_type;

#if defined (__x86_64__)
	/* Bitfield saying which registers we want */
	pe.sample_regs_user=(1ULL<<PERF_REG_X86_64_MAX)-1;
	/* DS, ES, FS, and GS not valid on x86_64 */
	/* see perf_reg_validate() in arch/x86/kernel/perf_regs.c */
	pe.sample_regs_user&=~(1ULL<<PERF_REG_X86_DS);
	pe.sample_regs_user&=~(1ULL<<PERF_REG_X86_ES);
	pe.sample_regs_user&=~(1ULL<<PERF_REG_X86_FS);
	pe.sample_regs_user&=~(1ULL<<PERF_REG_X86_GS);
	pe.sample_regs_intr=pe.sample_regs_user;
	/* NOTE(review): disabled/pinned/wakeup_events are only set inside
	 * this x86_64 branch; other architectures leave them 0 — confirm
	 * whether that is intentional. */
	pe.read_format=0;
	pe.disabled=1;
	pe.pinned=1;
	pe.wakeup_events=1;
#elif defined(__i386__)
	pe.sample_regs_user=(1ULL<<PERF_REG_X86_32_MAX)-1;
	pe.sample_regs_intr=pe.sample_regs_user;
#elif defined(__arm__)
	pe.sample_regs_user=(1ULL<<PERF_REG_ARM_MAX)-1;
	pe.sample_regs_intr=pe.sample_regs_user;
#else
	pe.sample_regs_user=1;
	pe.sample_regs_intr=1;
#endif

	global_sample_regs_user=pe.sample_regs_user;

	if (detect_vendor()==VENDOR_AMD) {
		if (!quiet) printf("Using cycles:pp on AMD\n");
		/* On AMD cycles is a precise event */
		pe.type=PERF_TYPE_HARDWARE;
		pe.config=PERF_COUNT_HW_CPU_CYCLES;
		/* on AMD ibs the following must be false */
		/* see bad9ac2d7f878a31cf1ae8c1ee3768077d222bcb */
		/* .exclude_user = 1, .exclude_kernel = 1, .exclude_hv = 1, .exclude_idle = 1, .exclude_host = 1, .exclude_guest = 1, */
		pe.exclude_user = 0;
		pe.exclude_kernel = 0;
		pe.exclude_hv = 0;
		pe.exclude_idle = 0;
		pe.exclude_host = 0;
		pe.exclude_guest = 0;
	}
	else {
		if (!quiet) printf("Using instructions:pp\n");
		/* Set up Instruction Event */
		// pe.type=PERF_TYPE_RAW;
		// pe.config=0x5300c0; // INST_RETIRED:ANY_P
		pe.type=PERF_TYPE_HARDWARE;
		pe.config=PERF_COUNT_HW_INSTRUCTIONS;
		pe.exclude_kernel=0;
		pe.exclude_hv=1;
	}

	arch_adjust_domain(&pe,quiet);

	/* Must be greater than 0 for sample_regs_intr to be interesting? */
	/* Not seeing any difference. */
	pe.precise_ip=2;

	if (detect_vendor()==VENDOR_AMD) {
		/* On AMD needs to be system-wide per-cpu event */
		/* or the IBS PMU won't work. */
		fd=perf_event_open(&pe,-1,0,-1,0);
	}
	else {
		fd=perf_event_open(&pe,0,-1,-1,0);
	}
	if (fd<0) {
		if (!quiet) {
			fprintf(stderr,"Problem opening leader %s\n",
				strerror(errno));
		}
		/* EACCES likely means insufficient perf_event_paranoid */
		if (errno==EACCES) {
			test_skip(test_string);
		}
		test_fail(test_string);
	}

	/* NOTE(review): mmap() result is not checked against MAP_FAILED */
	our_mmap=mmap(NULL, mmap_pages*getpagesize(),
		PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);

	/* Route ring-buffer wakeups to us as SIGIO */
	fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
	fcntl(fd, F_SETSIG, SIGIO);
	fcntl(fd, F_SETOWN,getpid());

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);

	ret=ioctl(fd, PERF_EVENT_IOC_ENABLE,0);
	if (ret<0) {
		if (!quiet) {
			/* NOTE(review): exit(1) is nested under !quiet, so a
			 * failed enable is silently ignored in quiet mode —
			 * confirm whether that is intended. */
			fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE "
				"of group leader: %d %s\n",
				errno,strerror(errno));
			exit(1);
		}
	}

	int i;
	for(i=0;i<10;i++) {
		// write(1,"0",1);
		instructions_million();
	}

	/* REFRESH with 0 stops further overflow signals */
	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,0);

	if (!quiet) {
		printf("Counts %d, using mmap buffer %p\n",count_total,our_mmap);
	}

	if (count_total==0) {
		if (!quiet) printf("No overflow events generated.\n");
		test_fail(test_string);
	}

	ret=ioctl(fd, PERF_EVENT_IOC_DISABLE,0);

	munmap(our_mmap,mmap_pages*getpagesize());
	close(fd);

	test_pass(test_string);

	return 0;
}
/* Tests the PERF_EVENT_IOC_SET_FILTER ioctl on tracepoint events:
 * probes tracepoint ids for one that accepts the filter
 * "nr_pages==2", then exercises the ioctl with over-long, boundary,
 * empty, and clear ("0") filter strings, expecting EINVAL in each
 * case. `filter` is a file-scope buffer of MAX_FILTER bytes. */
int main(int argc, char** argv) {

	int fd1,i;
	struct perf_event_attr pe1;
	int errors=0;
	int result;
	char test_string[]="Testing PERF_EVENT_IOC_SET_FILTER ioctl...";

	quiet=test_quiet();

	if (!quiet) {
		printf("Testing PERF_EVENT_IOC_SET_FILTER ioctl.\n");
	}

	/****************************************************/
	/* Check if /sys/kernel/debug/tracing/events exists */
	/****************************************************/
	// result=access("/sys/kernel/debug/tracing/events",F_OK);
	/* Actually this is pointless, as it gives EACCESS */
	/* as a normal user even if the file exists */

	/************************************/
	/* Creating a tracepoint event */
	/************************************/

	if (!quiet) {
		printf("Creating a tracepoint event\n");
	}

	memset(&pe1,0,sizeof(struct perf_event_attr));
	pe1.type=PERF_TYPE_TRACEPOINT;
	pe1.size=sizeof(struct perf_event_attr);

	/* Find a trace event that will let us add a particular filter */
	/* It should work with */
	/* writeback:writeback_start*/
	/* but we can't get the id of this directly without debugfs/tracefs */
	/* mounted (the id numbers change depending on machine/kernel) */

	/* Valid filter */
	strcpy(filter,"nr_pages==2");

	if (!quiet) {
		printf("Trying to find an event that will allow filter %s\n",
			filter);
	}

	/* Usually there are fewer than 1000 trace events? */
	/* Brute-force tracepoint config ids until one accepts the filter */
	for(i=0;i<MAX_SEARCH;i++) {
		pe1.config=i;
		pe1.disabled=1;
		pe1.exclude_kernel=0;
		pe1.exclude_hv=0;
		arch_adjust_domain(&pe1,quiet);

		fd1=perf_event_open(&pe1,0,-1,-1,0);
		if (fd1<0) {
			if (!quiet) {
				// fprintf(stderr,"Failed on %d\n",i);
			}
			continue;
		}

		result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
		if (result==0) {
			if (!quiet) printf("Found proper event %d\n",i);
			close(fd1);
			break;
		}
		else {
		}
		close(fd1);
	}

	if (i==MAX_SEARCH) {
		if (!quiet) {
			printf("Could not find any trace event to filter\n");
		}
		/* NOTE(review): errors++ below is presumably unreachable if
		 * test_skip() exits — confirm test_skip semantics. */
		test_skip(test_string);
		errors++;
	}

	/* Re-open the event we found, this time for the real checks */
	pe1.config=i;
	pe1.disabled=1;
	pe1.exclude_kernel=0;
	pe1.exclude_hv=0;
	arch_adjust_domain(&pe1,quiet);

	/* Create group leader */
	fd1=perf_event_open(&pe1,0,-1,-1,0);
	if (fd1<0) {
		if (!quiet) {
			fprintf(stderr,"Unexpected error %s\n",strerror(errno));
		}
		test_fail(test_string);
	}

	/* Fill the filter buffer with no terminating NUL anywhere */
	for(i=0;i<MAX_FILTER;i++) {
		filter[i]=0xff;
	}

	/* Check a too big value */
	if (!quiet) {
		printf("\t+ Checking a too-big event: ");
	}
	result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
	if ((result==-1) && (errno==EINVAL)) {
		if (!quiet) printf("Failed as expected\n");
	}
	else {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* Check off-by-one value */
	/* Size limited to pagesize */
	if (!quiet) {
		printf("\t+ Checking off-by-one filter: ");
	}
	filter[4096]=0;
	result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
	if ((result==-1) && (errno==EINVAL)) {
		if (!quiet) printf("Failed as expected\n");
	}
	else {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* Check a just-right value */
	/* (max-length string, still not a valid filter expression) */
	if (!quiet) {
		printf("\t+ Checking max size invalid filter: ");
	}
	filter[4095]=0;
	result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
	if ((result==-1) && (errno==EINVAL)) {
		if (!quiet) printf("Failed as expected\n");
	}
	else {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* Check an empty value */
	if (!quiet) {
		printf("\t+ Checking empty filter: ");
	}
	filter[0]=0;
	result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
	if ((result==-1) && (errno==EINVAL)) {
		if (!quiet) printf("Failed as expected\n");
	}
	else {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* Clear a filter */
	if (!quiet) {
		printf("\t+ Clear filter (write 0): ");
	}
	filter[0]='0';
	filter[1]=0;
	result=ioctl(fd1, PERF_EVENT_IOC_SET_FILTER, filter);
	if ((result==-1) && (errno==EINVAL)) {
		if (!quiet) printf("Failed as expected\n");
	}
	else {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* tracefs usually under /sys/kernel/tracing */

	/* start */
	ioctl(fd1, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd1, PERF_EVENT_IOC_ENABLE,0);

	/* million */
	result=instructions_million();

	/* stop */
	ioctl(fd1, PERF_EVENT_IOC_DISABLE,0);

	close(fd1);

	if (errors) {
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char **argv) { int i=0,j,result,num_cpus=1,current_cpu; long page_size=getpagesize(); double error; void *addr[MAX_CPUS]; struct perf_event_attr pe; int fd[MAX_CPUS],ret1,ret2; unsigned long long values[MAX_CPUS],enabled[MAX_CPUS],running[MAX_CPUS]; quiet=test_quiet(); if (!quiet) { printf("This tests rdpmc when attaching to CPU.\n"); } /* See if we support rdpmc access */ if (!detect_rdpmc(quiet)) { test_skip(test_string); } /* See if we support enough CPUs */ // get_nprocs_conf(), num_cpus=get_nprocs(); if (num_cpus<2) { if (!quiet) printf("Not running test, only %d cores found\n",num_cpus); test_skip(test_string); } if (!quiet) { printf("Found %d cpus\n\n",num_cpus); } /******************/ /* Run on one CPU */ /******************/ if (!quiet) { printf("Trying rdpmc with 1 cpu, 100 million instructions\n\n"); } /* Open event */ memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); fd[0]=-1; pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.disabled=1; fd[0]=perf_event_open(&pe,0,-1,fd[0],0); if (fd[0]<0) { fprintf(stderr,"Error opening event %d\n",0); test_fail(test_string); } /* mmap() event */ addr[0]=mmap(NULL,page_size, PROT_READ, MAP_SHARED,fd[0],0); if (addr[0] == (void *)(-1)) { fprintf(stderr,"Error mmap()ing event %d!\n",0); test_fail(test_string); } /* start the event */ ret1=ioctl(fd[0], PERF_EVENT_IOC_ENABLE,0); if (ret1<0) { fprintf(stderr,"Error enabling!\n"); } for(j=0;j<100;j++) { result=instructions_million(); } if (result==CODE_UNIMPLEMENTED) printf("Warning, no million\n"); /* read */ values[0] = mmap_read_self(addr[0], &enabled[0], &running[0]); /* stop */ ret2=ioctl(fd[0], PERF_EVENT_IOC_DISABLE,0); if (ret1<0) { fprintf(stderr,"Error starting!\n"); test_fail(test_string); } if (ret2<0) { fprintf(stderr,"Error stopping!\n"); test_fail(test_string); } if (values[0]<0) { if (!quiet) printf("rdpmc support not available.\n"); test_yellow_no(test_string); } if (!quiet) { 
printf("\tEvent %x -- Raw count: %lld " "enabled: %lld running: %lld\n", 0,values[0],enabled[0],running[0]); } /* Close */ close(fd[0]); munmap(addr[0],page_size); error=display_error(values[0], // avg values[0], // hi values[0], // low 100000000ULL,quiet); if ((error>1.0) || ( error<-1.0)) { if (!quiet) printf("Error out of range!\n"); test_fail(test_string); } /*******************/ /* Run on all CPUs */ /*******************/ if (num_cpus>MAX_CPUS) num_cpus=MAX_CPUS; if (!quiet) { printf("\nTrying rdpmc with %d cpus, " "100 million instructions\n\n",num_cpus); } current_cpu=sched_getcpu(); printf("Running on CPU %d\n",current_cpu); /* Open events */ for(i=0;i<num_cpus;i++) { memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); fd[i]=-1; pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.disabled=1; fd[i]=perf_event_open(&pe,0,i,-1,0); if (fd[i]<0) { fprintf(stderr,"Error opening event %d\n",i); test_fail(test_string); } /* mmap() event */ addr[i]=mmap(NULL,page_size, PROT_READ, MAP_SHARED,fd[i],0); if (addr[i] == (void *)(-1)) { fprintf(stderr,"Error mmap()ing event %d!\n",i); test_fail(test_string); } } for(i=0;i<num_cpus;i++) { /* start the event */ ret1=ioctl(fd[i], PERF_EVENT_IOC_ENABLE,0); if (ret1<0) { fprintf(stderr,"Error enabling!\n"); } } for(j=0;j<100;j++) { result=instructions_million(); } if (result==CODE_UNIMPLEMENTED) printf("Warning, no million\n"); /* read */ for(i=0;i<num_cpus;i++) { values[i] = mmap_read_self(addr[i], &enabled[i], &running[i]); } /* stop */ for(i=0;i<num_cpus;i++) { ret2=ioctl(fd[i], PERF_EVENT_IOC_DISABLE,0); if (ret1<0) { fprintf(stderr,"Error starting!\n"); test_fail(test_string); } if (ret2<0) { fprintf(stderr,"Error stopping!\n"); test_fail(test_string); } if (values[i]<0) { if (!quiet) printf("rdpmc support not available.\n"); test_yellow_no(test_string); } if (!quiet) { if (values[i]==-1) { printf("\tCPU %x -- rdpmc not available\n",i); } else { printf("\tCPU %x -- Raw 
count: %lld " "enabled: %lld running: %lld\n", i,values[i],enabled[i],running[i]); } } } /* Close */ for(i=0;i<num_cpus;i++) { close(fd[i]); munmap(addr[i],page_size); } /* Validate */ for(i=0;i<num_cpus;i++) { if (i!=current_cpu) { if (values[i]!=-1) { if (!quiet) { fprintf(stderr,"Error: CPU %d unexpectedly worked!\n", i); } test_fail(test_string); } } else { error=display_error(values[i], // avg values[i], // hi values[i], // low 100000000ULL,quiet); if ((error>1.0) || ( error<-1.0)) { if (!quiet) printf("Error out of range!\n"); test_fail(test_string); } } } test_pass(test_string); return 0; }
void fork_random_event(void) { int status; if (ignore_but_dont_skip.fork) return; if (already_forked) { if (logging&TYPE_FORK) { sprintf(log_buffer,"F 0\n"); write(log_fd,log_buffer,strlen(log_buffer)); } kill(forked_pid,SIGKILL); /* not sure if this will cause us to miss bugs */ /* but it does make the logs more deterministic */ if (attempt_determinism) { waitpid(forked_pid, &status, 0); } already_forked=0; } else { if (logging&TYPE_FORK) { sprintf(log_buffer,"F 1\n"); write(log_fd,log_buffer,strlen(log_buffer)); } forked_pid=fork(); /* we're the child */ if (forked_pid==0) { while(1) { instructions_million(); /* we were orphaned, exit */ /* Had problems with orphans clogging up */ /* the system if the parent emergency */ /* exited */ if (getppid()==1) { exit(1); } } } stats.fork_attempts++; /* We do see failures sometimes */ /* And when we do, if we foolishly kill process "-1" */ /* It will kill *all* processes beloning to the user */ /* Logging you out on all windows. */ if (forked_pid==-1) { printf("Fork failed! %s\n",strerror(errno)); already_forked=0; } else { stats.fork_successful++; already_forked=1; } } }
/* Tests PERF_RECORD_COMM_EXEC: a traced child renames itself three
 * ways (prctl(PR_SET_NAME), writing /proc/self/comm, and exec), and
 * we expect exactly three COMM records — with the exec one carrying
 * PERF_RECORD_MISC_COMM_EXEC — in the mmap ring buffer of a dummy
 * software event attached to the child. */
int main(int argc, char **argv) {

	int ret,status;
	int fd;
	int mmap_pages=1+MMAP_DATA_SIZE;
	int events_read;
	int child;
	int version;
	struct perf_event_attr pe;
	struct sigaction sa;
	char test_string[]="Testing PERF_RECORD_COMM_EXEC...";

	quiet=test_quiet();

	if (!quiet) printf("This tests PERF_RECORD_COMM_EXEC samples:\n");

	/* SIGIO handler parses samples arriving in the ring buffer */
	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_sigaction = our_handler;
	sa.sa_flags = SA_SIGINFO;
	if (sigaction( SIGIO, &sa, NULL) < 0) {
		fprintf(stderr,"Error setting up signal handler\n");
		exit(1);
	}

	/* Fork child to measure */
	/* We do this in a child as we have to exec */
	child=fork();
	if (child==0) {
		FILE *fff;
		/* Stop ourselves (SIGTRAP under ptrace) until the parent */
		/* has the event attached, then perform the three renames. */
		if (ptrace(PTRACE_TRACEME, 0, 0, 0) == 0) {
			kill(getpid(),SIGTRAP);

			/* The actual thing to measure */
			instructions_million();

			/* prctl */
			if (!quiet) printf("\tprctl(PR_SET_NAME,\"vmw\");\n");
			prctl(PR_SET_NAME,"vmw");

			/* /proc/self/comm */
			if (!quiet) printf("\tcat \"krg krg krg\" > /proc/self/comm\n");
			fff=fopen("/proc/self/comm","w");
			if (fff!=NULL) {
				fprintf(fff,"krg krg krg");
				fclose(fff);
			}

			/* exec */
			if (!quiet) printf("\texecl(\"/bin/false\"); [should have PERF_RECORD_MISC_COMM_EXEC set]\n");
			execl("/bin/false","/bin/true",NULL);

			/* only reached if the exec failed */
			instructions_million();
			/* Done measuring */
		}
		else {
			fprintf(stderr,"Failed ptrace...\n");
		}
		return 1;
	}

	/* wait for child to stop */
	child=wait(&status);

	/* Set up Instruction Event */
	/* (actually a software dummy event; it exists only to deliver */
	/* the COMM side-band records, not to count anything) */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_SOFTWARE;
	pe.size=sizeof(struct perf_event_attr);
	pe.config=PERF_COUNT_SW_DUMMY;
	pe.sample_period=SAMPLE_FREQUENCY;
	pe.read_format=0;
	pe.disabled=1;
	pe.pinned=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;
	pe.wakeup_events=1;
	pe.comm_exec=1;
	pe.comm=1;

	arch_adjust_domain(&pe,quiet);

	fd=perf_event_open(&pe,child,-1,-1,0);
	if (fd<0) {
		if (!quiet) {
			fprintf(stderr,"Problem opening leader %s\n",
				strerror(errno));
		}
		version=get_kernel_version();
		/* Introduced in 3.16 */
		if (version<0x31000) {
			if (!quiet) {
				fprintf(stderr,"comm_exec support not added until Linux 3.16\n");
			}
			test_fail_kernel(test_string);
		}
		test_fail(test_string);
	}

	our_mmap=mmap(NULL, mmap_pages*4096,
		PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (our_mmap==MAP_FAILED) {
		fprintf(stderr,"mmap() failed %s!\n",strerror(errno));
		test_fail(test_string);
	}

	/* Route ring-buffer wakeups to us as SIGIO */
	fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
	fcntl(fd, F_SETSIG, SIGIO);
	fcntl(fd, F_SETOWN,getpid());

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);

	ret=ioctl(fd, PERF_EVENT_IOC_ENABLE,0);
	if (ret<0) {
		if (!quiet) {
			/* NOTE(review): exit(1) nested under !quiet — a failed
			 * enable is silently ignored in quiet mode. */
			fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE "
				"of group leader: %d %s\n",
				errno,strerror(errno));
			exit(1);
		}
	}

	/* restart child */
	if ( ptrace( PTRACE_CONT, child, NULL, NULL ) == -1 ) {
		fprintf(stderr,"Error continuing child\n");
		test_fail(test_string);
	}

	/* Wait for child to finish */
	waitpid(child,&status,0);

	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,0);

	if (!quiet) {
		printf("Counts %d, using mmap buffer %p\n",count_total,our_mmap);
	}

	/* Drain any remaining events */
	prev_head=perf_mmap_read(our_mmap,MMAP_DATA_SIZE,prev_head,
		global_sample_type,0,global_sample_regs_user,
		NULL,quiet,&events_read);

	munmap(our_mmap,mmap_pages*4096);
	close(fd);

/* one COMM record per rename: prctl, /proc/self/comm write, exec */
#define EXPECTED_EVENTS 3

	if (events_read!=EXPECTED_EVENTS) {
		if (!quiet) fprintf(stderr,"Wrong number of events! Expected %d but got %d\n",
			EXPECTED_EVENTS,events_read);
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char **argv) { int i,result; long page_size=getpagesize(); long long start_before,stop_after; void *addr[MAX_EVENTS]; struct perf_event_attr pe,syswide; int fd[MAX_EVENTS],ret1,ret2,syswide_fd[MAX_EVENTS]; unsigned long long values[MAX_EVENTS],enabled[MAX_EVENTS],running[MAX_EVENTS]; unsigned long long values2[MAX_EVENTS],enabled2[MAX_EVENTS],running2[MAX_EVENTS]; double error; int count=3; quiet=test_quiet(); if (!quiet) { printf("This test checks if rdpmc works with event groups.\n"); printf("and if a syswide event is happening at the same time.\n\n"); } /* See if we support rdpmc access */ if (!detect_rdpmc(quiet)) { test_skip(test_string); } /*****************************/ /* TEST START/WORK/READ/STOP */ /*****************************/ /* Open event */ memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); fd[0]=-1; for(i=0;i<count;i++) { pe.config=PERF_COUNT_HW_INSTRUCTIONS; if (i==0) { pe.disabled=1; } else { pe.disabled=0; } fd[i]=perf_event_open(&pe,0,-1,fd[0],0); if (fd[i]<0) { fprintf(stderr,"Error opening event %d\n",i); test_fail(test_string); } /* mmap() event */ addr[i]=mmap(NULL,page_size, PROT_READ, MAP_SHARED,fd[i],0); if (addr[i] == (void *)(-1)) { fprintf(stderr,"Error mmap()ing event %d!\n",i); test_fail(test_string); } } /* Open syswide event */ memset(&syswide,0,sizeof(struct perf_event_attr)); syswide.type=PERF_TYPE_HARDWARE; syswide.size=sizeof(struct perf_event_attr); syswide.config=PERF_COUNT_HW_INSTRUCTIONS; syswide.disabled=0; for(i=0;i<4;i++) { syswide_fd[i]=perf_event_open(&pe,0,-1,-1,0); if (syswide_fd[i]<0) { fprintf(stderr,"Error opening syswide event\n"); test_fail(test_string); } ret1=ioctl(syswide_fd[i], PERF_EVENT_IOC_ENABLE,0); } /* start the rest */ start_before=rdtsc(); ret1=ioctl(fd[0], PERF_EVENT_IOC_ENABLE,0); if (ret1<0) { fprintf(stderr,"Error enabling!\n"); } for(i=0;i<100;i++) { result=instructions_million(); } if (result==CODE_UNIMPLEMENTED) 
printf("Warning, no million\n"); /* read */ for(i=0;i<count;i++) { values[i] = mmap_read_self(addr[i], &enabled[i], &running[i]); } for(i=0;i<100;i++) { result=instructions_million(); } /* read */ for(i=0;i<count;i++) { long long rvalue; int result; result=read(fd[i],&rvalue,8); if (result!=8) { fprintf(stderr,"Read error!\n"); exit(1); } values2[i] = rvalue; } /* stop */ ret2=ioctl(fd[0], PERF_EVENT_IOC_DISABLE,0); stop_after=rdtsc(); if (ret1<0) { fprintf(stderr,"Error starting!\n"); test_fail(test_string); } if (ret2<0) { fprintf(stderr,"Error stopping!\n"); test_fail(test_string); } if (values[0]<0) { if (!quiet) printf("rdpmc support not available.\n"); test_yellow_no(test_string); } if (!quiet) { printf("total start/read/stop latency: %lld cycles\n", stop_after-start_before); for(i=0;i<count;i++) { printf("\tEvent %x -- Raw count: %lld enabled: %lld running: %lld\n", i,values[i],enabled[i],running[i]); printf("\t%lld\n",values[i]*enabled[i]/running[i]); printf("\tEvent %x -- Raw count: %lld enabled: %lld running: %lld\n", i,values2[i],enabled2[i],running2[i]); } } for(i=0;i<count;i++) { close(fd[i]); munmap(addr[i],page_size); } if (!quiet) printf("\n"); error=display_error(values[0], // avg values[0], // hi values[0], // low 100000000ULL,quiet); if ((error>1.0) || ( error<-1.0)) { if (!quiet) printf("Error out of range!\n"); test_fail(test_string); } error=display_error(values2[0], // avg values2[0], // hi values2[0], // low 200000000ULL,quiet); if ((error>1.0) || ( error<-1.0)) { if (!quiet) printf("Error out of range!\n"); test_fail(test_string); } test_pass(test_string); return 0; }
int main(int argc, char **argv) { int retval,quiet,result; int i,events[1]; long long counts[1]; char test_string[]="Testing PAPI_HW_INT predefined event..."; quiet=test_quiet(); retval = PAPI_library_init(PAPI_VER_CURRENT); if (retval != PAPI_VER_CURRENT) { if (!quiet) printf("Error: PAPI_library_init: %d\n", retval); test_fail(test_string); } retval = PAPI_query_event(PAPI_HW_INT); if (retval != PAPI_OK) { if (!quiet) printf("PAPI_HW_INT not supported"); test_skip(test_string); } events[0]=PAPI_HW_INT; if (!quiet) { printf("\n"); printf("Testing a loop of 1 million instructions (%d times):\n", NUM_RUNS); printf("A certain number of interrupts should happen (mostly timer)\n"); } PAPI_start_counters(events,1); for(i=0;i<NUM_RUNS;i++) { result=instructions_million(); } PAPI_stop_counters(counts,1); if (result==CODE_UNIMPLEMENTED) { fprintf(stderr,"\tCode unimplemented\n"); test_fail(test_string); } if (!quiet) { printf(" Expected: >0\n"); printf(" Obtained: %lld\n",counts[0]); printf("\n"); } if (counts[0] == 0) { if (!quiet) printf("Error: Interrupt count was zero\n"); test_fail(test_string); } PAPI_shutdown(); test_pass(test_string); return 0; }
/* Tests catching sampling overflow with poll(): attaches a sampled
 * instruction event (period 1M, one wakeup per sample) to a ptrace'd
 * child running 10 million-instruction loops, then polls the event fd
 * expecting exactly 10 POLLIN wakeups plus one POLLHUP when the child
 * exits.  Tallies go into the file-scope `count` struct; fd1 also
 * appears to be file-scope (not declared locally here). */
int main(int argc, char** argv) {

	int ret,quiet,status;
	pid_t pid,child;
	struct perf_event_attr pe;
	void *our_mmap;
	char test_string[]="Testing catching overflow with poll()...";

	quiet=test_quiet();

	if (!quiet) printf("This tests using poll() to catch overflow.\n");

	/* fork off a child */
	pid=fork();
	if (pid<0) {
		fprintf(stderr,"Failed fork\n");
		test_fail(test_string);
	}

	/* Our child. Set up ptrace to stop it */
	/* originally tried kill(getpid(),SIGSTOP); */
	/* but that is unreliable? */
	if (pid==0) {
		if (ptrace(PTRACE_TRACEME, 0, 0, 0) == 0) {
			int c;
			kill(getpid(),SIGTRAP);
			/* 10 runs of 1M instructions at a 1M sample period */
			/* should yield ~10 overflow wakeups */
			for(c=0;c<10;c++) {
				instructions_million();
			}
		}
		else {
			fprintf(stderr,"Failed ptrace...\n");
		}
		return 1;
	}

	child=wait(&status);

	/* Make sure child is stopped and waiting */
	if (!quiet) printf( "Monitoring pid %d status %d\n",pid,status);
	if (WIFSTOPPED( status )) {
		if (!quiet) {
			printf( "Child has stopped due to signal %d (%s)\n",
				WSTOPSIG( status ), strsignal(WSTOPSIG( status )) );
		}
	}
	if (WIFSIGNALED( status )) {
		if (!quiet) {
			printf( "Child %d received signal %d (%s)\n",
				child, WTERMSIG(status), strsignal(WTERMSIG( status )) );
		}
	}

	/* Create a sampled event and attach to child */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_HARDWARE;
	pe.size=sizeof(struct perf_event_attr);
	pe.config=PERF_COUNT_HW_INSTRUCTIONS;
	/* 1 million. Tried 100k but that was too short on */
	/* faster machines, likely triggered overflow while */
	/* poll still was being handled? */
	pe.sample_period=1000000;
	pe.sample_type=PERF_SAMPLE_IP;
	pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID;
	pe.disabled=1;
	pe.pinned=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;
	pe.wakeup_events=1;

	arch_adjust_domain(&pe,quiet);

	fd1=perf_event_open(&pe,child,-1,-1,0);
	if (fd1<0) {
		if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config);
		test_fail(test_string);
	}

	/* NOTE(review): mmap() result is not checked against MAP_FAILED */
	our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(),
		PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0);

	ioctl(fd1, PERF_EVENT_IOC_RESET, 0);

	ret=ioctl(fd1, PERF_EVENT_IOC_ENABLE,0);
	if (ret<0) {
		if (!quiet) {
			fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: "
				"%d %s\n",errno,strerror(errno));
			test_fail(test_string);
		}
	}

	struct pollfd fds[1];
	int result;

	fds[0].fd=fd1;
	fds[0].events=POLLIN|POLLHUP|POLLNVAL|POLLERR;

	/* Restart child process */
	if (!quiet) printf("Continuing child\n");
	if ( ptrace( PTRACE_CONT, pid, NULL, NULL ) == -1 ) {
		fprintf(stderr,"Error continuing\n");
		test_fail(test_string);
	}
	// kill(child,SIGCONT);

	/* Poll loop: 100ms timeout; a timeout (result==0) is the cue to */
	/* check whether the child has exited. */
	while(1) {
		result=poll(fds,1,100);

		if (result==0) {
			waitpid(child,&status,WNOHANG);
			if (WIFEXITED(status)) break;
			if (WIFSIGNALED(status)) {
				printf("Signalled %d!\n",WTERMSIG(status));
				break;
			}
		}
		else if (result==-1) {
			printf("Error: %s\n",strerror(errno));
			break;
		}

		if (fds[0].revents&POLLIN) count.in++;
		if (fds[0].revents&POLLHUP) count.hup++;
		if (fds[0].revents&POLLERR) {
			if (!quiet) printf("Returned error!\n");
			break;
		}
		/* On 3.18 and newer we get infinite POLLHUP */
		/* When the child exits rather than an error */
		/* (so break here instead of spinning on POLLHUP forever) */
		if (fds[0].revents&POLLHUP) {
			if (!quiet) printf("Returned HUP!\n");
			break;
		}
		// printf("%d %d\n",result,fds[0].revents);
	}

	close(fd1);

	count.total=count.in+count.hup;

	if (!quiet) {
		printf("Counts, using mmap buffer %p\n",our_mmap);
		printf("\tPOLL_IN : %d\n",count.in);
		printf("\tPOLL_OUT: %d\n",count.out);
		printf("\tPOLL_MSG: %d\n",count.msg);
		printf("\tPOLL_ERR: %d\n",count.err);
		printf("\tPOLL_PRI: %d\n",count.pri);
		printf("\tPOLL_HUP: %d\n",count.hup);
		printf("\tUNKNOWN : %d\n",count.unknown);
	}

	/* I think it is expected that we get POLL_IN for each time the */
	/* wakeup value is triggered (indicating data is ready) and we */
	/* only get POLL_HUP if the monitored process exits (hangs up) */
	/* I think older (pre-3.18?) did this differently. */

	if (count.total==0) {
		if (!quiet) printf("No overflow events generated.\n");
		test_fail(test_string);
	}
	if (count.in!=10) {
		if (!quiet) printf("Unexpected POLL_IN interrupt.\n");
		test_fail(test_string);
	}
	if (count.hup!=1) {
		if (!quiet) {
			printf("POLL_HUP value %d, expected %d.\n",
				count.hup,1);
			printf("Expected if kernel older than 3.18, "
				"as poll() would get an error rather than "
				"POLL_HUP if the monitored process detached\n");
		}
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char** argv) { int fd1,fd2; struct perf_event_attr pe1,pe2; int result; long long single=0,both=0; char test_string[]="Testing PERF_EVENT_IOC_SET_OUTPUT ioctl..."; char *our_mmap; quiet=test_quiet(); if (!quiet) { printf("Testing PERF_EVENT_IOC_SET_OUTPUT ioctl.\n"); } /**********************************/ /* test 1, creating group, normal */ /**********************************/ if (!quiet) { printf("1. Testing normal group leader\n"); } /* Create group leader */ memset(&pe1,0,sizeof(struct perf_event_attr)); pe1.type=PERF_TYPE_HARDWARE; pe1.size=sizeof(struct perf_event_attr); pe1.config=PERF_COUNT_HW_INSTRUCTIONS; pe1.disabled=1; pe1.exclude_kernel=1; pe1.exclude_hv=1; pe1.sample_period=50000; arch_adjust_domain(&pe1,quiet); /* Create group leader */ fd1=perf_event_open(&pe1,0,-1,-1,0); if (fd1<0) { if (!quiet) { fprintf(stderr,"Unexpected error %s\n",strerror(errno)); } test_fail(test_string); } our_mmap=mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); if (our_mmap==MAP_FAILED) { fprintf(stderr,"mmap() failed %s!\n",strerror(errno)); test_fail(test_string); } /* Create group member */ memset(&pe2,0,sizeof(struct perf_event_attr)); pe2.type=PERF_TYPE_HARDWARE; pe2.size=sizeof(struct perf_event_attr); pe2.config=PERF_COUNT_HW_INSTRUCTIONS; pe2.disabled=0; pe2.exclude_kernel=1; pe2.exclude_hv=1; arch_adjust_domain(&pe2,quiet); fd2=perf_event_open(&pe2,0,-1,fd1,0); if (fd2<0) { if (!quiet) { fprintf(stderr,"Unexpected error %s\n",strerror(errno)); } test_fail(test_string); } /* start */ ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ioctl(fd2, PERF_EVENT_IOC_RESET, 0); ioctl(fd1, PERF_EVENT_IOC_ENABLE,0); /* million */ result=instructions_million(); /* stop */ ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); if (result==CODE_UNIMPLEMENTED) { fprintf(stderr,"\tCode unimplemented\n"); test_skip(test_string); } /* read mmap */ single=read_mmap_size(our_mmap); /* close */ munmap(our_mmap,8192); close(fd1); close(fd2); 
/************************************************/ /* test 2, reading group, PERF_FLAG_FD_NO_GROUP */ /************************************************/ if (!quiet) { printf("2. Testing with PERF_EVENT_IOC_SET_OUTPUT\n"); } /* Create group leader */ memset(&pe1,0,sizeof(struct perf_event_attr)); pe1.type=PERF_TYPE_HARDWARE; pe1.size=sizeof(struct perf_event_attr); pe1.config=PERF_COUNT_HW_INSTRUCTIONS; pe1.disabled=1; pe1.exclude_kernel=1; pe1.exclude_hv=1; pe1.sample_period=50000; arch_adjust_domain(&pe1,quiet); fd1=perf_event_open(&pe1,0,-1,-1,0); if (fd1<0) { if (!quiet) { fprintf(stderr,"Unexpected error %s\n",strerror(errno)); } test_fail(test_string); } /* Create group member */ memset(&pe2,0,sizeof(struct perf_event_attr)); pe2.type=PERF_TYPE_HARDWARE; pe2.size=sizeof(struct perf_event_attr); pe2.config=PERF_COUNT_HW_INSTRUCTIONS; pe2.disabled=0; pe2.exclude_kernel=1; pe2.exclude_hv=1; pe2.sample_period=50000; arch_adjust_domain(&pe2,quiet); fd2=perf_event_open(&pe2,0,-1,fd1,0); if (fd2<0) { if (!quiet) { fprintf(stderr,"Unexpected error on perf_event_open() %s\n",strerror(errno)); } test_fail(test_string); } our_mmap=mmap(NULL, 8192, PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); if (our_mmap==MAP_FAILED) { fprintf(stderr,"mmap() failed %s!\n",strerror(errno)); test_fail(test_string); } ioctl(fd2, PERF_EVENT_IOC_SET_OUTPUT, fd1); /* start */ ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ioctl(fd2, PERF_EVENT_IOC_RESET, 0); ioctl(fd1, PERF_EVENT_IOC_ENABLE,0); /* million */ result=instructions_million(); /* stop */ ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); if (result==CODE_UNIMPLEMENTED) { fprintf(stderr,"\tCode unimplemented\n"); test_skip(test_string); } /* read */ both=read_mmap_size(our_mmap); /* close */ munmap(our_mmap,8192); close(fd1); close(fd2); if (both<=single) { if (!quiet) { fprintf(stderr,"Expected to get more samples when both together\n"); } test_fail(test_string); } test_pass(test_string); return 0; }
int main(int argc, char **argv) { int ret; int fd; int mmap_pages=1+MMAP_DATA_SIZE; struct perf_event_attr pe; struct sigaction sa; char test_string[]="Testing pebs..."; quiet=test_quiet(); if (!quiet) printf("This tests the intel PEBS interface.\n"); memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } /* Set up Instruction Event */ memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=SAMPLE_FREQUENCY; // pe.sample_type=PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER; pe.sample_type=PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR; global_sample_type=pe.sample_type; #if defined(__i386__) || defined (__x86_64__) /* Bitfield saying which registers we want */ pe.sample_regs_intr=(1ULL<<PERF_REG_X86_64_MAX)-1; // pe.sample_regs_user=(1ULL<<PERF_REG_X86_IP); /* DS, ES, FS, and GS not valid on x86_64 */ /* see perf_reg_validate() in arch/x86/kernel/perf_regs.c */ pe.sample_regs_intr&=~(1ULL<<PERF_REG_X86_DS); pe.sample_regs_intr&=~(1ULL<<PERF_REG_X86_ES); pe.sample_regs_intr&=~(1ULL<<PERF_REG_X86_FS); pe.sample_regs_intr&=~(1ULL<<PERF_REG_X86_GS); printf("%llx %d\n",pe.sample_regs_user,PERF_REG_X86_DS); #else pe.sample_regs_intr=1; #endif global_sample_regs_user=pe.sample_regs_intr; pe.read_format=0; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd=perf_event_open(&pe,0,-1,-1,0); if (fd<0) { if (!quiet) { if (errno==EINVAL) { fprintf(stderr,"Problem opening leader " "probably need to run a newer kernel: %s\n", strerror(errno)); } else { fprintf(stderr,"Problem opening leader %s\n", strerror(errno)); } test_fail(test_string); } } our_mmap=mmap(NULL, mmap_pages*4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); 
fcntl(fd, F_SETSIG, SIGIO); fcntl(fd, F_SETOWN,getpid()); ioctl(fd, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd, PERF_EVENT_IOC_ENABLE,0); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE " "of group leader: %d %s\n", errno,strerror(errno)); exit(1); } } instructions_million(); ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,0); if (!quiet) { printf("Counts %d, using mmap buffer %p\n",count_total,our_mmap); } if (count_total==0) { if (!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } munmap(our_mmap,mmap_pages*4096); close(fd); test_pass(test_string); return 0; }
/* Tests the PERF_EVENT_IOC_SET_BPF ioctl: creates a kprobe	*/
/* tracepoint on handle_mm_fault via tracefs, opens it as a	*/
/* perf event, and attaches a trivial BPF program (return 0).	*/
int main(int argc, char** argv) {

	int fd,bpf_fd;
	struct perf_event_attr pe1;
	int errors=0;
	unsigned long long text_begin,symbol;
	union bpf_attr battr;
	int result;

	char test_string[]="Testing PERF_EVENT_IOC_SET_BPF ioctl...";

	quiet=test_quiet();

	if (!quiet) {
		printf("Testing PERF_EVENT_IOC_SET_BPF ioctl.\n");
	}

	/*******************************************/
	/* Creating a kprobe tracepoint event      */
	/*******************************************/

	if (!quiet) {
		printf("Creating a kprobe tracepoint event\n");
	}

	FILE *fff;
	int kprobe_id;
	char filename[BUFSIZ];
	char tracefs_location[BUFSIZ];
	char *find_result;

	find_result=find_tracefs_location(tracefs_location,quiet);
	if (find_result==NULL) {
		if (!quiet) {
			fprintf(stderr,"Error finding tracefs location!\n");
		}
		test_skip(test_string);
	}

	sprintf(filename,"%s/kprobe_events",tracefs_location);

	fff=fopen(filename, "w");
	if (fff==NULL) {
		printf("Cannot open %s!\n",filename);
		printf("You may want to: mount -t tracefs nodev /sys/kernel/tracing\n");
		test_fail(test_string);
	}

	/* Probes are placed relative to _text (KASLR-safe offset) */
	text_begin=lookup_symbol("_text");
	symbol=lookup_symbol("handle_mm_fault");
	if ((text_begin==0) || (symbol==0)) {
		fprintf(stderr,"Error finding symbol _text, handle_mm_fault\n");
		test_fail(test_string);
	}

	/* perf probe -a VMW=handle_mm_fault			*/
	/* FIX: the offset is unsigned long long; was printed	*/
	/* with %lld (mismatched conversion specifier is UB).	*/
	fprintf(fff,"p:probe/VMW _text+%llu",symbol-text_begin);
	fclose(fff);

	sprintf(filename,"%s/events/probe/VMW/id",tracefs_location);
	fff=fopen(filename, "r");
	if (fff==NULL) {
		printf("Cannot open %s!\n",filename);
		test_fail(test_string);
	}
	/* FIX: the fscanf result was unchecked, so on a parse	*/
	/* failure kprobe_id was used uninitialized (UB).	*/
	if (fscanf(fff,"%d",&kprobe_id)!=1) {
		printf("Cannot read id from %s!\n",filename);
		fclose(fff);
		test_fail(test_string);
	}
	fclose(fff);

	memset(&pe1,0,sizeof(struct perf_event_attr));
	pe1.type=PERF_TYPE_TRACEPOINT;
	pe1.size=sizeof(struct perf_event_attr);
	pe1.config=kprobe_id;
	pe1.disabled=1;
	pe1.exclude_kernel=0;
	pe1.exclude_hv=0;
	arch_adjust_domain(&pe1,quiet);

	/* Create group leader */
	fd=perf_event_open(&pe1,0,-1,-1,0);
	if (fd<0) {
		if (!quiet) {
			fprintf(stderr,"Unexpected error %s\n",strerror(errno));
		}
		printf("Cannot open kprobe id %d\n",kprobe_id);
		test_fail(test_string);
	}

	/* Minimal BPF program: just return 0 */
	struct bpf_insn instructions[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),	/* r0 = 0 */
		BPF_EXIT_INSN(),		/* return r0 */
	};

	unsigned char license[]="GPL";

#define LOG_BUF_SIZE 65536
	static char bpf_log_buf[LOG_BUF_SIZE];

	/* Kernel will EINVAL if unused bits aren't zero */
	memset(&battr,0,sizeof(battr));

	/* Version has to match currently running kernel */
	struct utsname version;
	int major, minor, subminor, version_code;
	uname(&version);
	/* FIX: progress prints now honor the quiet flag, like the */
	/* rest of the test suite.                                 */
	if (!quiet) {
		printf("We are running release %s\n",version.release);
	}
	sscanf(version.release,"%d.%d.%d",&major,&minor,&subminor);
	version_code = (major<<16) | (minor<<8) | subminor;
	if (!quiet) {
		printf("Using LINUX_VERSION_CODE: %d\n", version_code);
	}

	battr.prog_type = BPF_PROG_TYPE_KPROBE;
	battr.insn_cnt= sizeof(instructions) / sizeof(struct bpf_insn);
	battr.insns = (uint64_t) (unsigned long) instructions;
	battr.license = (uint64_t) (unsigned long) license;
	battr.log_buf = (uint64_t) (unsigned long) bpf_log_buf;
	battr.log_size = LOG_BUF_SIZE;
	battr.log_level = 1;
	battr.kern_version = version_code;

	bpf_log_buf[0] = 0;

	bpf_fd = sys_bpf(BPF_PROG_LOAD, &battr, sizeof(battr));
	if (bpf_fd < 0) {
		printf("bpf: load: failed to load program, %s\n"
			"-- BEGIN DUMP LOG ---\n%s\n-- END LOG --\n",
			strerror(errno), bpf_log_buf);
		return bpf_fd;
	}

	result=ioctl(fd, PERF_EVENT_IOC_SET_BPF, bpf_fd);
	if (result<0) {
		if (!quiet) printf("Unexpected %d %s\n",result,strerror(errno));
		errors++;
	}

	/* start */
	ioctl(fd, PERF_EVENT_IOC_ENABLE,0);

	/* million */
	result=instructions_million();

	/* stop */
	ioctl(fd, PERF_EVENT_IOC_DISABLE,0);

	close(fd);

	/* Remove the kprobe again.					*/
	/* FIX: filename still pointed at .../events/probe/VMW/id	*/
	/* here, so the remove command was appended to the wrong file	*/
	/* and the probe was never deleted.				*/
	sprintf(filename,"%s/kprobe_events",tracefs_location);
	fff=fopen(filename,"a");
	if (fff==NULL) {
		fprintf(stderr,"Couldn't open %s for closing\n",filename);
		test_fail(test_string);
	}
	fprintf(fff,"-:probe/VMW\n");
	fclose(fff);

	(void) result;

	if (errors) {
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char** argv) { int ret; int mmap_pages; struct perf_event_attr pe; struct sigaction sa; char test_string[]="Checking behavior of various mmap sizes..."; quiet=test_quiet(); if (!quiet) printf("This checks a variety of mmap buffer sizes.\n"); /* set up validation */ validate.pid=getpid(); validate.tid=mygettid(); validate.events=2; for(mmap_pages=0;mmap_pages<18;mmap_pages++) { if (!quiet) { printf("Testing with %d mmap pages\n",mmap_pages); } if (mmap_pages>0) { mmap_data_size=mmap_pages-1; } else { mmap_data_size=0; } memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=SAMPLE_FREQUENCY; pe.sample_type=sample_type; pe.read_format=read_format; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_CPU_CYCLES; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=0; pe.exclude_kernel=1; pe.exclude_hv=1; arch_adjust_domain(&pe,quiet); fd2=perf_event_open(&pe,0,-1,fd1,0); if (fd2<0) { if (!quiet) fprintf(stderr,"Error opening %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, mmap_pages*4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); if (our_mmap == MAP_FAILED) { if (!quiet) printf("\tmmap failed: %d %s\n",errno,strerror(errno)); continue; } fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, 
F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_ENABLE,0); if (ret<0) { if (!quiet) { fprintf(stderr, "Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); } exit(1); } instructions_million(); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,0); if (count.total==0) { if (!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } munmap(our_mmap,mmap_pages*4096); close(fd2); close(fd1); } test_pass(test_string); return 0; }
int main(int argc, char **argv) { int ret; int fd; int mmap_pages=1+MMAP_DATA_SIZE; struct perf_event_attr pe; struct sigaction sa; char test_string[]="Testing pebs latency..."; quiet=test_quiet(); if (!quiet) printf("This tests the intel PEBS latency.\n"); memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } /* Set up Instruction Event */ memset(&pe,0,sizeof(struct perf_event_attr)); sample_type=PERF_SAMPLE_IP|PERF_SAMPLE_WEIGHT; read_format=0; pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=SAMPLE_FREQUENCY; pe.sample_type=sample_type; pe.read_format=read_format; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd=perf_event_open(&pe,0,-1,-1,0); if (fd<0) { if (!quiet) { fprintf(stderr,"Problem opening leader %s\n", strerror(errno)); test_fail(test_string); } } our_mmap=mmap(NULL, mmap_pages*4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd, F_SETSIG, SIGIO); fcntl(fd, F_SETOWN,getpid()); ioctl(fd, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd, PERF_EVENT_IOC_ENABLE,0); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE " "of group leader: %d %s\n", errno,strerror(errno)); exit(1); } } instructions_million(); ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,0); if (!quiet) { printf("Counts %d, using mmap buffer %p\n",count_total,our_mmap); } if (count_total==0) { if (!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } munmap(our_mmap,mmap_pages*4096); close(fd); test_pass(test_string); return 0; }
int main(int argc, char** argv) { int ret,quiet; struct perf_event_attr pe; struct sigaction sa; void *our_mmap; char test_string[]="Testing large sample_period..."; quiet=test_quiet(); if (!quiet) { printf("This tests behavior of large sample_period.\n"); printf("This was broken prior to Linux 3.15.\n\n"); } memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } /*******************************************************/ /* First try sample period that *will* cause overflows */ /*******************************************************/ count.in=0; count.out=0; count.msg=0; count.err=0; count.pri=0; count.hup=0; count.unknown=0; count.total=0; if (!quiet) { printf("Trying 1 million instructions with period 100,000\n"); printf("Should be 10 overflows\n"); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=100000; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,1); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); test_fail(test_string); } } instructions_million(); ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); close(fd1); if (!quiet) { printf("Counts, using mmap buffer 
%p\n",our_mmap); printf("\tPOLL_IN : %d\n",count.in); printf("\tPOLL_OUT: %d\n",count.out); printf("\tPOLL_MSG: %d\n",count.msg); printf("\tPOLL_ERR: %d\n",count.err); printf("\tPOLL_PRI: %d\n",count.pri); printf("\tPOLL_HUP: %d\n",count.hup); printf("\tUNKNOWN : %d\n",count.unknown); } if (count.total==0) { if (!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } if (count.in!=0) { if (!quiet) printf("Unexpected POLL_IN interrupt.\n"); test_fail(test_string); } if (count.hup!=10) { if (!quiet) printf("POLL_HUP value %d, expected %d.\n", count.hup,10); test_fail(test_string); } /*******************************************************/ /* Next try moderate size value, should be no overflow */ /*******************************************************/ count.in=0; count.out=0; count.msg=0; count.err=0; count.pri=0; count.hup=0; count.unknown=0; count.total=0; if (!quiet) { printf("Trying 1 million instructions with period 10,000,000\n"); printf("Should be 0 overflows\n"); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=10000000; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,1); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); test_fail(test_string); } } instructions_million(); 
ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); close(fd1); if (!quiet) { printf("Counts, using mmap buffer %p\n",our_mmap); printf("\tPOLL_IN : %d\n",count.in); printf("\tPOLL_OUT: %d\n",count.out); printf("\tPOLL_MSG: %d\n",count.msg); printf("\tPOLL_ERR: %d\n",count.err); printf("\tPOLL_PRI: %d\n",count.pri); printf("\tPOLL_HUP: %d\n",count.hup); printf("\tUNKNOWN : %d\n",count.unknown); } if (count.total!=0) { if (!quiet) printf("Unexpected overflow events generated.\n"); test_fail(test_string); } /*******************************************************/ /* Next try large size value, should be no overflow */ /*******************************************************/ count.in=0; count.out=0; count.msg=0; count.err=0; count.pri=0; count.hup=0; count.unknown=0; count.total=0; memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=(1ULL<<63)-1; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; if (!quiet) { printf("Trying 1 million instructions with period (2^63)-1 (%llx)\n", pe.sample_period); printf("Should be 0 overflows\n"); } arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,1); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); test_fail(test_string); } } instructions_million(); ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); close(fd1); if (!quiet) { printf("Counts, using 
mmap buffer %p\n",our_mmap); printf("\tPOLL_IN : %d\n",count.in); printf("\tPOLL_OUT: %d\n",count.out); printf("\tPOLL_MSG: %d\n",count.msg); printf("\tPOLL_ERR: %d\n",count.err); printf("\tPOLL_PRI: %d\n",count.pri); printf("\tPOLL_HUP: %d\n",count.hup); printf("\tUNKNOWN : %d\n",count.unknown); } if (count.total!=0) { if (!quiet) printf("Unexpected overflow events generated.\n"); test_fail(test_string); } /************************************************************/ /* Next try very large size value, should be no overflow */ /* In fact, on 3.15 and later the open should fail */ /************************************************************/ count.in=0; count.out=0; count.msg=0; count.err=0; count.pri=0; count.hup=0; count.unknown=0; count.total=0; memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=0xc0000000000000bd; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; if (!quiet) { printf("Trying 1 million instructions with period %llx\n", pe.sample_period); printf("Should be 0 overflows\n"); } arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (errno==EINVAL) { if (!quiet) { fprintf(stderr,"Properly failed with too-large sample_period\n"); test_pass(test_string); exit(0); } } if (!quiet) fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,1); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); test_fail(test_string); } } 
instructions_million(); ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); close(fd1); if (!quiet) { printf("Counts, using mmap buffer %p\n",our_mmap); printf("\tPOLL_IN : %d\n",count.in); printf("\tPOLL_OUT: %d\n",count.out); printf("\tPOLL_MSG: %d\n",count.msg); printf("\tPOLL_ERR: %d\n",count.err); printf("\tPOLL_PRI: %d\n",count.pri); printf("\tPOLL_HUP: %d\n",count.hup); printf("\tUNKNOWN : %d\n",count.unknown); } if (count.total>20000) { int version; if (!quiet) printf("Stopping early, too many overflows encountered.\n"); /* This is expected before 3.15 */ version=get_kernel_version(); if (version<0x30f00) { test_fail_kernel(test_string); } else { test_fail(test_string); } } if (count.total!=0) { if (!quiet) printf("Unexpected overflow events generated.\n"); test_fail(test_string); } test_pass(test_string); return 0; }
/* Tests perf_event read() behavior across fork(): the counter	*/
/* does not follow the child, so even though the child executes	*/
/* an extra million instructions, both processes should read	*/
/* (approximately) the parent's 1-million count.		*/
int main(int argc, char** argv) {

	int ret,fd1,quiet,i;
	int result;
	pid_t pid;
	struct perf_event_attr pe;
	char test_string[]="Testing reads in forked children...";

	quiet=test_quiet();

	/* set up group leader */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_HARDWARE;
	pe.size=sizeof(struct perf_event_attr);
	pe.config=PERF_COUNT_HW_INSTRUCTIONS;
	pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID;
	pe.disabled=1;
	pe.pinned=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;

	arch_adjust_domain(&pe,quiet);

	fd1=perf_event_open(&pe,0,-1,-1,0);
	if (fd1<0) {
		if (!quiet) {
			fprintf(stderr,"Error opening leader %llx\n",pe.config);
		}
		test_fail(test_string);
	}

	if (!quiet) {
		printf("Testing fork behavior\n");
		printf("Even though the child runs longer, the value\n");
		printf("it reads should be that of the parent.\n");
	}

	/* enable counting */
	ret=ioctl(fd1, PERF_EVENT_IOC_ENABLE,0);

	/* Run a million */
	result=instructions_million();

	/* fork off a child */
	pid = fork();
	if ( pid < 0 ) {
		fprintf(stderr,"Failed fork\n");
		test_fail(test_string);
	}

	/* our child */
	if ( pid == 0 ) {
		/* FIX: progress prints now honor the quiet flag */
		if (!quiet) printf("In child, running an extra million\n");
		/* extra million */
		result=instructions_million();
	}
	else {
		/* FIX: progress prints now honor the quiet flag */
		if (!quiet) printf("In parent\n");
		/* disable counting */
		ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0);
		if (ret<0) {
			if (!quiet) printf("Error disabling\n");
		}
	}

#define BUFFER_SIZE 32
	long long buffer[BUFFER_SIZE];
	for(i=0;i<BUFFER_SIZE;i++) {
		buffer[i]=-1;
	}

	result=read(fd1,buffer,BUFFER_SIZE*sizeof(long long));
	if (result<0) {
		if (!quiet) {
			fprintf(stderr,"Unexpected read result %d\n",result);
		}
		test_fail(test_string);
	}

	/* should be 1 + 2*num_events */
	/* which is 3 in our case */
	if (result!=(3)*sizeof(long long)) {
		if (!quiet) {
			fprintf(stderr,"Unexpected read result %d (should be %ld)\n",
				result,3*sizeof(long long));
		}
		test_fail(test_string);
	}

	if (!quiet) {
		printf("Number of events: %lld\n",buffer[0]);
		for(i=0;i<buffer[0];i++) {
			printf("Value [%d] : %lld\n",i,buffer[1+(i*2)]);
			printf("Format ID[%d] : %lld\n",i,buffer[1+((i*2)+1)]);
		}
	}

	double error;
	long long average,high,low;
	int failure=0;

	/* single event, so average==high==low==the one value read */
	average=high=low=buffer[1];

	error=display_error(average,high,low,1000000ULL,quiet);
	if ((error > 1.0) || (error<-1.0)) {
		failure++;
	}

	/* child exits with its own pass/fail status; the parent	*/
	/* folds it into the overall result below			*/
	if (pid==0) {
		return failure;
	}
	else {
		int status;
		waitpid(pid,&status,0);
		if (WIFEXITED(status)) {
			if (WEXITSTATUS(status)!=0) failure++;
		}
	}

	if (failure) {
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char **argv) { struct perf_event_attr pe; int result=0; int fd[MAX_OPEN]; int i; int num_counters=0; int errors=0; long long values[256]; quiet=test_quiet(); if (!quiet) { printf("This test checks the intel fixed counter 1\n"); printf("This is a best effort, Linux does not let you\n"); printf("specify which counter events are scheduled in.\n"); printf("The NMI watchdog often grabs fixed counter 1,\n"); printf("hiding the issue.\n"); printf("Anyway, all the values should match.\n\n"); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_CPU_CYCLES; pe.read_format=PERF_FORMAT_GROUP; pe.exclude_kernel=1; pe.exclude_hv=1; for(i=0;i<MAX_OPEN;i++) { pe.disabled=(i==0); pe.pinned=(i==0); fd[i]=perf_event_open(&pe,0,-1,i==0?-1:fd[0],0); if (fd[i]<0) { fprintf(stderr,"Error opening event %d %s\n",i,strerror(errno)); break; } } num_counters=i; if (num_counters<1) { test_fail(test_string); } ioctl(fd[0],PERF_EVENT_IOC_ENABLE,0); result+=instructions_million(); ioctl(fd[0],PERF_EVENT_IOC_DISABLE,0); read(fd[0],values,128); close(fd[0]); for(i=0;i<num_counters;i++) { if (!quiet) { printf("%d %lld\n",i,values[i+1]); } if (values[i+1]!=values[1]) { errors++; } } if (errors) { if (!quiet) { fprintf(stderr,"Some results don't match!\n"); } test_fail(test_string); } test_pass(test_string); return 0; }
int main(int argc, char** argv) { int ret,quiet,i; struct perf_event_attr pe; struct sigaction sa; void *our_mmap; char test_string[]="Testing overflows on sibling..."; quiet=test_quiet(); if (!quiet) printf("This tests that overflows of siblings work.\n"); memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_CPU_CYCLES; pe.sample_period=0; pe.sample_type=0; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=1; pe.pinned=0; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { fprintf(stderr,"Error opening leader %llx\n",pe.config); test_fail(test_string); } pe.type=PERF_TYPE_HARDWARE; pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=100000; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=0; pe.disabled=0; pe.pinned=0; pe.exclude_kernel=1; pe.exclude_hv=1; arch_adjust_domain(&pe,quiet); fd2=perf_event_open(&pe,0,-1,fd1,0); if (fd2<0) { fprintf(stderr,"Error opening %llx\n",pe.config); test_fail(test_string); } /* large enough that threshold not a problem */ our_mmap=mmap(NULL, (1+MMAP_PAGES)*getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED, fd2, 0); fcntl(fd2, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd2, F_SETSIG, SIGIO); fcntl(fd2, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); ioctl(fd2, PERF_EVENT_IOC_RESET, 0); ret=ioctl(fd1, PERF_EVENT_IOC_ENABLE,0); if (ret<0) { if (!quiet) fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE of group leader: " "%d %s\n",errno,strerror(errno)); test_fail(test_string); } for(i=0;i<100;i++) { instructions_million(); } ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE,0); if (!quiet) printf("Count: %d %p\n",count.total,our_mmap); if (count.total==0) { if 
(!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } if (count.total!=1000) { if (!quiet) printf("Expected %d overflows, got %d.\n", count.total,100); test_fail(test_string); } close(fd1); close(fd2); test_pass(test_string); return 0; }
int main(int argc, char** argv) { int ret; int mmap_pages=1+MMAP_DATA_SIZE; struct perf_event_attr pe; struct sigaction sa; char test_string[]="Testing record sampling..."; quiet=test_quiet(); if (!quiet) printf("This tests the record sampling interface.\n"); memset(&sa, 0, sizeof(struct sigaction)); sa.sa_sigaction = our_handler; sa.sa_flags = SA_SIGINFO; if (sigaction( SIGIO, &sa, NULL) < 0) { fprintf(stderr,"Error setting up signal handler\n"); exit(1); } /* Set up Instruction Event */ memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_INSTRUCTIONS; pe.sample_period=SAMPLE_FREQUENCY; pe.sample_type=sample_type; pe.read_format=read_format; pe.disabled=1; pe.pinned=1; pe.exclude_kernel=1; pe.exclude_hv=1; pe.wakeup_events=1; pe.branch_sample_type=PERF_SAMPLE_BRANCH_ANY; arch_adjust_domain(&pe,quiet); fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) { fprintf(stderr,"Problem opening leader %s\n", strerror(errno)); fprintf(stderr,"Trying without branches\n"); } sample_type&=~PERF_SAMPLE_BRANCH_STACK; pe.sample_type=sample_type; fd1=perf_event_open(&pe,0,-1,-1,0); if (fd1<0) { if (!quiet) { fprintf(stderr,"Error opening leader %s\n", strerror(errno)); } test_fail(test_string); } } /* Open Cycles Event */ memset(&pe,0,sizeof(struct perf_event_attr)); pe.type=PERF_TYPE_HARDWARE; pe.size=sizeof(struct perf_event_attr); pe.config=PERF_COUNT_HW_CPU_CYCLES; pe.sample_type=PERF_SAMPLE_IP; pe.read_format=PERF_FORMAT_GROUP|PERF_FORMAT_ID; pe.disabled=0; pe.exclude_kernel=1; pe.exclude_hv=1; arch_adjust_domain(&pe,quiet); fd2=perf_event_open(&pe,0,-1,fd1,0); if (fd2<0) { if (!quiet) fprintf(stderr,"Error opening %llx\n",pe.config); test_fail(test_string); } our_mmap=mmap(NULL, mmap_pages*4096, PROT_READ|PROT_WRITE, MAP_SHARED, fd1, 0); fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); fcntl(fd1, F_SETSIG, SIGIO); fcntl(fd1, F_SETOWN,getpid()); ioctl(fd1, PERF_EVENT_IOC_RESET, 0); 
ret=ioctl(fd1, PERF_EVENT_IOC_ENABLE,0); if (ret<0) { if (!quiet) { fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE " "of group leader: %d %s\n", errno,strerror(errno)); exit(1); } } instructions_million(); ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH,0); if (!quiet) { printf("Counts, using mmap buffer %p\n",our_mmap); printf("\tPOLL_IN : %d\n",count.in); printf("\tPOLL_OUT: %d\n",count.out); printf("\tPOLL_MSG: %d\n",count.msg); printf("\tPOLL_ERR: %d\n",count.err); printf("\tPOLL_PRI: %d\n",count.pri); printf("\tPOLL_HUP: %d\n",count.hup); printf("\tUNKNOWN : %d\n",count.unknown); } if (count.total==0) { if (!quiet) printf("No overflow events generated.\n"); test_fail(test_string); } munmap(our_mmap,mmap_pages*4096); close(fd2); close(fd1); test_pass(test_string); return 0; }