void mmap_random_event(int type) {

	int which;

	switch(rand()%6) {
		case 0: /* mmap random */
			which=find_random_active_event();
			setup_mmap(which);
			break;
		case 1: /* aux random */
			break;
		case 2: /* munmap random */
			which=find_random_active_mmap();
			if (which>=0) unmap_mmap(which,0);
			break;
		case 3: /* mmap read */
			which=find_random_active_mmap();
			if (which>=0) perf_mmap_read(which);
			break;
		case 4: /* trash mmap */
			if (type & TYPE_TRASH_MMAP) {
				trash_random_mmap();
			}
			break;
		default:
			break;
	}

	return;
}
/* Signature of the perf_mmap_read() helper (see lib/parse_record.c in
   perf_event_tests), shown here for reference: */
long long perf_mmap_read( void *our_mmap,
		int mmap_size,
		long long prev_head,
		int sample_type, int read_format,
		long long reg_mask,
		struct validate_values *validate,
		int quiet,
		int *events_read );

static void our_handler(int signum, siginfo_t *info, void *uc) {

	int ret;
	int fd = info->si_fd;

	ret=ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	prev_head=perf_mmap_read(our_mmap,MMAP_DATA_SIZE,prev_head,
			sample_type,read_format,
			0,	/* reg_mask */
			NULL,	/* validate */
			quiet,
			NULL);	/* events read */

	count_total++;

	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);

	(void) ret;
}
static void our_handler(int signum, siginfo_t *oh, void *blah) {

	int ret;

	ret=ioctl(fd1, PERF_EVENT_IOC_DISABLE, 0);

	if (num_oflos%100==0) {
		prev_head=perf_mmap_read(our_mmap,mmap_data_size,prev_head,
				sample_type,read_format,0,NULL,quiet,NULL);
	}
	num_oflos++;

	switch(oh->si_code) {
		case POLL_IN:  count.in++;  break;
		case POLL_OUT: count.out++; break;
		case POLL_MSG: count.msg++; break;
		case POLL_ERR: count.err++; break;
		case POLL_PRI: count.pri++; break;
		case POLL_HUP: count.hup++; break;
		default:       count.unknown++; break;
	}

	count.total++;

	ret=ioctl(fd1, PERF_EVENT_IOC_REFRESH, 1);

	(void) ret;
}
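/*
 * A minimal setup sketch (not part of the original snippet): the handler
 * above only sees POLL_IN/POLL_HUP/etc. in oh->si_code if the perf fd is
 * put into signal-driven I/O mode and F_SETSIG is used, so the kernel
 * queues a full siginfo_t along with SIGIO.  'fd1' and 'our_handler' are
 * the names used above; the function name and error handling are
 * assumptions.  Needs <signal.h>, <string.h>, <fcntl.h>, <unistd.h>,
 * <sys/ioctl.h> and <linux/perf_event.h>.
 */
static int setup_overflow_signal(int fd1) {

	struct sigaction sa;

	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_sigaction = our_handler;
	sa.sa_flags = SA_SIGINFO;	/* we want the siginfo_t argument */
	if (sigaction(SIGIO, &sa, NULL) < 0) return -1;

	/* route overflow notifications to this process as SIGIO w/ siginfo */
	if (fcntl(fd1, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC) < 0) return -1;
	if (fcntl(fd1, F_SETSIG, SIGIO) < 0) return -1;
	if (fcntl(fd1, F_SETOWN, getpid()) < 0) return -1;

	/* arm the event; it auto-disables after the overflow until refreshed */
	return ioctl(fd1, PERF_EVENT_IOC_REFRESH, 1);
}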
static void our_handler(int signum, siginfo_t *info, void *uc) {

	int ret;
	int fd = info->si_fd;

	ret=ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	prev_head=perf_mmap_read( our_mmap, MMAP_DATA_SIZE, prev_head,
			sample_type, read_format,
			0,		/* reg_mask */
			NULL,		/* validate */
			quiet,
			NULL,		/* events read */
			RAW_IBS_FETCH);	/* RAW type */

	count_total++;

	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);

	(void) ret;
}
// The iterator variable
void fizz_handler(int signum, siginfo_t* info, void* p) {
	// Check lib/parse_record.c of https://github.com/deater/perf_event_tests
	disable_trace(perffd);

	// fprintf(stderr, "Hihi, i is %d. Addr %p. P %p\n", i, p);
	// fprintf(stderr, "Hihi, i is %d. P %p\n", i, p);
	fprintf(stderr, "Hihi, i is %d at %p\n", i, &i);

	prev_head=perf_mmap_read(our_mmap,MMAP_DATA_SIZE,prev_head,
			sample_type,read_format,0,NULL,quiet,NULL);

	ioctl(perffd, PERF_EVENT_IOC_REFRESH, 1);
}
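/*
 * Hypothetical helper assumed by fizz_handler above: disable_trace() is not
 * defined in the snippet, but given how it is paired with the
 * PERF_EVENT_IOC_REFRESH call at the end of the handler, the obvious guess
 * is a thin wrapper that stops the event while the ring buffer is drained.
 */
static void disable_trace(int perffd) {
	ioctl(perffd, PERF_EVENT_IOC_DISABLE, 0);
}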
static void our_handler(int signum, siginfo_t *info, void *uc) {

	int ret;
	int fd = info->si_fd;

	ret=ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	prev_head=perf_mmap_read(our_mmap,MMAP_DATA_SIZE,prev_head,
			global_sample_type,0,global_sample_regs_user,
			NULL,quiet,NULL);

	count_total++;

	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH, 1);

	(void) ret;
}
/* Thus they never have most of these problems */
void our_handler(int signum, siginfo_t *info, void *uc) {

	static int already_handling=0;

	int fd = info->si_fd;
	int i;
	int ret;

	static int last_fd=0;

	/* In some cases (syscall tracepoint) the act of disabling */
	/* an event would trigger another overflow, leading to a */
	/* recursive signal storm */
	if (already_handling) {
		stats.already_overflows++;
		return;
	}

	already_handling=1;

	stats.overflows++;

	/* disable the event for the time being */
	/* we were having trouble with signal storms */
	ret=ioctl(fd,PERF_EVENT_IOC_DISABLE,0);

	/* Do not log; logging only makes sense if */
	/* we have deterministic counts, which we don't */

	/* Somehow we got a signal from an invalid event? */
	/* How would this happen? */
	/* Looks like if we fork() then close an event, */
	/* it can still be alive in the child and cause */
	/* a signal to come in even though it is closed. */
	if (ret<0) {
		if (fd!=last_fd) {
			printf("Signal from invalid fd %d %s\n",
				fd,strerror(errno));
			last_fd=fd;
		}
		already_handling=0;
		return;
		// orderly_shutdown();
	}

	i=lookup_event(fd);

	if (i>=0) {

		event_data[i].overflows++;

		if (event_data[i].overflows>10000) {

			if (!logging) printf("Throttling event %d fd %d, last_refresh=%d, "
				"period=%llu, type=%d throttles %d\n",
				i,event_data[i].fd,event_data[i].last_refresh,
				event_data[i].attr.sample_period,
				event_data[i].attr.type,
				event_data[i].throttles);

			event_data[i].overflows=0;
			event_data[i].throttles++;

			/* otherwise if we re-trigger next time */
			/* with >1 refresh the throttle never */
			/* lasts a significant amount of time. */
			next_refresh=0;

			/* Avoid infinite throttle storms */
			if (event_data[i].throttles > MAX_THROTTLES) {
				printf("Stuck in a signal storm without forward progress; "
					"max throttle count hit, giving up\n");
				close(event_data[i].fd);
				// orderly_shutdown();

				/* In a storm we used to try to somehow stop */
				/* it by closing all events, but this never */
				/* really worked. */
#if 0
				/* Disable all events */
				printf("Trying to disable all events\n");
				for(j=0;j<NUM_EVENTS;j++) {
					if (event_data[j].active) {
						ioctl(event_data[j].fd,PERF_EVENT_IOC_DISABLE,0);
					}
				}
				throttle_close_event=i;
#endif
			}
		}
		else {
			/* read the event */
			perf_mmap_read(event_data[i].mmap);

			/* cannot call rand() from a signal handler! */
			/* we re-enter and get stuck in a futex :( */

			ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,next_refresh);
			if (ret==0) {
				event_data[i].last_refresh=next_refresh;
			}

			/* Do not log, makes no sense */
		}
	}

	already_handling=0;

	(void) ret;
}
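/*
 * Sketch of the lookup_event() helper the handler above depends on.  The
 * real fuzzer keeps its own bookkeeping, so the names here (event_data,
 * NUM_EVENTS, .active, .fd) are assumptions inferred from how the handler
 * uses them rather than the actual implementation.
 */
static int lookup_event(int fd) {

	int i;

	for(i=0;i<NUM_EVENTS;i++) {
		if (event_data[i].active && (event_data[i].fd==fd)) {
			return i;
		}
	}

	return -1;	/* a signal arrived for an fd we are not tracking */
}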
int main(int argc, char **argv) {

	int ret,status;
	int fd;
	int mmap_pages=1+MMAP_DATA_SIZE;
	int events_read;
	int child;
	int version;

	struct perf_event_attr pe;
	struct sigaction sa;
	char test_string[]="Testing PERF_RECORD_COMM_EXEC...";

	quiet=test_quiet();

	if (!quiet) printf("This tests PERF_RECORD_COMM_EXEC samples:\n");

	memset(&sa, 0, sizeof(struct sigaction));
	sa.sa_sigaction = our_handler;
	sa.sa_flags = SA_SIGINFO;

	if (sigaction( SIGIO, &sa, NULL) < 0) {
		fprintf(stderr,"Error setting up signal handler\n");
		exit(1);
	}

	/* Fork child to measure */
	/* We do this in a child as we have to exec */
	child=fork();
	if (child==0) {
		FILE *fff;

		if (ptrace(PTRACE_TRACEME, 0, 0, 0) == 0) {

			kill(getpid(),SIGTRAP);

			/* The actual thing to measure */
			instructions_million();

			/* prctl */
			if (!quiet) printf("\tprctl(PR_SET_NAME,\"vmw\");\n");
			prctl(PR_SET_NAME,"vmw");

			/* /proc/self/comm */
			if (!quiet) printf("\tcat \"krg krg krg\" > /proc/self/comm\n");
			fff=fopen("/proc/self/comm","w");
			if (fff!=NULL) {
				fprintf(fff,"krg krg krg");
				fclose(fff);
			}

			/* exec */
			if (!quiet) printf("\texecl(\"/bin/false\"); [should have PERF_RECORD_MISC_COMM_EXEC set]\n");
			execl("/bin/false","/bin/true",NULL);

			instructions_million();

			/* Done measuring */
		}
		else {
			fprintf(stderr,"Failed ptrace...\n");
		}
		return 1;
	}

	/* wait for child to stop */
	child=wait(&status);

	/* Set up the dummy sampling event */
	memset(&pe,0,sizeof(struct perf_event_attr));
	pe.type=PERF_TYPE_SOFTWARE;
	pe.size=sizeof(struct perf_event_attr);
	pe.config=PERF_COUNT_SW_DUMMY;
	pe.sample_period=SAMPLE_FREQUENCY;
	pe.read_format=0;
	pe.disabled=1;
	pe.pinned=1;
	pe.exclude_kernel=1;
	pe.exclude_hv=1;
	pe.wakeup_events=1;
	pe.comm_exec=1;
	pe.comm=1;

	arch_adjust_domain(&pe,quiet);

	fd=perf_event_open(&pe,child,-1,-1,0);
	if (fd<0) {
		if (!quiet) {
			fprintf(stderr,"Problem opening leader %s\n",
				strerror(errno));
		}

		version=get_kernel_version();

		/* Introduced in 3.16 */
		if (version<0x31000) {
			if (!quiet) {
				fprintf(stderr,"comm_exec support not added until Linux 3.16\n");
			}
			test_fail_kernel(test_string);
		}
		test_fail(test_string);
	}

	our_mmap=mmap(NULL, mmap_pages*4096,
		PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
	if (our_mmap==MAP_FAILED) {
		fprintf(stderr,"mmap() failed %s!\n",strerror(errno));
		test_fail(test_string);
	}

	fcntl(fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC);
	fcntl(fd, F_SETSIG, SIGIO);
	fcntl(fd, F_SETOWN,getpid());

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);

	ret=ioctl(fd, PERF_EVENT_IOC_ENABLE,0);
	if (ret<0) {
		if (!quiet) {
			fprintf(stderr,"Error with PERF_EVENT_IOC_ENABLE "
				"of group leader: %d %s\n",
				errno,strerror(errno));
			exit(1);
		}
	}

	/* restart child */
	if ( ptrace( PTRACE_CONT, child, NULL, NULL ) == -1 ) {
		fprintf(stderr,"Error continuing child\n");
		test_fail(test_string);
	}

	/* Wait for child to finish */
	waitpid(child,&status,0);

	ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,0);

	if (!quiet) {
		printf("Counts %d, using mmap buffer %p\n",count_total,our_mmap);
	}

	/* Drain any remaining events */
	prev_head=perf_mmap_read(our_mmap,MMAP_DATA_SIZE,prev_head,
			global_sample_type,0,global_sample_regs_user,
			NULL,quiet,&events_read);

	munmap(our_mmap,mmap_pages*4096);
	close(fd);

#define EXPECTED_EVENTS 3

	if (events_read!=EXPECTED_EVENTS) {
		if (!quiet) fprintf(stderr,"Wrong number of events! Expected %d but got %d\n",
			EXPECTED_EVENTS,events_read);
		test_fail(test_string);
	}

	test_pass(test_string);

	return 0;
}
int main(int argc, char** argv) {

	void *mmap_buffers[NUM_CPUS];
	long long prev_head[NUM_CPUS];
	static int fd[NUM_EVENTS][NUM_CPUS];
	int ret;
	int mmap_pages=1+MMAP_DATA_SIZE;
	int i,j;

	struct perf_event_attr pe;

	/* Initialize */
	for(i=0;i<NUM_CPUS;i++) {
		prev_head[i]=0;
	}

	/* Set up Leader Event on each CPU */
	for(i=0;i<NUM_CPUS;i++) {

		memset(&pe,0,sizeof(struct perf_event_attr));
//		pe.type=PERF_TYPE_HARDWARE;
//		pe.config=PERF_COUNT_HW_REF_CPU_CYCLES;
		pe.type=PERF_TYPE_SOFTWARE;
		pe.config=PERF_COUNT_SW_CPU_CLOCK;
		pe.size=sizeof(struct perf_event_attr);
		pe.sample_period=SAMPLE_FREQUENCY;
		pe.sample_type=sample_type;
		pe.read_format=read_format;
		pe.disabled=1;
		pe.pinned=1;

		/* any process, pinned to CPU i */
		fd[0][i]=perf_event_open(&pe,-1,i,-1,0);
		if (fd[0][i]<0) {
			fprintf(stderr,"Problem opening leader %d %s\n",
				i,strerror(errno));
		}

		/* Open Instructions Event in the same group */
		memset(&pe,0,sizeof(struct perf_event_attr));
		pe.type=PERF_TYPE_HARDWARE;
		pe.size=sizeof(struct perf_event_attr);
		pe.config=PERF_COUNT_HW_INSTRUCTIONS;
//		pe.sample_type=sample_type;
//		pe.read_format=read_format;
		pe.disabled=0;

		/* same CPU as its group leader */
		fd[1][i]=perf_event_open(&pe,-1,i,fd[0][i],0);
		if (fd[1][i]<0) {
			fprintf(stderr,"Error opening %llx\n",pe.config);
		}

		mmap_buffers[i]=mmap(NULL, mmap_pages*getpagesize(),
			PROT_READ|PROT_WRITE, MAP_SHARED, fd[0][i], 0);
	}

	for(i=0;i<NUM_CPUS;i++) {
		ioctl(fd[0][i], PERF_EVENT_IOC_RESET, 0);
		ioctl(fd[0][i], PERF_EVENT_IOC_ENABLE,0);
	}

	/* Work to do */
	sleep(1);

	/* Disable */
	for(i=0;i<NUM_CPUS;i++) {
		ret=ioctl(fd[0][i], PERF_EVENT_IOC_DISABLE,0);
	}

	/* Read */
	for(i=0;i<NUM_CPUS;i++) {
		printf("(* CPU %d *)\n",i);
		prev_head[i]=perf_mmap_read(mmap_buffers[i],
				MMAP_DATA_SIZE,prev_head[i],
				sample_type,read_format);
	}

	/* Cleanup */
	for(i=0;i<NUM_CPUS;i++) {
		munmap(mmap_buffers[i],mmap_pages*getpagesize());
		for(j=0;j<NUM_EVENTS;j++) {
			close(fd[j][i]);
		}
	}

	(void) ret;

	return 0;
}