// // Print the event counts (as saved/accumulated in the // static event variables) to the specified file. // void print_counts(FILE* result_file) { char* event_name_0 = NULL ; char* event_name_1 = NULL ; fprintf(result_file, " Cycle Counter: %llu ", ccr) ; if (ccr_overflow) { fprintf(result_file, "(OVERFLOW) ") ; } fprintf(result_file, " Processor cycles (scaled by 64)\n") ; if ((event_name_0 = lookup_event(event_0)) == NULL) { fprintf(stderr, "*warning* Unknown PM event (0) when printing\n") ; fprintf(result_file, " Counter 0: *warning*\n") ; return ; } else { fprintf(result_file, " Counter 0: %llu ", ctr0) ; if (ctr0_overflow) { fprintf(result_file, "(OVERFLOW) ") ; } fprintf(result_file, " %s\n", event_name_0) ; } if ((event_name_1 = lookup_event(event_1)) == NULL) { fprintf(stderr, "*warning* Unknown PM event (1) when printing\n") ; fprintf(result_file, " Counter 1: *warning*\n") ; return ; } else { fprintf(result_file, " Counter 1: %llu ", ctr1) ; if (ctr1_overflow) { fprintf(result_file, "(OVERFLOW) ") ; } fprintf(result_file, " %s\n", event_name_1) ; } }
/*
 * record_event:
 * Append one event record to the perf log's block queue.
 *
 * Wire format per record: a guint32 time delta (relative to the log's
 * last_time), a guint16 event id, then bytes_len bytes of payload.
 * Does nothing when the log is disabled; oversize events are dropped
 * with a warning.
 */
static void record_event (ShellPerfLog *perf_log,
                          gint64 event_time,
                          ShellPerfEvent *event,
                          const guchar *bytes,
                          size_t bytes_len)
{
  ShellPerfBlock *block;
  size_t total_bytes;
  guint32 time_delta;
  guint32 pos;

  if (!perf_log->enabled)
    return;

  /* Header (delta + id) plus payload must fit into one block.  The
   * bytes_len check alone also guards against total_bytes wrapping
   * for huge bytes_len values. */
  total_bytes = sizeof (gint32) + sizeof (gint16) + bytes_len;
  if (G_UNLIKELY (bytes_len > BLOCK_SIZE || total_bytes > BLOCK_SIZE))
    {
      g_warning ("Discarding oversize event '%s'\n", event->name);
      return;
    }

  /* If the delta no longer fits in 32 bits, emit a recursive
   * "perf.setTime" record carrying the absolute time, then restart the
   * delta at 0.  last_time is set before the recursive call so the
   * inner record itself gets a zero delta. */
  if (event_time > perf_log->last_time + G_GINT64_CONSTANT(0xffffffff))
    {
      perf_log->last_time = event_time;
      record_event (perf_log, event_time,
                    lookup_event (perf_log, "perf.setTime", "x"),
                    (const guchar *)&event_time, sizeof(gint64));
      time_delta = 0;
    }
  else if (event_time < perf_log->last_time)
    /* Clock went backwards; clamp rather than underflow. */
    time_delta = 0;
  else
    time_delta = (guint32)(event_time - perf_log->last_time);

  perf_log->last_time = event_time;

  /* Reuse the tail block if the record fits, otherwise start a new one. */
  if (perf_log->blocks->tail == NULL ||
      total_bytes + ((ShellPerfBlock *)perf_log->blocks->tail->data)->bytes > BLOCK_SIZE)
    {
      block = g_new (ShellPerfBlock, 1);
      block->bytes = 0;
      g_queue_push_tail (perf_log->blocks, block);
    }
  else
    {
      block = (ShellPerfBlock *)perf_log->blocks->tail->data;
    }

  /* Serialize: delta, id, payload. */
  pos = block->bytes;
  memcpy (block->buffer + pos, &time_delta, sizeof (guint32));
  pos += sizeof (guint32);
  memcpy (block->buffer + pos, &event->id, sizeof (guint16));
  pos += sizeof (guint16);
  memcpy (block->buffer + pos, bytes, bytes_len);
  pos += bytes_len;

  block->bytes = pos;
}
/**
 * shell_perf_log_event:
 * @perf_log: a #ShellPerfLog
 * @name: name of the event
 *
 * Records a performance event with no arguments.
 */
void
shell_perf_log_event (ShellPerfLog *perf_log,
                      const char   *name)
{
  ShellPerfEvent *event;

  /* Event must have been registered with an empty signature. */
  event = lookup_event (perf_log, name, "");
  if (G_LIKELY (event != NULL))
    record_event (perf_log, get_time (), event, NULL, 0);
}
/**
 * shell_perf_log_event_s:
 * @perf_log: a #ShellPerfLog
 * @name: name of the event
 * @arg: the argument
 *
 * Records a performance event with one string argument.
 */
void
shell_perf_log_event_s (ShellPerfLog *perf_log,
                        const char   *name,
                        const char   *arg)
{
  ShellPerfEvent *event = lookup_event (perf_log, name, "s");

  if (event == NULL)
    return;

  /* Payload includes the terminating NUL byte. */
  size_t payload_len = strlen (arg) + 1;
  record_event (perf_log, get_time (), event,
                (const guchar *)arg, payload_len);
}
/**
 * shell_perf_log_event_x:
 * @perf_log: a #ShellPerfLog
 * @name: name of the event
 * @arg: the argument
 *
 * Records a performance event with one 64-bit integer argument.
 */
void
shell_perf_log_event_x (ShellPerfLog *perf_log,
                        const char   *name,
                        gint64        arg)
{
  ShellPerfEvent *event;

  event = lookup_event (perf_log, name, "x");
  if (event == NULL)
    return;

  /* The integer is serialized in host byte order. */
  record_event (perf_log, get_time (), event,
                (const guchar *)&arg, sizeof (arg));
}
/**
 * cinnamon_perf_log_event_i:
 * @perf_log: a #CinnamonPerfLog
 * @name: name of the event
 * @arg: the argument
 *
 * Records a performance event with one 32-bit integer argument.
 */
void
cinnamon_perf_log_event_i (CinnamonPerfLog *perf_log,
                           const char      *name,
                           gint32           arg)
{
  CinnamonPerfEvent *event = lookup_event (perf_log, name, "i");

  if (G_LIKELY (event != NULL))
    {
      /* The integer is serialized in host byte order. */
      record_event (perf_log, get_time (), event,
                    (const guchar *)&arg, sizeof (arg));
    }
}
// // Clear the performance counters and start counting. Always count // process cycles. event_a and event_b must specify a valid ARM V6 // performance event (e.g., ARMV6_EVENT_INSTR_EXEC). // void start_counting(int evt0, int evt1) { ccr = 0 ; ctr0 = 0 ; ctr1 = 0 ; ccr_overflow = 0 ; ctr0_overflow = 0 ; ctr1_overflow = 0 ; event_0 = evt0 ; event_1 = evt1 ; if (lookup_event(evt0) == NULL) { fprintf(stderr, "*warning* Unknown PM event was requested (counter 0)\n") ; event_0 = ARMV6_EVENT_NOP ; } if (lookup_event(evt1) == NULL) { fprintf(stderr, "*warning* Unknown PM event was requested (counter 1)\n") ; event_1 = ARMV6_EVENT_NOP ; } // Clear the sticky overflow bits armv6_pmcr_write(ARMV6_PMCR_CCOUNT_OVERFLOW | ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW ) ; // Clear and start the performance counters armv6_pmcr_write(ARMV6_PMCR_ENABLE | ARMV6_PMCR_CCOUNT_RESET | ARMV6_PMCR_CCOUNT_DIV | ARMV6_PMCR_CTR01_RESET | (event_0 << ARMV6_PMCR_EVT_COUNT0_SHIFT) | (event_1 << ARMV6_PMCR_EVT_COUNT1_SHIFT) ) ; }
/*
 * Remove the event registered for fd: detach it from epoll, drop it
 * from the rb-tree, and free its bookkeeping.  Unknown fds are ignored.
 */
void unregister_event(int fd)
{
	struct event_info *ei = lookup_event(fd);

	if (ei == NULL)
		return;

	if (epoll_ctl(efd, EPOLL_CTL_DEL, fd, NULL) != 0)
		sd_err("failed to delete epoll event for fd %d: %m", fd);

	rb_erase(&ei->rb, &events_tree);
	free(ei);

	/*
	 * Although ei is no longer valid pointer, ei->handler() might be about
	 * to be called in do_event_loop(). Refreshing the event loop is safe.
	 */
	event_force_refresh();
}
/*
 * Change the epoll interest mask for an already-registered fd.
 * Returns 0 on success, 1 if the fd is unknown or epoll_ctl fails.
 */
int modify_event(int fd, unsigned int new_events)
{
	struct epoll_event ev;
	struct event_info *ei = lookup_event(fd);

	if (ei == NULL) {
		sd_err("event info for fd %d not found", fd);
		return 1;
	}

	memset(&ev, 0, sizeof(ev));
	ev.events = new_events;
	ev.data.ptr = ei;		/* handler dispatch key */

	if (epoll_ctl(efd, EPOLL_CTL_MOD, fd, &ev) != 0) {
		sd_err("failed to modify epoll event for fd %d: %m", fd);
		return 1;
	}
	return 0;
}
/* Thus they never have most of these problems */
/*
 * SIGRTMIN+2 overflow handler for fuzzed perf events.
 *
 * Async-signal context: uses a static recursion guard instead of
 * locking, avoids rand()/logging, and throttles events that overflow
 * too often.  info->si_fd identifies the triggering event.
 */
void our_handler(int signum, siginfo_t *info, void *uc) {

	static int already_handling=0;

	int fd = info->si_fd;
	int i;
	int ret;

	static int last_fd=0;

	/* In some cases (syscall tracepoint) */
	/* The act of disabling an event would trigger */
	/* Another overflow, leading to a recursive storm */
	if (already_handling) {
		stats.already_overflows++;
		return;
	}

	already_handling=1;

	stats.overflows++;

	/* disable the event for the time being */
	/* we were having trouble with signal storms */
	ret=ioctl(fd,PERF_EVENT_IOC_DISABLE,0);

	/* Do not log, logging only make sense if */
	/* we have deterministic counts which we don't */

	/* Somehow we got a signal from an invalid event? */
	/* How would this happen? */
	/* Looks like if we fork() then close an event, */
	/* It can still be alive in the child and cause */
	/* a signal to come in even though it is closed.*/
	if (ret<0) {
		/* last_fd suppresses repeated complaints about the same fd */
		if (fd!=last_fd) {
			printf("Signal from invalid fd %d %s\n",
				fd,strerror(errno));
			last_fd=fd;
		}
		already_handling=0;
		return;
		// orderly_shutdown();
	}

	i=lookup_event(fd);

	if (i>=0) {

		event_data[i].overflows++;

		/* Over 10000 overflows since the last reset: throttle. */
		if (event_data[i].overflows>10000) {
			if (!logging) printf("Throttling event %d fd %d, last_refresh=%d, "
				"period=%llu, type=%d throttles %d\n",
				i,event_data[i].fd,event_data[i].last_refresh,
				event_data[i].attr.sample_period,
				event_data[i].attr.type,
				event_data[i].throttles);
			event_data[i].overflows=0;
			event_data[i].throttles++;

			/* otherwise if we re-trigger next time */
			/* with >1 refresh the throttle never */
			/* lasts a significant amount of time. */
			next_refresh=0;

			/* Avoid infinite throttle storms */
			if (event_data[i].throttles > MAX_THROTTLES) {
				printf("Stuck in a signal storm w/o forward progress; Max throttle count hit, giving up\n");
				close(event_data[i].fd);
				// orderly_shutdown();

				/* In a storm we used to try to somehow stop */
				/* it by closing all events, but this never */
				/* really worked. */
#if 0
				/* Disable all events */
				printf("Trying to disable all events\n");
				for(j=0;j<NUM_EVENTS;j++) {
					if (event_data[j].active) {
						ioctl(event_data[j].fd,PERF_EVENT_IOC_DISABLE,0);
					}
				}
				throttle_close_event=i;
#endif
			}
		}
		else {
			/* read the event */
			perf_mmap_read(event_data[i].mmap);

			/* cannot call rand() from signal handler! */
			/* we re-enter and get stuck in a futex :( */
			/* REFRESH re-arms the (currently disabled) event */
			ret=ioctl(fd, PERF_EVENT_IOC_REFRESH,next_refresh);
			if (ret==0) {
				event_data[i].last_refresh=next_refresh;
			}

			/* Do not log, makes no sense */
		}
	}

	already_handling=0;

	(void) ret;
}
void open_random_event(int mmap_enabled, int overflow_enabled) { int fd; int i,trinity_type; i=find_empty_event(); /* return if no free events */ if (i<0) return; event_data[i].overflows=0; event_data[i].throttles=0; /* repeat until we create a valid event */ while(1) { /* call trinity random perf_event_open() code */ //generic_sanitise(0); trinity_type=syscall_perf_event_open.sanitise(&shm->syscall[0]); memcpy(&event_data[i].attr, (struct perf_event_attr *)shm->syscall[0].a1, sizeof(struct perf_event_attr)); event_data[i].pid=shm->syscall[0].a2; event_data[i].cpu=get_cpu(); event_data[i].group_fd=shm->syscall[0].a4; event_data[i].flags=shm->syscall[0].a5; post_perf_event_open(&shm->syscall[0]); /* Randomly make part of a group 1/4 of the time */ if (rand()%4==2) { int j; j=find_random_active_event(); /* is it a master? */ /* can we set a group leader that isn't itself */ /* a leader? */ // if (event_data[j].group_fd==-1) { event_data[i].group_fd=event_data[j].fd; // } } /* Randomly try to use a kprobe */ if (event_data[i].attr.type==PERF_TYPE_TRACEPOINT) { if (rand()%10==5) { event_data[i].attr.config=kprobe_id; } } if (ignore_but_dont_skip.open) return; /* Debugging code */ /* We don't usually log failed opens as there are so many */ if (logging&TYPE_OPEN) { #if LOG_FAILURES if (trigger_failure_logging) { /* uncomment if failing opens causing crashes */ // static int quit_next=0; // if (event_data[i].attr.type==PERF_TYPE_TRACEPOINT) { sprintf(log_buffer,"# O -1 %d %d %d %lx ", event_data[i].pid, event_data[i].cpu, event_data[i].group_fd, event_data[i].flags); write(log_fd,log_buffer,strlen(log_buffer)); perf_log_attr(&event_data[i].attr); // fsync(log_fd); // } // if (quit_next==1) exit(1); // if (quit_next) quit_next--; // if ((event_data[i].attr.read_format==0x2d2d2d)) // quit_next=2; } #endif } /* Actually try to open the event */ fd=perf_event_open( &event_data[i].attr, event_data[i].pid, event_data[i].cpu, event_data[i].group_fd, event_data[i].flags); 
stats.open_attempts++; stats.total_syscalls++; int which_type=event_data[i].attr.type; if ((which_type<0) || (which_type>MAX_OPEN_TYPE-1)) { which_type=MAX_OPEN_TYPE-1; } /* If we succede, break out of the infinite loop */ if (fd>0) { stats.open_type_success[which_type]++; stats.open_trinity_type_success[trinity_type]++; break; } #if 0 /* Track source of UNKNOWN errnos */ if (errno==16) { printf("Event t=%d c=%llx pid=%d cpu=%d %s\n", event_data[i].attr.type, event_data[i].attr.config, event_data[i].pid, event_data[i].cpu, strerror(errno)); } #endif /* Otherwise, track the errors */ if (errno<MAX_ERRNOS) { stats.open_errno_count[errno]++; stats.open_type_fail[which_type]++; stats.open_trinity_type_fail[trinity_type]++; } /* no more file descriptors, so give up */ if (errno==EMFILE) return; } /* We successfully opened an event! */ stats.open_successful++; stats.current_open++; if (logging&TYPE_OPEN) { sprintf(log_buffer,"O %d %d %d %d %lx ", fd, event_data[i].pid, event_data[i].cpu, event_data[i].group_fd, event_data[i].flags); write(log_fd,log_buffer,strlen(log_buffer)); perf_log_attr(&event_data[i].attr); #if FSYNC_EVERY fsync(log_fd); #endif } event_data[i].fd=fd; event_data[i].active=1; active_events++; /* if we are member of a group, update size of group */ /* this is needed for calcuating "proper" read size */ /* Also I don't think we adjust this on close */ if (event_data[i].group_fd!=-1) { int j=lookup_event(event_data[i].group_fd); event_data[j].number_in_group++; event_data[j].read_size=update_read_size(j); } /* Setup mmap buffer */ if (mmap_enabled) { setup_mmap(i); } /* Setup overflow 50% of the time */ if ((overflow_enabled) && (rand()%2)) { if (!ignore_but_dont_skip.overflow) { int fcntl_result; if (logging&TYPE_OVERFLOW) { sprintf(log_buffer,"o %d\n",event_data[i].fd); write(log_fd,log_buffer,strlen(log_buffer)); } memset(&event_data[i].sa, 0, sizeof(struct sigaction)); event_data[i].sa.sa_sigaction = our_handler; event_data[i].sa.sa_flags = 
SA_SIGINFO; if (sigaction( SIGRTMIN+2, &event_data[i].sa, NULL) < 0) { printf("Error setting up signal handler\n"); } fcntl_result=fcntl(event_data[i].fd, F_SETFL, O_RDWR|O_NONBLOCK|O_ASYNC); if (fcntl_result<0) fprintf(stderr,"F1 error!\n"); fcntl_result=fcntl(event_data[i].fd, F_SETSIG, SIGRTMIN+2); if (fcntl_result<0) fprintf(stderr,"F1 error!\n"); fcntl_result=fcntl(event_data[i].fd, F_SETOWN,getpid()); if (fcntl_result<0) fprintf(stderr,"F1 error!\n"); } } event_data[i].number_in_group=1; event_data[i].read_size=update_read_size(i); }