/*
 * Emit a callstack into the trace buffer: one header tracepoint carrying the
 * flags and frame count, followed by the frames packed four per tracepoint.
 * The last tracepoint is zero-padded when the frame count is not a multiple
 * of four.
 */
static void
callstack_log(struct callstack *cs, uint32_t hcode, uint32_t dcode)
{
	unsigned int batch, nbatches, base;

	/* header on the stack: flags + number of frames */
	BUF_DATA2(hcode, cs->flags, cs->nframes);

	/* how many batches of 4 frames do we need? round up. */
	nbatches = cs->nframes / 4;
	if ((cs->nframes % 4) != 0) {
		nbatches++;
	}

	/* emit every batch, substituting zero for frames past the end */
	for (batch = 0; batch < nbatches; batch++) {
#define SCRUB_FRAME(x) (((x) < cs->nframes) ? cs->frames[x] : 0)
		base = batch * 4;
		BUF_DATA(dcode,
		    SCRUB_FRAME(base + 0),
		    SCRUB_FRAME(base + 1),
		    SCRUB_FRAME(base + 2),
		    SCRUB_FRAME(base + 3));
#undef SCRUB_FRAME
	}
}
//----------------------------------------------------------------------------- // External function that is used to reply to a http request. void rq_http_reply(rq_http_req_t *req, const int code, char *ctype, expbuf_t *data) { rq_http_t *http; assert(req && code > 0); assert(req->reply); assert(BUF_LENGTH(req->reply) == 0); addCmd(req->reply, HTTP_CMD_CLEAR); if (ctype) { addCmdShortStr(req->reply, HTTP_CMD_CONTENT_TYPE, strlen(ctype), ctype); } if (data && BUF_LENGTH(data)) { addCmdLargeStr(req->reply, HTTP_CMD_FILE, BUF_LENGTH(data), BUF_DATA(data)); } addCmdInt(req->reply, HTTP_CMD_CODE, code); addCmd(req->reply, HTTP_CMD_REPLY); // If we already have a reply, then we send it and then close off the request object. assert(req->msg); rq_reply(req->msg, BUF_LENGTH(req->reply), BUF_DATA(req->reply)); expbuf_clear(req->reply); req->msg = NULL; if (req->inprocess > 0) { // need to remove the request from the list. assert(req->http); http = req->http; assert(http->req_list); ll_remove(http->req_list, req); req_free(req); req = NULL; } }
//----------------------------------------------------------------------------- // Handle the message that was sent over the queue. the message itself uses // the RISP method, so we pass the data on to the risp processor. static void message_handler(rq_message_t *msg, void *arg) { int processed; control_t *control; assert(msg); control = (control_t *) arg; assert(control); assert(control->rqsvc); assert(control->rqsvc->rq); assert(msg->conn); assert(msg->conn->rq); assert(control->rqsvc->rq == msg->conn->rq); assert(control->reply); assert(BUF_LENGTH(control->reply) == 0); // since we will only be processing one request at a time, and there are no // paths for blocking when processing the request, we will put the request // in the control structure. If we were processing more than one, we would // create a list of pending requests which contain the control structure in // it. assert(control->req == NULL); control->req = msg; assert(control->risp); assert(msg->data); processed = risp_process(control->risp, control, BUF_LENGTH(msg->data), BUF_DATA(msg->data)); assert(processed == BUF_LENGTH(msg->data)); // we need to get the reply and return it. Has that been done? assert(control->reply); assert(BUF_LENGTH(control->reply) > 0); rq_reply(msg, BUF_LENGTH(control->reply), BUF_DATA(control->reply)); expbuf_clear(control->reply); msg = NULL; control->req = NULL; }
void kperf_task_snapshot_log(struct kperf_task_snapshot *tksn) { assert(tksn != NULL); #if defined(__LP64__) BUF_DATA(PERF_TK_SNAP_DATA, tksn->kptksn_flags, ENCODE_UPPER_64(tksn->kptksn_suspend_count) | ENCODE_LOWER_64(tksn->kptksn_pageins), tksn->kptksn_user_time_in_terminated_threads, tksn->kptksn_system_time_in_terminated_threads); #else BUF_DATA(PERF_TK_SNAP_DATA1_32, UPPER_32(tksn->kptksn_flags), LOWER_32(tksn->kptksn_flags), tksn->kptksn_suspend_count, tksn->kptksn_pageins); BUF_DATA(PERF_TK_SNAP_DATA2_32, UPPER_32(tksn->kptksn_user_time_in_terminated_threads), LOWER_32(tksn->kptksn_user_time_in_terminated_threads), UPPER_32(tksn->kptksn_system_time_in_terminated_threads), LOWER_32(tksn->kptksn_system_time_in_terminated_threads)); #endif /* defined(__LP64__) */ }
/*
 * Called when a thread is switched onto a CPU.  Optionally cuts a
 * context-switch tracepoint for Instruments and hands off to lightweight
 * PET sampling when that mode is active.
 */
void
kperf_on_cpu_internal(thread_t thread, thread_continue_t continuation,
    uintptr_t *starting_fp)
{
	if (kperf_kdebug_cswitch) {
		/* trace the new thread's PID for Instruments */
		int pid = task_pid(get_threadtask(thread));
		BUF_DATA(PERF_TI_CSWITCH, thread_tid(thread), pid);
	}
	if (kperf_lightweight_pet_active) {
		kperf_pet_on_cpu(thread, continuation, starting_fp);
	}
}
void kperf_ipi_handler(void *param) { struct kperf_context ctx; struct kperf_timer *timer = param; assert(timer != NULL); /* Always cut a tracepoint to show a sample event occurred */ BUF_DATA(PERF_TM_HNDLR | DBG_FUNC_START, 0); int ncpu = cpu_number(); struct kperf_sample *intbuf = kperf_intr_sample_buffer(); /* On a timer, we can see the "real" current thread */ ctx.cur_thread = current_thread(); ctx.cur_pid = task_pid(get_threadtask(ctx.cur_thread)); /* who fired */ ctx.trigger_type = TRIGGER_TYPE_TIMER; ctx.trigger_id = (unsigned int)(timer - kperf_timerv); if (ctx.trigger_id == pet_timer_id && ncpu < machine_info.logical_cpu_max) { kperf_thread_on_cpus[ncpu] = ctx.cur_thread; } /* make sure sampling is on */ unsigned int status = kperf_sampling_status(); if (status == KPERF_SAMPLING_OFF) { BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_OFF); return; } else if (status == KPERF_SAMPLING_SHUTDOWN) { BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, SAMPLE_SHUTDOWN); return; } /* call the action -- kernel-only from interrupt, pend user */ int r = kperf_sample(intbuf, &ctx, timer->actionid, SAMPLE_FLAG_PEND_USER); /* end tracepoint is informational */ BUF_INFO(PERF_TM_HNDLR | DBG_FUNC_END, r); #if defined(__x86_64__) (void)atomic_bit_clear(&(timer->pending_cpus), ncpu, __ATOMIC_RELAXED); #endif /* defined(__x86_64__) */ }
/*
 * Fires when a kperf timer expires.  Broadcasts an IPI to sample all CPUs,
 * and either lets the PET thread rearm the timer (PET mode) or reschedules
 * the timer itself.  Skips everything during sampling shutdown.
 */
static void
kperf_timer_handler(void *param0, __unused void *param1)
{
	struct kperf_timer *timer = param0;
	unsigned int timer_id = (unsigned int)(timer - kperf_timerv);
	unsigned int ncpus = machine_info.logical_cpu_max;

	timer->active = 1;

	/* along the lines of do not ipi if we are all shutting down */
	if (kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN) {
		goto deactivate;
	}

	BUF_DATA(PERF_TM_FIRE, timer_id, timer_id == pet_timer_id,
	    timer->period, timer->actionid);

	if (timer_id == pet_timer_id) {
		kperf_pet_fire_before();

		/* clean-up the thread-on-CPUs cache */
		bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
	}

	/* ping all CPUs */
	kperf_mp_broadcast_running(timer);

	/* release the pet thread? */
	if (timer_id == pet_timer_id) {
		/* PET mode is responsible for rearming the timer */
		kperf_pet_fire_after();
	} else {
		/*
		 * FIXME: Get the current time from elsewhere.  The next
		 * timer's period now includes the time taken to reach this
		 * point.  This causes a bias towards longer sampling periods
		 * than requested.
		 */
		kperf_timer_schedule(timer, mach_absolute_time());
	}

deactivate:
	timer->active = 0;
}
//----------------------------------------------------------------------------- // This function is the main public-facing function that will be used. It will // take a printf-formatted string (and arguments), add some timestamp, and // log-level info to it, and then send it to the logging queue. void rq_log(rq_log_t *log, short int level, char *format, ...) { va_list ap; short int done = 0; int res; expbuf_t *buf; assert(log); assert(level >= 0 && level < 256); assert(format); if (level >= log->level) { while (done == 0) { buf = &log->buffer; assert(BUF_LENGTH(buf) == 0 && BUF_MAX(buf) > 0 && BUF_DATA(buf)); va_start(ap, format); res = vsnprintf(BUF_DATA(buf), BUF_MAX(buf), format, ap); va_end(ap); if (res >= BUF_MAX(buf)) { // we didn't have enough room to format all the data, so we need to // increase the size of our buffer. Fortunately, the vsnprintf // returns the actual size of the buffer that we will need. So we // simply increase the size of our buffer, and then try again. assert(BUF_LENGTH(buf) == 0); assert(BUF_MAX(buf) > 0); assert(BUF_DATA(buf)); expbuf_shrink(buf, res+1); assert(done == 0); } else { // we successfully formatted the log output. done ++; assert(done > 0); BUF_LENGTH(buf) = res; assert(BUF_LENGTH(buf) < BUF_MAX(buf)); assert(BUF_DATA(buf)[BUF_LENGTH(buf)] == '\0'); // we dont need to do any trimmings, so we send it now. rq_log_send(log, level, BUF_DATA(buf), BUF_LENGTH(buf)); expbuf_clear(buf); } } } assert(BUF_LENGTH(&log->buffer) == 0 && BUF_MAX(&log->buffer) > 0 && BUF_DATA(&log->buffer) != NULL); }
//----------------------------------------------------------------------------- // This callback function is used when a complete message is received to // consume. We basically need to create a request to handle it, add it to the // list. If a reply is sent during the processing, then it will close out the // request automatically, otherwise it will be up to something else to close it // out. static void message_handler(rq_message_t *msg, void *arg) { int processed; rq_http_t *http; rq_http_req_t *req; assert(msg); assert(arg); http = (rq_http_t *) arg; assert(http); // We dont know what the use of this object will be, so we need to create it // and put it in a list (to keep track of it) until something gets rid of it. req = req_new(http, http->arg); req->msg = msg; assert(req->reply); assert(BUF_LENGTH(req->reply) == 0); assert(msg->data); assert(http->risp); processed = risp_process(http->risp, req, BUF_LENGTH(msg->data), BUF_DATA(msg->data)); assert(processed == BUF_LENGTH(msg->data)); // if we still have the msg pointer as part of the request, then the message // hasn't been replied yet, so we need to add the request to the list and // let it finish elsewhere. if (req->msg) { assert(req->inprocess == 0); req->inprocess++; // then we need to add this request to the list. assert(http->req_list); ll_push_head(http->req_list, req); req = NULL; } else { // We have already replied to the request, so we dont need it anymore. req_free(req); req = NULL; } }
//----------------------------------------------------------------------------- // Handle the response from the blacklist service. static void blacklist_handler(rq_message_t *reply) { cache_waiting_t *waiting; int processed; assert(reply); waiting = reply->arg; assert(waiting); assert(waiting->msg == NULL); waiting->msg = reply; assert(reply->data); assert(waiting->blacklist); assert(waiting->blacklist->risp); processed = risp_process(waiting->blacklist->risp, waiting, BUF_LENGTH(reply->data), (risp_char_t *) BUF_DATA(reply->data)); assert(processed == BUF_LENGTH(reply->data)); waiting->msg = NULL; }
/*
 * Log an already-collected thread-info sample into the trace buffer as a
 * single tracepoint: pid, tid, dispatch-queue address and run mode.
 */
void
kperf_threadinfo_log(struct threadinfo *ti)
{
	/* XXX: K64 only? */
	BUF_DATA(PERF_TI_DATA, ti->pid, ti->tid, ti->dq_addr, ti->runmode);
}