int main(int argc, char* argv[]) {
	LibRzuScopedUse useLibRzu;
	init();
	ConfigInfo::get()->init(argc, argv);

	uint64_t startTime = 0;

	Log mainLogger(GlobalCoreConfig::get()->log.enable,
	               GlobalCoreConfig::get()->log.level,
	               GlobalCoreConfig::get()->log.consoleLevel,
	               GlobalCoreConfig::get()->log.dir,
	               GlobalCoreConfig::get()->log.file,
	               GlobalCoreConfig::get()->log.maxQueueSize);
	Log::setDefaultLogger(&mainLogger);

	ConfigInfo::get()->dump();

	int usecBetweenConnection = CFG_GET("usecperconnection")->getInt();
	benchConnections = CFG_GET("benchconnection")->getBool();

	config.connectionsDone = config.connectionsStarted = 0;
	config.connectionTargetCount = CFG_GET("targetcount")->getInt();

	if(benchConnections)
		benchmarkConnections();
	else
		benchmarkAuthentication();

	mainLogger.log(Object::LL_Info, "main", 4, "Starting benchmark\n");

	if(usecBetweenConnection == 0)
		startTime = uv_hrtime();

	if(benchConnections)
		startBenchConnections(usecBetweenConnection);
	else
		startBenchAuth(usecBetweenConnection);

	if(usecBetweenConnection != 0) {
		mainLogger.log(
		    Object::LL_Info,
		    "main",
		    4,
		    "Connected %d connections at limited speed, continuing benchmark at full-speed (time counter begin now)\n",
		    config.connectionsStarted);
		startTime = uv_hrtime();
	}

	EventLoop::getInstance()->run(UV_RUN_DEFAULT);

	uint64_t duration = (uv_hrtime() - startTime) / 1000;  // nanosec to usec

	mainLogger.log(Object::LL_Info,
	               "main",
	               4,
	               "%d connections in %" PRIu64 " usec => %f auth/sec\n",
	               config.connectionsDone,
	               duration,
	               config.connectionsDone / ((float) duration / 1000000.0f));
}
static void async_bench(const char* path) {
  struct async_req reqs[MAX_CONCURRENT_REQS];
  struct async_req* req;
  uint64_t before;
  uint64_t after;
  int count;
  int i;

  for (i = 1; i <= MAX_CONCURRENT_REQS; i++) {
    count = NUM_ASYNC_REQS;

    for (req = reqs; req < reqs + i; req++) {
      req->path = path;
      req->count = &count;
      uv_fs_stat(uv_default_loop(), &req->fs_req, req->path, stat_cb);
    }

    before = uv_hrtime();
    uv_run(uv_default_loop(), UV_RUN_DEFAULT);
    after = uv_hrtime();

    printf("%s stats (%d concurrent): %.2fs (%s/s)\n",
           fmt(1.0 * NUM_ASYNC_REQS),
           i,
           (after - before) / 1e9,
           fmt((1.0 * NUM_ASYNC_REQS) / ((after - before) / 1e9)));
    fflush(stdout);
  }
}
/// Sleeps for `us` microseconds.
///
/// @param us          Number of microseconds to sleep.
/// @param ignoreinput If true, ignore all input (including SIGINT/CTRL-C).
///                    If false, waiting is aborted on any input.
void os_microdelay(uint64_t us, bool ignoreinput)
{
  uint64_t elapsed = 0u;
  uint64_t base = uv_hrtime();
  // Convert microseconds to nanoseconds, or UINT64_MAX on overflow.
  const uint64_t ns = (us < UINT64_MAX / 1000u) ? us * 1000u : UINT64_MAX;
  uv_mutex_lock(&delay_mutex);

  while (elapsed < ns) {
    // If ignoring input, we simply wait the full delay.
    // Else we check for input in ~100ms intervals.
    const uint64_t ns_delta = ignoreinput
                              ? ns - elapsed
                              : MIN(ns - elapsed, 100000000u);  // 100ms

    const int rv = uv_cond_timedwait(&delay_cond, &delay_mutex, ns_delta);
    if (0 != rv && UV_ETIMEDOUT != rv) {
      assert(false);
      break;
    }  // Else: Timeout proceeded normally.

    if (!ignoreinput && os_char_avail()) {
      break;
    }

    const uint64_t now = uv_hrtime();
    elapsed += now - base;
    base = now;
  }

  uv_mutex_unlock(&delay_mutex);
}
void SessionServerCommon::onCheckIdleSockets() {
	int kickedConnections = 0;
	uint64_t begin;
	bool logTrace = Log::get() && Log::get()->wouldLog(LL_Trace);

	// check for trace level first to avoid a call to uv_hrtime if not needed
	if(logTrace)
		begin = uv_hrtime();

	for(auto it = sockets.begin(); it != sockets.end();) {
		SocketSession* session = it->first;
		Stream* socket = session->getStream();
		++it;  // if the socket is removed from the list (when closed), we keep a valid iterator

		if(socket && socket->getState() == Stream::ConnectedState) {
			if(socket->isPacketTransferedSinceLastCheck() == false) {
				StreamAddress remoteAddress = socket->getRemoteAddress();
				char ipStr[108];
				remoteAddress.getName(ipStr, sizeof(ipStr));

				socket->close();
				kickedConnections++;

				log(LL_Info, "Kicked idle connection: %s:%d\n", ipStr, remoteAddress.port);
			} else {
				socket->resetPacketTransferedFlag();
			}
		}
	}

	if(logTrace)
		log(LL_Trace,
		    "Idle socket check: kicked %d sockets in %" PRIu64 " ns\n",
		    kickedConnections,
		    uv_hrtime() - begin);
}
int ftw_socket_inbox_shutdown(struct ftw_socket_inbox ** const sock)
{
    uint64_t time;
    double elapsed_time;
    int rc;

    /* Preconditions expected of LabVIEW. */
    ftw_assert(sock);

    if (*sock == NULL) {
        errno = EBADF;
        return -1;
    }

    time = uv_hrtime();

    rc = nn_close((*sock)->id);
    nn_sem_post(&(*sock)->deinitialized);
    nn_thread_term(&(*sock)->async_recv_thread);
    nn_sem_term(&(*sock)->msg_acknowledged);
    nn_sem_term(&(*sock)->initialized);
    nn_sem_term(&(*sock)->deinitialized);

    time = uv_hrtime() - time;
    elapsed_time = time / 1000000000.0;

    ftw_debug("Inbox Shutdown time: %.3fsec", elapsed_time);

    return rc;
}
bool event_poll(int32_t ms)
{
  int64_t remaining = ms;
  uint64_t end;
  bool result;

  if (ms > 0) {
    // Calculate end time in nanoseconds
    end = uv_hrtime() + ms * 1e6;
  }

  for (;;) {
    result = poll_uv_loop((int32_t)remaining);
    // Process queued events
    process_all_events();

    if (ms > 0) {
      // Calculate remaining time in milliseconds
      remaining = (end - uv_hrtime()) / 1e6;
    }

    if (input_ready() || got_int) {
      // Bail out if we have pending input
      return true;
    }

    if (!result || (ms >= 0 && remaining <= 0)) {
      // Or if we timed out
      return false;
    }
  }
}
// We expect AVPacket.data to contain either interleaved S16 or interleaved F32 audio.
bool AudioEncoderMP3::encodePacket(AVPacket* p, FLVTag& tag) {

  assert(lame_flags);
  assert(settings.in_interleaved); /* we only support interleaved audio for now */

  int nsamples = 0;
  int written = 0;

#if defined(USE_GRAPH)
  uint64_t enc_start = uv_hrtime() / 1000000;
#endif

  if(settings.in_bitsize == AV_AUDIO_BITSIZE_S16) {
    nsamples = p->data.size() / (sizeof(int16_t) * nchannels);
    //printf("----------------- samples: %d, channels: %d, data.size(): %zu\n", nsamples, nchannels, p->data.size());
    written = lame_encode_buffer_interleaved(lame_flags,
                                             (short int*)&p->data.front(),
                                             nsamples,
                                             mp3_buffer,
                                             AUDIO_ENCODER_BUFFER_SIZE);
  }
  else if(settings.in_bitsize == AV_AUDIO_BITSIZE_F32) {
    nsamples = p->data.size() / (sizeof(float) * nchannels);
    written = lame_encode_buffer_interleaved_ieee_float(lame_flags,
                                                        (const float*)&p->data.front(),
                                                        nsamples,
                                                        mp3_buffer,
                                                        AUDIO_ENCODER_BUFFER_SIZE);
  }

  if(written > 0) {
    bitrate_nbytes += written;
  }

  uint64_t time_now = uv_hrtime();
  if(time_now >= bitrate_timeout) {
    bitrate_timeout = time_now + bitrate_delay;
    double duration = (time_now - bitrate_time_started) / 1000000000.0;  // in s.
    bitrate_in_kbps = ((bitrate_nbytes * 8) / 1000) / duration;
    STREAMER_STATUS("audio bitrate: %0.2f kbps\n", bitrate_in_kbps);
  }

#if defined(USE_GRAPH)
  frames_graph["enc_audio"] += ((uv_hrtime() / 1000000) - enc_start);
  frames_graph["enc_audio_video"] += ((uv_hrtime() / 1000000) - enc_start);
  network_graph["mp3"] += written;
#endif

#if AUDIO_USE_DATA_PTR
  if(written) {
    tag.setData(mp3_buffer, written);
  }
#elif AUDIO_USE_COPY_DATA
  tag.bs.clear();
  if(written) {
    tag.bs.putBytes((uint8_t*)mp3_buffer, written);
    tag.setData(tag.bs.getPtr(), tag.bs.size());
  }
#endif

  tag.makeAudioTag();
  tag.setTimeStamp(p->timestamp);

  return written > 0;
}
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        while (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }

    return 0;
}
int rxp_clock_start(rxp_clock* clock) {
  if (!clock) {
    return -1;
  }
  clock->time_start = uv_hrtime();
  clock->time_last = clock->time_start;
  clock->time = 0;
  return 0;
}
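The fields initialized here imply a matching tick step. A minimal sketch of such a companion, assuming the same rxp_clock fields; the rxp_clock_update name and body are hypothetical, not taken from the source:

/* Hypothetical companion (not in the source): advance the clock by the
 * nanoseconds elapsed since the previous call, using the same fields
 * that rxp_clock_start() initializes. */
int rxp_clock_update(rxp_clock* clock) {
  if (!clock) {
    return -1;
  }
  uint64_t now = uv_hrtime();
  clock->time += now - clock->time_last;  /* accumulated elapsed ns */
  clock->time_last = now;
  return 0;
}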
void ftw_resource_usage(double *user_cpu_time, double *system_cpu_time, double *uptime,
    uint64_t *hi_res_relative_time, uint64_t *peak_working_set,
    uint64_t *lv_dataspace_size, uint64_t *hard_page_faults)
{
    uv_rusage_t rusage;
    double up;
    MemStatRec stats;
    MgErr lvrc;
    int rc;

    lvrc = DSMemStats(&stats);
    if (lvrc == mgNoErr) {
        *lv_dataspace_size = (uint64_t)stats.totAllocSize;
    }

    rc = uv_getrusage(&rusage);
    if (rc == 0) {
        *user_cpu_time = (double)rusage.ru_utime.tv_sec + rusage.ru_utime.tv_usec * 1e-6;
        *system_cpu_time = (double)rusage.ru_stime.tv_sec + rusage.ru_stime.tv_usec * 1e-6;
        *peak_working_set = rusage.ru_maxrss;
        *hard_page_faults = rusage.ru_majflt;
    }

    rc = uv_uptime(&up);
    if (rc == 0) {
        *uptime = up;
    }

    *hi_res_relative_time = uv_hrtime();

    return;
}
void RequestHandler::set_response(const SharedRefPtr<Response>& response) {
  uint64_t elapsed = uv_hrtime() - start_time_ns();
  current_host_->update_latency(elapsed);
  connection_->metrics()->record_request(elapsed);
  future_->set_response(current_host_->address(), response);
  return_connection_and_finish();
}
/* Logs the end of a GC run. */
void MVM_profiler_log_gc_end(MVMThreadContext *tc) {
    MVMProfileThreadData *ptd = get_thread_data(tc);
    MVMProfileCallNode *pcn = ptd->current_call;
    MVMuint64 gc_time;
    MVMint32 retained_bytes;

    /* Record time spent. */
    gc_time = uv_hrtime() - ptd->cur_gc_start_time;
    ptd->gcs[ptd->num_gcs].time = gc_time;

    /* Record retained and promoted bytes. */
    retained_bytes = (char *)tc->nursery_alloc - (char *)tc->nursery_tospace;
    ptd->gcs[ptd->num_gcs].promoted_bytes = tc->gc_promoted_bytes;
    ptd->gcs[ptd->num_gcs].retained_bytes = retained_bytes;

    /* Tweak cleared bytes count. */
    ptd->gcs[ptd->num_gcs].cleared_bytes -= (retained_bytes + tc->gc_promoted_bytes);

    /* Record number of gen 2 roots (from gen2 to nursery). */
    ptd->gcs[ptd->num_gcs].num_gen2roots = tc->num_gen2roots;

    /* Increment the number of GCs we've done. */
    ptd->num_gcs++;

    /* Discount GC time from all active frames. */
    while (pcn) {
        pcn->cur_skip_time += gc_time;
        pcn = pcn->pred;
    }
}
Host::Ptr LatencyAwarePolicy::LatencyAwareQueryPlan::compute_next() {
  int64_t min = policy_->min_average_.load();
  const Settings& settings = policy_->settings_;
  uint64_t now = uv_hrtime();

  Host::Ptr host;
  while ((host = child_plan_->compute_next())) {
    TimestampedAverage latency = host->get_current_average();
    if (min < 0 || latency.average < 0 ||
        latency.num_measured < settings.min_measured ||
        (now - latency.timestamp) > settings.retry_period_ns) {
      return host;
    }

    if (latency.average <= static_cast<int64_t>(settings.exclusion_threshold * min)) {
      return host;
    }

    skipped_.push_back(host);
  }

  if (skipped_index_ < skipped_.size()) {
    return skipped_[skipped_index_++];
  }

  return Host::Ptr();
}
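As a worked example of the exclusion test (illustrative numbers, not from the source): with min = 1,000,000 ns and settings.exclusion_threshold = 2.0, any host whose latency.average exceeds 2,000,000 ns (2 ms) is pushed onto skipped_ and only retried after the faster hosts are exhausted.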
/* Gets the current thread's profiling data structure, creating it if needed. */
static MVMProfileThreadData * get_thread_data(MVMThreadContext *tc) {
    if (!tc->prof_data) {
        tc->prof_data = MVM_calloc(1, sizeof(MVMProfileThreadData));
        tc->prof_data->start_time = uv_hrtime();
    }
    return tc->prof_data;
}
Meter(ThreadState* thread_state)
    : one_minute_rate_(1.0 - exp(-static_cast<double>(ExponentiallyWeightedMovingAverage::INTERVAL) / 60.0 / 1),
                       thread_state)
    , five_minute_rate_(1.0 - exp(-static_cast<double>(ExponentiallyWeightedMovingAverage::INTERVAL) / 60.0 / 5),
                        thread_state)
    , fifteen_minute_rate_(1.0 - exp(-static_cast<double>(ExponentiallyWeightedMovingAverage::INTERVAL) / 60.0 / 15),
                           thread_state)
    , count_(thread_state)
    , start_time_(uv_hrtime())
    , last_tick_(start_time_) {}
int uv__loop_init(uv_loop_t* loop, int default_loop) {
#if HAVE_KQUEUE
  int flags = EVBACKEND_KQUEUE;
#else
  int flags = EVFLAG_AUTO;
#endif

  memset(loop, 0, sizeof(*loop));

  RB_INIT(&loop->ares_handles);
  RB_INIT(&loop->timer_handles);
  ngx_queue_init(&loop->active_reqs);
  ngx_queue_init(&loop->idle_handles);
  ngx_queue_init(&loop->async_handles);
  ngx_queue_init(&loop->check_handles);
  ngx_queue_init(&loop->prepare_handles);
  ngx_queue_init(&loop->handle_queue);
  loop->closing_handles = NULL;
  loop->channel = NULL;
  loop->time = uv_hrtime() / 1000000;  /* loop time is cached in milliseconds */
  loop->async_pipefd[0] = -1;
  loop->async_pipefd[1] = -1;
  loop->ev = (default_loop ? ev_default_loop : ev_loop_new)(flags);
  ev_set_userdata(loop->ev, loop);
  eio_channel_init(&loop->uv_eio_channel, loop);

#if __linux__
  RB_INIT(&loop->inotify_watchers);
  loop->inotify_fd = -1;
#endif
#if HAVE_PORTS_FS
  loop->fs_fd = -1;
#endif

  return 0;
}
static void microdelay(uint64_t microseconds)
{
  uint64_t hrtime;
  int64_t ns = microseconds * 1000;  // convert to nanoseconds

  uv_mutex_lock(&delay_mutex);

  while (ns > 0) {
    hrtime = uv_hrtime();
    if (uv_cond_timedwait(&delay_cond, &delay_mutex, ns) == UV_ETIMEDOUT)
      break;
    ns -= uv_hrtime() - hrtime;
  }

  uv_mutex_unlock(&delay_mutex);
}
/* Log that we're entering a new frame. */
void MVM_profile_log_enter(MVMThreadContext *tc, MVMStaticFrame *sf, MVMuint64 mode) {
    MVMProfileThreadData *ptd = get_thread_data(tc);

    /* Try to locate the entry node, if it's in the call graph already. */
    MVMProfileCallNode *pcn = NULL;
    MVMuint32 i;
    if (ptd->current_call)
        for (i = 0; i < ptd->current_call->num_succ; i++)
            if (ptd->current_call->succ[i]->sf == sf)
                pcn = ptd->current_call->succ[i];

    /* If we didn't find a call graph node, then create one and add it to the
     * graph. */
    if (!pcn) {
        pcn = MVM_calloc(1, sizeof(MVMProfileCallNode));
        pcn->sf = sf;
        if (ptd->current_call) {
            MVMProfileCallNode *pred = ptd->current_call;
            pcn->pred = pred;
            if (pred->num_succ == pred->alloc_succ) {
                pred->alloc_succ += 8;
                pred->succ = MVM_realloc(pred->succ,
                    pred->alloc_succ * sizeof(MVMProfileCallNode *));
            }
            pred->succ[pred->num_succ] = pcn;
            pred->num_succ++;
        }
        else {
            if (!ptd->call_graph)
                ptd->call_graph = pcn;
        }
    }

    /* Increment entry counts. */
    pcn->total_entries++;
    switch (mode) {
        case MVM_PROFILE_ENTER_SPESH:
            pcn->specialized_entries++;
            break;
        case MVM_PROFILE_ENTER_SPESH_INLINE:
            pcn->specialized_entries++;
            pcn->inlined_entries++;
            break;
        case MVM_PROFILE_ENTER_JIT:
            pcn->jit_entries++;
            break;
        case MVM_PROFILE_ENTER_JIT_INLINE:
            pcn->jit_entries++;
            pcn->inlined_entries++;
            break;
    }
    pcn->entry_mode = mode;

    /* Log entry time; clear skip time. */
    pcn->cur_entry_time = uv_hrtime();
    pcn->cur_skip_time = 0;

    /* The current call graph node becomes this one. */
    ptd->current_call = pcn;
}
void rxs_jitter_update(rxs_jitter* jit) {

  uint64_t now = uv_hrtime() - jit->time_start;
  rxs_packet* pkt = NULL;

  /* only when our buffer is filled */
  if (jit->npackets < (jit->packets->npackets / 2)) {
    return;
  }

  /* check if there is a packet which needs to be shown */
  if (jit->timeout == 0) {
    jit->timestamp_start = jit->packets->packets[0].timestamp;
    jit->time_start = uv_hrtime();
    jit->timeout = now;
    jit->curr_pkt = &jit->packets->packets[0];
    printf("First timeout set: %llu, first seqnum: %u\n", jit->timeout, jit->curr_pkt->seqnum);
  }

  if (now < jit->timeout) {
    return;
  }

  if (!jit->curr_pkt) {
    printf("Error: cannot find curr pkt.\n");
    return;
  }

  /* construct a packet, @todo - not sure if we need to check the return value here... */
  rxs_reconstruct_merge_packets(&jit->reconstruct, jit->curr_pkt->timestamp);

  /* find the next timeout */
  pkt = jitter_next_packet(jit);
  if (!pkt) {
    printf("Error: no next packet found. This should not happen (unless the sender stopped)!\n");
    exit(0);
  }

  jit->timeout = pkt->timestamp - jit->timestamp_start;
  jit->curr_pkt = pkt;
}
double mean_rate() const {
  if (count() == 0) {
    return 0.0;
  } else {
    double elapsed = static_cast<double>(uv_hrtime() - start_time_) / 1e9;
    return count() / elapsed;
  }
}
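Quick worked example (illustrative values, not from the source): with count() == 500 and uv_hrtime() - start_time_ == 2e9 ns, elapsed is 2.0 s and mean_rate() returns 250.0 events per second.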
/// Sleeps for a certain amount of microseconds.
///
/// @param microseconds Number of microseconds to sleep
void os_microdelay(uint64_t microseconds)
{
  uint64_t elapsed = 0;
  uint64_t ns = microseconds * 1000;  // convert to nanoseconds
  uint64_t base = uv_hrtime();

  uv_mutex_lock(&delay_mutex);

  while (elapsed < ns) {
    if (uv_cond_timedwait(&delay_cond, &delay_mutex, ns - elapsed) == UV_ETIMEDOUT)
      break;
    uint64_t now = uv_hrtime();
    elapsed += now - base;
    base = now;
  }

  uv_mutex_unlock(&delay_mutex);
}
// The boundary string is used when posting multipart/form-data.
void HTTPRequest::generateBoundary() {
  if(boundary.size()) {
    RX_ERROR("Already created a boundary. Not updating.");
    return;
  }
  std::stringstream ss;
  ss << uv_hrtime();
  boundary = "ROXLU" + ss.str();
}
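Since uv_hrtime() returns a nanosecond counter from an arbitrary epoch, the generated boundary is "ROXLU" followed by a large integer (e.g. something like ROXLU123456789012345, illustrative value only), which is effectively unique across requests within a process run.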
static void sync_bench(const char* path) {
  uint64_t before;
  uint64_t after;
  uv_fs_t req;
  int i;

  /* do the sync benchmark */
  before = uv_hrtime();

  for (i = 0; i < NUM_SYNC_REQS; i++)
    sync_stat(&req, path);

  after = uv_hrtime();

  printf("%s stats (sync): %.2fs (%s/s)\n",
         fmt(1.0 * NUM_SYNC_REQS),
         (after - before) / 1e9,
         fmt((1.0 * NUM_SYNC_REQS) / ((after - before) / 1e9)));
  fflush(stdout);
}
static int pound_it(int concurrency,
                    const char* type,
                    setup_fn do_setup,
                    connect_fn do_connect,
                    make_connect_fn make_connect,
                    void* arg) {
  double secs;
  int r;
  uint64_t start_time; /* in ns */
  uint64_t end_time;

  loop = uv_default_loop();

  uv_update_time(loop);
  start = uv_now(loop);

  /* Run benchmark for at least five seconds. */
  start_time = uv_hrtime();

  do_setup(concurrency, arg);

  r = do_connect(concurrency, make_connect, arg);
  ASSERT(!r);

  uv_run(loop, UV_RUN_DEFAULT);

  end_time = uv_hrtime();

  /* Number of fractional seconds it took to run the benchmark. */
  secs = (double)(end_time - start_time) / NANOSEC;

  fprintf(stderr, "%s-conn-pound-%d: %.0f accepts/s (%d failed)\n",
          type,
          concurrency,
          closed_streams / secs,
          conns_failed);
  fflush(stderr);

  MAKE_VALGRIND_HAPPY();
  return 0;
}
static void on_timer(uv_timer_t* handle) {
    char msg[1024];
    const char* s = "abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ"
                    " abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ.";
    snprintf(msg, sizeof(msg), "Repeated text: (1) %s (2) %s (3) %s (4) %s", s, s, s, s);

    uint64_t elapsed_time = uv_hrtime(); // ns
    for(int i = 0; i < bench; i++) {
        // uvx_log_send(&xlog, UVX_LOG_INFO, "xlog,test,liigo", msg, "/home/liigo/source.c", i + 1);
        UVX_LOG(&xlog, UVX_LOG_INFO, "xlog,test,liigo", "Log content(index %d): %s", i, msg);
    }
    printf("sent %d logs, elapsed time: %"_UINT64_FMT" us (1000us = 1ms).\n",
           bench, (uv_hrtime() - elapsed_time) / 1000);
}
int uv__loop_init(uv_loop_t* loop, int default_loop) {
  unsigned int i;
  int flags;

  uv__signal_global_once_init();

#if HAVE_KQUEUE
  flags = EVBACKEND_KQUEUE;
#else
  flags = EVFLAG_AUTO;
#endif

  memset(loop, 0, sizeof(*loop));

  RB_INIT(&loop->timer_handles);
  ngx_queue_init(&loop->wq);
  ngx_queue_init(&loop->active_reqs);
  ngx_queue_init(&loop->idle_handles);
  ngx_queue_init(&loop->async_handles);
  ngx_queue_init(&loop->check_handles);
  ngx_queue_init(&loop->prepare_handles);
  ngx_queue_init(&loop->handle_queue);
  loop->closing_handles = NULL;
  loop->time = uv_hrtime() / 1000000;  /* loop time is cached in milliseconds */
  loop->async_pipefd[0] = -1;
  loop->async_pipefd[1] = -1;
  loop->async_sweep_needed = 0;
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->emfile_fd = -1;
  loop->ev = (default_loop ? ev_default_loop : ev_loop_new)(flags);
  ev_set_userdata(loop->ev, loop);

  uv_signal_init(loop, &loop->child_watcher);
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;

  for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++)
    ngx_queue_init(loop->process_handles + i);

  if (uv_mutex_init(&loop->wq_mutex))
    abort();

  if (uv_async_init(loop, &loop->wq_async, uv__work_done))
    abort();

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  if (uv__platform_loop_init(loop, default_loop))
    return -1;

  return 0;
}
Node(uint64_t id, int cluster_size, ab_callbacks_t callbacks, void* callbacks_data)
    : m_id(id)
    , m_cluster_size(cluster_size)
    , m_peer_registry(std::make_unique<PeerRegistry>())
    , m_codec(std::make_shared<Codec>())
    , m_index_counter(0)
    , m_trusted_peer(0)
    , m_last_leader_active(uv_hrtime())
    , m_role(std::make_unique<Role>(*m_peer_registry, id, cluster_size))
    , m_mutex(std::make_unique<std::mutex>())
{
    m_role->set_callbacks(callbacks, callbacks_data);
}
/* Log that we've entered a native routine. */
void MVM_profile_log_enter_native(MVMThreadContext *tc, MVMObject *nativecallsite) {
    MVMProfileThreadData *ptd = get_thread_data(tc);
    MVMProfileCallNode *pcn = NULL;
    MVMNativeCallBody *callbody;
    MVMuint32 i;

    /* We locate the right call node by looking at sf being NULL and the
     * native_target_name matching our intended target. */
    callbody = MVM_nativecall_get_nc_body(tc, nativecallsite);
    if (ptd->current_call)
        for (i = 0; i < ptd->current_call->num_succ; i++)
            if (ptd->current_call->succ[i]->sf == NULL)
                if (strcmp(callbody->sym_name,
                           ptd->current_call->succ[i]->native_target_name) == 0) {
                    pcn = ptd->current_call->succ[i];
                    break;
                }

    /* If we didn't find a call graph node, then create one and add it to the
     * graph. */
    if (!pcn) {
        pcn = MVM_calloc(1, sizeof(MVMProfileCallNode));
        pcn->native_target_name = callbody->sym_name;
        if (ptd->current_call) {
            MVMProfileCallNode *pred = ptd->current_call;
            pcn->pred = pred;
            if (pred->num_succ == pred->alloc_succ) {
                pred->alloc_succ += 8;
                pred->succ = MVM_realloc(pred->succ,
                    pred->alloc_succ * sizeof(MVMProfileCallNode *));
            }
            pred->succ[pred->num_succ] = pcn;
            pred->num_succ++;
        }
        else {
            if (!ptd->call_graph)
                ptd->call_graph = pcn;
        }
    }

    /* Increment entry counts. */
    pcn->total_entries++;
    pcn->entry_mode = 0;

    /* Log entry time; clear skip time. */
    pcn->cur_entry_time = uv_hrtime();
    pcn->cur_skip_time = 0;

    /* The current call graph node becomes this one. */
    ptd->current_call = pcn;
}
/* Log that we've finished doing bytecode specialization or JIT. */
void MVM_profiler_log_spesh_end(MVMThreadContext *tc) {
    MVMProfileThreadData *ptd = get_thread_data(tc);
    MVMProfileCallNode *pcn = ptd->current_call;
    MVMuint64 spesh_time;

    /* Record time spent. */
    spesh_time = uv_hrtime() - ptd->cur_spesh_start_time;
    ptd->spesh_time += spesh_time;

    /* Discount spesh time from all active frames. */
    while (pcn) {
        pcn->cur_skip_time += spesh_time;
        pcn = pcn->pred;
    }
}
/* Logs the start of a GC run. */
void MVM_profiler_log_gc_start(MVMThreadContext *tc, MVMuint32 full) {
    MVMProfileThreadData *ptd = get_thread_data(tc);

    /* Make a new entry in the GCs. We use the cleared_bytes to store the
     * maximum that could be cleared, and after GC is done will subtract
     * retained bytes and promoted bytes. */
    if (ptd->num_gcs == ptd->alloc_gcs) {
        ptd->alloc_gcs += 16;
        ptd->gcs = MVM_realloc(ptd->gcs, ptd->alloc_gcs * sizeof(MVMProfileGC));
    }
    ptd->gcs[ptd->num_gcs].full = full;
    ptd->gcs[ptd->num_gcs].cleared_bytes =
        (char *)tc->nursery_alloc - (char *)tc->nursery_tospace;

    /* Record start time. */
    ptd->cur_gc_start_time = uv_hrtime();
}
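All of the snippets above share one idiom: sample uv_hrtime() before and after a region of interest and treat the difference as elapsed nanoseconds (the counter is monotonic with an arbitrary epoch, so only differences are meaningful). A minimal, self-contained sketch of that idiom, with uv_sleep() (libuv >= 1.34) standing in for the work being timed:

#include <inttypes.h>
#include <stdio.h>
#include <uv.h>

int main(void) {
  uint64_t before = uv_hrtime();  /* monotonic, nanoseconds, arbitrary epoch */

  uv_sleep(100);                  /* placeholder for the work to measure (~100 ms) */

  uint64_t elapsed_ns = uv_hrtime() - before;
  printf("elapsed: %" PRIu64 " ns (%.3f ms)\n", elapsed_ns, elapsed_ns / 1e6);
  return 0;
}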