/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
                             executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
}
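The worker communicates with uv_cancel() purely through the two markers set above: QUEUE_INIT leaves the queue node pointing at itself (so it tests as empty once the request starts executing), and w->work == NULL means the request has finished. A sketch of the cancellation-side check, modelled on libuv's internal uv__work_cancel() and assuming the same globals as the worker above:

/* Sketch of the cancellation path (modelled on libuv's internal
 * uv__work_cancel). A request is cancellable only while still queued:
 * non-empty queue node AND a non-NULL work callback. Note that this side
 * takes both mutexes at once; the worker never does, which is what keeps
 * the lock ordering deadlock-free. */
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
  if (cancelled)
    QUEUE_REMOVE(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled)
    return UV_EBUSY;

  /* Hand the request back to its loop with a cancelled status. */
  w->work = uv__cancelled;  /* sentinel callback defined by libuv */
  uv_mutex_lock(&loop->wq_mutex);
  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}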
Example #2
void Log::logv(Level level, const char* objectName, size_t objectNameSize, const char* message, va_list args) {
	if(!wouldLog(level) || this->messageQueueFull)
		return;

	Message* msg = new Message;
	msg->time_ms = Utils::getTimeInMsec();
	msg->level = level;
	msg->writeToConsole = level <= consoleMaxLevel;
	msg->writeToFile = level <= fileMaxLevel;
	msg->objectName = std::string(objectName, objectName + objectNameSize);
	Utils::stringFormatv(msg->message, message, args);

	uv_mutex_lock(&this->messageListMutex);
	if((int) this->messageQueue.size() < maxQueueSize.get())
		this->messageQueue.push_back(msg);
	else {
		delete msg;  // queue is full: drop the message rather than leak it
		this->messageQueueFull = true;
	}
	if(this->messageQueue.size() > maxQueueSizeReached)
		maxQueueSizeReached = this->messageQueue.size();
	uv_cond_signal(&this->messageListCond);
	uv_mutex_unlock(&this->messageListMutex);
}
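logv() is only the producer half. A hypothetical consumer thread pairing with it would sleep on messageListCond and drain the queue under the same mutex; the container type and the extra members below are assumptions, not part of the original class:

// Hypothetical consumer loop pairing with Log::logv() above. The container
// is assumed to be a std::deque<Message*>; `stopped` and writeMessage()
// are assumed members, not part of the original.
void Log::logWriterLoop() {
	uv_mutex_lock(&this->messageListMutex);
	while(!this->stopped) {
		while(this->messageQueue.empty() && !this->stopped)
			uv_cond_wait(&this->messageListCond, &this->messageListMutex);

		while(!this->messageQueue.empty()) {
			Message* msg = this->messageQueue.front();
			this->messageQueue.pop_front();
			this->messageQueueFull = false;  // room again: accept new messages
			uv_mutex_unlock(&this->messageListMutex);
			writeMessage(msg);  // hypothetical sink (console and/or file)
			delete msg;
			uv_mutex_lock(&this->messageListMutex);
		}
	}
	uv_mutex_unlock(&this->messageListMutex);
}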
Example #3
static int start_application_main( void )
{
    zlog_info( crcl(c), "Charcoal application starting.  argc: %d  argv: %p  env: %p",
               __argc, __argv, __env );
    int rc;
    /* There's nothing particularly special about the thread that runs
     * the application's 'main' procedure.  The application will
     * continue running until all its threads finish (or exit is called
     * or whatever). */
    rc = thread_start( &crcl(main_thread), NULL /* options */ );
    if( rc )
    {
        return rc;
    }

    /* XXX This is getting a little hacky */
    // crcl(main_activity).oldest_frame.activity = &crcl(main_activity);
    activity_t dummy_act;
    crcl(frame_t) dummy_frm;
    dummy_frm.activity = &dummy_act;
    crcl(frame_p) main_frame = app_main_prologue(
        &dummy_frm, 0, &crcl(process_exit_code), __argc, __argv, __env );
    if( !main_frame )
    {
        return -3;
    }
    activate_in_thread(
        &crcl(main_thread),
        &crcl(main_activity),
        &dummy_frm,
        main_frame );
    crcl(push_ready_queue)( &crcl(main_activity) );
    crcl(main_thread).running = &crcl(main_activity);
    uv_cond_signal( &crcl(main_thread).thd_management_cond );
    return 0;
}
Example #4
static void SignalDeviceHandled() {
	uv_mutex_lock(&notify_mutex);
	deviceHandled = true;
	uv_cond_signal(&notifyDeviceHandled);
	uv_mutex_unlock(&notify_mutex);
}
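The matching wait side loops on the flag so that a spurious wakeup from uv_cond_wait() cannot slip through (hypothetical counterpart, same globals as above):

/* Hypothetical counterpart to SignalDeviceHandled(): block until the flag
 * is set, re-checking in a loop because uv_cond_wait() may return
 * spuriously. */
static void WaitForDeviceHandled() {
	uv_mutex_lock(&notify_mutex);
	while (!deviceHandled)
		uv_cond_wait(&notifyDeviceHandled, &notify_mutex);
	deviceHandled = false;  /* reset for the next device event */
	uv_mutex_unlock(&notify_mutex);
}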
Example #5
static void post(QUEUE* q) {
  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, q);
  uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}
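post() is the submission half of the thread pool from Example #1. Roughly, the submit path that feeds it fills in the struct uv__work fields the worker reads, then hands over the request's queue node (a sketch following libuv's threadpool code; `once` and init_once, which spawns the worker threads, are assumed from the same file):

/* Rough sketch of libuv's submit path feeding post(). */
void uv__work_submit(uv_loop_t* loop,
                     struct uv__work* w,
                     void (*work)(struct uv__work* w),
                     void (*done)(struct uv__work* w, int status)) {
  uv_once(&once, init_once);  /* start the pool on first use */
  w->loop = loop;
  w->work = work;
  w->done = done;
  post(&w->wq);
}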
Example #6
static mrb_value
mrb_uv_cond_signal(mrb_state *mrb, mrb_value self)
{
  uv_cond_signal((uv_cond_t*)mrb_uv_get_ptr(mrb, self, &mrb_uv_cond_type));
  return self;
}
Example #7
void Graph::addPacket(GraphPacket* gp) {
  uv_mutex_lock(&mutex);
  work.push_back(gp);
  uv_cond_signal(&cv);
  uv_mutex_unlock(&mutex);
}
Example #8
void status_notify(Status* status) {
  uv_mutex_lock(&status->mutex);
  status->count--;
  uv_cond_signal(&status->cond);
  uv_mutex_unlock(&status->mutex);
}
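status_notify() decrements a counter under the lock; the waiting side (hypothetical, using the same Status fields) blocks until the count reaches zero:

/* Hypothetical counterpart to status_notify(): wait until all counted
 * operations have signalled completion, looping because condition
 * variables can wake spuriously. */
void status_wait(Status* status) {
  uv_mutex_lock(&status->mutex);
  while (status->count > 0)
    uv_cond_wait(&status->cond, &status->mutex);
  uv_mutex_unlock(&status->mutex);
}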
Example #9
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

#ifndef _WIN32
  struct data_t *data = arg;
#endif

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
                             executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
#ifndef _WIN32
    if(pilight.debuglevel >= 2) {
      getThreadCPUUsage(pthread_self(), &data->cpu_usage);
      clock_gettime(CLOCK_MONOTONIC, &data->timestamp.first);
    }
#endif
    w->work(w);

#ifndef _WIN32
    if(pilight.debuglevel >= 2) {
      clock_gettime(CLOCK_MONOTONIC, &data->timestamp.second);
      getThreadCPUUsage(pthread_self(), &data->cpu_usage);
      fprintf(stderr, "worker %d, executed %s in %.6f sec using %f%% CPU\n",
        data->nr,
        w->name,
        ((double)data->timestamp.second.tv_sec + 1.0e-9*data->timestamp.second.tv_nsec) -
        ((double)data->timestamp.first.tv_sec + 1.0e-9*data->timestamp.first.tv_nsec),
        data->cpu_usage.cpu_per
      );
    }
#endif

    // free(w->name);
    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
#ifndef _WIN32
  free(data);
#endif
}
Example #10
/* Enters the work loop. */
static void worker(MVMThreadContext *tc, MVMCallsite *callsite, MVMRegister *args) {
    MVMObject *updated_static_frames = MVM_repr_alloc_init(tc,
        tc->instance->boot_types.BOOTArray);
    MVMObject *previous_static_frames = MVM_repr_alloc_init(tc,
        tc->instance->boot_types.BOOTArray);

    tc->instance->speshworker_thread_id = tc->thread_obj->body.thread_id;

    MVMROOT2(tc, updated_static_frames, previous_static_frames, {
        while (1) {
            MVMObject *log_obj;
            MVMuint64 start_time;
            unsigned int interval_id;
            if (MVM_spesh_debug_enabled(tc))
                start_time = uv_hrtime();
            log_obj = MVM_repr_shift_o(tc, tc->instance->spesh_queue);
            if (MVM_spesh_debug_enabled(tc)) {
                MVM_spesh_debug_printf(tc,
                    "Received Logs\n"
                    "=============\n\n"
                    "Was waiting %dus for logs on the log queue.\n\n",
                    (int)((uv_hrtime() - start_time) / 1000));
            }

            if (tc->instance->main_thread->prof_data)
                MVM_profiler_log_spesh_start(tc);

            interval_id = MVM_telemetry_interval_start(tc, "spesh worker consuming a log");

            uv_mutex_lock(&(tc->instance->mutex_spesh_sync));
            tc->instance->spesh_working = 1;
            uv_mutex_unlock(&(tc->instance->mutex_spesh_sync));

            tc->instance->spesh_stats_version++;
            if (log_obj->st->REPR->ID == MVM_REPR_ID_MVMSpeshLog) {
                MVMSpeshLog *sl = (MVMSpeshLog *)log_obj;
                MVM_telemetry_interval_annotate((uintptr_t)sl->body.thread->body.tc, interval_id, "from this thread");
                MVMROOT(tc, sl, {
                    MVMThreadContext *stc;
                    MVMuint32 i;
                    MVMuint32 n;

                    /* Update stats, and if we're logging dump each of them. */
                    tc->instance->spesh_stats_version++;
                    if (MVM_spesh_debug_enabled(tc))
                        start_time = uv_hrtime();
                    MVM_spesh_stats_update(tc, sl, updated_static_frames);
                    n = MVM_repr_elems(tc, updated_static_frames);
                    if (MVM_spesh_debug_enabled(tc)) {
                        MVM_spesh_debug_printf(tc,
                            "Statistics Updated\n"
                            "==================\n"
                            "%d frames had their statistics updated in %dus.\n\n",
                            (int)n, (int)((uv_hrtime() - start_time) / 1000));
                        for (i = 0; i < n; i++) {
                            char *dump = MVM_spesh_dump_stats(tc, (MVMStaticFrame* )
                                MVM_repr_at_pos_o(tc, updated_static_frames, i));
                            MVM_spesh_debug_printf(tc, "%s==========\n\n", dump);
                            MVM_free(dump);
                        }
                    }
                    MVM_telemetry_interval_annotate((uintptr_t)n, interval_id, "stats for this many frames");
                    GC_SYNC_POINT(tc);

                    /* Form a specialization plan. */
                    if (MVM_spesh_debug_enabled(tc))
                        start_time = uv_hrtime();
                    tc->instance->spesh_plan = MVM_spesh_plan(tc, updated_static_frames);
                    if (MVM_spesh_debug_enabled(tc)) {
                        n = tc->instance->spesh_plan->num_planned;
                        MVM_spesh_debug_printf(tc,
                            "Specialization Plan\n"
                            "===================\n"
                            "%u specialization(s) will be produced (planned in %dus).\n\n",
                            n, (int)((uv_hrtime() - start_time) / 1000));
                        for (i = 0; i < n; i++) {
                            char *dump = MVM_spesh_dump_planned(tc,
                                &(tc->instance->spesh_plan->planned[i]));
                            MVM_spesh_debug_printf(tc, "%s==========\n\n", dump);
                            MVM_free(dump);
                        }
                    }
                    MVM_telemetry_interval_annotate((uintptr_t)tc->instance->spesh_plan->num_planned, interval_id,
                            "this many specializations planned");
                    GC_SYNC_POINT(tc);

                    /* Implement the plan and then discard it. */
                    n = tc->instance->spesh_plan->num_planned;
                    for (i = 0; i < n; i++) {
                        MVM_spesh_candidate_add(tc, &(tc->instance->spesh_plan->planned[i]));
                        GC_SYNC_POINT(tc);
                    }
                    MVM_spesh_plan_destroy(tc, tc->instance->spesh_plan);
                    tc->instance->spesh_plan = NULL;

                    /* Clear up stats that didn't get updated for a while,
                     * then add frames updated this time into the previously
                     * updated array. */
                    MVM_spesh_stats_cleanup(tc, previous_static_frames);
                    n = MVM_repr_elems(tc, updated_static_frames);
                    for (i = 0; i < n; i++)
                        MVM_repr_push_o(tc, previous_static_frames,
                            MVM_repr_at_pos_o(tc, updated_static_frames, i));

                    /* Clear updated static frames array. */
                    MVM_repr_pos_set_elems(tc, updated_static_frames, 0);

                    /* Allow the sending thread to produce more logs again,
                     * putting a new spesh log in place if needed. */
                    stc = sl->body.thread->body.tc;
                    if (stc && !sl->body.was_compunit_bumped)
                        if (MVM_incr(&(stc->spesh_log_quota)) == 0) {
                            stc->spesh_log = MVM_spesh_log_create(tc, sl->body.thread);
                            MVM_telemetry_timestamp(stc, "logging restored after quota had run out");
                        }

                    /* If needed, signal sending thread that it can continue. */
                    if (sl->body.block_mutex) {
                        uv_mutex_lock(sl->body.block_mutex);
                        MVM_store(&(sl->body.completed), 1);
                        uv_cond_signal(sl->body.block_condvar);
                        uv_mutex_unlock(sl->body.block_mutex);
                    }
                    {
                        MVMSpeshLogEntry *entries = sl->body.entries;
                        sl->body.entries = NULL;
                        MVM_free(entries);
                    }
                });
            }
            else if (MVM_is_null(tc, log_obj)) {
Example #11
void signal_exit(CassSession* session) {
  uv_mutex_lock(&mutex);
  close_future = cass_session_close(session);
  uv_cond_signal(&cond);
  uv_mutex_unlock(&mutex);
}
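signal_exit() publishes close_future under the lock; the thread waiting for shutdown (a sketch, assuming close_future starts out NULL and is guarded by the same mutex/cond globals) loops until it appears, then waits on the future:

/* Sketch of the waiting side of signal_exit(). */
void wait_exit() {
  uv_mutex_lock(&mutex);
  while (close_future == NULL)
    uv_cond_wait(&cond, &mutex);
  uv_mutex_unlock(&mutex);

  cass_future_wait(close_future);  /* block until the session has closed */
  cass_future_free(close_future);
}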
void notify_one() {
  uv_cond_signal(&cond_);
}
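In a wrapper like this, notify_one() only makes sense next to a wait() that holds the paired mutex and re-checks its condition; a hypothetical companion, assuming the class also owns a uv_mutex_t mutex_ and a flag guarded by it:

// Hypothetical companion to notify_one(); mutex_ and ready_ are assumed
// members, not part of the original fragment.
void wait() {
  uv_mutex_lock(&mutex_);
  while (!ready_)  // predicate flag, re-checked after every wakeup
    uv_cond_wait(&cond_, &mutex_);
  uv_mutex_unlock(&mutex_);
}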