int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold cycles (count sheep), then sleep
        uint64_t spin_cycles, spin_start = rdtsc();
        while (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                spin_cycles = rdtsc() - spin_start;
                if (spin_cycles >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = rdtsc();
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }
    return 0;
}
int bmon_offer(batch_monitor_t* m, void* item)
{
    /* offer data to idle batch queue */
    uv_mutex_lock(&m->lock);

    int bq_idx = m->curr_idx;
    batch_queue_t *bq = &m->queues[bq_idx];
    bq->puts_waiting++;
    heap_offer(&bq->queue, item);

    /* wait until our batch queue is emptied by the batch thread */
    while (1) {
        if (m->curr_idx != bq_idx)
            break;
        /* signal wasn't meant for us */
        uv_cond_signal(&m->done);
        uv_cond_wait(&m->done, &m->lock);
    }

    /* batch thread wrote our data */
    bq->puts_waiting--;
    uv_mutex_unlock(&m->lock);

    /* let one of our fellow batch threads exit */
    uv_cond_signal(&m->done);
    return 0;
}
int uv_barrier_wait(uv_barrier_t* barrier) {
  struct _uv_barrier* b;
  int last;

  if (barrier == NULL || barrier->b == NULL)
    return UV_EINVAL;

  b = barrier->b;
  uv_mutex_lock(&b->mutex);

  if (++b->in == b->threshold) {
    b->in = 0;
    b->out = b->threshold;
    uv_cond_signal(&b->cond);
  } else {
    do
      uv_cond_wait(&b->cond, &b->mutex);
    while (b->in != 0);
  }

  last = (--b->out == 0);
  if (!last)
    uv_cond_signal(&b->cond);  /* Not needed for last thread. */

  uv_mutex_unlock(&b->mutex);
  return last;
}
static void WaitForDeviceHandled()
{
    uv_mutex_lock(&notify_mutex);
    /* wait in a loop, not a single check: uv_cond_wait can wake spuriously */
    while (deviceHandled == false) {
        uv_cond_wait(&notifyDeviceHandled, &notify_mutex);
    }
    deviceHandled = false;
    uv_mutex_unlock(&notify_mutex);
}
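The signaling side is not shown above; a minimal sketch of what it would look like (the function name SignalDeviceHandled is an assumption, not from the source):

/* Hypothetical counterpart: sets the flag and wakes the waiter.
 * The flag must be written under the same mutex the waiter uses. */
static void SignalDeviceHandled()
{
    uv_mutex_lock(&notify_mutex);
    deviceHandled = true;
    uv_cond_signal(&notifyDeviceHandled);
    uv_mutex_unlock(&notify_mutex);
}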
// send data to the grapher app
void graph_thread(void* user) {
  Graph* g = static_cast<Graph*>(user);

  int sock = nn_socket(AF_SP, NN_PUSH);
  if (sock == -1) {
    printf("error: cannot create client socket.\n");
    return;
  }

  int rc = nn_connect(sock, g->address.c_str());
  if (rc < 0) {
    printf("error: cannot connect, %s\n", nn_strerror(errno));
    return;
  }

  std::vector<GraphPacket*> todo;

  while (true) {
    // get the work!
    uv_mutex_lock(&g->mutex);
    {
      while (g->work.size() == 0) {
        uv_cond_wait(&g->cv, &g->mutex);
      }
      std::copy(g->work.begin(), g->work.end(), std::back_inserter(todo));
      g->work.clear();
    }
    uv_mutex_unlock(&g->mutex);

    // perform work
    bool must_stop = false;
    for (std::vector<GraphPacket*>::iterator it = todo.begin(); it != todo.end(); ++it) {
      GraphPacket* p = *it;
      if (p->data[0] == PKT_TYPE_STOP) {
        must_stop = true;
        break;
      }
      else {
        // nn_send() returns -1 on error; the error code lives in nn_errno(),
        // not in the return value
        int rc = nn_send(sock, (char*)&p->data.front(), p->data.size(), 0);
        if (rc < 0) {
          printf("error: cannot send to grapher: %s\n", nn_strerror(nn_errno()));
        }
      }
      delete p;
      p = NULL;
    } // for todo

    todo.clear();

    if (must_stop) {
      break;
    }
  }

  printf("@todo -> cleanup socket.\n");
}
void wait_exit() {
  uv_mutex_lock(&mutex);
  while (close_future == NULL) {
    uv_cond_wait(&cond, &mutex);
  }
  uv_mutex_unlock(&mutex);

  cass_future_wait(close_future);
  cass_future_free(close_future);
}
static void uv__custom_sem_wait(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  while (sem->value == 0)
    uv_cond_wait(&sem->cond, &sem->mutex);
  sem->value--;
  uv_mutex_unlock(&sem->mutex);
}
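For context, the matching post operation would plausibly look like this (a sketch assuming the same uv_semaphore_t layout; signaling only on the 0 -> 1 transition suffices because waiters re-check the value in a loop):

static void uv__custom_sem_post(uv_sem_t* sem_) {
  uv_semaphore_t* sem;

  sem = *(uv_semaphore_t**)sem_;
  uv_mutex_lock(&sem->mutex);
  sem->value++;
  /* only wake a waiter when the count leaves zero */
  if (sem->value == 1)
    uv_cond_signal(&sem->cond);
  uv_mutex_unlock(&sem->mutex);
}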
static mrb_value mrb_uv_cond_wait(mrb_state *mrb, mrb_value self)
{
  mrb_value mutex_val;
  uv_mutex_t *mutex;

  mrb_get_args(mrb, "o", &mutex_val);
  mutex = (uv_mutex_t*)mrb_uv_get_ptr(mrb, mutex_val, &mrb_uv_mutex_type);
  /* uv_cond_wait() returns void; the comma expression evaluates the wait,
   * then yields self as the Ruby return value */
  return uv_cond_wait((uv_cond_t*)mrb_uv_get_ptr(mrb, self, &mrb_uv_cond_type), mutex), self;
}
void wait_exit() {
  uv_mutex_lock(&mutex);
  while (exit_flag == 0) {
    uv_cond_wait(&cond, &mutex);
  }
  uv_mutex_unlock(&mutex);

  if (close_future) {
    cass_future_wait(close_future);
    cass_future_free(close_future);
  }
}
UI *ui_bridge_attach(UI *ui, ui_main_fn ui_main, event_scheduler scheduler)
{
  UIBridgeData *rv = xcalloc(1, sizeof(UIBridgeData));
  rv->ui = ui;
  rv->bridge.rgb = ui->rgb;
  rv->bridge.stop = ui_bridge_stop;
  rv->bridge.resize = ui_bridge_resize;
  rv->bridge.clear = ui_bridge_clear;
  rv->bridge.eol_clear = ui_bridge_eol_clear;
  rv->bridge.cursor_goto = ui_bridge_cursor_goto;
  rv->bridge.mode_info_set = ui_bridge_mode_info_set;
  rv->bridge.update_menu = ui_bridge_update_menu;
  rv->bridge.busy_start = ui_bridge_busy_start;
  rv->bridge.busy_stop = ui_bridge_busy_stop;
  rv->bridge.mouse_on = ui_bridge_mouse_on;
  rv->bridge.mouse_off = ui_bridge_mouse_off;
  rv->bridge.mode_change = ui_bridge_mode_change;
  rv->bridge.set_scroll_region = ui_bridge_set_scroll_region;
  rv->bridge.scroll = ui_bridge_scroll;
  rv->bridge.highlight_set = ui_bridge_highlight_set;
  rv->bridge.put = ui_bridge_put;
  rv->bridge.bell = ui_bridge_bell;
  rv->bridge.visual_bell = ui_bridge_visual_bell;
  rv->bridge.update_fg = ui_bridge_update_fg;
  rv->bridge.update_bg = ui_bridge_update_bg;
  rv->bridge.update_sp = ui_bridge_update_sp;
  rv->bridge.flush = ui_bridge_flush;
  rv->bridge.suspend = ui_bridge_suspend;
  rv->bridge.set_title = ui_bridge_set_title;
  rv->bridge.set_icon = ui_bridge_set_icon;
  rv->scheduler = scheduler;

  for (UIWidget i = 0; (int)i < UI_WIDGETS; i++) {
    rv->bridge.ui_ext[i] = ui->ui_ext[i];
  }

  rv->ui_main = ui_main;
  uv_mutex_init(&rv->mutex);
  uv_cond_init(&rv->cond);
  uv_mutex_lock(&rv->mutex);
  rv->ready = false;

  if (uv_thread_create(&rv->ui_thread, ui_thread_run, rv)) {
    abort();
  }

  while (!rv->ready) {
    uv_cond_wait(&rv->cond, &rv->mutex);
  }
  uv_mutex_unlock(&rv->mutex);

  ui_attach_impl(&rv->bridge);
  return &rv->bridge;
}
static void ui_bridge_suspend(UI *b)
{
  UIBridgeData *data = (UIBridgeData *)b;
  uv_mutex_lock(&data->mutex);
  UI_CALL(b, suspend, 1, b);
  data->ready = false;
  // suspend the main thread until CONTINUE is called by the UI thread
  while (!data->ready) {
    uv_cond_wait(&data->cond, &data->mutex);
  }
  uv_mutex_unlock(&data->mutex);
}
/**
 * Start the query execution thread
 */
void start_query_execution() {
  is_running_ = true;
  is_error_ = false;
  is_warming_up_ = true;
  memset(max_node_latency, 0, sizeof(max_node_latency));
  uv_thread_create(&thread_, start_thread, NULL);

  // Allow metrics to gather some initial data
  uv_mutex_lock(&lock_);
  while (is_warming_up_) {
    uv_cond_wait(&condition_, &lock_);
  }
  uv_mutex_unlock(&lock_);
}
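The notification side is not shown; roughly, the query thread would end the warm-up phase like this (a sketch; the function name and where it is called from are assumptions):

// Hypothetical warm-up completion hook (not from the source): clears the
// flag under the lock and wakes the thread blocked in start_query_execution().
void notify_warmup_complete() {
  uv_mutex_lock(&lock_);
  is_warming_up_ = false;
  uv_cond_signal(&condition_);
  uv_mutex_unlock(&lock_);
}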
int ti_threadgroup_fork(ti_threadgroup_t *tg, int16_t ext_tid, void **bcast_val)
{
    if (tg->tid_map[ext_tid] == 0) {
        tg->envelope = bcast_val ? *bcast_val : NULL;
        cpu_sfence();
        tg->forked = 1;
        tg->group_sense = tg->thread_sense[0]->sense;

        // if it's possible that threads are sleeping, signal them
        if (tg->sleep_threshold) {
            uv_mutex_lock(&tg->alarm_lock);
            uv_cond_broadcast(&tg->alarm);
            uv_mutex_unlock(&tg->alarm_lock);
        }
    }
    else {
        // spin up to threshold ns (count sheep), then sleep
        uint64_t spin_ns;
        uint64_t spin_start = 0;
        while (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
            if (tg->sleep_threshold) {
                if (!spin_start) {
                    // Lazily initialize spin_start since uv_hrtime is expensive
                    spin_start = uv_hrtime();
                    continue;
                }
                spin_ns = uv_hrtime() - spin_start;
                // In case uv_hrtime is not monotonic, we'll sleep earlier
                if (spin_ns >= tg->sleep_threshold) {
                    uv_mutex_lock(&tg->alarm_lock);
                    if (tg->group_sense != tg->thread_sense[tg->tid_map[ext_tid]]->sense) {
                        uv_cond_wait(&tg->alarm, &tg->alarm_lock);
                    }
                    uv_mutex_unlock(&tg->alarm_lock);
                    spin_start = 0;
                    continue;
                }
            }
            cpu_pause();
        }
        cpu_lfence();
        if (bcast_val)
            *bcast_val = tg->envelope;
    }
    return 0;
}
void *uv_chan_receive(uv_chan_t *chan) {
    uv__chan_item_t *item;
    QUEUE *head;
    void *data = NULL;

    uv_mutex_lock(&chan->mutex);
    while (QUEUE_EMPTY(&chan->q)) {
        uv_cond_wait(&chan->cond, &chan->mutex);
    }
    head = QUEUE_HEAD(&chan->q);
    item = QUEUE_DATA(head, uv__chan_item_t, active_queue);
    data = item->data;
    QUEUE_REMOVE(head);
    free(item);
    uv_mutex_unlock(&chan->mutex);
    return data;
}
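For context, the matching send side would plausibly look like this (a sketch, not necessarily the library's actual uv_chan_send; the allocation-failure path is omitted):

/* Hypothetical send counterpart: enqueue an item and wake one waiting receiver. */
void uv_chan_send(uv_chan_t *chan, void *data) {
    uv__chan_item_t *item = (uv__chan_item_t *)malloc(sizeof(*item));
    item->data = data;
    uv_mutex_lock(&chan->mutex);
    QUEUE_INSERT_TAIL(&chan->q, &item->active_queue);
    uv_cond_signal(&chan->cond);
    uv_mutex_unlock(&chan->mutex);
}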
static luv_msg_t* luv_queue_recv(luv_queue_t* queue, int timeout)
{
    luv_msg_t* msg = NULL;
    luv_queue_lock(queue);

    if (queue->limit >= 0) {
        queue->limit++;
        uv_cond_signal(&queue->send_sig);
    }

    // wait
    while (timeout != 0 && queue->count <= 0) {
        if (timeout > 0) {
            int64_t waittime = timeout;
            waittime = waittime * 1000000L;
            if (uv_cond_timedwait(&queue->recv_sig, &queue->lock, waittime) != 0) {
                break;
            }
        }
        else {
            uv_cond_wait(&queue->recv_sig, &queue->lock);
        }
    }

    if (queue->count > 0) {
        msg = queue->msg_head;
        if (msg) {
            queue->msg_head = msg->next;
            if (queue->msg_head == NULL) {
                queue->msg_tail = NULL;
            }
            msg->next = NULL;
        }
        queue->count--;
        uv_cond_signal(&queue->send_sig);
    }

    if (queue->limit > 0) {
        queue->limit--;
    }

    luv_queue_unlock(queue);
    return msg;
}
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

  uv_sem_post((uv_sem_t*) arg);
  arg = NULL;

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
}
static int luv_queue_send(luv_queue_t* queue, luv_msg_t* msg, int timeout)
{
    luv_queue_lock(queue);

    // wait
    while (timeout != 0 && queue->limit >= 0 && queue->count + 1 > queue->limit) {
        if (timeout > 0) {
            int64_t waittime = timeout;
            waittime = waittime * 1000000L;
            if (uv_cond_timedwait(&queue->send_sig, &queue->lock, waittime) != 0) {
                break;
            }
        }
        else {
            uv_cond_wait(&queue->send_sig, &queue->lock);
        }
    }

    // printf("queue: %d/%d", queue->limit, queue->count);
    if (queue->limit < 0 || queue->count + 1 <= queue->limit) {
        msg->next = NULL;
        if (queue->msg_tail) {
            queue->msg_tail->next = msg;
        }
        queue->msg_tail = msg;
        if (queue->msg_head == NULL) {
            queue->msg_head = msg;
        }
        queue->count++;
        uv_cond_signal(&queue->recv_sig);
    }
    else {
        msg = NULL;
    }

    luv_queue_unlock(queue);
    return msg ? 1 : 0;
}
static PyObject *
Condition_func_wait(Condition *self, PyObject *args)
{
    Mutex *pymutex;

    RAISE_IF_NOT_INITIALIZED(self, NULL);

    if (!PyArg_ParseTuple(args, "O!:wait", &MutexType, &pymutex)) {
        return NULL;
    }

    Py_INCREF(pymutex);
    Py_BEGIN_ALLOW_THREADS
    uv_cond_wait(&self->uv_condition, &pymutex->uv_mutex);
    Py_END_ALLOW_THREADS
    Py_DECREF(pymutex);

    Py_RETURN_NONE;
}
int main() {
  uv_thread_t thread;
  worker_config wc;

  memset(&wc, 0, sizeof(wc));
  uv_cond_init(&wc.cond);
  uv_mutex_init(&wc.mutex);
  uv_thread_create(&thread, worker, &wc);

  uv_mutex_lock(&wc.mutex);
  /* Note: there is no predicate flag here, so this relies on the worker
   * signaling only after main() is blocked in uv_cond_wait(); a signal
   * delivered earlier would be lost and this wait would never return. */
  uv_cond_wait(&wc.cond, &wc.mutex);
  uv_mutex_unlock(&wc.mutex);

  uv_thread_join(&thread);
  uv_mutex_destroy(&wc.mutex);
  uv_cond_destroy(&wc.cond);
  return 0;
}
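The worker is not shown. A minimal sketch of a compatible worker (the body is an assumption): taking the mutex before signaling narrows, but does not fully remove, the lost-wakeup window described above.

/* Hypothetical worker (not from the source). Locking the mutex before
 * signaling means the signal cannot fire while main() holds the lock but
 * has not yet called uv_cond_wait(). */
static void worker(void* arg) {
  worker_config* wc = arg;
  uv_mutex_lock(&wc->mutex);
  uv_cond_signal(&wc->cond);
  uv_mutex_unlock(&wc->mutex);
}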
JL_DLLEXPORT jl_task_t *jl_task_get_next(jl_value_t *getsticky)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    // spin briefly before blocking when the workqueue is empty
    size_t spin_count = 0;
    jl_task_t *task;

    while (1) {
        jl_gc_safepoint();
        task = get_next_task(getsticky);
        if (task)
            return task;

        if (!_threadedregion) {
            spin_count = 0;
            if (ptls->tid == 0) {
                if (jl_run_once(jl_global_event_loop()) == 0) {
                    task = get_next_task(getsticky);
                    if (task)
                        return task;
#ifdef _OS_WINDOWS_
                    Sleep(INFINITE);
#else
                    pause();
#endif
                }
            }
            else {
                int sleepnow = 0;
                uv_mutex_lock(&sleep_lock);
                if (!_threadedregion) {
                    sleepnow = 1;
                }
                else {
                    uv_mutex_unlock(&sleep_lock);
                }
                if (sleepnow) {
                    int8_t gc_state = jl_gc_safe_enter(ptls);
                    uv_cond_wait(&sleep_alarm, &sleep_lock);
                    uv_mutex_unlock(&sleep_lock);
                    jl_gc_safe_leave(ptls, gc_state);
                }
            }
        }
        else {
            if (++spin_count > 1000 && jl_atomic_load(&jl_uv_n_waiters) == 0 &&
                jl_mutex_trylock(&jl_uv_mutex)) {
                task = get_next_task(getsticky);
                if (task) {
                    JL_UV_UNLOCK();
                    return task;
                }
                uv_loop_t *loop = jl_global_event_loop();
                loop->stop_flag = 0;
                uv_run(loop, UV_RUN_ONCE);
                JL_UV_UNLOCK();
            }
            else {
                jl_cpu_pause();
            }
        }
    }
}
/*
 * The caller must hold page descriptor lock.
 * The lock will be released and re-acquired. The descriptor is not guaranteed
 * to exist after this function returns.
 */
void pg_cache_wait_event_unsafe(struct rrdeng_page_cache_descr *descr)
{
    ++descr->waiters;
    uv_cond_wait(&descr->cond, &descr->mutex);
    --descr->waiters;
}
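The wake-up side is not shown; a plausible counterpart (an assumption, not taken from the source) that must also be called with the descriptor lock held:

/* Hypothetical wake-up counterpart: broadcast because several readers may
 * be blocked on the same page descriptor. Caller holds descr->mutex. */
void pg_cache_wake_up_waiters_unsafe(struct rrdeng_page_cache_descr *descr)
{
    if (descr->waiters)
        uv_cond_broadcast(&descr->cond);
}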
void wait(linear::unique_lock<linear::mutex>& lock) {
  uv_cond_wait(&cond_, lock.mutex()->native_handle()->native_handle());
}
/** HTTP POST entry point for receiving entries from client
 * Provide the user with an ID */
static int __http_get_id(h2o_handler_t *self, h2o_req_t *req)
{
    static h2o_generator_t generator = { NULL, NULL };

    if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("POST")))
        return -1;

    /* redirect to leader if needed */
    int leader = raft_get_current_leader(sv->raft);
    if (-1 == leader) {
        return h2oh_respond_with_error(req, 503, "Leader unavailable");
    }
    else if (leader != sv->node_idx) {
        raft_node_t* node = raft_get_node(sv->raft, leader);
        peer_connection_t* leader_conn = raft_node_get_udata(node);
        char leader_url[LEADER_URL_LEN];

        static h2o_generator_t generator = { NULL, NULL };
        static h2o_iovec_t body = { .base = "", .len = 0 };
        req->res.status = 301;
        req->res.reason = "Moved Permanently";
        h2o_start_response(req, &generator);
        snprintf(leader_url, LEADER_URL_LEN, "http://%s:%d/",
                 inet_ntoa(leader_conn->addr.sin_addr), leader_conn->http_port);
        h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_LOCATION,
                       leader_url, strlen(leader_url));
        h2o_send(req, &body, 1, 1);
        return 0;
    }

    int e;
    unsigned int ticket = __generate_ticket();

    msg_entry_t entry;
    entry.id = rand();
    entry.data.buf = (void*)&ticket;
    entry.data.len = sizeof(ticket);

    uv_mutex_lock(&sv->raft_lock);

    msg_entry_response_t r;
    e = raft_recv_entry(sv->raft, sv->node_idx, &entry, &r);
    if (0 != e) {
        /* don't leave the raft lock held on the error path */
        uv_mutex_unlock(&sv->raft_lock);
        return h2oh_respond_with_error(req, 500, "BAD");
    }

    /* block until the entry is committed */
    int done = 0;
    do {
        uv_cond_wait(&sv->appendentries_received, &sv->raft_lock);
        e = raft_msg_entry_response_committed(sv->raft, &r);
        switch (e) {
        case 0:
            /* not committed yet */
            break;
        case 1:
            done = 1;
            uv_mutex_unlock(&sv->raft_lock);
            break;
        case -1:
            uv_mutex_unlock(&sv->raft_lock);
            return h2oh_respond_with_error(req, 400, "TRY AGAIN");
        }
    } while (!done);

    /* serialize ID */
    char id_str[100];
    h2o_iovec_t body;
    sprintf(id_str, "%d", entry.id);
    body = h2o_iovec_init(id_str, strlen(id_str));

    req->res.status = 200;
    req->res.reason = "OK";
    h2o_start_response(req, &generator);
    h2o_send(req, &body, 1, 1);
    return 0;
}
static long consread(Chan *c, void *va, long n, vlong offset)
{
	int send;
	char *p, buf[64], ch;

	if(c->qid.type & QTDIR)
		return devdirread(c, va, n, contab, nelem(contab), devgen);

	switch((ulong)c->qid.path) {
	default:
		error(Egreg);
	case Qsysctl:
		return readstr(offset, va, n, VERSION);
	case Qsysname:
		if(ossysname == nil)
			return 0;
		return readstr(offset, va, n, ossysname);
	case Qrandom:
		return randomread(va, n);
	case Qnotquiterandom:
		genrandom(va, n);
		return n;
	case Qhostowner:
		return readstr(offset, va, n, eve);
	case Qhoststdin:
		return read(0, va, n);	/* should be pread */
	case Quser:
		return readstr(offset, va, n, up->env->user);
	case Qjit:
		snprint(buf, sizeof(buf), "%d", cflag);
		return readstr(offset, va, n, buf);
	case Qtime:
		snprint(buf, sizeof(buf), "%.lld", timeoffset + osusectime());
		return readstr(offset, va, n, buf);
	case Qdrivers:
		return devtabread(c, va, n, offset);
	case Qmemory:
		return poolread(va, n, offset);
	case Qnull:
		return 0;
	case Qmsec:
		return readnum(offset, va, n, osmillisec(), NUMSIZE);
	case Qcons:
		qlock(&kbd.q);
		if(waserror()){
			qunlock(&kbd.q);
			nexterror();
		}
		if(dflag)
			error(Enonexist);
		/* register this proc's read interest in the channel */
		/* this probably won't be done here. the read/readn requests
		 * are going to be different watcher callbacks */
		/* the following waits for a signal from the keyboard handler */
		uv_mutex_lock(&line_lock);
		uv_cond_wait(&line_ready, &line_lock);
		n = qread(lineq, va, n);
		uv_mutex_unlock(&line_lock);
		qunlock(&kbd.q);
		poperror();
		return n;
	case Qscancode:
		if(offset == 0)
			return readstr(0, va, n, gkscanid);
		return qread(gkscanq, va, n);
	case Qkeyboard:
		return qread(gkbdq, va, n);
	case Qkprint:
		rlock(&kprintq.l);
		if(waserror()){
			runlock(&kprintq.l);
			nexterror();
		}
		n = qread(kprintq.q, va, n);
		poperror();
		runlock(&kprintq.l);
		return n;
	}
}
void Log::logWritterThread() {
    std::vector<Message*>* messagesToWrite = new std::vector<Message*>;
    size_t i, size;
    struct tm localtm;
    std::vector<char> logHeader;
    bool endLoop = false;
    bool willUpdateFile;
    FILE* logFile = nullptr;
    int lastYear = -1;
    int lastMonth = -1;
    int lastDay = -1;

#ifdef _WIN32
    this->logWritterThreadNativeId = GetCurrentThreadId();
#endif
    this->logWritterThreadStarted = true;

    uv_once(&initMutexOnce, &initMutex);

    while(endLoop == false) {
        size_t pendingMessages;

        uv_mutex_lock(&this->messageListMutex);
        pendingMessages = this->messageQueue.size();
        uv_mutex_unlock(&this->messageListMutex);

        // Flush only if we will wait
        if(pendingMessages == 0 && logFile)
            fflush(logFile);

        uv_mutex_lock(&this->messageListMutex);
        while(this->messageQueue.size() == 0 && this->stop == false) {
            uv_cond_wait(&this->messageListCond, &this->messageListMutex);
        }
        endLoop = this->stop;
        willUpdateFile = this->updateFileRequested;
        messagesToWrite->swap(this->messageQueue);
        this->messageQueueFull = false;
        uv_mutex_unlock(&this->messageListMutex);

        size = messagesToWrite->size();

        bool messageUseFile = false;
        for(i = 0; i < size; i++) {
            if((*messagesToWrite)[i]->writeToFile) {
                messageUseFile = true;
                break;
            }
        }

        // Check if the date changed; if so, update the log file to use a new one (filename has timestamp)
        if(size > 0 && messageUseFile) {
            uint64_t firstMsgTime = messagesToWrite->at(0)->time_ms;
            Utils::getGmTime(firstMsgTime / 1000, &localtm);
            if(localtm.tm_year != lastYear) {
                willUpdateFile = true;
                lastYear = localtm.tm_year;
            }
            if(localtm.tm_mon != lastMonth) {
                willUpdateFile = true;
                lastMonth = localtm.tm_mon;
            }
            if(localtm.tm_mday != lastDay) {
                willUpdateFile = true;
                lastDay = localtm.tm_mday;
            }

            if(willUpdateFile || logFile == nullptr) {
                this->updateFileRequested = false;
                FILE* newfile = openLogFile(logFile, this->dir.get(), this->fileName.get(), lastYear, lastMonth, lastDay);
                if(newfile == logFile) {
                    if(logFile)
                        fprintf(logFile, "Failed to change log file to %s\n", this->fileName.get().c_str());
                    fprintf(stdout, "Failed to change log file to %s\n", this->fileName.get().c_str());
                }
                logFile = newfile;
                if(logFile)
                    setvbuf(logFile, nullptr, _IOFBF, 64 * 1024);
            }
        }

        for(i = 0; i < size; i++) {
            const Message* const msg = messagesToWrite->at(i);
            uint64_t messageTimeMs = msg->time_ms;

            Utils::getGmTime(messageTimeMs / 1000, &localtm);

            // 30 char to %-5s included
            logHeader.resize(31 + msg->objectName.size() + 3);
            size_t strLen = snprintf(&logHeader[0], logHeader.size(), "%4d-%02d-%02d %02d:%02d:%02d.%03d %-5s %s: ",
                                     localtm.tm_year, localtm.tm_mon, localtm.tm_mday,
                                     localtm.tm_hour, localtm.tm_min, localtm.tm_sec,
                                     (unsigned int)(messageTimeMs % 1000),
                                     LEVELSTRINGS[msg->level], msg->objectName.c_str());
            if(strLen >= logHeader.size()) {
                uv_mutex_lock(&consoleMutex);
                fprintf(stdout,
                        "------------------- ERROR Log::logWritterThread: Log buffer was too small, next log message "
                        "might be truncated\n");
                uv_mutex_unlock(&consoleMutex);
                strLen = logHeader.size() - 1;  // do not write the \0
            }

            if(msg->writeToConsole) {
                uv_mutex_lock(&consoleMutex);
                fwrite(&logHeader[0], 1, strLen, stdout);
                fwrite(msg->message.c_str(), 1, msg->message.size(), stdout);
                uv_mutex_unlock(&consoleMutex);
            }
            if(logFile && msg->writeToFile) {
                fwrite(&logHeader[0], 1, strLen, logFile);
                fwrite(msg->message.c_str(), 1, msg->message.size(), logFile);
            }

            delete msg;
        }
        messagesToWrite->clear();
    }

    delete messagesToWrite;
    if(logFile)
        fclose(logFile);
}
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
	struct uv__work* w;
	QUEUE* q;
#ifndef _WIN32
	struct data_t *data = arg;
#endif

	for (;;) {
		uv_mutex_lock(&mutex);

		while (QUEUE_EMPTY(&wq)) {
			idle_threads += 1;
			uv_cond_wait(&cond, &mutex);
			idle_threads -= 1;
		}

		q = QUEUE_HEAD(&wq);

		if (q == &exit_message)
			uv_cond_signal(&cond);
		else {
			QUEUE_REMOVE(q);
			QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is executing. */
		}

		uv_mutex_unlock(&mutex);

		if (q == &exit_message)
			break;

		w = QUEUE_DATA(q, struct uv__work, wq);

#ifndef _WIN32
		if(pilight.debuglevel >= 2) {
			getThreadCPUUsage(pthread_self(), &data->cpu_usage);
			clock_gettime(CLOCK_MONOTONIC, &data->timestamp.first);
		}
#endif

		w->work(w);

#ifndef _WIN32
		if(pilight.debuglevel >= 2) {
			clock_gettime(CLOCK_MONOTONIC, &data->timestamp.second);
			getThreadCPUUsage(pthread_self(), &data->cpu_usage);
			fprintf(stderr, "worker %d, executed %s in %.6f sec using %f%% CPU\n",
				data->nr, w->name,
				((double)data->timestamp.second.tv_sec + 1.0e-9*data->timestamp.second.tv_nsec) -
				((double)data->timestamp.first.tv_sec + 1.0e-9*data->timestamp.first.tv_nsec),
				data->cpu_usage.cpu_per);
		}
#endif

		// free(w->name);

		uv_mutex_lock(&w->loop->wq_mutex);
		w->work = NULL;  /* Signal uv_cancel() that the work req is done executing. */
		QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
		uv_async_send(&w->loop->wq_async);
		uv_mutex_unlock(&w->loop->wq_mutex);
	}

#ifndef _WIN32
	free(data);
#endif
}