Example #1
const char* qsf_getenv(const char* key, const char* value)
{
    lua_State* L = global_env.L;
    assert(L && key);
    uv_mutex_lock(&global_env.mutex);
    lua_getglobal(L, key);
    const char* s = lua_tostring(L, -1);
    lua_pop(L, 1);
    uv_mutex_unlock(&global_env.mutex);
    return (s == NULL ? value : s);
}
Example #2
static PyObject *
Mutex_func_lock(Mutex *self)
{
    RAISE_IF_NOT_INITIALIZED(self, NULL);

    Py_BEGIN_ALLOW_THREADS
    uv_mutex_lock(&self->uv_mutex);
    Py_END_ALLOW_THREADS

    Py_RETURN_NONE;
}
Example #3
UI *ui_bridge_attach(UI *ui, ui_main_fn ui_main, event_scheduler scheduler)
{
  UIBridgeData *rv = xcalloc(1, sizeof(UIBridgeData));
  rv->ui = ui;
  rv->bridge.rgb = ui->rgb;
  rv->bridge.stop = ui_bridge_stop;
  rv->bridge.resize = ui_bridge_resize;
  rv->bridge.clear = ui_bridge_clear;
  rv->bridge.eol_clear = ui_bridge_eol_clear;
  rv->bridge.cursor_goto = ui_bridge_cursor_goto;
  rv->bridge.mode_info_set = ui_bridge_mode_info_set;
  rv->bridge.update_menu = ui_bridge_update_menu;
  rv->bridge.busy_start = ui_bridge_busy_start;
  rv->bridge.busy_stop = ui_bridge_busy_stop;
  rv->bridge.mouse_on = ui_bridge_mouse_on;
  rv->bridge.mouse_off = ui_bridge_mouse_off;
  rv->bridge.mode_change = ui_bridge_mode_change;
  rv->bridge.set_scroll_region = ui_bridge_set_scroll_region;
  rv->bridge.scroll = ui_bridge_scroll;
  rv->bridge.highlight_set = ui_bridge_highlight_set;
  rv->bridge.put = ui_bridge_put;
  rv->bridge.bell = ui_bridge_bell;
  rv->bridge.visual_bell = ui_bridge_visual_bell;
  rv->bridge.update_fg = ui_bridge_update_fg;
  rv->bridge.update_bg = ui_bridge_update_bg;
  rv->bridge.update_sp = ui_bridge_update_sp;
  rv->bridge.flush = ui_bridge_flush;
  rv->bridge.suspend = ui_bridge_suspend;
  rv->bridge.set_title = ui_bridge_set_title;
  rv->bridge.set_icon = ui_bridge_set_icon;
  rv->scheduler = scheduler;

  for (UIWidget i = 0; (int)i < UI_WIDGETS; i++) {
    rv->bridge.ui_ext[i] = ui->ui_ext[i];
  }

  rv->ui_main = ui_main;
  uv_mutex_init(&rv->mutex);
  uv_cond_init(&rv->cond);
  uv_mutex_lock(&rv->mutex);
  rv->ready = false;

  if (uv_thread_create(&rv->ui_thread, ui_thread_run, rv)) {
    abort();
  }

  while (!rv->ready) {
    uv_cond_wait(&rv->cond, &rv->mutex);
  }
  uv_mutex_unlock(&rv->mutex);

  ui_attach_impl(&rv->bridge);
  return &rv->bridge;
}
Example #4
/*
 * Returns page flags.
 * The lock will be released and re-acquired. The descriptor is not guaranteed
 * to exist after this function returns.
 */
unsigned long pg_cache_wait_event(struct rrdeng_page_cache_descr *descr)
{
    unsigned long flags;

    uv_mutex_lock(&descr->mutex);
    pg_cache_wait_event_unsafe(descr);
    flags = descr->flags;
    uv_mutex_unlock(&descr->mutex);

    return flags;
}
Example #5
/* To avoid deadlock with uv_cancel() it's crucial that the worker
 * never holds the global mutex and the loop-local mutex at the same time.
 */
static void worker(void* arg) {
  struct uv__work* w;
  QUEUE* q;

  (void) arg;

  for (;;) {
    uv_mutex_lock(&mutex);

    while (QUEUE_EMPTY(&wq)) {
      idle_threads += 1;
      uv_cond_wait(&cond, &mutex);
      idle_threads -= 1;
    }

    q = QUEUE_HEAD(&wq);

    if (q == &exit_message)
      uv_cond_signal(&cond);
    else {
      QUEUE_REMOVE(q);
      QUEUE_INIT(q);  /* Signal uv_cancel() that the work req is
                         executing. */
    }

    uv_mutex_unlock(&mutex);

    if (q == &exit_message)
      break;

    w = QUEUE_DATA(q, struct uv__work, wq);
    w->work(w);

    uv_mutex_lock(&w->loop->wq_mutex);
    w->work = NULL;  /* Signal uv_cancel() that the work req is done
                        executing. */
    QUEUE_INSERT_TAIL(&w->loop->wq, &w->wq);
    uv_async_send(&w->loop->wq_async);
    uv_mutex_unlock(&w->loop->wq_mutex);
  }
}
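The worker above is only the consumer half of the pattern. For comparison, the producer half (a hedged sketch in the spirit of libuv's threadpool submit path, reusing the same global mutex, cond, wq and idle_threads, with a hypothetical name post_work) holds the global mutex just long enough to append the item and wakes a worker only if one is idle:

/* Sketch of the producer side for Example #5 (assumes the same globals:
 * mutex, cond, wq, idle_threads). Hypothetical helper, not copied from libuv. */
static void post_work(QUEUE* q) {
  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, q);
  if (idle_threads > 0)
    uv_cond_signal(&cond);  /* wake exactly one sleeping worker */
  uv_mutex_unlock(&mutex);
}

Note that this side never touches a loop-local wq_mutex while holding the global mutex, which is exactly the deadlock-avoidance rule stated in the comment above.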
Example #6
ONSConsumerV8::~ONSConsumerV8()
{
    Stop();

    uv_mutex_lock(&mutex);
    if(real_consumer) delete real_consumer;
    if(listener) delete listener;
    uv_mutex_unlock(&mutex);

    uv_mutex_destroy(&mutex);
    uv_close((uv_handle_t*)&async, NULL);
}
Example #7
static int luv_queue_channel_stop(lua_State* L)
{
	luv_queue_t* queue = luv_queue_check_queue_t(L);

	uv_mutex_lock(&luv_queues_lock);
	if (queue->async_cb != LUA_REFNIL) {
		uv_close((uv_handle_t*)&queue->async, NULL);
		queue->async_cb = LUA_REFNIL;
	}
	uv_mutex_unlock(&luv_queues_lock);
	return 0;
}
Example #8
void Clients::disconnect(uv_stream_t *stream, TcpClient* client)
{
	stream->data = nullptr;

	uv_mutex_lock(&this->mutex);
	this->connected.unsafe_erase(stream);
	uv_mutex_unlock(&this->mutex);

	this->available_clients.push(client);

	LOG(INFO) << "Client " << static_cast<void*>(client) << " disconnected";
}
Example #9
static mrb_value
mrb_uv_once(mrb_state *mrb, mrb_value self)
{
  uv_mutex_lock(&once_info.lock);

  once_info.mrb = mrb;
  once_info.block = mrb_iv_get(mrb, self, mrb_intern_lit(mrb, "once_cb"));
  uv_once((uv_once_t*)DATA_PTR(self), _uv_once_cb);

  uv_mutex_unlock(&once_info.lock);
  return self;
}
Example #10
void uv_barrier_wait(uv_barrier_t* barrier) {
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);
    uv_sem_post(&barrier->turnstile1);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  if (--barrier->count == 0) {
    uv_sem_wait(&barrier->turnstile1);
    uv_sem_post(&barrier->turnstile2);
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);
  uv_sem_post(&barrier->turnstile2);
}
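Example #10 is the portable fallback implementation of the barrier. From the caller's side, the API is used roughly as in the sketch below (a minimal example assuming four worker threads; note that newer libuv releases return an int from uv_barrier_wait that is nonzero in exactly one of the waiting threads, whereas the fallback above returns void):

#include <uv.h>

/* Minimal usage sketch for the barrier shown above: four threads
 * rendezvous once before doing their real work. */
static uv_barrier_t blocker;

static void worker_entry(void* arg) {
  (void) arg;
  /* ... per-thread setup ... */
  uv_barrier_wait(&blocker);  /* wait until all four threads arrive */
  /* ... work that requires every thread to be initialized ... */
}

int main(void) {
  uv_thread_t threads[4];
  uv_barrier_init(&blocker, 4);  /* 4 participants */
  for (int i = 0; i < 4; i++)
    uv_thread_create(&threads[i], worker_entry, NULL);
  for (int i = 0; i < 4; i++)
    uv_thread_join(&threads[i]);
  uv_barrier_destroy(&blocker);
  return 0;
}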
Example #11
Kinect::~Kinect() {
  printf("Error: need to free buffers.\n");

  uv_mutex_lock(&mutex);
  must_stop = true;
  uv_mutex_unlock(&mutex);

  uv_thread_join(&thread);

  if(ctx) {
    freenect_close_device(device);
    freenect_shutdown(ctx);
    ctx = NULL;
  }

  uv_mutex_destroy(&mutex);

  has_new_rgb = false;
  has_new_depth = false;

  if(depth_back) {
    delete[] depth_back;
  }

  if(depth_mid) {
    delete[] depth_mid;
  }

  if(depth_front) {
    delete[] depth_front;
  }

  depth_back = NULL;
  depth_mid = NULL;
  depth_front = NULL;

  if(rgb_back) {
    delete[] rgb_back;
  }

  if(rgb_mid) {
    delete[] rgb_mid;
  }

  if(rgb_front) {
    delete[] rgb_front;
  }

  rgb_back = NULL;
  rgb_mid = NULL;
  rgb_front = NULL;
  device = NULL;
}
Example #12
static void ui_bridge_suspend(UI *b)
{
  UIBridgeData *data = (UIBridgeData *)b;
  uv_mutex_lock(&data->mutex);
  UI_CALL(b, suspend, 1, b);
  data->ready = false;
  // suspend the main thread until CONTINUE is called by the UI thread
  while (!data->ready) {
    uv_cond_wait(&data->cond, &data->mutex);
  }
  uv_mutex_unlock(&data->mutex);
}
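ui_bridge_suspend blocks until the UI thread flips ready back to true. The wake-up side is not shown here; a hedged sketch of what it has to do (same mutex and condition variable, hypothetical function name) is:

/* Hypothetical sketch of the wake-up side for Example #12: the UI thread
 * sets ready and signals the condition variable under the same mutex. */
static void ui_bridge_continue(UIBridgeData *data)
{
  uv_mutex_lock(&data->mutex);
  data->ready = true;
  uv_cond_signal(&data->cond);
  uv_mutex_unlock(&data->mutex);
}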
Example #13
void uv_chan_clear(uv_chan_t *chan) {
    uv_mutex_lock(&chan->mutex);
    uv__chan_item_t *item = NULL;
    QUEUE *head = NULL;
    while (!QUEUE_EMPTY(&chan->q)) {
        head = QUEUE_HEAD(&chan->q);
        item = QUEUE_DATA(head, uv__chan_item_t, active_queue);
        QUEUE_REMOVE(head);
        free(item);
    }
    uv_mutex_unlock(&chan->mutex);
}
Example #14
void uv__stream_osx_select(void* arg) {
  uv_stream_t* stream;
  uv__stream_select_t* s;
  fd_set read;
  fd_set write;
  fd_set error;
  struct timeval timeout;
  int events;
  int fd;
  int r;

  stream = arg;
  s = stream->select;
  fd = stream->fd;

  while (1) {
    /* Terminate on semaphore */
    if (uv_sem_trywait(&s->sem) == 0) break;

    /* Watch fd using select(2) */
    FD_ZERO(&read);
    FD_ZERO(&write);
    FD_ZERO(&error);
    FD_SET(fd, &read);
    FD_SET(fd, &write);
    FD_SET(fd, &error);

    timeout.tv_sec = 0;
    timeout.tv_usec = 250000; /* 250 ms timeout */
    r = select(fd + 1, &read, &write, &error, &timeout);
    if (r == -1) {
      if (errno == EINTR) continue;
      /* XXX: Possible?! */
      abort();
    }

    /* Ignore timeouts */
    if (r == 0) continue;

    /* Handle events */
    events = 0;
    if (FD_ISSET(fd, &read)) events |= UV__IO_READ;
    if (FD_ISSET(fd, &write)) events |= UV__IO_WRITE;
    if (FD_ISSET(fd, &error)) events |= UV__IO_ERROR;

    uv_mutex_lock(&s->mutex);
    s->events |= events;
    uv_mutex_unlock(&s->mutex);

    if (events != 0) uv_async_send(&s->async);
  }
}
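The select thread above only accumulates events; the loop thread consumes them in the uv_async_t callback. A simplified sketch of that side (not the verbatim libuv callback; it assumes libuv's internal container_of macro and the async member seen in uv_async_send above) reads and clears s->events under the same mutex before dispatching:

/* Simplified sketch of the loop-thread side for Example #14: take the
 * accumulated events under s->mutex, clear them, then act on the copy. */
static void stream_select_cb(uv_async_t* handle) {
  uv__stream_select_t* s = container_of(handle, uv__stream_select_t, async);
  int events;

  uv_mutex_lock(&s->mutex);
  events = s->events;
  s->events = 0;
  uv_mutex_unlock(&s->mutex);

  /* ... dispatch read/write/error handling based on `events` ... */
}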
Example #15
File: core.c Project: Kitware/CMake
void uv__wake_all_loops(void) {
  int i;
  uv_loop_t* loop;

  uv_mutex_lock(&uv__loops_lock);
  for (i = 0; i < uv__loops_size; ++i) {
    loop = uv__loops[i];
    assert(loop);
    if (loop->iocp != INVALID_HANDLE_VALUE)
      PostQueuedCompletionStatus(loop->iocp, 0, 0, NULL);
  }
  uv_mutex_unlock(&uv__loops_lock);
}
Example #16
static int uv__work_cancel(uv_loop_t* loop, uv_req_t* req, struct uv__work* w) {
  int cancelled;

  uv_mutex_lock(&mutex);
  uv_mutex_lock(&w->loop->wq_mutex);

  cancelled = !QUEUE_EMPTY(&w->wq) && w->work != NULL;
  if (cancelled) QUEUE_REMOVE(&w->wq);

  uv_mutex_unlock(&w->loop->wq_mutex);
  uv_mutex_unlock(&mutex);

  if (!cancelled) return -1;

  w->work = uv__cancelled;
  uv_mutex_lock(&loop->wq_mutex);
  QUEUE_INSERT_TAIL(&loop->wq, &w->wq);
  uv_async_send(&loop->wq_async);
  uv_mutex_unlock(&loop->wq_mutex);

  return 0;
}
Example #17
/* Loads the SC dependencies list. */
static void deserialize_sc_deps(MVMThreadContext *tc, MVMCompUnit *cu, ReaderState *rs) {
    MVMCompUnitBody *cu_body = &cu->body;
    MVMuint32 i, sh_idx;
    MVMuint8  *pos;

    /* Allocate SC lists in compilation unit. */
    cu_body->scs = MVM_malloc(rs->expected_scs * sizeof(MVMSerializationContext *));
    cu_body->scs_to_resolve = MVM_malloc(rs->expected_scs * sizeof(MVMSerializationContextBody *));
    cu_body->sc_handle_idxs = MVM_malloc(rs->expected_scs * sizeof(MVMint32));
    cu_body->num_scs = rs->expected_scs;

    /* Resolve all the things. */
    pos = rs->sc_seg;
    for (i = 0; i < rs->expected_scs; i++) {
        MVMSerializationContextBody *scb;
        MVMString *handle;

        /* Grab string heap index. */
        ensure_can_read(tc, cu, rs, pos, 4);
        sh_idx = read_int32(pos, 0);
        pos += 4;

        /* Resolve to string. */
        if (sh_idx >= cu_body->num_strings) {
            cleanup_all(tc, rs);
            MVM_exception_throw_adhoc(tc, "String heap index beyond end of string heap");
        }
        cu_body->sc_handle_idxs[i] = sh_idx;
        handle = MVM_cu_string(tc, cu, sh_idx);

        /* See if we can resolve it. */
        uv_mutex_lock(&tc->instance->mutex_sc_weakhash);
        MVM_string_flatten(tc, handle);
        MVM_HASH_GET(tc, tc->instance->sc_weakhash, handle, scb);
        if (scb && scb->sc) {
            cu_body->scs_to_resolve[i] = NULL;
            MVM_ASSIGN_REF(tc, &(cu->common.header), cu_body->scs[i], scb->sc);
        }
        else {
            if (!scb) {
                scb = MVM_calloc(1, sizeof(MVMSerializationContextBody));
                scb->handle = handle;
                MVM_HASH_BIND(tc, tc->instance->sc_weakhash, handle, scb);
                MVM_sc_add_all_scs_entry(tc, scb);
            }
            cu_body->scs_to_resolve[i] = scb;
            cu_body->scs[i] = NULL;
        }
        uv_mutex_unlock(&tc->instance->mutex_sc_weakhash);
    }
}
Example #18
  /**
   * Start the query execution thread
   */
  void start_query_execution() {
    is_running_ = true;
    is_error_ = false;
    is_warming_up_ = true;
    memset(max_node_latency, 0, sizeof(max_node_latency));
    uv_thread_create(&thread_, start_thread, NULL);

    // Allow metrics to gather some initial data
    uv_mutex_lock(&lock_);
    while (is_warming_up_) {
      uv_cond_wait(&condition_, &lock_);
    }
    uv_mutex_unlock(&lock_);
  }
Example #19
File: tor.c Project: postfix/libtor
static void
_locking_callback (int mode, int n, const char* file, int line)
{
	if (n >= locks.length) {  /* ignore out-of-range lock indices */
		return;
	}

	if (mode & CRYPTO_LOCK) {
		uv_mutex_lock(&locks.item[n]);
	}
	else {
		uv_mutex_unlock(&locks.item[n]);
	}
}
Example #20
void qsf_setenv(const char* key, const char* value)
{
    lua_State* L = global_env.L;
    assert(L && key && value);
    uv_mutex_lock(&global_env.mutex);
    lua_getglobal(L, key);
    if (lua_isnil(L, -1))
    {
        lua_pushstring(L, value);
        lua_setglobal(L, key);
    }
    lua_pop(L, 1);
    uv_mutex_unlock(&global_env.mutex);
}
Example #21
void pc__client_reconnect_reset(pc_client_t *client) {
  assert(client);
  if(client->transport) {
    pc_transport_destroy(client->transport);
    client->transport = NULL;
  }

  if(client->heartbeat_timer != NULL) {
    uv_timer_stop(client->heartbeat_timer);
    client->heartbeat = 0;
  }

  if(client->timeout_timer != NULL) {
    uv_timer_stop(client->timeout_timer);
    client->timeout = 0;
  } 
  
  if(client->requests) {
    pc_map_clear(client->requests);
  }

  if(client->pkg_parser) {
    pc_pkg_parser_reset(client->pkg_parser);
  }

  if(client->handshake_opts) {
    json_decref(client->handshake_opts);
    client->handshake_opts = NULL;
  }

  if(client->route_to_code) {
    json_decref(client->route_to_code);
    client->route_to_code = NULL;
  }
  if(client->code_to_route) {
    json_decref(client->code_to_route);
    client->code_to_route = NULL;
  }
  if(client->server_protos) {
    json_decref(client->server_protos);
    client->server_protos = NULL;
  }
  if(client->client_protos) {
    json_decref(client->client_protos);
    client->client_protos = NULL;
  }
  uv_mutex_lock(&client->state_mutex);
  client->state = PC_ST_INITED;
  uv_mutex_unlock(&client->state_mutex);
}
Example #22
int CX264Encoder2::Put(AVFrame* frame)
{
    uv_mutex_lock(pQueueMutex);
    if(queueFrame.size() > 50){
        uv_mutex_unlock(pQueueMutex);
        av_frame_free(&frame);
        return 0;
    }
    queueFrame.push_back(frame);
    printf("%d", queueFrame.size());
    uv_mutex_unlock(pQueueMutex);
    uv_cond_signal(pQueueNotEmpty);
    return 0;
}
Example #23
int CX264Encoder::put_sample(cc_src_sample_t sample)
{
	uv_mutex_lock(&queue_mutex);
	if (sample_queue.size() > 50){
		uv_mutex_unlock(&queue_mutex);
		free(sample.buf[0]);
		return 0;
	}
	sample_queue.push_back(sample);
	printf("%d", sample_queue.size());
	uv_mutex_unlock(&queue_mutex);
	uv_cond_signal(&queue_not_empty);
	return 0;
}
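Examples #22 and #23 are the producer half of a mutex/condition-variable queue (the condition variable is signalled after the mutex is released, which avoids waking a consumer only to have it block on a still-held lock). A hedged sketch of the matching consumer, reusing Example #23's queue_mutex, queue_not_empty and sample_queue names and assuming sample_queue is a std::deque-like container, would be:

// Hedged sketch of the consumer side for Example #23; get_sample is a
// hypothetical method name, and sample_queue is assumed to support
// empty()/front()/pop_front().
cc_src_sample_t CX264Encoder::get_sample()
{
	uv_mutex_lock(&queue_mutex);
	while (sample_queue.empty()) {
		uv_cond_wait(&queue_not_empty, &queue_mutex);
	}
	cc_src_sample_t sample = sample_queue.front();
	sample_queue.pop_front();
	uv_mutex_unlock(&queue_mutex);
	return sample;
}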
Example #24
File: partr.c Project: KDr2/julia
JL_DLLEXPORT void jl_wakeup_thread(int16_t tid)
{
    jl_ptls_t ptls = jl_get_ptls_states();
    /* ensure thread tid is awake if necessary */
    if (ptls->tid != tid && !_threadedregion && tid != -1) {
        uv_mutex_lock(&sleep_lock);
        uv_cond_broadcast(&sleep_alarm); // TODO: make this uv_cond_signal / just wake up correct thread
        uv_mutex_unlock(&sleep_lock);
    }
    if (_threadedregion && jl_uv_mutex.owner != jl_thread_self())
        jl_wake_libuv();
    else
        uv_stop(jl_global_event_loop());
}
Example #25
v8::Handle<v8::Value> Write(const v8::Arguments& args) {
  v8::HandleScope scope;

  // file descriptor
  if(!args[0]->IsInt32()) {
    return scope.Close(v8::ThrowException(v8::Exception::TypeError(v8::String::New("First argument must be an int"))));
  }
  int fd = args[0]->ToInt32()->Int32Value();

  // buffer
  if(!args[1]->IsObject() || !node::Buffer::HasInstance(args[1])) {
    return scope.Close(v8::ThrowException(v8::Exception::TypeError(v8::String::New("Second argument must be a buffer"))));
  }
  v8::Persistent<v8::Object> buffer = v8::Persistent<v8::Object>::New(args[1]->ToObject());
  char* bufferData = node::Buffer::Data(buffer);
  size_t bufferLength = node::Buffer::Length(buffer);

  // callback
  if(!args[2]->IsFunction()) {
    return scope.Close(v8::ThrowException(v8::Exception::TypeError(v8::String::New("Third argument must be a function"))));
  }
  v8::Local<v8::Value> callback = args[2];

  WriteBaton* baton = new WriteBaton();
  memset(baton, 0, sizeof(WriteBaton));
  baton->fd = fd;
  baton->buffer = buffer;
  baton->bufferData = bufferData;
  baton->bufferLength = bufferLength;
  // baton->offset = 0;
  baton->callback = v8::Persistent<v8::Value>::New(callback);

  QueuedWrite* queuedWrite = new QueuedWrite();
  memset(queuedWrite, 0, sizeof(QueuedWrite));
  ngx_queue_init(&queuedWrite->queue);
  queuedWrite->baton = baton;
  queuedWrite->req.data = queuedWrite;

  uv_mutex_lock(&write_queue_mutex);
  bool empty = ngx_queue_empty(&write_queue);

  ngx_queue_insert_tail(&write_queue, &queuedWrite->queue);

  if (empty) {
    uv_queue_work(uv_default_loop(), &queuedWrite->req, EIO_Write, (uv_after_work_cb)EIO_AfterWrite);
  }   
  uv_mutex_unlock(&write_queue_mutex);

  return scope.Close(v8::Undefined());
}
Example #26
void eventpool_trigger(int reason, void *(*done)(void *), void *data) {
	if(eventpoolinit == 0) {
		return;
	}

#ifdef _WIN32
	SetThreadPriority(GetCurrentThread(), THREAD_PRIORITY_HIGHEST);
#else
	struct sched_param sched;
	memset(&sched, 0, sizeof(sched));
	sched.sched_priority = 80;
	pthread_setschedparam(pthread_self(), SCHED_RR, &sched);
#endif
	int eventqueue_size = 0;

	struct eventqueue_t *node = MALLOC(sizeof(struct eventqueue_t));
	if(node == NULL) {
		OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
	}
	memset(node, 0, sizeof(struct eventqueue_t));
	node->reason = reason;
	node->done = done;
	node->data = data;

	uv_mutex_lock(&listeners_lock);
	struct eventqueue_t *tmp = eventqueue;
	if(tmp != NULL) {
		while(tmp->next != NULL) {
			eventqueue_size++;
			tmp = tmp->next;
		}
		tmp->next = node;
		node = tmp;
	} else {
		node->next = eventqueue;
		eventqueue = node;
	}

	/*
	 * If the eventqueue size is above
	 * 50 entries then there must be a bug
	 * at the trigger side.
	 */
	assert(eventqueue_size < 50);

	uv_mutex_unlock(&listeners_lock);

	uv_async_send(async_req);
}
Example #27
// OnDrainWriteQueue ensures that all ByteArrays queued up in the TLSConnection's
// write queue are sent.
void TLSConnectionPrivate::OnDrainWriteQueue(uv_async_t *handle, int status) {
	assert(handle != nullptr);
	assert(handle->data != nullptr);

	TLSConnectionPrivate *cp = static_cast<TLSConnectionPrivate *>(handle->data);
	if (cp->state_ == TLS_CONNECTION_STATE_ESTABLISHED) {
		uv_mutex_lock(&cp->wqlock_);
		while (cp->wq_.size() > 0) {
			const ByteArray &buf = cp->wq_.front();
			cp->Write(buf);
			cp->wq_.pop();
		}
		uv_mutex_unlock(&cp->wqlock_);
	}
}
Example #28
static int luv_queue_channel_gc(lua_State* L)
{
	luv_queue_t* queue = luv_queue_check_queue_t(L);
	printf("chan_gc: %s, refs=%d\n", queue->name, queue->refs);

	uv_mutex_lock(&luv_queues_lock);
	if (queue->async_cb != LUA_REFNIL) {
		uv_close((uv_handle_t*)&queue->async, NULL);
		queue->async_cb = LUA_REFNIL;
	}

	luv_queue_release(queue);
	uv_mutex_unlock(&luv_queues_lock);
	return 0;
}
Example #29
int main() {
  assert(0 == uv_mutex_init(&mutex));
  assert(0 == uv_thread_create(&thread, thread_cb, NULL));

  uv_mutex_lock(&mutex);
  printf("main mutex start\n");
  sleep(1);
  printf("main mutex end\n");
  uv_mutex_unlock(&mutex);

  uv_thread_join(&thread);
  uv_mutex_destroy(&mutex);

  return 0;
}
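Example #29 references a thread_cb that is not shown. A hypothetical one that contends for the same mutex (so the program demonstrates the roughly one second of lock ownership in main) could look like this:

/* Hypothetical thread_cb for Example #29 (not part of the original):
 * it blocks until main releases the mutex it holds for about a second. */
static void thread_cb(void* arg) {
  (void) arg;
  uv_mutex_lock(&mutex);
  printf("thread mutex start\n");
  printf("thread mutex end\n");
  uv_mutex_unlock(&mutex);
}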
Example #30
void UploadRequest::removeServer(GameServerSession* server) {
	std::unordered_map<uint32_t, UploadRequest*>::const_iterator it, itEnd;

	uv_mutex_lock(&mapLock);
	for(it = pendingRequests.begin(), itEnd = pendingRequests.end(); it != itEnd;) {
		UploadRequest* client = it->second;
		if(client->getGameServer() == server) {
			it = pendingRequests.erase(it);
			delete client;
		} else {
			++it;
		}
	}
	uv_mutex_unlock(&mapLock);
}