static void saturate_threadpool(void) {
  uv_work_t* req;

  ASSERT(0 == uv_cond_init(&signal_cond));
  ASSERT(0 == uv_mutex_init(&signal_mutex));
  ASSERT(0 == uv_mutex_init(&wait_mutex));

  uv_mutex_lock(&signal_mutex);
  uv_mutex_lock(&wait_mutex);

  for (num_threads = 0; /* empty */; num_threads++) {
    req = malloc(sizeof(*req));
    ASSERT(req != NULL);
    ASSERT(0 == uv_queue_work(uv_default_loop(), req, work_cb, done_cb));

    /* Expect to get signalled within 350 ms, otherwise assume that
     * the thread pool is saturated. As with any timing dependent test,
     * this is obviously not ideal.
     */
    if (uv_cond_timedwait(&signal_cond,
                          &signal_mutex,
                          (uint64_t)(350 * 1e6))) {
      ASSERT(0 == uv_cancel((uv_req_t*) req));
      break;
    }
  }
}
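/* work_cb and done_cb are referenced above but not shown. A minimal sketch
 * of how they might look, assuming the same signal_cond/signal_mutex/
 * wait_mutex globals: each worker signals that it occupies a pool slot,
 * then parks on wait_mutex until the test releases it. This is a
 * hypothetical reconstruction; the real test callbacks may differ. */
static void work_cb(uv_work_t* req) {
  uv_mutex_lock(&signal_mutex);
  uv_cond_signal(&signal_cond);   /* tell the main thread we got a slot */
  uv_mutex_unlock(&signal_mutex);

  uv_mutex_lock(&wait_mutex);     /* block until the test unblocks us */
  uv_mutex_unlock(&wait_mutex);
}

static void done_cb(uv_work_t* req, int status) {
  free(req);                      /* req was malloc'ed by the saturator */
}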
Session::Session()
    : state_(SESSION_STATE_CLOSED)
    , current_host_mark_(true)
    , pending_resolve_count_(0)
    , pending_pool_count_(0)
    , pending_workers_count_(0)
    , current_io_worker_(0) {
  uv_mutex_init(&state_mutex_);
  uv_mutex_init(&hosts_mutex_);
}
Session::Session()
    : state_(SESSION_STATE_CLOSED)
    , connect_error_code_(CASS_OK)
    , current_host_mark_(true)
    , pending_pool_count_(0)
    , pending_workers_count_(0)
    , current_io_worker_(0) {
  uv_mutex_init(&state_mutex_);
  uv_mutex_init(&hosts_mutex_);
  uv_mutex_init(&keyspace_mutex_);
}
IOWorker::IOWorker(Session* session)
    : session_(session)
    , config_(session->config())
    , metrics_(session->metrics())
    , protocol_version_(-1)
    , is_closing_(false)
    , pending_request_count_(0)
    , request_queue_(config_.queue_size_io()) {
  prepare_.data = this;
  uv_mutex_init(&keyspace_mutex_);
  uv_mutex_init(&unavailable_addresses_mutex_);
}
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
  if (uv_mutex_init(&rwlock->fallback_.read_mutex_))
    return -1;

  if (uv_mutex_init(&rwlock->fallback_.write_mutex_)) {
    uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
    return -1;
  }

  rwlock->fallback_.num_readers_ = 0;
  return 0;
}
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  struct _uv_barrier* b;
  int rc;

  if (barrier == NULL || count == 0)
    return UV_EINVAL;

  b = uv__malloc(sizeof(*b));
  if (b == NULL)
    return UV_ENOMEM;

  b->in = 0;
  b->out = 0;
  b->threshold = count;

  rc = uv_mutex_init(&b->mutex);
  if (rc != 0)
    goto error2;

  rc = uv_cond_init(&b->cond);
  if (rc != 0)
    goto error;

  barrier->b = b;
  return 0;

error:
  uv_mutex_destroy(&b->mutex);
error2:
  uv__free(b);
  return rc;
}
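/* A minimal usage sketch for the barrier above: N threads rendezvous at
 * uv_barrier_wait(), and the one thread that receives a value > 0 (libuv's
 * documented "serializer" return) tears the barrier down. The worker body
 * and thread count are illustrative assumptions. */
#define NUM_WORKERS 4

static uv_barrier_t blocker;

static void worker(void* arg) {
  /* ... per-thread setup ... */
  if (uv_barrier_wait(&blocker) > 0)
    uv_barrier_destroy(&blocker);  /* exactly one thread cleans up */
}

static void run_workers(void) {
  uv_thread_t threads[NUM_WORKERS];
  int i;

  if (uv_barrier_init(&blocker, NUM_WORKERS))
    abort();
  for (i = 0; i < NUM_WORKERS; i++)
    if (uv_thread_create(&threads[i], worker, NULL))
      abort();
  for (i = 0; i < NUM_WORKERS; i++)
    uv_thread_join(&threads[i]);
}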
/* Populates the object body with a mutex. */
static void initialize_mutex(MVMThreadContext *tc, MVMReentrantMutexBody *rm) {
  int init_stat;
  rm->mutex = MVM_malloc(sizeof(uv_mutex_t));
  if ((init_stat = uv_mutex_init(rm->mutex)) < 0)
    MVM_exception_throw_adhoc(tc, "Failed to initialize mutex: %s",
                              uv_strerror(init_stat));
}
void eventpool_init(enum eventpool_threads_t t) {
  /*
   * Make sure we execute in the main thread
   */
  const uv_thread_t pth_cur_id = uv_thread_self();
  assert(uv_thread_equal(&pth_main_id, &pth_cur_id));

  if (eventpoolinit == 1) {
    return;
  }
  eventpoolinit = 1;
  threads = t;

  if ((async_req = MALLOC(sizeof(uv_async_t))) == NULL) {
    OUT_OF_MEMORY /*LCOV_EXCL_LINE*/
  }
  uv_async_init(uv_default_loop(), async_req, eventpool_execute);

  if (lockinit == 0) {
    lockinit = 1;
    // pthread_mutexattr_init(&listeners_attr);
    // pthread_mutexattr_settype(&listeners_attr, PTHREAD_MUTEX_RECURSIVE);
    // pthread_mutex_init(&listeners_lock, &listeners_attr);
    uv_mutex_init(&listeners_lock);
  }
}
static void init_once(void) {
  unsigned int i;
  const char* val;

  nthreads = ARRAY_SIZE(default_threads);
  val = getenv("UV_THREADPOOL_SIZE");
  if (val != NULL)
    nthreads = atoi(val);
  if (nthreads == 0)
    nthreads = 1;
  if (nthreads > MAX_THREADPOOL_SIZE)
    nthreads = MAX_THREADPOOL_SIZE;

  threads = default_threads;
  if (nthreads > ARRAY_SIZE(default_threads)) {
    threads = malloc(nthreads * sizeof(threads[0]));
    if (threads == NULL) {
      nthreads = ARRAY_SIZE(default_threads);
      threads = default_threads;
    }
  }

  if (uv_cond_init(&cond))
    abort();

  if (uv_mutex_init(&mutex))
    abort();

  QUEUE_INIT(&wq);

  for (i = 0; i < nthreads; i++)
    if (uv_thread_create(threads + i, worker, NULL))
      abort();

  initialized = 1;
}
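/* init_once must run exactly once no matter how many threads race to submit
 * work. In libuv's threadpool that is enforced with uv_once on the work
 * submission path; a sketch of the guard pattern (submit_work_example is an
 * illustrative name, not libuv's actual entry point): */
static uv_once_t once = UV_ONCE_INIT;

static void submit_work_example(struct uv__work* w) {
  uv_once(&once, init_once);   /* first caller initializes the pool */
  uv_mutex_lock(&mutex);
  QUEUE_INSERT_TAIL(&wq, &w->wq);
  uv_cond_signal(&cond);       /* wake one idle worker */
  uv_mutex_unlock(&mutex);
}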
Adapter::Adapter() {
  adapter = nullptr;
  eventCallbackMaxCount = 0;
  eventCallbackBatchEventCounter = 0;
  eventCallbackBatchEventTotalCount = 0;
  eventCallbackBatchNumber = 0;
  eventCallback = nullptr;
  eventIntervalTimer = nullptr;
  asyncEvent = nullptr;
  asyncLog = nullptr;
  asyncStatus = nullptr;

  adapterCloseMutex = new uv_mutex_t();
  if (uv_mutex_init(adapterCloseMutex) != 0) {
    std::cerr << "Not able to create adapterCloseMutex! Terminating." << std::endl;
    std::terminate();
  }

  adapters.push_back(this);
}
LatencyAwarePolicyTest()
    : ccm_(new CCM::Bridge("config.txt"))
    , cluster_(cass_cluster_new())
    , thread_() {
  uv_mutex_init(&lock_);
  uv_cond_init(&condition_);

  // Create the cluster
  if (ccm_->create_cluster(3)) {
    ccm_->start_cluster();
  }

  // Initialize the cluster for latency aware
  cass_cluster_set_reconnect_wait_time(cluster_.get(), 1);
  cass_cluster_set_connect_timeout(cluster_.get(), 240 * test_utils::ONE_SECOND_IN_MICROS);
  cass_cluster_set_request_timeout(cluster_.get(), 240 * test_utils::ONE_SECOND_IN_MICROS);
  test_utils::initialize_contact_points(cluster_.get(), ccm_->get_ip_prefix(), 3, 0);
  cass_cluster_set_latency_aware_routing(cluster_.get(), cass_true);
  cass_cluster_set_latency_aware_routing_settings(cluster_.get(), 1e6, 1, 1, 1, 1);

  // Protocol for this test doesn't matter so simply support all C* versions.
  // Handle deprecated and removed protocol versions [CASSANDRA-10146]
  // https://issues.apache.org/jira/browse/CASSANDRA-10146
  int protocol_version = 1;
  if (test_utils::get_version() >= "3.0.0") {
    protocol_version = 3;
  }
  cass_cluster_set_protocol_version(cluster_.get(), protocol_version);

  // Connect to the cluster
  session_ = test_utils::create_session(cluster_.get());
}
int uv__platform_loop_init(uv_loop_t* loop, int default_loop) {
  CFRunLoopSourceContext ctx;
  int r;

  if (uv__kqueue_init(loop))
    return -1;

  loop->cf_loop = NULL;
  if ((r = uv_mutex_init(&loop->cf_mutex)))
    return r;
  if ((r = uv_sem_init(&loop->cf_sem, 0)))
    return r;
  QUEUE_INIT(&loop->cf_signals);

  memset(&ctx, 0, sizeof(ctx));
  ctx.info = loop;
  ctx.perform = uv__cf_loop_cb;
  loop->cf_cb = CFRunLoopSourceCreate(NULL, 0, &ctx);

  if ((r = uv_thread_create(&loop->cf_thread, uv__cf_loop_runner, loop)))
    return r;

  /* Synchronize threads */
  uv_sem_wait(&loop->cf_sem);
  assert(ACCESS_ONCE(CFRunLoopRef, loop->cf_loop) != NULL);

  return 0;
}
int uv_barrier_init(uv_barrier_t* barrier, unsigned int count) {
  int err;

  barrier->n = count;
  barrier->count = 0;

  err = uv_mutex_init(&barrier->mutex);
  if (err)
    return -err;

  err = uv_sem_init(&barrier->turnstile1, 0);
  if (err)
    goto error2;

  err = uv_sem_init(&barrier->turnstile2, 1);
  if (err)
    goto error;

  return 0;

error:
  uv_sem_destroy(&barrier->turnstile1);
error2:
  uv_mutex_destroy(&barrier->mutex);
  return -err;
}
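/* The matching wait for this turnstile-based barrier follows the classic
 * two-phase pattern (as in The Little Book of Semaphores): the last thread
 * to arrive opens turnstile1, and a second phase resets the barrier so it
 * is reusable. A sketch under the fields initialized above; the real
 * implementation may differ in details: */
void uv_barrier_wait(uv_barrier_t* barrier) {
  uv_mutex_lock(&barrier->mutex);
  if (++barrier->count == barrier->n) {
    uv_sem_wait(&barrier->turnstile2);  /* close the second turnstile */
    uv_sem_post(&barrier->turnstile1);  /* open the first one */
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile1);    /* phase 1: all threads pass here */
  uv_sem_post(&barrier->turnstile1);

  uv_mutex_lock(&barrier->mutex);
  if (--barrier->count == 0) {
    uv_sem_wait(&barrier->turnstile1);  /* close the first turnstile again */
    uv_sem_post(&barrier->turnstile2);  /* open the second one */
  }
  uv_mutex_unlock(&barrier->mutex);

  uv_sem_wait(&barrier->turnstile2);    /* phase 2: barrier is reset */
  uv_sem_post(&barrier->turnstile2);
}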
void tor_init(void) {
  if (initialized) {
    return;
  }

  ERR_load_crypto_strings();
  OpenSSL_add_all_algorithms();

  // enable proper threading
  locks.length = CRYPTO_num_locks();
  locks.item = malloc(locks.length * sizeof(uv_mutex_t));
  for (size_t i = 0; i < locks.length; i++) {
    uv_mutex_init(&locks.item[i]);
  }
  CRYPTO_set_locking_callback(_locking_callback);
  CRYPTO_set_id_callback(uv_thread_self);
  CRYPTO_set_dynlock_create_callback(_dynlock_create_callback);
  CRYPTO_set_dynlock_lock_callback(_dynlock_lock_callback);
  CRYPTO_set_dynlock_destroy_callback(_dynlock_destroy_callback);

  ENGINE_load_builtin_engines();
  ENGINE_register_all_complete();

  initialized = true;
}
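/* _locking_callback is registered above but not shown. OpenSSL (pre-1.1.0)
 * invokes it with CRYPTO_LOCK or CRYPTO_UNLOCK in `mode` and the lock index
 * in `n`; a minimal sketch over the locks array initialized above: */
static void _locking_callback(int mode, int n, const char* file, int line) {
  (void) file;
  (void) line;
  if (mode & CRYPTO_LOCK)
    uv_mutex_lock(&locks.item[n]);
  else
    uv_mutex_unlock(&locks.item[n]);
}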
JNIAPI Java_com_tangide_canvas_CanvasJNI_surfaceCreated(JNIEnv* env, jobject obj) {
  int argc = 2;
  struct stat st = {0};

  lastRenderTime = getTime();
  startupTime = lastRenderTime;

  // Fall back to the internal sdcard when the external path doesn't exist.
  const char* sysRoot = "/mnt/sdcard-ext/cantk-rt-v8";
  if (stat(sysRoot, &st) != 0) {
    sysRoot = "/mnt/sdcard/cantk-rt-v8";
  }

  string str = "--sys-root=" + string(sysRoot);
  char* argv[3] = {(char*)"android", (char*)str.c_str(), NULL};

  Config::init(argc, argv);
  V8Wrapper::init(argc, argv);
  uv_mutex_init(&gLock);

  const char* defaultAppIndex = "/mnt/sdcard-ext/cantk-rt-v8/scripts/cantk/index.html";
  if (stat(defaultAppIndex, &st) != 0) {
    defaultAppIndex = "/mnt/sdcard/cantk-rt-v8/scripts/cantk/index.html";
  }
  V8Wrapper::loadApp(defaultAppIndex);
}
Graph::Graph()
    : thread(NULL)
    , is_running(false) {
  uv_mutex_init(&mutex);
  uv_cond_init(&cv);
}
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
  int err;

  err = uv_mutex_init(&rwlock->fallback_.read_mutex_);
  if (err)
    return err;

  err = uv_mutex_init(&rwlock->fallback_.write_mutex_);
  if (err) {
    uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
    return err;
  }

  rwlock->fallback_.num_readers_ = 0;
  return 0;
}
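/* The fallback builds a readers-writer lock out of the two mutexes and the
 * reader count initialized above: the first reader in takes write_mutex_,
 * the last reader out releases it, and writers simply take write_mutex_.
 * A sketch of the read-side operations under those assumptions: */
static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
  uv_mutex_lock(&rwlock->fallback_.read_mutex_);
  if (++rwlock->fallback_.num_readers_ == 1)
    uv_mutex_lock(&rwlock->fallback_.write_mutex_);   /* block writers */
  uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}

static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
  uv_mutex_lock(&rwlock->fallback_.read_mutex_);
  if (--rwlock->fallback_.num_readers_ == 0)
    uv_mutex_unlock(&rwlock->fallback_.write_mutex_); /* let writers in */
  uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}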
Mutex::Mutex() {
  int r = uv_mutex_init(&mutex);
  if (r != 0) {
    throw std::runtime_error("Failed to create mutex.");
  }
}
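// A wrapper class like this usually pairs with a scoped guard so the mutex
// is released on every exit path, including exceptions. A minimal sketch;
// ScopedLock and the lock()/unlock() members (thin wrappers over
// uv_mutex_lock/uv_mutex_unlock) are assumptions, not part of the class
// shown above:
class ScopedLock {
 public:
  explicit ScopedLock(Mutex& m) : m_(m) { m_.lock(); }
  ~ScopedLock() { m_.unlock(); }        // released even if we throw
  ScopedLock(const ScopedLock&) = delete;
  ScopedLock& operator=(const ScopedLock&) = delete;
 private:
  Mutex& m_;
};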
/**
 * create loop, mutex, and start the thread
 */
UVEventLoop::UVEventLoop() {
  runUV = false;
  loop = uv_loop_new();
  if (uv_mutex_init(&mutex) < 0) {
    // oops
    ;
  }
  StartUVRunner();
}
/* Creates the allocator data structure with bins. */
MVMFixedSizeAlloc * MVM_fixed_size_create(MVMThreadContext *tc) {
  int init_stat;
  MVMFixedSizeAlloc *al = MVM_malloc(sizeof(MVMFixedSizeAlloc));
  al->size_classes = calloc(MVM_FSA_BINS, sizeof(MVMFixedSizeAllocSizeClass));
  if ((init_stat = uv_mutex_init(&(al->complex_alloc_mutex))) < 0)
    MVM_exception_throw_adhoc(tc, "Failed to initialize mutex: %s",
                              uv_strerror(init_stat));
  return al;
}
// initialize the threading infrastructure
void jl_init_threadinginfra(void) {
  /* initialize the synchronization trees pool and the multiqueue */
  multiq_init();

  /* initialize the sleep mechanism */
  uv_mutex_init(&sleep_lock);
  uv_cond_init(&sleep_alarm);
}
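/* The sleep_lock/sleep_alarm pair above is a standard condition-variable
 * sleep: an idle thread waits on the condition under the lock, and a waker
 * signals it. A sketch of that pattern (idle_sleep/wake_one are
 * illustrative names, not Julia's actual scheduler entry points): */
static void idle_sleep(void) {
  uv_mutex_lock(&sleep_lock);
  /* A real scheduler re-checks a wakeup predicate here in a loop,
   * since condition variables permit spurious wakeups. */
  uv_cond_wait(&sleep_alarm, &sleep_lock);
  uv_mutex_unlock(&sleep_lock);
}

static void wake_one(void) {
  uv_mutex_lock(&sleep_lock);
  uv_cond_signal(&sleep_alarm);
  uv_mutex_unlock(&sleep_lock);
}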
static mrb_value mrb_uv_mutex_init(mrb_state *mrb, mrb_value self) {
  uv_mutex_t *m = (uv_mutex_t*)mrb_malloc(mrb, sizeof(uv_mutex_t));
  mrb_uv_check_error(mrb, uv_mutex_init(m));
  DATA_PTR(self) = m;
  DATA_TYPE(self) = &mrb_uv_mutex_type;
  return self;
}
int uv_chan_init(uv_chan_t *chan) {
  int r = uv_mutex_init(&chan->mutex);
  if (r != 0)  /* uv_mutex_init returns a (negative) error code, not -1 */
    return r;
  QUEUE_INIT(&chan->q);
  r = uv_cond_init(&chan->cond);
  if (r != 0)
    uv_mutex_destroy(&chan->mutex);  /* don't leak the mutex on failure */
  return r;
}
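/* A channel like this is a mutex-protected queue plus a condition variable.
 * A sketch of send/recv over the fields initialized above; the item struct
 * and the QUEUE_* macros follow libuv's queue.h conventions and are
 * assumptions about this library's internals: */
typedef struct {
  void* data;
  QUEUE active_queue;
} uv__chan_item_t;

void uv_chan_send(uv_chan_t* chan, void* data) {
  uv__chan_item_t* item = malloc(sizeof(*item));
  item->data = data;
  uv_mutex_lock(&chan->mutex);
  QUEUE_INSERT_TAIL(&chan->q, &item->active_queue);
  uv_cond_signal(&chan->cond);        /* wake one blocked receiver */
  uv_mutex_unlock(&chan->mutex);
}

void* uv_chan_recv(uv_chan_t* chan) {
  uv__chan_item_t* item;
  QUEUE* head;
  void* data;

  uv_mutex_lock(&chan->mutex);
  while (QUEUE_EMPTY(&chan->q))       /* loop guards against spurious wakeups */
    uv_cond_wait(&chan->cond, &chan->mutex);
  head = QUEUE_HEAD(&chan->q);
  item = QUEUE_DATA(head, uv__chan_item_t, active_queue);
  QUEUE_REMOVE(head);
  uv_mutex_unlock(&chan->mutex);

  data = item->data;
  free(item);
  return data;
}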
int status_init(Status* status, int initial_count) {
  int rc;

  rc = uv_mutex_init(&status->mutex);
  if (rc != 0)
    return rc;

  rc = uv_cond_init(&status->cond);
  if (rc != 0) {
    uv_mutex_destroy(&status->mutex);  /* don't leak the mutex on failure */
    return rc;
  }

  status->count = initial_count;
  return rc;
}
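/* A Status like this is typically used as a countdown latch: workers
 * decrement the count and a waiter blocks until it hits zero. A sketch of
 * the two counterpart operations, assuming the fields initialized above
 * (status_notify/status_wait are illustrative names): */
void status_notify(Status* status) {
  uv_mutex_lock(&status->mutex);
  status->count--;
  uv_cond_signal(&status->cond);
  uv_mutex_unlock(&status->mutex);
}

void status_wait(Status* status) {
  uv_mutex_lock(&status->mutex);
  while (status->count > 0)  /* re-check; cond waits can wake spuriously */
    uv_cond_wait(&status->cond, &status->mutex);
  uv_mutex_unlock(&status->mutex);
}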
void nub_loop_init(nub_loop_t* loop) {
  uv_async_t* async_handle;
  int er;

  er = uv_loop_init(&loop->uvloop);
  ASSERT(0 == er);

  er = uv_prepare_init(&loop->uvloop, &loop->queue_processor_);
  ASSERT(0 == er);
  loop->queue_processor_.data = loop;
  uv_unref((uv_handle_t*) &loop->queue_processor_);

  er = uv_mutex_init(&loop->queue_processor_lock_);
  ASSERT(0 == er);

  fuq_init(&loop->blocking_queue_);

  er = uv_sem_init(&loop->loop_lock_sem_, 0);
  ASSERT(0 == er);

  fuq_init(&loop->thread_dispose_queue_);
  er = uv_mutex_init(&loop->thread_dispose_lock_);
  ASSERT(0 == er);

  fuq_init(&loop->work_queue_);
  er = uv_mutex_init(&loop->work_lock_);
  ASSERT(0 == er);

  async_handle = (uv_async_t*) malloc(sizeof(*async_handle));
  CHECK_NE(NULL, async_handle);
  er = uv_async_init(&loop->uvloop, async_handle, nub__thread_dispose);
  ASSERT(0 == er);
  async_handle->data = loop;
  loop->work_ping_ = async_handle;
  uv_unref((uv_handle_t*) loop->work_ping_);

  loop->ref_ = 0;
  loop->disposed_ = 0;

  er = uv_prepare_start(&loop->queue_processor_, nub__async_prepare_cb);
  ASSERT(0 == er);
}
void MVM_nfg_init(MVMThreadContext *tc) {
  int init_stat;
  tc->instance->nfg = calloc(1, sizeof(MVMNFGState));
  if ((init_stat = uv_mutex_init(&(tc->instance->nfg->update_mutex))) < 0) {
    fprintf(stderr, "MoarVM: Initialization of NFG update mutex failed\n %s\n",
            uv_strerror(init_stat));
    exit(1);
  }
  cache_crlf(tc);
}
void EIO_List(uv_work_t* req) {
  // This code exists in javascript for unix platforms
#ifdef __APPLE__
  if (!lockInitialised) {
    uv_mutex_init(&list_mutex);
    lockInitialised = TRUE;
  }

  ListBaton* data = static_cast<ListBaton*>(req->data);

  stDeviceListItem* devices = GetSerialDevices();
  if (*(devices->length) > 0) {
    stDeviceListItem* next = devices;

    for (int i = 0, len = *(devices->length); i < len; i++) {
      stSerialDevice device = (*next).value;

      ListResultItem* resultItem = new ListResultItem();
      resultItem->comName = device.port;

      if (device.locationId != NULL) {
        resultItem->locationId = device.locationId;
      }
      if (device.vendorId != NULL) {
        resultItem->vendorId = device.vendorId;
      }
      if (device.productId != NULL) {
        resultItem->productId = device.productId;
      }
      if (device.manufacturer != NULL) {
        resultItem->manufacturer = device.manufacturer;
      }
      if (device.serialNumber != NULL) {
        resultItem->serialNumber = device.serialNumber;
      }

      data->results.push_back(resultItem);

      stDeviceListItem* current = next;
      if (next->next != NULL) {
        next = next->next;
      }
      free(current);
    }
  }
#endif
}
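// Note that the lazy `if (!lockInitialised)` check above can itself race if
// two threadpool workers enter EIO_List concurrently, since nothing
// serializes access to the flag. A race-free sketch using uv_once
// (list_mutex_init_once and init_list_mutex are illustrative names):
static uv_once_t list_mutex_init_once = UV_ONCE_INIT;

static void init_list_mutex(void) {
  uv_mutex_init(&list_mutex);   // runs exactly once across all threads
}

// At the top of EIO_List, in place of the flag check:
//   uv_once(&list_mutex_init_once, init_list_mutex);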
static int uv__loop_init(uv_loop_t* loop, int default_loop) {
  unsigned int i;
  int err;

  uv__signal_global_once_init();

  memset(loop, 0, sizeof(*loop));
  RB_INIT(&loop->timer_handles);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->active_reqs);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  loop->time = uv__hrtime() / 1000000;
  uv__async_init(&loop->async_watcher);
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop, default_loop);
  if (err)
    return err;

  uv_signal_init(loop, &loop->child_watcher);
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;

  for (i = 0; i < ARRAY_SIZE(loop->process_handles); i++)
    QUEUE_INIT(loop->process_handles + i);

  if (uv_mutex_init(&loop->wq_mutex))
    abort();

  if (uv_async_init(loop, &loop->wq_async, uv__work_done))
    abort();

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  return 0;
}
int uv_loop_init(uv_loop_t* loop) {
  int err;

  uv__signal_global_once_init();

  memset(loop, 0, sizeof(*loop));
  heap_init((struct heap*) &loop->timer_heap);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->active_reqs);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  uv__update_time(loop);
  uv__async_init(&loop->async_watcher);
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;

  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  uv_signal_init(loop, &loop->child_watcher);
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
  QUEUE_INIT(&loop->process_handles);

  if (uv_rwlock_init(&loop->cloexec_lock))
    abort();

  if (uv_mutex_init(&loop->wq_mutex))
    abort();

  if (uv_async_init(loop, &loop->wq_async, uv__work_done))
    abort();

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  return 0;
}
int main(int argc, char** argv) {
  int r;

  server_param = parse_arguments(argc, argv);

  LOG("Extracting data ...");
  training_set = server_extract_data(server_param);

  LOG("Learning ...");
  compile_training_set(training_set);
  factors = learn(training_set, server_param->model);
  factors_backup = copy_learned_factors(factors);
  LOG("Learning completed");

  complete = malloc(20 * sizeof(int));
  memset(complete, 0, 20 * sizeof(int));

  //parser_settings.on_headers_complete = on_headers_complete;
  parser_settings.on_url = on_url;
  parser_settings.on_header_value = on_value;

  uv_loop = uv_default_loop();

  r = uv_tcp_init(uv_loop, &server);
  CHECK(r, "init");

  struct sockaddr_in address = uv_ip4_addr("0.0.0.0", server_param->port);
  r = uv_tcp_bind(&server, address);
  CHECK(r, "bind");

  uv_listen((uv_stream_t*) &server, 128, on_connect);
  LOGF("listening on port %u", server_param->port);

  uv_timer_t timer;
  r = uv_timer_init(uv_default_loop(), &timer);
  assert(r == 0);
  r = uv_timer_start(&timer, timer_cb, 10000, 10000);
  assert(r == 0);

  r = uv_mutex_init(&factors_mutex);
  assert(r == 0);
  r = uv_mutex_init(&factors_backup_mutex);
  assert(r == 0);
  r = uv_mutex_init(&tset_mutex);
  assert(r == 0);

  uv_run(uv_loop);
}