/* One-time logging setup: create the global log lock, then register the
 * Cassandra driver log callback at ERROR level. */
static void php_cassandra_log_initialize()
{
  /* BUG FIX: uv_rwlock_init() can fail (resource exhaustion); the result
   * used to be ignored, leaving log_lock unusable. Treat failure as fatal
   * since all subsequent logging depends on this lock. */
  if (uv_rwlock_init(&log_lock) != 0) {
    abort();
  }
  cass_log_set_level(CASS_LOG_ERROR);
  cass_log_set_callback(php_cassandra_log, NULL);
}
/* Initialize the page cache's metrics index: empty JudyHS array plus the
 * rwlock that guards it. */
static void init_metrics_index(struct rrdengine_instance *ctx)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    int ret;

    pg_cache->metrics_index.JudyHS_array = (Pvoid_t) NULL;
    /* BUG FIX: uv_rwlock_init() used to be called inside assert(), so a
     * build with NDEBUG compiled the initialization away entirely, leaving
     * the lock uninitialized. Run it unconditionally; assert on the result. */
    ret = uv_rwlock_init(&pg_cache->metrics_index.lock);
    assert(0 == ret);
    (void)ret; /* silence unused-variable warning under NDEBUG */
}
/** cs_setopt_logfile : set log file **/
void cs_setopt_logfile(cspider_t *cspider, FILE *log) {
  PANIC(cspider);
  PANIC(log);
  cspider->log = log;
  cspider->log_lock = (uv_rwlock_t*)malloc(sizeof(uv_rwlock_t));
  PANIC(cspider->log_lock); /* BUG FIX: malloc result was unchecked */
  /* BUG FIX: uv_rwlock_init() result was ignored; an uninitialized lock
   * would make every later lock/unlock undefined behavior. */
  if (uv_rwlock_init(cspider->log_lock) != 0) {
    abort();
  }
}
/* Initialize the page replacement queue (LRU list): empty head/tail and
 * the rwlock that guards it. */
static void init_replaceQ(struct rrdengine_instance *ctx)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    int ret;

    pg_cache->replaceQ.head = NULL;
    pg_cache->replaceQ.tail = NULL;
    /* BUG FIX: the side-effecting uv_rwlock_init() call used to live inside
     * assert(); with NDEBUG the lock was never initialized. */
    ret = uv_rwlock_init(&pg_cache->replaceQ.lock);
    assert(0 == ret);
    (void)ret; /* silence unused-variable warning under NDEBUG */
}
/* Initialize the commited-page index: empty JudyL array, its rwlock, and
 * the commit bookkeeping counters. */
static void init_commited_page_index(struct rrdengine_instance *ctx)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    int ret;

    pg_cache->commited_page_index.JudyL_array = (Pvoid_t) NULL;
    /* BUG FIX: uv_rwlock_init() was called inside assert(), so an NDEBUG
     * build skipped lock initialization entirely. */
    ret = uv_rwlock_init(&pg_cache->commited_page_index.lock);
    assert(0 == ret);
    (void)ret; /* silence unused-variable warning under NDEBUG */
    pg_cache->commited_page_index.latest_corr_id = 0;
    pg_cache->commited_page_index.nr_commited_pages = 0;
}
/* Initialize a libuv event loop in place (older libuv variant).
 * Returns 0 on success or the uv__platform_loop_init() error code.
 * Note: unlike later libuv versions there is no unwind path — lock and
 * async setup failures after platform init simply abort(). */
int uv_loop_init(uv_loop_t* loop) {
  int err;

  uv__signal_global_once_init();
  memset(loop, 0, sizeof(*loop));
  heap_init((struct heap*) &loop->timer_heap);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->active_reqs);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  uv__update_time(loop);
  uv__async_init(&loop->async_watcher);
  /* -1 sentinels mark file descriptors as "not yet opened". */
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;
  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  /* Internal child watcher: unref'd so it never keeps the loop alive. */
  uv_signal_init(loop, &loop->child_watcher);
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
  QUEUE_INIT(&loop->process_handles);

  /* Failures here are treated as fatal rather than unwound. */
  if (uv_rwlock_init(&loop->cloexec_lock))
    abort();

  if (uv_mutex_init(&loop->wq_mutex))
    abort();

  if (uv_async_init(loop, &loop->wq_async, uv__work_done))
    abort();

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  return 0;
}
/* Create the page cache (one per rrdengine instance): zero the counters,
 * initialize the top-level rwlock, then the three sub-indexes. */
void init_page_cache(struct rrdengine_instance *ctx)
{
    struct page_cache *pg_cache = &ctx->pg_cache;
    int ret;

    pg_cache->page_descriptors = 0;
    pg_cache->populated_pages = 0;
    /* BUG FIX: side-effecting uv_rwlock_init() moved out of assert() so
     * NDEBUG builds still initialize the lock. */
    ret = uv_rwlock_init(&pg_cache->pg_cache_rwlock);
    assert(0 == ret);
    (void)ret; /* silence unused-variable warning under NDEBUG */

    init_metrics_index(ctx);
    init_replaceQ(ctx);
    init_commited_page_index(ctx);
}
/* Allocate and initialize a per-metric page index keyed by `id`.
 * Times start as INVALID_TIME (no data yet). Caller owns the result. */
struct pg_cache_page_index *create_page_index(uuid_t *id)
{
    struct pg_cache_page_index *page_index = mallocz(sizeof(*page_index));
    int ret;

    page_index->JudyL_array = (Pvoid_t) NULL;
    uuid_copy(page_index->id, *id);
    /* BUG FIX: uv_rwlock_init() no longer lives inside assert(); an NDEBUG
     * build used to leave the lock uninitialized. */
    ret = uv_rwlock_init(&page_index->lock);
    assert(0 == ret);
    (void)ret; /* silence unused-variable warning under NDEBUG */
    page_index->oldest_time = INVALID_TIME;
    page_index->latest_time = INVALID_TIME;

    return page_index;
}
/* Bring up the worker pool: snapshot config-derived status, create the
 * hashrate tracker and one Handle per configured thread, and arm the
 * result-async and 500 ms tick timer on the default loop. */
void Workers::start(xmrig::Controller *controller)
{
#   ifdef APP_DEBUG
    LOG_NOTICE("THREADS ------------------------------------------------------------------");
    for (const xmrig::IThread *thread : controller->config()->threads()) {
        thread->print();
    }
    LOG_NOTICE("--------------------------------------------------------------------------");
#   endif

#   ifndef XMRIG_NO_ASM
    xmrig::CpuThread::patchAsmVariants();
#   endif

    m_controller = controller;

    const std::vector<xmrig::IThread *> &threads = controller->config()->threads();
    m_status.algo = controller->config()->algorithm().algo();
    m_status.colors = controller->config()->isColors();
    m_status.threads = threads.size();

    /* Total "ways" is the sum of every thread's multiway factor. */
    for (const xmrig::IThread *thread : threads) {
        m_status.ways += thread->multiway();
    }

    m_hashrate = new Hashrate(threads.size(), controller);

    /* NOTE(review): uv_mutex_init/uv_rwlock_init results are ignored here;
     * a failure would leave the locks unusable — worth confirming upstream. */
    uv_mutex_init(&m_mutex);
    uv_rwlock_init(&m_rwlock);

    m_sequence = 1;
    m_paused = 1;

    uv_async_init(uv_default_loop(), &m_async, Workers::onResult);
    uv_timer_init(uv_default_loop(), &m_timer);
    uv_timer_start(&m_timer, Workers::onTick, 500, 500); /* 500 ms tick */

    /* offset accumulates multiway counts; presumably each Handle's slice
     * of the work space — confirm against Handle's ctor. */
    uint32_t offset = 0;
    for (xmrig::IThread *thread : threads) {
        Handle *handle = new Handle(thread, offset, m_status.ways);
        offset += thread->multiway();
        m_workers.push_back(handle);
        handle->start(Workers::onReady);
    }

    if (controller->config()->isShouldSave()) {
        controller->config()->save();
    }
}
cspider_t *init_cspider() { cspider_t *spider = (cspider_t *)malloc(sizeof(cspider_t)); PANIC(spider); //init task queue spider->task_queue = initTaskQueue(); PANIC(spider->task_queue); spider->task_queue_doing = initTaskQueue(); PANIC(spider->task_queue_doing); //init data queue spider->data_queue = initDataQueue(); PANIC(spider->data_queue); spider->data_queue_doing = initDataQueue(); PANIC(spider->data_queue_doing); spider->threadpool_size = 4; spider->process = NULL; spider->save = NULL; spider->process_user_data = NULL; spider->save_user_data = NULL; spider->loop = uv_default_loop(); spider->idler = (uv_idle_t*)malloc(sizeof(uv_idle_t)); spider->lock = (uv_rwlock_t*)malloc(sizeof(uv_rwlock_t)); uv_rwlock_init(spider->lock); spider->save_lock = (uv_rwlock_t*)malloc(sizeof(uv_rwlock_t)); uv_rwlock_init(spider->save_lock); spider->idler->data = spider; spider->site = (site_t*)malloc(sizeof(site_t)); spider->site->user_agent = NULL; spider->site->proxy = NULL; spider->site->cookie = NULL; spider->site->timeout = 0; spider->log = NULL; spider->bloom = init_Bloom(); return spider; }
/* mruby binding: allocate a uv_rwlock_t, initialize it, and attach it to
 * `self` as wrapped data. On libuv failure the storage is released and
 * mrb_uv_check_error raises, so the assignments below never run. */
static mrb_value
mrb_uv_rwlock_init(mrb_state *mrb, mrb_value self)
{
  uv_rwlock_t *lock = (uv_rwlock_t*)mrb_malloc(mrb, sizeof(uv_rwlock_t));
  int rc = uv_rwlock_init(lock);

  if (rc < 0) {
    mrb_free(mrb, lock);
    mrb_uv_check_error(mrb, rc);
  }

  DATA_PTR(self) = lock;
  DATA_TYPE(self) = &mrb_uv_rwlock_type;
  return self;
}
/* tp_init for the Python RWLock type: initialize the wrapped libuv
 * read-write lock exactly once; raises ThreadError on failure. */
static int
RWLock_tp_init(RWLock *self, PyObject *args, PyObject *kwargs)
{
    UNUSED_ARG(args);
    UNUSED_ARG(kwargs);
    RAISE_IF_INITIALIZED(self, -1);

    int rc = uv_rwlock_init(&self->uv_rwlock);
    if (rc != 0) {
        PyErr_SetString(PyExc_ThreadError, "Error initializing RWLock");
        return -1;
    }

    self->initialized = True;
    return 0;
}
/* Start a worker: run every plugin's worker_init hook, set up the
 * worker's delta lock and session list, then spawn the worker thread.
 * Returns 0 (uv_thread_create success) or -1/error on failure. */
int worker_start(ls_worker_t* w) {
    LOG(" worker_start(%p)\n", w);
    for (size_t i = 0; i < master.num_plugins; ++i) {
        ls_plugin_t* plugin = master.plugins + i;
        /* BUG FIX: the condition was `(plugin->worker_init(w), 0)`; the
         * comma operator made it always false, so plugin init failures
         * were silently ignored and the error branch was unreachable. */
        if (plugin->worker_init(w) != 0) {
            LOGE(" %s.worker_init() error\n", plugin->plugin_name);
            return -1;
        }
    }
    /* BUG FIX: uv_rwlock_init() result was ignored. */
    if (uv_rwlock_init(&w->callmodel_delta_lock) != 0) {
        LOGE(" uv_rwlock_init() error\n");
        return -1;
    }
    w->sessions = new vector<ls_session_t*>(); // TODO
    // master_async is initialized in master
    return uv_thread_create(&(w->thread), worker_thread, (void*)w);
}
/* Demo: two readers and one writer share `shared_num` under `numlock`;
 * a 4-party barrier (3 workers + main) gates teardown. */
int main() {
    uv_thread_t threads[3];
    int thread_nums[] = {1, 2, 1};

    /* BUG FIX: init results were ignored; using an uninitialized barrier
     * or rwlock is undefined behavior. */
    if (uv_barrier_init(&blocker, 4) != 0)
        return 1;
    shared_num = 0;
    if (uv_rwlock_init(&numlock) != 0)
        return 1;

    uv_thread_create(&threads[0], reader, &thread_nums[0]);
    uv_thread_create(&threads[1], reader, &thread_nums[1]);
    uv_thread_create(&threads[2], writer, &thread_nums[2]);

    uv_barrier_wait(&blocker);

    /* BUG FIX: join the workers before tearing down the primitives — the
     * barrier only proves each thread reached it, not that it has finished
     * running, so destroying numlock immediately could race. */
    for (int i = 0; i < 3; i++)
        uv_thread_join(&threads[i]);

    uv_barrier_destroy(&blocker);
    uv_rwlock_destroy(&numlock);
    return 0;
}
// same as ./10-locks, but using try* functions when obtaining read and write locks // int main() { int r; const int count = 4; fprintf(stderr, "barrier: init\n"); uv_barrier_init(&blocker, count); shared_num = 0; // https://github.com/thlorenz/libuv-dox/blob/master/methods.md#rwlock fprintf(stderr, "rwlock: init\n"); r = uv_rwlock_init(&numlock); if (r) ERROR("rwlock_init", r); uv_thread_t threads[3]; int thread_nums[] = { 1, 2, 1 }; r = uv_thread_create(&threads[0], reader_entry, &thread_nums[0]); if (r) ERROR("thread_create", r); r = uv_thread_create(&threads[1], reader_entry, &thread_nums[1]); if (r) ERROR("thread_create", r); r = uv_thread_create(&threads[2], writer_entry, &thread_nums[2]); if (r) ERROR("thread_create", r); // https://github.com/thlorenz/libuv-dox/blob/master/methods.md#barrier fprintf(stderr, "barrier: wait\n"); uv_barrier_wait(&blocker); fprintf(stderr, "barrier: destroy\n"); uv_barrier_destroy(&blocker); fprintf(stderr, "rwlock: destroy\n"); uv_rwlock_destroy(&numlock); if (r) ERROR("rwlock_destroy", r); return 0; }
/* Initialize a libuv event loop in place. Returns 0 on success or a
 * libuv error code; on failure every resource acquired so far is
 * released via the goto unwind chain at the bottom (reverse order). */
int uv_loop_init(uv_loop_t* loop) {
  void* saved_data;
  int err;

  /* loop->data is a caller-owned slot; preserve it across the memset. */
  saved_data = loop->data;
  memset(loop, 0, sizeof(*loop));
  loop->data = saved_data;

  heap_init((struct heap*) &loop->timer_heap);
  QUEUE_INIT(&loop->wq);
  QUEUE_INIT(&loop->idle_handles);
  QUEUE_INIT(&loop->async_handles);
  QUEUE_INIT(&loop->check_handles);
  QUEUE_INIT(&loop->prepare_handles);
  QUEUE_INIT(&loop->handle_queue);

  loop->active_handles = 0;
  loop->active_reqs.count = 0;
  loop->nfds = 0;
  loop->watchers = NULL;
  loop->nwatchers = 0;
  QUEUE_INIT(&loop->pending_queue);
  QUEUE_INIT(&loop->watcher_queue);

  loop->closing_handles = NULL;
  uv__update_time(loop);
  /* -1 sentinels mark fds as "not yet opened". */
  loop->async_io_watcher.fd = -1;
  loop->async_wfd = -1;
  loop->signal_pipefd[0] = -1;
  loop->signal_pipefd[1] = -1;
  loop->backend_fd = -1;
  loop->emfile_fd = -1;
  loop->timer_counter = 0;
  loop->stop_flag = 0;

  err = uv__platform_loop_init(loop);
  if (err)
    return err;

  uv__signal_global_once_init();
  err = uv_signal_init(loop, &loop->child_watcher);
  if (err)
    goto fail_signal_init;

  /* Internal child watcher must not keep the loop alive. */
  uv__handle_unref(&loop->child_watcher);
  loop->child_watcher.flags |= UV__HANDLE_INTERNAL;
  QUEUE_INIT(&loop->process_handles);

  err = uv_rwlock_init(&loop->cloexec_lock);
  if (err)
    goto fail_rwlock_init;

  err = uv_mutex_init(&loop->wq_mutex);
  if (err)
    goto fail_mutex_init;

  err = uv_async_init(loop, &loop->wq_async, uv__work_done);
  if (err)
    goto fail_async_init;

  uv__handle_unref(&loop->wq_async);
  loop->wq_async.flags |= UV__HANDLE_INTERNAL;

  return 0;

/* Unwind in reverse acquisition order. */
fail_async_init:
  uv_mutex_destroy(&loop->wq_mutex);

fail_mutex_init:
  uv_rwlock_destroy(&loop->cloexec_lock);

fail_rwlock_init:
  uv__signal_loop_cleanup(loop);

fail_signal_init:
  uv__platform_loop_delete(loop);

  return err;
}
/* Test fixture: wrap a cluster handle and require that the rwlock
 * guarding the session collection initializes (libuv returns 0 on
 * success). */
SessionContainer(const CassCluster* cluster)
  : cluster(cluster) {
  BOOST_REQUIRE(uv_rwlock_init(&sessions_lock) == 0);
}
// Construct and initialize the underlying libuv read-write lock; throws
// std::runtime_error when libuv reports a non-zero (failure) status.
inline rwlock() {
  const int status = uv_rwlock_init(&mtx);
  if (status != 0) {
    throw std::runtime_error("failed to initialize read-write lock");
  }
}
/* Initialize the in-memory database dictionary, store the config, and
 * create the global rwlock. Returns 0 on success, -1 on failure.
 * BUG FIX: the function is declared int but had no return statement —
 * undefined behavior if the caller uses the value — and the
 * uv_rwlock_init() result was ignored. */
int database_init(Config_t * env) {
    db = dictCreate(&dictTypeMemDatabase, NULL);
    if (db == NULL)
        return -1;
    config = env;
    if (uv_rwlock_init(&numlock) != 0)
        return -1;
    return 0;
}