static err_t peerclosed_tcp(int s, struct tcp_pcb *pcb) {
    sockfd_t *fd;

    // Get the socket struct and lock
    if (sock_verify(s) < 0) {
        if (tcp_close(pcb) != ERR_OK)
            tcp_abort(pcb);
    } else {
        fd = fds + s;

        // Get access.
        mutex_lock(fd->mutex);

        // Make sure no more reads/writes go through.
        fd->recv = fd->send = -1;

        // Wake up anyone who was waiting
        cond_broadcast(fd->recv_avail);
        cond_broadcast(fd->send_avail);

        mutex_unlock(fd->mutex);
        genwait_wake_all(&select_wait);
    }

    return ERR_OK;
}
int rw_unlock(rwlock_t *rw) {
    mutex_lock(&(rw->lock));

    /* readers == -1 marks a writer holding the lock */
    if (rw->readers == -1)
        rw->readers = 0;
    else
        rw->readers--;

    cond_broadcast(&(rw->w_cond));
    cond_broadcast(&(rw->r_cond));

    mutex_unlock(&(rw->lock));

    return 0;
}
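For context, a minimal sketch of the acquire paths this unlock implies, under the same convention (readers == -1 marks a writer, otherwise readers counts active readers). The function names rw_rdlock/rw_wrlock and the two-argument cond_wait form are assumptions matching the style of the surrounding snippets, not the original implementation.

/* Hypothetical companion acquire paths for the rwlock_t above. */
int rw_rdlock(rwlock_t *rw) {
    mutex_lock(&(rw->lock));
    while (rw->readers == -1)          /* wait out any active writer */
        cond_wait(&(rw->r_cond), &(rw->lock));
    rw->readers++;
    mutex_unlock(&(rw->lock));
    return 0;
}

int rw_wrlock(rwlock_t *rw) {
    mutex_lock(&(rw->lock));
    while (rw->readers != 0)           /* wait until no readers and no writer */
        cond_wait(&(rw->w_cond), &(rw->lock));
    rw->readers = -1;                  /* mark the lock as writer-owned */
    mutex_unlock(&(rw->lock));
    return 0;
}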
static void umem_lockup(void)
{
    umem_cache_t *cp;

    (void) mutex_lock(&umem_init_lock);
    /*
     * If another thread is busy initializing the library, we must
     * wait for it to complete (by calling umem_init()) before allowing
     * the fork() to proceed.
     */
    if (umem_ready == UMEM_READY_INITING && umem_init_thr != thr_self()) {
        (void) mutex_unlock(&umem_init_lock);
        (void) umem_init();
        (void) mutex_lock(&umem_init_lock);
    }

    (void) mutex_lock(&umem_cache_lock);
    (void) mutex_lock(&umem_update_lock);
    (void) mutex_lock(&umem_flags_lock);

    umem_lockup_cache(&umem_null_cache);
    for (cp = umem_null_cache.cache_prev; cp != &umem_null_cache;
        cp = cp->cache_prev)
        umem_lockup_cache(cp);

    umem_lockup_log_header(umem_transaction_log);
    umem_lockup_log_header(umem_content_log);
    umem_lockup_log_header(umem_failure_log);
    umem_lockup_log_header(umem_slab_log);

    (void) cond_broadcast(&umem_update_cv);

    vmem_sbrk_lockup();
    vmem_lockup();
}
void Thread::barrier( void )
{
    if ( mutex_lock( &barrier_mtx ) != 0 ) {
        printf( "Thread::barrier: failed to lock barrier_mtx\n" );
    }

    if (g_verbose)
        printf("Thread blocking %d\n", m_id);

    barrier_count++;

    if ( barrier_count < Thread::thread_count ) {
        // wait for other threads to arrive; the last arrival resets
        // barrier_count to 0, which also terminates this wait loop
        while (barrier_count < Thread::thread_count && barrier_count != 0) {
            if (g_verbose)
                printf("Thread waiting %d\n", m_id);
            cond_wait( &barrier_cv, &barrier_mtx );
        }
    } else {
        barrier_count = 0;
        if (g_verbose)
            printf("Thread broadcasting %d\n", m_id);
        cond_broadcast( &barrier_cv );
    }

    if ( mutex_unlock( &barrier_mtx ) != 0 ) {
        printf( "Thread::barrier: failed to unlock barrier_mtx\n" );
    }

    if (g_verbose)
        printf("Thread restarting %d\n", m_id);
}
static void __agent_schedule_abs(struct ice_agent *ag, const struct timeval *tv) {
    struct timeval nxt;
    long long diff;

    if (!ag) {
        ilog(LOG_ERR, "ice ag is NULL");
        return;
    }

    nxt = *tv;

    mutex_lock(&ice_agents_timers_lock);
    if (ag->last_run.tv_sec) {
        /* make sure we don't run more often than we should */
        diff = timeval_diff(&nxt, &ag->last_run);
        if (diff < TIMER_RUN_INTERVAL * 1000)
            timeval_add_usec(&nxt, TIMER_RUN_INTERVAL * 1000 - diff);
    }

    if (ag->next_check.tv_sec && timeval_cmp(&ag->next_check, &nxt) <= 0)
        goto nope; /* already scheduled sooner */

    if (!g_tree_remove(ice_agents_timers, ag))
        obj_hold(ag); /* if it wasn't removed, we make a new reference */
    ag->next_check = nxt;
    g_tree_insert(ice_agents_timers, ag, ag);
    cond_broadcast(&ice_agents_timers_cond);

nope:
    mutex_unlock(&ice_agents_timers_lock);
}
/** @brief Function to unlock a read-write lock
 *
 *  @param rwlock A read-write lock data structure
 *  @return void
 */
void rwlock_unlock (rwlock_t * rwlock)
{
    /* Unlock a reader lock */
    if (rwlock->read_cnt > 0) {
        mutex_lock (&rwlock->rd_cnt_mtx);
        rwlock->read_cnt--;
        /* if no readers remain, signal the waiting writer */
        if (rwlock->read_cnt == 0) {
            cond_signal (&rwlock->wait_rd_cond);
        }
        mutex_unlock (&rwlock->rd_cnt_mtx);
    }
    /* Unlock a writer lock */
    else {
        mutex_lock (&rwlock->rd_cnt_mtx);
        mutex_lock (&rwlock->wr_cnt_mtx);
        rwlock->write_cnt = 0;
        /* Signal the waiting writer */
        cond_signal (&rwlock->wr_cond);
        /* Broadcast to the waiting readers */
        cond_broadcast (&rwlock->rd_cond);
        mutex_unlock (&rwlock->wr_cnt_mtx);
        mutex_unlock (&rwlock->rd_cnt_mtx);
    }

    return;
}
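To make the signal/broadcast pairing above concrete, here is a minimal sketch of the read-acquire side it implies. The field names follow the unlock above, but the function body is an assumption, not the original lock code; the while-loop re-check after every wakeup is what makes cond_broadcast on rd_cond safe.

/* Hypothetical matching read-acquire path: block while a writer holds
 * the lock, then bump the reader count. */
void rwlock_rdlock (rwlock_t * rwlock)
{
    mutex_lock (&rwlock->rd_cnt_mtx);
    while (rwlock->write_cnt > 0) {
        cond_wait (&rwlock->rd_cond, &rwlock->rd_cnt_mtx);
    }
    rwlock->read_cnt++;
    mutex_unlock (&rwlock->rd_cnt_mtx);
}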
int sema_post(sema_t *sem) {
    mutex_lock(&(sem->lock));
    sem->count++;
    cond_broadcast(&(sem->cond));
    mutex_unlock(&(sem->lock));
    return 0;
}
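A sketch of the matching wait path, assuming the two-argument cond_wait(cond, mutex) form seen in the barrier snippet above; the name sema_wait is an assumption. Broadcasting in sema_post is correct because every woken waiter re-tests the count in the while loop before consuming a unit.

/* Hypothetical matching wait path for the semaphore above. */
int sema_wait(sema_t *sem) {
    mutex_lock(&(sem->lock));
    while (sem->count <= 0)
        cond_wait(&(sem->cond), &(sem->lock));
    sem->count--;
    mutex_unlock(&(sem->lock));
    return 0;
}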
static void object_fetch_iter(struct object_fetch_env *env, Transaction *trans) {
    struct Rsread *res;
    int x;

    assert(trans->in != NULL && trans->in->id == RSREAD);
    res = &trans->in->msg.rsread;

    /* wait for any previous iteration to finish its write */
    while (env->wait != NULL)
        cond_wait(env->wait);
    env->wait = cond_new();

    /* drop the global lock across the blocking file I/O */
    unlock();

    x = lseek(env->file->fd, trans->out->msg.tsread.offset, SEEK_SET);
    assert(x >= 0);
    x = write(env->file->fd, res->data, res->count);
    assert(res->count == (u32) x);

    lock();

    raw_delete(trans->in->raw);
    trans->in->raw = NULL;

    /* wake anyone queued behind us and clear the gate */
    cond_broadcast(env->wait);
    env->wait = NULL;
}
/** @brief Unlock a rwlock
 *
 *  This function indicates that the calling thread is done using the
 *  locked state, in whichever mode it was granted access. Whether a
 *  call to this function results in a thread being awakened depends on
 *  the situation and the policy you chose to implement. It is illegal
 *  for an application to unlock a readers/writers lock that is not
 *  locked.
 *
 *  @param rwlock The read-write lock to unlock
 *  @return none
 **/
void rwlock_unlock(rwlock_t *rwlock)
{
    /* unlock a reader lock */
    if (rwlock->rd_cnt > 0) {
        mutex_lock(&rwlock->rdcnt_mutex);
        rwlock->rd_cnt--;
        /* no more readers, give a writer the chance to step in */
        if (rwlock->rd_cnt == 0)
            cond_signal(&rwlock->writer_cond);
        mutex_unlock(&rwlock->rdcnt_mutex);
    /* unlock a writer lock */
    } else {
        mutex_lock(&rwlock->rdcnt_mutex);
        mutex_lock(&rwlock->wrcnt_mutex);
        rwlock->wr_cnt = 0;
        /* Signal the writer that is waiting */
        cond_signal(&rwlock->wr_cond);
        /* Broadcast to the readers that are waiting */
        cond_broadcast(&rwlock->rd_cond);
        mutex_unlock(&rwlock->wrcnt_mutex);
        mutex_unlock(&rwlock->rdcnt_mutex);
    }
}
u64 object_reserve_oid(Worker *worker) {
    assert(storage_server_count > 0);

    /* is someone else in the process of requesting new oids? */
    while (object_reserve_wait != NULL)
        cond_wait(object_reserve_wait);

    /* do we need to request a fresh batch of oids? */
    if (object_reserve_remaining == 0) {
        /* the first storage server is considered the master */
        Transaction *trans = trans_new(storage_servers[0], NULL, message_new());
        struct Rsreserve *res;
        pthread_cond_t *wait;

        trans->out->tag = ALLOCTAG;
        trans->out->id = TSRESERVE;

        wait = object_reserve_wait = cond_new();
        send_request(trans);
        object_reserve_wait = NULL;
        cond_broadcast(wait);

        res = &trans->in->msg.rsreserve;
        object_reserve_next = res->firstoid;
        object_reserve_remaining = res->count;
    }

    object_reserve_remaining--;
    return object_reserve_next++;
}
/*
 * Assumes: by the time tpool_destroy() is called no one will use this
 * thread pool in any way and no one will try to dispatch entries to it.
 * Calling tpool_destroy() from a job in the pool will cause deadlock.
 */
void tpool_destroy(tpool_t *tpool)
{
    tpool_active_t *activep;

    ASSERT(!tpool_member(tpool));
    ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

    sig_mutex_lock(&tpool->tp_mutex);
    pthread_cleanup_push(sig_mutex_unlock, &tpool->tp_mutex);

    /* mark the pool as being destroyed; wakeup idle workers */
    tpool->tp_flags |= TP_DESTROY;
    tpool->tp_flags &= ~TP_SUSPEND;
    (void) cond_broadcast(&tpool->tp_workcv);

    /* cancel all active workers */
    for (activep = tpool->tp_active; activep; activep = activep->tpa_next)
        (void) pthread_cancel(activep->tpa_tid);

    /* wait for all active workers to finish */
    while (tpool->tp_active != NULL) {
        tpool->tp_flags |= TP_WAIT;
        (void) sig_cond_wait(&tpool->tp_waitcv, &tpool->tp_mutex);
    }

    /* the last worker to terminate will wake us up */
    while (tpool->tp_current != 0)
        (void) sig_cond_wait(&tpool->tp_busycv, &tpool->tp_mutex);

    pthread_cleanup_pop(1); /* sig_mutex_unlock(&tpool->tp_mutex); */
    delete_pool(tpool);
}
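For orientation, a minimal usage sketch around tpool_destroy(): create a pool, dispatch work, then tear it down from a thread outside the pool, per the deadlock caveat above. The tpool_create/tpool_dispatch signatures and the <thread_pool.h> header follow the Solaris/illumos thread-pool API as I understand it; treat them as assumptions.

/* Minimal usage sketch (assumed Solaris/illumos tpool API). */
#include <thread_pool.h>

static void work(void *arg) {
    /* ... job body ... */
}

void run_jobs(void) {
    tpool_t *tp = tpool_create(1, 8, 60, NULL); /* min, max, linger, attr */
    for (int i = 0; i < 100; i++)
        (void) tpool_dispatch(tp, work, NULL);
    tpool_destroy(tp); /* cancels active jobs, waits for them, deletes pool */
}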
void cookie_cache_remove(struct cookie_cache *c, const str *s) {
    mutex_lock(&c->lock);
    g_hash_table_remove(c->current.cookies, s);
    g_hash_table_remove(c->old.cookies, s);
    cond_broadcast(&c->cond);
    mutex_unlock(&c->lock);
}
/*ARGSUSED*/
static void *smb_ddiscover_service(void *arg)
{
    char domain[SMB_PI_MAX_DOMAIN];
    char sought_dc[MAXHOSTNAMELEN];

    for (;;) {
        (void) mutex_lock(&smb_dclocator.sdl_mtx);

        while (!smb_dclocator.sdl_locate)
            (void) cond_wait(&smb_dclocator.sdl_cv,
                &smb_dclocator.sdl_mtx);

        (void) strlcpy(domain, smb_dclocator.sdl_domain,
            SMB_PI_MAX_DOMAIN);
        (void) strlcpy(sought_dc, smb_dclocator.sdl_dc, MAXHOSTNAMELEN);
        (void) mutex_unlock(&smb_dclocator.sdl_mtx);

        smb_ddiscover_main(domain, sought_dc);

        (void) mutex_lock(&smb_dclocator.sdl_mtx);
        smb_dclocator.sdl_locate = B_FALSE;
        (void) cond_broadcast(&smb_dclocator.sdl_cv);
        (void) mutex_unlock(&smb_dclocator.sdl_mtx);
    }

    /*NOTREACHED*/
    return (NULL);
}
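The broadcast at the bottom of the service loop pairs with a requester that sets sdl_locate and sleeps until the lookup completes. A hedged sketch of that client side follows; the function name and body are assumptions, only the sdl_* fields come from the snippet above.

/* Hypothetical requester side: hand a domain to the locator thread and
 * sleep until it clears sdl_locate and broadcasts sdl_cv. */
static void smb_ddiscover_request(const char *domain)
{
    (void) mutex_lock(&smb_dclocator.sdl_mtx);

    if (!smb_dclocator.sdl_locate) {
        smb_dclocator.sdl_locate = B_TRUE;
        (void) strlcpy(smb_dclocator.sdl_domain, domain,
            SMB_PI_MAX_DOMAIN);
        (void) cond_broadcast(&smb_dclocator.sdl_cv);
    }

    while (smb_dclocator.sdl_locate)
        (void) cond_wait(&smb_dclocator.sdl_cv,
            &smb_dclocator.sdl_mtx);

    (void) mutex_unlock(&smb_dclocator.sdl_mtx);
}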
void cookie_cache_insert(struct cookie_cache *c, const str *s, const str *r) {
    mutex_lock(&c->lock);
    g_hash_table_replace(c->current.cookies,
        str_chunk_insert(c->current.chunks, s),
        str_chunk_insert(c->current.chunks, r));
    g_hash_table_remove(c->old.cookies, s);
    cond_broadcast(&c->cond);
    mutex_unlock(&c->lock);
}
static void notify_waiters(tpool_t *tpool)
{
    if (tpool->tp_head == NULL && tpool->tp_active == NULL) {
        tpool->tp_flags &= ~TP_WAIT;
        (void) cond_broadcast(&tpool->tp_waitcv);
    }
}
void sml_run_the_world()
{
    mutex_lock(&stop_the_world_lock);
    store_relaxed(&stop_the_world_flag, 0);
    cond_broadcast(&stop_the_world_cond);
    mutex_unlock(&stop_the_world_lock);
}
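A sketch of the waiting side this broadcast releases: a thread paused for stop-the-world sleeps on the condition until the flag drops back to 0. The name sml_pause_for_the_world and the load_relaxed counterpart to store_relaxed are assumptions; only the lock, flag, and condition variable come from the snippet above.

/* Hypothetical waiter released by sml_run_the_world(): sleep while the
 * stop-the-world flag is set, re-checking it under the lock. */
void sml_pause_for_the_world()
{
    mutex_lock(&stop_the_world_lock);
    while (load_relaxed(&stop_the_world_flag))
        cond_wait(&stop_the_world_cond, &stop_the_world_lock);
    mutex_unlock(&stop_the_world_lock);
}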
int condvarBroadcast(condvar_t *condvar)
{
    int err;

    err = cond_broadcast((cond_t *) condvar);
    condvar->counter++;
    return (err == 0 ? SYS_OK : SYS_ERR);
}
/*
 * Abort impending domain logon requests.
 */
void smb_logon_abort(void)
{
    (void) mutex_lock(&netlogon_mutex);
    if (netlogon_busy && !netlogon_abort)
        syslog(LOG_DEBUG, "logon abort");
    netlogon_abort = B_TRUE;
    (void) cond_broadcast(&netlogon_cv);
    (void) mutex_unlock(&netlogon_mutex);
}
/*
 * smb_lucache_unlock
 *
 * Unlock the cache
 */
static void smb_lucache_unlock(void)
{
    (void) rw_unlock(&smb_uch.uc_cache_lck);

    (void) mutex_lock(&smb_uch.uc_mtx);
    smb_uch.uc_refcnt--;
    (void) cond_broadcast(&smb_uch.uc_cv);
    (void) mutex_unlock(&smb_uch.uc_mtx);
}
void Condition::setFalse(void)
{
    MutexLock lock_scope(_mutex);

    _state = 0;
#if defined(POSIX_THREADS)
    cond_broadcast((cond_t *)_condition); // Wake all sleepers.
#endif
}
/*
 * Unlock the cache
 */
static void smb_cache_unlock(smb_cache_t *chandle)
{
    (void) mutex_lock(&chandle->ch_mtx);
    assert(chandle->ch_nops > 0);
    chandle->ch_nops--;
    (void) cond_broadcast(&chandle->ch_cv);
    (void) mutex_unlock(&chandle->ch_mtx);

    (void) rw_unlock(&chandle->ch_cache_lck);
}
int Condition::operator+=(int new_state)
{
    MutexLock lock_scope(_mutex);

    _state += new_state;
#if defined(POSIX_THREADS)
    cond_broadcast((cond_t *)_condition); // Wake all sleepers.
#endif
    return(_state);
}
static void lxt_server_loop(void)
{
    lxt_req_t *lxt_req;
    lxt_server_arg_t *request;
    size_t request_size;
    char *door_result;
    size_t door_result_size;

    for (;;) {
        /* Wait for a request from a doors server thread. */
        (void) mutex_lock(&lxt_req_lock);
        while (lxt_req_ptr == NULL)
            (void) cond_wait(&lxt_req_cv, &lxt_req_lock);

        /* We got a request, get a local pointer to it. */
        lxt_req = lxt_req_ptr;
        lxt_req_ptr = NULL;
        (void) cond_broadcast(&lxt_req_cv);
        (void) mutex_unlock(&lxt_req_lock);

        /* Get a pointer to the request. */
        request = lxt_req->lxtr_request;
        request_size = lxt_req->lxtr_request_size;

        lx_debug("lxt_server_loop: Linux thread request received, "
            "request = %p", request);

        /* Dispatch the request. */
        assert((request->lxt_sa_op > LXT_SERVER_OP_PING) &&
            (request->lxt_sa_op < LXT_SERVER_OP_MAX));
        lxt_operations[request->lxt_sa_op].lxto_fp(
            request, request_size, &door_result, &door_result_size);

        lx_debug("lxt_server_loop: Linux thread request completed, "
            "request = %p", request);

        (void) mutex_lock(&lxt_req_lock);

        /* Set the result pointers for the calling door thread. */
        lxt_req->lxtr_result = door_result;
        lxt_req->lxtr_result_size = door_result_size;

        /* Let the door thread know we're done. */
        lxt_req->lxtr_complete = 1;
        (void) cond_signal(&lxt_req->lxtr_complete_cv);
        (void) mutex_unlock(&lxt_req_lock);
    }
    /*NOTREACHED*/
}
static void producer(void *v_t)
{
    struct test *t = v_t;

    for (;;) {
        int *item = xalloc(sizeof *item);
        *item = t->producer_count;

        if (!queue_push(&t->queue, item, &t->producer)) {
            t->producer_blocked = TRUE;
            cond_broadcast(&t->cond);
            free(item);
            return;
        }

        t->producer_count++;

        /* Stop at 200 items */
        if (t->producer_count == 200) {
            cond_broadcast(&t->cond);
            tasklet_stop(&t->producer);
            return;
        }
    }
}
/*
 * This function should be called while holding the sphere mutex
 */
void demux_chunk_end(demux_t *dm, struct mutex *mutex, chunk_t *chunk)
{
    demux_ent_t *ent;
    int ret;
    demux_chunk_t *dchunk = (demux_chunk_t *) chunk;

    ent = dm->entries + chunk->processor_id;

    // clear the chunk we just processed so that the next chunk can run on
    // this processor
    ret = kfifo_out(&ent->fifo, dchunk, sizeof(demux_chunk_t));
    BUG_ON(ret != sizeof(demux_chunk_t));

    inc_current_ticket(dm, dchunk->chunk.thread_id);

    cond_broadcast(&dm->next_chunk_cond);
    cond_signal(&ent->fifo_full_cond);
}
void timerthread_obj_schedule_abs_nl(struct timerthread_obj *tt_obj,
        const struct timeval *tv)
{
    if (!tt_obj)
        return;

    ilog(LOG_DEBUG, "scheduling timer object at %llu.%06lu",
            (unsigned long long) tv->tv_sec,
            (unsigned long) tv->tv_usec);

    struct timerthread *tt = tt_obj->tt;

    if (tt_obj->next_check.tv_sec && timeval_cmp(&tt_obj->next_check, tv) <= 0)
        return; /* already scheduled sooner */

    if (!g_tree_remove(tt->tree, tt_obj))
        obj_hold(tt_obj); /* if it wasn't removed, we make a new reference */
    tt_obj->next_check = *tv;
    g_tree_insert(tt->tree, tt_obj, tt_obj);
    cond_broadcast(&tt->cond);
}
int __nisdb_wulock(__nisdb_rwlock_t *rw)
{
    int ret;
    pthread_t myself = pthread_self();

    if (rw == 0) {
#ifdef NISDB_MT_DEBUG
        /* This shouldn't happen */
        abort();
#endif /* NISDB_MT_DEBUG */
        return (EFAULT);
    }

    if (rw->destroyed != 0)
        return (ESHUTDOWN);

    if ((ret = mutex_lock(&rw->mutex)) != 0)
        return (ret);

    if (rw->destroyed != 0) {
        (void) mutex_unlock(&rw->mutex);
        return (ESHUTDOWN);
    }

    /* Sanity check */
    if (rw->writer_count == 0 ||
        rw->writer.id != myself || rw->writer.count == 0) {
#ifdef NISDB_MT_DEBUG
        abort();
#endif /* NISDB_MT_DEBUG */
        (void) mutex_unlock(&rw->mutex);
        return (ENOLCK);
    }

    rw->writer.count--;
    if (rw->writer.count == 0) {
        rw->writer.id = INV_PTHREAD_ID;
        rw->writer_count = 0;
        if ((ret = cond_broadcast(&rw->cv)) != 0) {
            (void) mutex_unlock(&rw->mutex);
            return (ret);
        }
    }

    return (mutex_unlock(&rw->mutex));
}
/*
 * Worker thread is terminating.
 */
static void worker_cleanup(tpool_t *tpool)
{
    ASSERT(MUTEX_HELD(&tpool->tp_mutex));

    if (--tpool->tp_current == 0 &&
        (tpool->tp_flags & (TP_DESTROY | TP_ABANDON))) {
        if (tpool->tp_flags & TP_ABANDON) {
            sig_mutex_unlock(&tpool->tp_mutex);
            delete_pool(tpool);
            return;
        }
        if (tpool->tp_flags & TP_DESTROY)
            (void) cond_broadcast(&tpool->tp_busycv);
    }
    sig_mutex_unlock(&tpool->tp_mutex);
}
static void consumer(void *v_t)
{
    struct test *t = v_t;

    for (;;) {
        int *item = queue_shift(&t->queue, &t->consumer);
        if (!item) {
            t->consumer_blocked = TRUE;
            cond_broadcast(&t->cond);
            return;
        }

        assert(*item == t->consumer_count);
        free(item);
        t->consumer_count++;
    }
}
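Producer and consumer both broadcast t->cond when they block or finish, which implies a driver thread sleeping on that condition. A hedged sketch of such a checker follows; the t->mutex field and the check_test name are hypothetical, only cond, the *_blocked flags, and the counters appear in the snippets above.

/* Hypothetical test driver: wait on t->cond until one side has blocked
 * or finished, then verify the counts are consistent. */
static void check_test(struct test *t)
{
    mutex_lock(&t->mutex);
    while (!t->producer_blocked && !t->consumer_blocked)
        cond_wait(&t->cond, &t->mutex);
    mutex_unlock(&t->mutex);

    /* the consumer can never have consumed more than was produced */
    assert(t->consumer_count <= t->producer_count);
}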
/*
 * Like tpool_destroy(), but don't cancel workers or wait for them to finish.
 * The last worker to terminate will delete the pool.
 */
void tpool_abandon(tpool_t *tpool)
{
    ASSERT(!(tpool->tp_flags & (TP_DESTROY | TP_ABANDON)));

    sig_mutex_lock(&tpool->tp_mutex);
    if (tpool->tp_current == 0) {
        /* no workers, just delete the pool */
        sig_mutex_unlock(&tpool->tp_mutex);
        delete_pool(tpool);
    } else {
        /* wake up all workers, last one will delete the pool */
        tpool->tp_flags |= TP_ABANDON;
        tpool->tp_flags &= ~TP_SUSPEND;
        (void) cond_broadcast(&tpool->tp_workcv);
        sig_mutex_unlock(&tpool->tp_mutex);
    }
}