/* Acquire a read-write lock RWLOCK in mode OP (PTH_RWLOCK_RW for an
   exclusive writer, anything else for a shared reader).  TRYONLY and
   EV_EXTRA are forwarded to pth_mutex_acquire, so the call can be a
   non-blocking try and/or be interrupted by an extra event.
   Returns FALSE (with pth_error/errno set) on failure. */
int pth_rwlock_acquire(pth_rwlock_t *rwlock, int op, int tryonly, pth_event_t ev_extra)
{
    /* consistency checks */
    if (rwlock == NULL)
        return pth_error(FALSE, EINVAL);
    if (!(rwlock->rw_state & PTH_RWLOCK_INITIALIZED))
        return pth_error(FALSE, EDEADLK);

    /* acquire lock */
    if (op == PTH_RWLOCK_RW) {
        /* read-write lock is simple: a writer just takes the rw mutex */
        if (!pth_mutex_acquire(&(rwlock->rw_mutex_rw), tryonly, ev_extra))
            return FALSE;
        rwlock->rw_mode = PTH_RWLOCK_RW;
    }
    else {
        /* read-only lock is more complicated to get right: the reader
           count is protected by rw_mutex_rd, and the FIRST reader also
           grabs rw_mutex_rw so writers stay blocked while any reader
           holds the lock */
        if (!pth_mutex_acquire(&(rwlock->rw_mutex_rd), tryonly, ev_extra))
            return FALSE;
        rwlock->rw_readers++;
        if (rwlock->rw_readers == 1) {
            if (!pth_mutex_acquire(&(rwlock->rw_mutex_rw), tryonly, ev_extra)) {
                /* roll back the count and drop rd under pth_shield so the
                   cleanup cannot itself be interrupted */
                rwlock->rw_readers--;
                pth_shield { pth_mutex_release(&(rwlock->rw_mutex_rd)); }
                return FALSE;
            }
        }
        rwlock->rw_mode = PTH_RWLOCK_RD;
        pth_mutex_release(&(rwlock->rw_mutex_rd));
    }
    /* NOTE(review): this chunk appears truncated here -- the trailing
       "return TRUE;" and the function's closing brace are not visible
       in this view; confirm against the full source. */
int main() { pth_t thread; int rc; int i, j, s; arg = -1; res = 0; pth_init(); thread = pth_spawn(PTH_ATTR_DEFAULT, thread_routine, NULL); for(i = 0; i < 100; i++) { s = 0; for(j = 0; j < 10000; j++) { pth_mutex_acquire(&mutex, FALSE, NULL); res = -1; arg = j; pth_mutex_release(&mutex); pth_cond_notify(&c1, FALSE); pth_mutex_acquire(&mutex, FALSE, NULL); if(res < 0) pth_cond_await(&c2, &mutex, NULL); s += res; arg = -1; pth_mutex_release(&mutex); } } printf("%d\n", s); return 0; }
/* Initialize the mutex *PRIV.  If JUST_CHECK is true, only do this if
   it is not already initialized.  Returns 0 on success or an errno
   value (e.g. ENOMEM) on failure. */
static int mutex_pth_init (ath_mutex_t *priv, int just_check)
{
  int err = 0;

  /* Serialize concurrent lazy-initialization attempts on the same
     mutex behind the global check_init_lock. */
  if (just_check)
    pth_mutex_acquire (&check_init_lock, 0, NULL);
  if (!*priv || !just_check)
    {
      pth_mutex_t *lock = malloc (sizeof (pth_mutex_t));
      if (!lock)
        err = ENOMEM;
      if (!err)
        {
          /* pth_mutex_init returns TRUE on success and FALSE on error,
             so map FALSE to the errno it left behind. */
          err = pth_mutex_init (lock);
          if (err == FALSE)
            err = errno;
          else
            err = 0;

          if (err)
            free (lock);
          else
            *priv = (ath_mutex_t) lock;
        }
    }
  if (just_check)
    pth_mutex_release (&check_init_lock);
  return err;
}
/* Unlock a mutex.  A return value of 0 indicates success; the
   implementation is selected at compile time from the supported
   threading backends. */
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	/* GNU Pth returns TRUE on success, FALSE on error. */
	if (pth_mutex_release(mutexp)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_unlock(mutexp);
#elif defined(NSAPI)
	crit_exit(mutexp);
	return 0;
#elif defined(PI3WEB)
	return PISync_unlock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_unlock(mutexp);
#elif defined(BETHREADS)
	/* BeOS benaphore: only hit the kernel semaphore when contended. */
	if (atomic_add(&mutexp->ben, -1) != 1)
		return release_sem(mutexp->sem);
	return 0;
#endif
}
/* Check whether the pinentry is currently active.  Returns 0 if the
   entry lock could be acquired (i.e. no pinentry is running).  If
   WAITSECONDS is greater than zero the check blocks for up to that
   many seconds and yields GPG_ERR_TIMEOUT on expiry; otherwise a
   non-blocking try is made and GPG_ERR_LOCKED returned when busy. */
int
pinentry_active_p (ctrl_t ctrl, int waitseconds)
{
  int err = 0;

  (void)ctrl;

  if (waitseconds > 0)
    {
      /* Blocking probe, bounded by a timer event. */
      pth_event_t timer;

      timer = pth_event (PTH_EVENT_TIME, pth_timeout (waitseconds, 0));
      if (!pth_mutex_acquire (&entry_lock, 0, timer))
        err = pth_event_occurred (timer)
          ? gpg_error (GPG_ERR_TIMEOUT)
          : gpg_error (GPG_ERR_INTERNAL);
      pth_event_free (timer, PTH_FREE_THIS);
      if (err)
        return err;
    }
  else
    {
      /* Non-blocking probe (try-acquire only). */
      if (!pth_mutex_acquire (&entry_lock, 1, NULL))
        return gpg_error (GPG_ERR_LOCKED);
    }

  /* We only wanted to probe the lock - give it back right away. */
  if (!pth_mutex_release (&entry_lock))
    log_error ("failed to release the entry lock at %d\n", __LINE__);

  return 0;
}
/* Unlock *LOCK, lazily initializing it first if needed.
   Returns 0 on success or an errno value on failure. */
int ath_mutex_unlock (ath_mutex_t *lock)
{
  int err;

  err = mutex_pth_init (lock, 1);
  if (err)
    return err;

  /* pth_mutex_release reports failure as FALSE with errno set. */
  if (pth_mutex_release ((pth_mutex_t *) *lock) == FALSE)
    return errno;
  return 0;
}
/* Philosopher k puts down both forks: mark them thinking and let
   each neighbor re-check whether it can start eating. */
static void putdown(unsigned int k)
{
    unsigned int right = (k + 1) % PHILNUM;
    unsigned int left = (k - 1 + PHILNUM) % PHILNUM;

    pth_mutex_acquire(&(tab->mutex), FALSE, NULL);
    tab->status[k] = thinking;
    printstate();
    /* give each neighbor a chance to pick up the freed forks */
    test(right);
    test(left);
    pth_mutex_release(&(tab->mutex));
}
/* Unlock the mutex.  Returns 0 on success, -1 (with an SDL error
   set) when a NULL mutex is passed. */
int SDL_mutexV(SDL_mutex *mutex)
{
	if (mutex == NULL) {
		SDL_SetError("Passed a NULL mutex");
		return -1;
	}

	pth_mutex_release(&mutex->mutexpth_p);
	return 0;
}
/* Philosopher k tries to pick up both forks: mark it hungry and, if
   it cannot eat right now, block on its condition variable until a
   neighbor's putdown() grants the forks via test(). */
static void pickup(unsigned int k)
{
    pth_mutex_acquire(&(tab->mutex), FALSE, NULL);

    tab->status[k] = hungry;
    printstate();

    /* test() returns non-zero when k may eat; otherwise wait to be
       signalled by a neighbor releasing its forks */
    if (!test(k))
        pth_cond_await(&tab->condition[k], &(tab->mutex), NULL);
    printstate();

    pth_mutex_release(&(tab->mutex));
}
/* Worker half of the ping-pong benchmark: wait (on c1) for main to
   publish a value in `arg`, echo it back through `res`, and signal
   c2.  Never returns. */
void *thread_routine(void *dummy)
{
    for (;;) {
        pth_mutex_acquire(&mutex, FALSE, NULL);
        /* arg < 0 means no fresh value yet - wait for the producer */
        if (arg < 0)
            pth_cond_await(&c1, &mutex, NULL);
        res = arg;
        arg = -1;
        pth_mutex_release(&mutex);
        pth_cond_notify(&c2, FALSE);
    }
}
/* Unlock the pinentry so that another thread can start one, then
   disconnect the pinentry.  The disconnect happens after the unlock
   so that a stalled pinentry does not block other threads.  Fixme:
   We should have a timeout in Assuan for the disconnect operation.
   Returns RC unchanged, or GPG_ERR_INTERNAL if RC was 0 and the
   unlock itself failed. */
static int
unlock_pinentry (int rc)
{
  assuan_context_t ctx;

  /* Detach the context first so nobody else can reuse it. */
  ctx = entry_ctx;
  entry_ctx = NULL;

  if (!pth_mutex_release (&entry_lock))
    {
      log_error ("failed to release the entry lock\n");
      if (!rc)
        rc = gpg_error (GPG_ERR_INTERNAL);
    }

  assuan_release (ctx);
  return rc;
}
/* Release all mutexes held by THREAD (used during thread teardown).
   A no-op when THREAD is NULL. */
intern void pth_mutex_releaseall(pth_t thread)
{
    pth_ringnode_t *rn, *rnf;

    if (thread == NULL)
        return;
    /* iterate over all mutexes of thread */
    rn = rnf = pth_ring_first(&(thread->mutexring));
    while (rn != NULL) {
        /* NOTE(review): pth_mutex_release presumably removes this node
           from mutexring, yet pth_ring_next is then called on the
           just-released node, and rnf keeps pointing at the first
           (now released) node as the loop sentinel.  This assumes the
           node's ring links stay usable after removal -- confirm
           against the pth_ring implementation. */
        pth_mutex_release((pth_mutex_t *)rn);
        rn = pth_ring_next(&(thread->mutexring), rn);
        if (rn == rnf)
            break;
    }
    return;
}
/* Return all cached group addresses updated since position START,
   waiting up to TIMEOUT seconds (or until STOP fires) for at least
   one update to appear.  END receives the current write position so
   the caller can resume from there.
   NOTE(review): `updates` looks like a 0x100-entry ring buffer with
   `pos` as the write cursor -- confirm against the class definition. */
Array < eibaddr_t > GroupCache::LastUpdates (uint16_t start, uint8_t Timeout, uint16_t & end, pth_event_t stop)
{
  Array < eibaddr_t > a;
  pth_event_t timeout = pth_event (PTH_EVENT_RTIME, pth_time (Timeout, 0));
  do
    {
      /* Clamp START so it is no more than 0x100 entries behind POS;
         older entries have already been overwritten in the ring.
         The two branches handle the 16-bit wrap-around of POS. */
      if (pos < 0x100)
        {
          if (pos < start && start < ((pos - 0x100) & 0xffff))
            start = (pos - 0x100) & 0xffff;
        }
      else
        {
          if (start < ((pos - 0x100) & 0xffff) || start > pos)
            start = (pos - 0x100) & 0xffff;
        }
      TRACEPRINTF (t, 8, this, "LastUpdates start: %d pos: %d", start, pos);
      /* Skip over empty ring slots. */
      while (start != pos && !updates[start & 0xff])
        start++;
      if (start != pos)
        {
          /* At least one update present: collect everything up to POS. */
          while (start != pos)
            {
              if (updates[start & 0xff])
                a.add (updates[start & 0xff]);
              start++;
            }
          end = pos;
          pth_event_free (timeout, PTH_FREE_THIS);
          return a;
        }
      /* Nothing yet -- give up once the timeout has fired. */
      if (pth_event_status (timeout) == PTH_STATUS_OCCURRED)
        {
          end = pos;
          pth_event_free (timeout, PTH_FREE_THIS);
          return a;
        }
      /* Otherwise sleep on the cache's condition variable until a new
         update arrives, the timeout expires, or STOP fires; STOP is
         concatenated onto the timeout event for the wait and the
         timeout is isolated again afterwards. */
      pth_event_concat (timeout, stop, NULL);
      pth_mutex_acquire (&mutex, 0, 0);
      pth_cond_await (&cond, &mutex, timeout);
      pth_mutex_release (&mutex);
      pth_event_isolate (timeout);
    }
  while (1);
}
/* Unlock a mutex.  A return value of 0 indicates success; the
   implementation is selected at compile time from the supported
   threading backends. */
TSRM_API int tsrm_mutex_unlock(MUTEX_T mutexp)
{/*{{{*/
	TSRM_ERROR((TSRM_ERROR_LEVEL_INFO, "Mutex unlocked thread: %ld", tsrm_thread_id()));
#ifdef TSRM_WIN32
	LeaveCriticalSection(mutexp);
	return 0;
#elif defined(GNUPTH)
	/* GNU Pth returns TRUE on success, FALSE on error. */
	if (pth_mutex_release(mutexp)) {
		return 0;
	}
	return -1;
#elif defined(PTHREADS)
	return pthread_mutex_unlock(mutexp);
#elif defined(TSRM_ST)
	return st_mutex_unlock(mutexp);
#endif
}/*}}}*/
/* Read the cached value for group address ADDR.  A cached entry older
   than AGE seconds (AGE != 0) is treated as stale.  On a miss with
   TIMEOUT != 0, a group-value read request is sent on the bus and the
   call waits up to TIMEOUT seconds for a response to land in the
   cache.  Entries with src == 0 signal "no data" to the caller. */
GroupCacheEntry GroupCache::Read (eibaddr_t addr, unsigned Timeout, uint16_t age)
{
  TRACEPRINTF (t, 4, this, "GroupCacheRead %s %d %d", FormatGroupAddr (addr)(), Timeout, age);
  bool rm = false;
  GroupCacheEntry *c;

  /* Cache disabled: return an empty sentinel entry. */
  if (!enable)
    {
      GroupCacheEntry f;
      f.src = 0;
      f.dst = 0;
      TRACEPRINTF (t, 4, this, "GroupCache not enabled");
      return f;
    }

  /* Fresh cache hit: return it directly. */
  c = find (addr);
  if (c && age && c->recvtime + age < time (0))
    rm = true;
  if (c && !rm)
    {
      TRACEPRINTF (t, 4, this, "GroupCache found: %s", FormatEIBAddr (c->src)());
      return *c;
    }

  /* Miss and the caller does not want to wait: return "no entry". */
  if (!Timeout)
    {
      GroupCacheEntry f;
      f.src = 0;
      f.dst = addr;
      TRACEPRINTF (t, 4, this, "GroupCache no entry");
      return f;
    }

  /* Build and send an A_GroupValue_Read request for ADDR on the bus. */
  A_GroupValue_Read_PDU apdu;
  T_DATA_XXX_REQ_PDU tpdu;
  L_Data_PDU *l;
  pth_event_t timeout = pth_event (PTH_EVENT_RTIME, pth_time (Timeout, 0));
  tpdu.data = apdu.ToPacket ();
  l = new L_Data_PDU (FakeL2);
  l->data = tpdu.ToPacket ();
  l->source = 0;
  l->dest = addr;
  l->AddrType = GroupAddress;
  layer3->send_L_Data (l);

  /* Poll the cache until a fresh entry arrives or the timeout fires. */
  do
    {
      c = find (addr);
      rm = false;
      if (c && age && c->recvtime + age < time (0))
        rm = true;
      if (c && !rm)
        {
          TRACEPRINTF (t, 4, this, "GroupCache found: %s", FormatEIBAddr (c->src)());
          pth_event_free (timeout, PTH_FREE_THIS);
          return *c;
        }
      /* Timed out while only a stale entry exists: report "no data"
         without caching anything new. */
      if (pth_event_status (timeout) == PTH_STATUS_OCCURRED && c)
        {
          GroupCacheEntry gc;
          gc.src = 0;
          gc.dst = addr;
          TRACEPRINTF (t, 4, this, "GroupCache reread timeout");
          pth_event_free (timeout, PTH_FREE_THIS);
          return gc;
        }
      /* Timed out with no entry at all: cache a negative entry so the
         next read fails fast. */
      if (pth_event_status (timeout) == PTH_STATUS_OCCURRED)
        {
          c = new GroupCacheEntry;
          c->src = 0;
          c->dst = addr;
          c->recvtime = time (0);
          add (c);
          TRACEPRINTF (t, 4, this, "GroupCache timeout");
          pth_event_free (timeout, PTH_FREE_THIS);
          return *c;
        }
      /* Sleep until the cache is updated or the timeout event fires. */
      pth_mutex_acquire (&mutex, 0, 0);
      pth_cond_await (&cond, &mutex, timeout);
      pth_mutex_release (&mutex);
    }
  while (1);
}
/* Release the global log mutex.  Returns the pth status code
   (TRUE on success, FALSE on error). */
static int mutex_exit( void )
{
    int status;

    status = pth_mutex_release( &mutex_log );
    return status;
}
/* Release MUTEX.  Returns the pth status code (TRUE on success,
   FALSE on error). */
static int mutex_exit( pth_mutex_t *mutex )
{
    int status;

    status = pth_mutex_release( mutex );
    return status;
}
// Release the underlying pth mutex and clear the bookkeeping flag.
// NOTE(review): isLocked is cleared after the release; with pth's
// cooperative scheduling no other thread should run in between, but
// confirm this ordering if the class is ever used preemptively.
void FrSimThreadCondVar::unlock()
{
  pth_mutex_release(&mutex);
  isLocked = false;
}
/* Drop the trusttable lock.  A failed release is unrecoverable and
   aborts the process. */
static void
unlock_trusttable (void)
{
  int ok;

  ok = pth_mutex_release (&trusttable_lock);
  if (!ok)
    log_fatal ("failed to release mutex in %s\n", __FILE__);
}
/* Unlock MUTEX.  Returns 0 on success; on failure pth has set errno,
   which is returned to the caller. */
int
ldap_pvt_thread_mutex_unlock( ldap_pvt_thread_mutex_t *mutex )
{
	if ( pth_mutex_release( mutex ) )
		return 0;
	return errno;
}
int main(int argc, char *argv[]) { if (argc < 5) { return 1; } auto processNum = 4; auto threadNum = atoi(argv[4]); auto dtime = atol(argv[3]) * 1000; auto isPth = std::string(argv[2]) == "pth"; auto taskNum = atoi(argv[1]); long cycles = 0; switch(taskNum) { case 1: task = task1; break; case 2: task = task2; break; case 3: task = task3; break; } time_start(); for (auto i = 0; i < processNum; ++i) { if (fork() != 0) { continue; } if (isPth) { pth_init(); pth_attr_t attr = pth_attr_new(); pth_attr_set(attr, PTH_ATTR_NAME, "task"); pth_attr_set(attr, PTH_ATTR_STACK_SIZE, 64*1024); pth_attr_set(attr, PTH_ATTR_JOINABLE, true); pth_mutex_t mutex; pth_mutex_init(&mutex); pth_cond_init(&pthCond); while (time_stop() < dtime) { for (auto i = workingNum; i < threadNum; ++i) { ++workingNum; pth_spawn(attr, task, &isPth); } int rc; if ((rc = pth_mutex_acquire(&mutex, FALSE, NULL)) != 0) { std::cout << "pthread_mutex_lock " << rc << " " << strerror(rc) << std::endl; return 3; } if (workingNum == threadNum) { if ((rc = pth_cond_await(&pthCond, &mutex, NULL)) != 0) { std::cout << "pthread_cond_wait " << rc << " " << strerror(rc) << std::endl; return 3; } } if ((rc = pth_mutex_release(&mutex)) != 0) { std::cout << "pthread_mutex_unlock " << rc << " " << strerror(rc) << std::endl; return 3; } cycles += threadNum - workingNum; } } else { pthread_attr_t attr; pthread_attr_setstacksize(&attr, 64*1024); pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; pthread_mutexattr_t mutexattr; pthread_mutex_init(&mutex, &mutexattr); pthread_condattr_t condattr; pthread_cond_init(&cond, &condattr); pthread_t pid; while (time_stop() < dtime) { for (auto i = workingNum; i < threadNum; ++i) { ++workingNum; if (pthread_create(&pid, NULL, task, &isPth) == -1) { return 2; } } int rc; if ((rc = pthread_mutex_lock(&mutex)) != 0) { std::cout << "pthread_mutex_lock " << rc << " " << strerror(rc) << std::endl; return 3; } if (workingNum == threadNum) { if ((rc = pthread_cond_wait(&cond, &mutex)) != 0) { 
std::cout << "pthread_cond_wait " << rc << " " << strerror(rc) << std::endl; return 3; } } if ((rc = pthread_mutex_unlock(&mutex)) != 0) { std::cout << "pthread_mutex_unlock " << rc << " " << strerror(rc) << std::endl; return 3; } //pthread_join(pids.front(), NULL); //pids.pop_front(); cycles += threadNum - workingNum; } } std::cout << cycles << std::endl; return 0; } for (auto i = 0; i < processNum; ++i) { wait(NULL); } return 0; }