/* * Initialization. * This function may be called multiple times when we are in * SML's commandline. */ void smlgtk_runtime_init() { static int init = FALSE; LOG("RUNTIME: smlgtk_runtime_init"); if (*init) { /* call this only once or glib will choke */ g_thread_init(NULL); init = TRUE; } else { /* Clean up previous versions of these data package */ cond_destroy(&event_pending); cond_destroy(&threadOk); cond_destroy(&response); g_mutex_free(signal_pending); } signal_queue_clear(); cond_create(&event_pending); cond_create(&response); cond_create(&threadOk); signal_pending = g_mutex_new(); signal_handler_added = FALSE; memset(&smlgtk_event,sizeof(struct smlgtk_event),0); if (thread_is_running) CHECK(hostthread_cancel(runtime_thread)); thread_is_running = FALSE; }
/*
 * Tear down a FixedSizeQueue: release both condition variables and the
 * mutex, then free the queue structure itself.  The caller must ensure
 * no thread is still using the queue.
 */
void queue_destroy(FixedSizeQueue *queue)
{
    cond_destroy(queue->not_empty);
    cond_destroy(queue->not_full);
    mutex_destroy(queue->mutex);
    free(queue);
}
/*
 * Destroy the globally shared resources: the child list, both
 * condition variables, and the global mutex.
 */
void destroySharedResources()
{
    lst_destroy(g_lst_children);
    cond_destroy(&g_child_cv);
    cond_destroy(&g_monitoring_cv);
    mutex_destroy(&g_mutex);
}
/*
 * Clean up the state of the system: cancel the runtime thread and
 * release every synchronization object the runtime created.
 */
void smlgtk_runtime_cleanup()
{
    LOG("RUNTIME: smlgtk_cleanup");
    hostthread_cancel(runtime_thread);
    cond_destroy(&event_pending);
    cond_destroy(&threadOk);
    cond_destroy(&response);
    g_mutex_free(signal_pending);
}
/** @brief destroy a rwlock
 *
 * This function "deactivates" the lock pointed to by rwlock.
 * It is illegal for an application to use a readers/writers lock after it has
 * been destroyed (unless and until it is later re-initialized). It is illegal
 * for an application to invoke rwlock_destroy() on a lock while the lock is
 * held or while threads are waiting on it.
 *
 * @param rwlock: the readers/writers lock to destroy
 * @return none
 **/
void rwlock_destroy(rwlock_t *rwlock)
{
    /* Release the internal mutexes and condition variables. */
    mutex_destroy(&rwlock->rdcnt_mutex);
    mutex_destroy(&rwlock->wrcnt_mutex);
    cond_destroy(&rwlock->rd_cond);
    cond_destroy(&rwlock->wr_cond);
    cond_destroy(&rwlock->writer_cond);
    /* Poison the counters so a use-after-destroy is easier to spot. */
    rwlock->rd_cnt= INVALID_CNT;
    rwlock->wr_cnt= INVALID_CNT;
    return;
}
// Destroy an existing socket static void sock_close(int fd) { if (!fds[fd].inuse) { dbglog(DBG_ERROR, "sock_close: socket %d not in use!\n", fd); return; } // Destroy all the sync objects cond_destroy(fds[fd].connect); cond_destroy(fds[fd].recv_avail); cond_destroy(fds[fd].send_avail); mutex_destroy(fds[fd].mutex); fds[fd].inuse = 0; }
/*
 * Basic condition-variable round trip: lock the mutex, spawn a
 * signaler thread, block in cond_wait() and verify the wait succeeded
 * and actually slept (check_sleep compares against the TSC timestamp
 * taken just before the wait).
 */
void simple_condwait(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
	};
	thread_t cond_signaler_tid;

	fprintf(stderr, "%s\n", __FUNCTION__);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&cond_signaler_tid, 2, cond_signaler, &cm), 0);
	/* Give the signaler a head start before we block. */
	thread_msleep(11);

	start = rt_timer_tsc();
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), 0);
	check_sleep("cond_wait", start);
	thread_msleep(10);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("thread_join", thread_join(cond_signaler_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}
/*
 * Release a terminating thread's sml_control block: destroy its
 * thread-local heap, detach it from the runtime (leave + unregister),
 * clear the thread-local pointer, and free its sync objects and the
 * control itself.  Must run on the thread that owns `control`.
 */
static void control_destroy(struct sml_control *control)
{
	assert(tlv_get(current_control) == control);
#ifndef WITHOUT_MULTITHREAD
	/* To release the thread local heap exclusively, it must be
	 * occupied by the current thread. */
	assert(IS_ACTIVE(load_relaxed(&control->state)));
#endif /* !WITHOUT_MULTITHREAD */

	if (control->thread_local_heap) {
		sml_heap_mutator_destroy(control->thread_local_heap);
		control->thread_local_heap = NULL;
	}

	/* Pointers in the stack is safely ignored since the thread has
	 * been terminated. */
	control->frame_stack = NULL;

	/* Detach from the runtime before freeing; order matters here. */
	control_leave(control);
	control_unregister(control);
	tlv_set(current_control, NULL);
	mutex_destroy(&control->inactive_wait_lock);
	cond_destroy(&control->inactive_wait_cond);
	free(control);
}
/*
 * -----------------------------------------------------------------
 *			cfsd_fscache_destroy
 *
 * Description:
 *	Tears down an fscache object: closes its message file
 *	descriptor if open, destroys its mutex and condition variable,
 *	and frees the object itself.
 * Arguments:
 *	fscache_object_p	the fscache object to destroy (non-NULL)
 * Returns:
 *	Nothing.
 * Preconditions:
 *	precond(fscache_object_p)
 */
void
cfsd_fscache_destroy(cfsd_fscache_object_t *fscache_object_p)
{
	int xx;

	dbug_enter("cfsd_fscache_destroy");

	dbug_precond(fscache_object_p);
	/* dbug_assert(fscache_object_p->i_refcnt == 0); */

	/* close down the message file descriptor */
	if (fscache_object_p->i_ofd >= 0) {
		if (close(fscache_object_p->i_ofd))
			dbug_print(("error",
			    "cannot close fscache fd error %d", errno));
		fscache_object_p->i_ofd = -1;
	}

	/* destroy the locking mutex */
	xx = mutex_destroy(&fscache_object_p->i_lock);
	dbug_assert(xx == 0);

	/* destroy the conditional variable */
	xx = cond_destroy(&fscache_object_p->i_cvwait);
	dbug_assert(xx == 0);

	cfsd_free(fscache_object_p);

	dbug_leave("cfsd_fscache_destroy");
}
/*
 * Close an ILB handle.  Marks the handle as closing so new waiters are
 * refused, waits for all current waiters to drain, then frees the
 * socket, lock, condition variable and the handle itself.
 *
 * Returns ILB_STATUS_OK on success (including when another thread
 * already started the close), ILB_STATUS_EINVAL for an invalid handle,
 * or ILB_STATUS_INTERNAL on lock failures.
 */
ilb_status_t
ilb_close(ilb_handle_t h)
{
	ilb_handle_impl_t *hi = (ilb_handle_impl_t *)h;

	if (h == ILB_INVALID_HANDLE)
		return (ILB_STATUS_EINVAL);

	if (mutex_lock(&hi->h_lock) != 0)
		return (ILB_STATUS_INTERNAL);

	/* Somebody has done a close, no need to do anything. */
	if (hi->h_closing) {
		/*
		 * BUG FIX: the original returned here while still
		 * holding h_lock, deadlocking the closing thread.
		 */
		(void) mutex_unlock(&hi->h_lock);
		return (ILB_STATUS_OK);
	} else {
		hi->h_closing = B_TRUE;
		hi->h_error = ILB_STATUS_HANDLE_CLOSING;
	}

	/* Wait until there is nobody waiting. */
	while (hi->h_waiter > 0) {
		if (cond_wait(&hi->h_cv, &hi->h_lock) != 0) {
			(void) mutex_unlock(&hi->h_lock);
			return (ILB_STATUS_INTERNAL);
		}
	}

	/* No one is waiting, proceed to free the handle. */
	(void) close(hi->h_socket);
	(void) mutex_destroy(&hi->h_lock);
	(void) cond_destroy(&hi->h_cv);
	free(hi);

	return (ILB_STATUS_OK);
}
/*
 * Destroying a condition variable while a thread waits on it.  A
 * helper thread (cond_destroyer) attempts the destroy while we are
 * blocked; the expected outcome differs between skins: POSIX refuses
 * the destroy (our timed wait simply times out), native deletes the
 * cond and wakes us with -EIDRM.
 */
void cond_destroy_whilewait(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
		.tid = thread_self(),
	};
	thread_t cond_destroyer_tid;
	struct sigaction sa = {
		.sa_handler = sighandler,
		.sa_flags = SA_RESTART,
	};
	sigemptyset(&sa.sa_mask);

	fprintf(stderr, "%s\n", __FUNCTION__);

	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&cond_destroyer_tid, 2, cond_destroyer, &cm), 0);
	/* Let the destroyer reach its mutex_lock before we block. */
	thread_msleep(11);

	start = rt_timer_tsc();
#ifdef XENO_POSIX
	/* POSIX: destroy fails with the waiter present; we time out. */
	check("cond_wait", cond_wait(&cond, &mutex, 10 * NS_PER_MS),
	      -ETIMEDOUT);
	check_sleep("cond_wait", start);
	thread_msleep(10);
	check("mutex_unlock", mutex_unlock(&mutex), 0);
#else /* native */
	/* native: the deletion kicks the waiter out with -EIDRM. */
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), -EIDRM);
	check_sleep("cond_wait", start);
#endif /* native */

	check("thread_join", thread_join(cond_destroyer_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
#ifdef XENO_POSIX
	check("cond_destroy", cond_destroy(&cond), 0);
#else /* native */
	/* native: already deleted by the helper, so -ESRCH here. */
	check("cond_destroy", cond_destroy(&cond), -ESRCH);
#endif /* native */
}
/*
 * Free one console structure: release its condition variable and
 * mutex, then the structure itself.
 */
static void
free_cons(vntsd_cons_t *consp)
{
	assert(consp);

	(void) cond_destroy(&consp->cvp);
	(void) mutex_destroy(&consp->lock);
	free(consp);
}
/** @brief Destroy a read/write lock.
 *
 * Refuses (with an error message) to tear down a lock that still has
 * active readers or writers; otherwise releases the two counter
 * mutexes and the three condition variables.
 *
 * @param rwlock: the read/write lock data structure
 * @return void
 */
void rwlock_destroy (rwlock_t * rwlock)
{
  /* A lock that is still in use must not be destroyed. */
  if (rwlock->write_cnt > 0 || rwlock->read_cnt > 0)
    {
      lprintf ("ERROR: Cannot destroy the rwlock!");
      return;
    }

  mutex_destroy (&rwlock->rd_cnt_mtx);
  mutex_destroy (&rwlock->wr_cnt_mtx);
  cond_destroy (&rwlock->rd_cond);
  cond_destroy (&rwlock->wr_cond);
  cond_destroy (&rwlock->wait_rd_cond);
}
/*
 * Helper thread for cond_destroy_whilewait(): grab the mutex (we only
 * get it once the main thread has released it by blocking in
 * cond_wait), then try to destroy the condition variable while the
 * main thread is waiting on it.
 */
void *cond_destroyer(void *cookie)
{
	unsigned long long start;
	struct cond_mutex *cm = cookie;

	start = rt_timer_tsc();
	check("mutex_lock", mutex_lock(cm->mutex), 0);
	check_sleep("mutex_lock", start);
	thread_msleep(10);
#ifdef XENO_POSIX
	/* POSIX forbids destroying a cond with waiters: expect -EBUSY. */
	check("cond_destroy", cond_destroy(cm->cond), -EBUSY);
#else /* native */
	/* native allows deletion, waking waiters with -EIDRM. */
	check("cond_destroy", cond_destroy(cm->cond), 0);
#endif /* native */
	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
	return NULL;
}
/*
 * Destructor: release the mutex backing this Condition and, when built
 * with POSIX threads, destroy and free the underlying cond_t.
 * NOTE(review): assumes _condition was heap-allocated by the
 * constructor under the same POSIX_THREADS guard — confirm.
 */
Condition::~Condition(void)
{
    MutexDestroy(_mutex);
#if defined(POSIX_THREADS)
    cond_destroy((cond_t *)_condition);
    free(_condition);
#endif
}
/**
 * @brief Destroys an initialized thrgroup
 *
 * Destroys the mutex and condition variable of the group after
 * checking (under the lock) that both zombie queues are empty.
 *
 * @param eg An initialized thread group to be destroyed
 * @return 0 on success, nonzero otherwise
 *
 * @pre thrgrp_init_group(eg) has been called
 * @post eg is uninitialized (but still allocated)
 */
int thrgrp_destroy_group(thrgrp_group_t *eg){
  int rc;

  /* Under the lock, verify both zombie queues are drained. */
  mutex_lock(&(eg->lock));
  rc = (eg->zombie_in || eg->zombie_out) ? 1 : 0;
  mutex_unlock(&(eg->lock));

  rc |= mutex_destroy(&(eg->lock));
  rc |= cond_destroy(&(eg->cv));
  return rc;
}
/*
 * Destroy an unbounded buffer: free every node still chained from the
 * head, then release the buffer's mutexes and condition variable.
 * The buffer structure itself is not freed here.
 */
void unbounded_buffer_destroy(unbounded_buffer_t *b) {
    node_t *cur = b->head;

    /* Walk the list, freeing each node as we advance past it. */
    while (cur != NULL) {
        node_t *next = cur->next;
        mem_free(cur);
        cur = next;
    }

    mutex_destroy(&b->mutex);
    mutex_destroy(&b->head_mutex);
    mutex_destroy(&b->tail_mutex);
    cond_destroy(&b->fill);
}
/*
 * release(destroy) the IPC between the reader and writer
 *
 * Drops one reference on the command block; when the count reaches
 * zero, releases the shared buffers, the condition variable, the
 * mutex, and the block itself.
 * NOTE(review): tc_ref is decremented outside tc_mtx — assumes the
 * callers serialize release calls; confirm against the call sites.
 */
void
tlm_release_reader_writer_ipc(tlm_cmd_t *cmd)
{
	if (--cmd->tc_ref <= 0) {
		(void) mutex_lock(&cmd->tc_mtx);
		tlm_release_buffers(cmd->tc_buffers);
		(void) cond_destroy(&cmd->tc_cv);
		/* The mutex must be unlocked before it can be destroyed. */
		(void) mutex_unlock(&cmd->tc_mtx);
		(void) mutex_destroy(&cmd->tc_mtx);
		free(cmd);
	}
}
/*
 * Destroy a condition variable, mapping the underlying primitive's
 * result onto SYS_OK / SYS_ERR.
 */
int
condvarDestroy(condvar_t *condvar)
{
    int rc;

#ifdef __linux__
    rc = pthread_cond_destroy((cond_t *) &condvar->cond);
#else
    rc = cond_destroy((cond_t *) condvar);
#endif
    return rc == 0 ? SYS_OK : SYS_ERR;
}
/*
 * add a console
 *
 * Fetch the console's configuration from vcc, remove any stale entry
 * for it, allocate the console (creating its group if needed), and —
 * for a newly created group — spin up the listen thread.  Errors are
 * logged and the function returns without adding the console.
 */
static void
do_add_cons(vntsd_t *vntsdp, int cons_no)
{
	vcc_console_t console;
	vntsd_group_t *groupp;
	int rv;
	char err_msg[VNTSD_LINE_LEN];

	(void) snprintf(err_msg, sizeof (err_msg),
	    "do_add_cons():Can not add console=%d", cons_no);

	/* get console configuration from vcc */
	if ((rv = vntsd_vcc_ioctl(VCC_CONS_INFO, cons_no,
	    (void *)&console)) != VNTSD_SUCCESS) {
		vntsd_log(rv, err_msg);
		return;
	}

	/* clean up the console if console was deleted and added again */
	delete_cons_before_add(vntsdp, console.tcp_port, console.cons_no);

	/* initialize console */
	if ((rv = alloc_cons_with_group(vntsdp, &console, &groupp)) !=
	    VNTSD_SUCCESS) {
		/* no memory to add this new console */
		vntsd_log(rv, err_msg);
		return;
	}

	if (groupp != NULL) {
		/* new group */
		/* create listen thread for this console */
		if (create_listen_thread(groupp)) {
			vntsd_log(VNTSD_ERR_CREATE_LISTEN_THR, err_msg);
			/* Thread creation failed: dismantle the group. */
			(void) cond_destroy(&groupp->cvp);
			(void) mutex_destroy(&groupp->lock);
			free(groupp);
		}
	}
}
/*
 * Absolute-timeout variant: wait on a never-signaled condition with
 * cond_wait_until() and verify it returns -ETIMEDOUT after actually
 * sleeping (check_sleep validates against the pre-wait TSC stamp).
 */
void absolute_condwait(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;

	fprintf(stderr, "%s\n", __FUNCTION__);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	/* Second cond_init argument selects the absolute-time clock. */
	check("cond_init", cond_init(&cond, 1), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);

	start = rt_timer_tsc();
	check("cond_wait",
	      cond_wait_until(&cond, &mutex, timer_read() + 10 * NS_PER_MS),
	      -ETIMEDOUT);
	check_sleep("cond_wait", start);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}
// Shut down all worker threads, then destroy their locks and condition
// variables.  Thread 0 is the caller itself, so it is neither woken up
// nor waited for.
void ThreadsManager::exit()
{
  // Force the woken up threads to exit idle_loop() and hence terminate
  allThreadsShouldExit = true;

  for (int i = 0; i < MAX_THREADS; i++)
  {
      // Wake up all the threads and waits for termination
      if (i != 0)
      {
          threads[i].wake_up();
          // Busy-wait until the thread reports TERMINATED.
          while (threads[i].state != Thread::TERMINATED) {}
      }

      // Now we can safely destroy the locks and wait conditions
      lock_destroy(&(threads[i].sleepLock));
      cond_destroy(&(threads[i].sleepCond));

      for (int j = 0; j < MAX_ACTIVE_SPLIT_POINTS; j++)
          lock_destroy(&(threads[i].splitPoints[j].lock));
  }

  lock_destroy(&mpLock);
}
/*
 * Destroy a readers/writers lock: release its mutex and both
 * condition variables.  Returns 0 on success, otherwise the first
 * nonzero status reported by an underlying destroy call.
 *
 * BUG FIX: the original was declared int but fell off the end without
 * a return statement — undefined behavior whenever the caller used
 * the result.
 */
int rwlock_destroy(rwlock_t *rw)
{
	int rc = mutex_destroy(&(rw->lock));
	int rc_r = cond_destroy(&(rw->r_cond));
	int rc_w = cond_destroy(&(rw->w_cond));

	/* Report the first failure, but always destroy everything. */
	if (rc == 0)
		rc = rc_r;
	if (rc == 0)
		rc = rc_w;
	return rc;
}
/*
 * Destroy a semaphore: release its internal mutex and condition
 * variable.  Returns 0 on success, otherwise the first nonzero status
 * from an underlying destroy call.
 *
 * BUG FIX: the original was declared int but had no return statement
 * — undefined behavior whenever the caller used the result.
 */
int sema_destroy(sema_t *sem)
{
	int rc = mutex_destroy(&(sem->lock));
	int rc_cv = cond_destroy(&(sem->cond));

	return rc != 0 ? rc : rc_cv;
}
/*
 * Look up a topic by name within a domain participant, blocking until
 * either the topic is discovered or the given timeout elapses.
 * Returns the topic (with its local flag and reference count bumped)
 * or NULL on participant lookup failure / timeout / OOM.
 *
 * Two waiting strategies, selected at build time:
 *  - THREADS_USED: park on a per-name TopicWait_t condition variable
 *    (shared by all threads waiting for the same name); the last
 *    waiter out unlinks and frees the wait record.
 *  - otherwise: poll topic_lookup() in a DDS_schedule() loop until
 *    the tick deadline passes.
 */
DDS_Topic DDS_DomainParticipant_find_topic (DDS_DomainParticipant dp,
					    const char *topic_name,
					    DDS_Duration_t *timeout)
{
	Topic_t		*tp;
#ifdef THREADS_USED
	int		ret;
	struct timespec	ts;
	TopicWait_t	*wp, *xp, *prev_wp;
#else
	Ticks_t		d, now, end_time;	/* *10ms */
#endif

	/* Trace the call and its arguments. */
	ctrc_begind (DCPS_ID, DCPS_DP_F_TOP, &dp, sizeof (dp));
	ctrc_contd (topic_name, strlen (topic_name) + 1);
	ctrc_contd (timeout, sizeof (DDS_Duration_t));
	ctrc_endd ();

	/* Get Domain Participant. */
	if (!domain_ptr (dp, 1, NULL)) {
		log_printf (DCPS_ID, 0, "find_topic(): domain participant not found!\r\n");
		return (NULL);
	}

	/* Fast path: topic already known locally. */
	tp = topic_lookup (&dp->participant, topic_name);
	if (tp) {
		if (!lock_take (tp->lock)) {
			tp->entity.flags |= EF_LOCAL;
			tp->nlrefs++;
			lock_release (tp->lock);
		}
		lock_release (dp->lock);
		return ((DDS_Topic) tp);
	}
#ifdef THREADS_USED
	/* Join an existing wait record for this name, or create one. */
	for (wp = dp->topic_wait; wp; wp = wp->next)
		if (!strcmp (topic_name, wp->name))
			break;

	if (wp)
		wp->nthreads++;
	else {
		wp = mds_pool_alloc (&dcps_mem_blocks [MB_TOPIC_WAIT]);
		if (!wp) {
			lock_release (dp->lock);
			return (NULL);
		}
		wp->next = dp->topic_wait;
		cond_init (wp->condition);
		wp->name = topic_name;
		wp->topic = NULL;
		wp->nthreads = 1;
		dp->topic_wait = wp;
	}

	/* Block (timed or not) until discovery fills in wp->topic. */
	duration2timespec (timeout, &ts);
	do {
		if (ts.tv_sec || ts.tv_nsec)
			ret = cond_wait_to (wp->condition, dp->lock, ts);
		else
			ret = cond_wait (wp->condition, dp->lock);
	}
	while (!wp->topic && !ret);

	tp = wp->topic;

	/* Last waiter out unlinks and frees the wait record. */
	if (!--wp->nthreads) {
		for (xp = dp->topic_wait, prev_wp = NULL;
		     xp != NULL && xp != wp;
		     prev_wp = xp, xp = xp->next)
			;
		if (prev_wp)
			prev_wp->next = wp->next;
		else
			dp->topic_wait = wp->next;
		cond_destroy (wp->condition);
		mds_pool_free (&dcps_mem_blocks [MB_TOPIC_WAIT], wp);
	}
	lock_release (dp->lock);
#else
	if (dds_listener_state) {
		lock_release (dp->lock);
		return (NULL);
	}

	/* Wait until timeout elapsed for discovery to add the topic. */
	now = sys_getticks ();
	if (timeout->sec == DDS_DURATION_INFINITE_SEC ||
	    timeout->nanosec == DDS_DURATION_INFINITE_NSEC)
		end_time = now + 0x7ffffffe;
	else
		end_time = now + duration2ticks ((Duration_t *) timeout);
	for (;;) {
		d = end_time - now;
		/* Deadline passed (tick arithmetic wraps negative). */
		if (d >= 0x7fffffffUL)
			break;

		DDS_schedule (d * TMR_UNIT_MS);

		tp = topic_lookup (&dp->participant, topic_name);
		if (tp) {
			tp->entity.flags |= EF_LOCAL;
			tp->nlrefs++;
			break;
		}
		now = sys_getticks ();
	}
#endif
	return (tp);
}
/*
 * clean up a group
 *
 * Marks the group for cleanup (reentry-safe), tears down its console
 * queue, notifies and drains clients that have no console, then
 * coordinates with the group's listen thread: if we *are* the listen
 * thread, free the group directly; otherwise signal the listen thread
 * with SIGUSR1, wait for it to exit, join it, and free the group.
 */
void
vntsd_clean_group(vntsd_group_t *groupp)
{

	timestruc_t	to;

	D1(stderr, "t@%d clean_group() group=%s tcp=%lld\n", thr_self(),
	    groupp->group_name, groupp->tcp_port);

	(void) mutex_lock(&groupp->lock);

	/* prevent from reentry */
	if (groupp->status & VNTSD_GROUP_CLEANUP) {
		(void) mutex_unlock(&groupp->lock);
		return;
	}
	groupp->status |= VNTSD_GROUP_CLEANUP;

	vntsd_free_que(&groupp->conspq, (clean_func_t)cleanup_cons);

	(void) mutex_unlock(&groupp->lock);

	/* walk through no cons client queue */
	while (groupp->no_cons_clientpq != NULL) {
		groupp->status |= VNTSD_GROUP_SIG_WAIT;
		(void) vntsd_que_walk(groupp->no_cons_clientpq,
		    (el_func_t)vntsd_notify_client_cons_del);
		to.tv_sec = VNTSD_CV_WAIT_DELTIME;
		to.tv_nsec = 0;
		/* wait for clients to disconnect (bounded wait) */
		(void) cond_reltimedwait(&groupp->cvp, &groupp->lock, &to);
	}

	if (groupp->listen_tid == thr_self()) {
		/* listen thread is exiting */
		(void) mutex_lock(&(groupp->vntsd->lock));
		(void) vntsd_que_rm(&groupp->vntsd->grouppq, groupp);
		(void) mutex_unlock(&groupp->vntsd->lock);

		(void) cond_destroy(&groupp->cvp);
		(void) mutex_unlock(&groupp->lock);
		(void) mutex_destroy(&groupp->lock);
		free(groupp);
		return;
	}

	/* signal listen thread to exit */
	groupp->status |= VNTSD_GROUP_SIG_WAIT;

	while (groupp->status & VNTSD_GROUP_SIG_WAIT) {
		(void) thr_kill(groupp->listen_tid, SIGUSR1);
		to.tv_sec = VNTSD_CV_WAIT_DELTIME;
		to.tv_nsec = 0;
		/* wait listen thread to exit */
		(void) cond_reltimedwait(&groupp->cvp, &groupp->lock, &to);
	}

	(void) mutex_unlock(&groupp->lock);

	/* wait for the listen thread to be fully gone before freeing */
	(void) thr_join(groupp->listen_tid, NULL, NULL);

	/* free group */
	(void) cond_destroy(&groupp->cvp);
	(void) mutex_destroy(&groupp->lock);
	free(groupp);
}
/*
 * Initialize a console; if the console is associated with a new
 * group, initialize the group as well.
 *
 * On success returns VNTSD_SUCCESS, with *new_groupp set to the newly
 * created group (or left NULL when an existing group was reused).
 * On failure returns an error code and leaves no half-built group
 * queued or allocated.
 */
static int
alloc_cons_with_group(vntsd_t *vntsdp, vcc_console_t *consp,
    vntsd_group_t **new_groupp)
{
	vntsd_group_t	*groupp = NULL;
	int		rv;

	*new_groupp = NULL;

	/* match group by tcp port */
	(void) mutex_lock(&vntsdp->lock);
	groupp = vntsd_que_find(vntsdp->grouppq,
	    (compare_func_t)grp_by_tcp, (void *)&(consp->tcp_port));
	(void) mutex_unlock(&vntsdp->lock);

	if (groupp != NULL) {
		/* group with same tcp port found */
		if (strcmp(groupp->group_name, consp->group_name)) {
			/* conflict group name */
			vntsd_log(VNTSD_ERR_VCC_GRP_NAME,
			    "group name is different from existing group");
			return (VNTSD_ERR_VCC_CTRL_DATA);
		}
	} else {
		/* new group */
		groupp = alloc_group(vntsdp, consp->group_name,
		    consp->tcp_port);
		if (groupp == NULL) {
			return (VNTSD_ERR_NO_MEM);
		}

		assert(groupp->conspq == NULL);

		/* queue group to vntsdp */
		(void) mutex_lock(&vntsdp->lock);
		rv = vntsd_que_append(&vntsdp->grouppq, groupp);
		(void) mutex_unlock(&vntsdp->lock);

		if (rv != VNTSD_SUCCESS) {
			/*
			 * BUG FIX: the original leaked the freshly
			 * allocated group on append failure.
			 */
			(void) cond_destroy(&groupp->cvp);
			(void) mutex_destroy(&groupp->lock);
			free(groupp);
			return (rv);
		}

		*new_groupp = groupp;
	}

	/* initialize console */
	if (alloc_cons(groupp, consp) == NULL) {
		/* no memory */
		/*
		 * BUG FIX: the original tested `new_groupp != NULL`,
		 * which is always true (it was dereferenced above), so
		 * an *existing*, in-use group would be destroyed and
		 * freed here.  Only dismantle a group we created.
		 */
		if (*new_groupp != NULL) {
			/* clean up new group */
			(void) cond_destroy(&groupp->cvp);
			(void) mutex_destroy(&groupp->lock);
			free(groupp);
			*new_groupp = NULL;
		}
		return (VNTSD_ERR_NO_MEM);
	}

	return (VNTSD_SUCCESS);
}
/*
 * Signal delivery during cond_wait, handler installed WITHOUT
 * SA_RESTART: the double_killer helper sends SIGRTMIN twice; both
 * deliveries must be observed (sig_seen == 2) and the wait must still
 * complete successfully after having actually slept.
 */
void sig_norestart_double(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
		.tid = thread_self(),
	};
	thread_t double_killer_tid;
	struct sigaction sa = {
		.sa_handler = sighandler,
		.sa_flags = 0,
	};
	sigemptyset(&sa.sa_mask);

	fprintf(stderr, "%s\n", __FUNCTION__);

	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&double_killer_tid, 2, double_killer, &cm), 0);
	/* Let the killer thread get into position before blocking. */
	thread_msleep(11);

	sig_seen = 0;
	start = rt_timer_tsc();
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), 0);
	check_sleep("cond_wait", start);
	check("sig_seen", sig_seen, 2);
	thread_msleep(10);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("thread_join", thread_join(double_killer_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}

/*
 * Same double-signal scenario, but the handler is installed WITH
 * SA_RESTART: the wait must transparently resume and still succeed,
 * with both signal deliveries observed.
 */
void sig_restart_double(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
		.tid = thread_self(),
	};
	thread_t double_killer_tid;
	struct sigaction sa = {
		.sa_handler = sighandler,
		.sa_flags = SA_RESTART,
	};
	sigemptyset(&sa.sa_mask);

	fprintf(stderr, "%s\n", __FUNCTION__);

	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&double_killer_tid, 2, double_killer, &cm), 0);
	thread_msleep(11);

	sig_seen = 0;
	start = rt_timer_tsc();
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), 0);
	check_sleep("cond_wait", start);
	check("sig_seen", sig_seen, 2);
	thread_msleep(10);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("thread_join", thread_join(double_killer_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}
/*
 * Signal delivered while blocked in cond_wait, handler WITHOUT
 * SA_RESTART.  POSIX skin: the wait completes normally; native skin:
 * the wait is broken with -EINTR.  Exactly one signal must be seen.
 */
void sig_norestart_condwait(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
		.tid = thread_self(),
	};
	thread_t cond_killer_tid;
	struct sigaction sa = {
		.sa_handler = sighandler,
		.sa_flags = 0,
	};
	sigemptyset(&sa.sa_mask);

	fprintf(stderr, "%s\n", __FUNCTION__);

	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&cond_killer_tid, 2, cond_killer, &cm), 0);
	thread_msleep(11);

	start = rt_timer_tsc();
	sig_seen = 0;
#ifdef XENO_POSIX
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), 0);
#else /* native */
	{
		int err = cond_wait(&cond, &mutex, XN_INFINITE);
		/* Treat a plain 0 as the missing -EINTR so the check fails
		 * loudly if the signal did not interrupt the wait. */
		if (err == 0)
			err = -EINTR;
		check("cond_wait", err, -EINTR);
	}
#endif /* native */
	check_sleep("cond_wait", start);
	check("sig_seen", sig_seen, 1);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("thread_join", thread_join(cond_killer_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}

/*
 * Restart variant of the same scenario.
 * NOTE(review): sa_flags is 0 here, not SA_RESTART, despite the
 * function's name — possibly a copy-paste slip from the norestart
 * variant; confirm against upstream.
 */
void sig_restart_condwait(void)
{
	unsigned long long start;
	mutex_t mutex;
	cond_t cond;
	struct cond_mutex cm = {
		.mutex = &mutex,
		.cond = &cond,
		.tid = thread_self(),
	};
	thread_t cond_killer_tid;
	struct sigaction sa = {
		.sa_handler = sighandler,
		.sa_flags = 0,
	};
	sigemptyset(&sa.sa_mask);

	fprintf(stderr, "%s\n", __FUNCTION__);

	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);

	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT, 0), 0);
	check("cond_init", cond_init(&cond, 0), 0);
	check("mutex_lock", mutex_lock(&mutex), 0);
	check("thread_spawn",
	      thread_spawn(&cond_killer_tid, 2, cond_killer, &cm), 0);
	thread_msleep(11);

	start = rt_timer_tsc();
	sig_seen = 0;
#ifdef XENO_POSIX
	check("cond_wait", cond_wait(&cond, &mutex, XN_INFINITE), 0);
#else /* native */
	{
		int err = cond_wait(&cond, &mutex, XN_INFINITE);
		if (err == 0)
			err = -EINTR;
		check("cond_wait", err, -EINTR);
	}
#endif /* native */
	check_sleep("cond_wait", start);
	check("sig_seen", sig_seen, 1);
	check("mutex_unlock", mutex_unlock(&mutex), 0);

	check("thread_join", thread_join(cond_killer_tid), 0);
	check("mutex_destroy", mutex_destroy(&mutex), 0);
	check("cond_destroy", cond_destroy(&cond), 0);
}

/*
 * Helper thread: take the mutex (available once the main thread
 * blocks in cond_wait), signal the condition, then after a delay send
 * SIGRTMIN to the main thread and release the mutex.
 */
void *mutex_killer(void *cookie)
{
	unsigned long long start;
	struct cond_mutex *cm = cookie;

	start = rt_timer_tsc();
	check("mutex_lock", mutex_lock(cm->mutex), 0);
	check_sleep("mutex_lock", start);
	check("cond_signal", cond_signal(cm->cond), 0);
	thread_msleep(10);
	check("thread_kill", thread_kill(cm->tid, SIGRTMIN), 0);
	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
	return NULL;
}
/*
 * libldap thread abstraction: destroy a condition variable by
 * forwarding to the underlying cond_destroy() and returning its
 * status unchanged.
 */
int
ldap_pvt_thread_cond_destroy( ldap_pvt_thread_cond_t *cv )
{
	return cond_destroy( cv );
}