int
_pthread_kill(pthread_t pthread, int sig)
{
        struct pthread *curthread;
        int ret;

        /* Check for invalid signal numbers: */
        if (sig < 0 || sig > _SIG_MAXSIG)
                /* Invalid signal: */
                return (EINVAL);

        curthread = _get_curthread();

        /*
         * Ensure the thread is in the list of active threads, and the
         * signal is valid (signal 0 specifies error checking only) and
         * not being ignored:
         */
        if (curthread == pthread) {
                if (sig > 0)
                        _thr_send_sig(pthread, sig);
                ret = 0;
        } else if ((ret = _thr_find_thread(curthread, pthread,
            /*include dead*/0)) == 0) {
                if (sig > 0)
                        _thr_send_sig(pthread, sig);
                THR_THREAD_UNLOCK(curthread, pthread);
        }

        /* Return the completion status: */
        return (ret);
}
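For context, a minimal caller-side sketch of how this path is reached through the public pthread_kill(3) interface; the worker/sigwait arrangement below is an illustrative assumption, not libthr code. Signal 0 exercises only the validation path above (nothing is delivered), after which a real SIGUSR1 is sent.

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static void *
worker(void *arg)
{
        sigset_t set;
        int sig;

        (void)arg;
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        /* Collect SIGUSR1 synchronously; no async handler needed. */
        sigwait(&set, &sig);
        printf("worker: got signal %d\n", sig);
        return (NULL);
}

int
main(void)
{
        pthread_t tid;
        sigset_t set;

        /* Block SIGUSR1 before creating the worker so it inherits the mask. */
        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        pthread_sigmask(SIG_BLOCK, &set, NULL);

        pthread_create(&tid, NULL, worker, NULL);

        /* Signal 0 validates the target thread; nothing is delivered. */
        if (pthread_kill(tid, 0) == 0)
                pthread_kill(tid, SIGUSR1);

        pthread_join(tid, NULL);
        return (0);
}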
int
_pthread_cancel(pthread_t pthread)
{
        struct pthread *curthread = tls_get_curthread();
        int oldval, newval = 0;
        int oldtype;
        int ret;

        /*
         * POSIX says _pthread_cancel should be async cancellation safe,
         * so we temporarily disable async cancellation.
         */
        _pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
        if ((ret = _thr_ref_add(curthread, pthread, 0)) != 0) {
                _pthread_setcanceltype(oldtype, NULL);
                return (ret);
        }

        do {
                oldval = pthread->cancelflags;
                if (oldval & THR_CANCEL_NEEDED)
                        break;
                newval = oldval | THR_CANCEL_NEEDED;
        } while (!atomic_cmpset_acq_int(&pthread->cancelflags, oldval,
            newval));

        if (!(oldval & THR_CANCEL_NEEDED) && SHOULD_ASYNC_CANCEL(newval))
                _thr_send_sig(pthread, SIGCANCEL);

        _thr_ref_delete(curthread, pthread);
        _pthread_setcanceltype(oldtype, NULL);
        return (0);
}
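The do/while above is a classic set-a-flag-exactly-once CAS loop: only the caller that performs the clear-to-set transition goes on to deliver SIGCANCEL. A standalone sketch of the same pattern in portable C11 atomics, using illustrative names (CANCEL_NEEDED, set_cancel_needed) that are not part of libthr:

#include <stdatomic.h>
#include <stdbool.h>

#define CANCEL_NEEDED   0x1     /* Illustrative stand-in for THR_CANCEL_NEEDED. */

/*
 * Set the flag exactly once; return true only for the caller that
 * performed the clear -> set transition, mirroring the
 * !(oldval & THR_CANCEL_NEEDED) test that gates SIGCANCEL delivery.
 */
static bool
set_cancel_needed(atomic_uint *flags)
{
        unsigned oldval = atomic_load(flags);

        do {
                if (oldval & CANCEL_NEEDED)
                        return (false); /* Another caller already set it. */
        } while (!atomic_compare_exchange_weak_explicit(flags, &oldval,
            oldval | CANCEL_NEEDED, memory_order_acquire,
            memory_order_relaxed));
        return (true);
}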
int
_pthread_cancel(pthread_t pthread)
{
        struct pthread *curthread = _get_curthread();
        int ret;

        /*
         * POSIX says _pthread_cancel should be async cancellation safe.
         * _thr_find_thread and THR_THREAD_UNLOCK enter and leave the
         * critical region automatically.
         */
        if ((ret = _thr_find_thread(curthread, pthread, 0)) == 0) {
                if (!pthread->cancel_pending) {
                        pthread->cancel_pending = 1;
                        if (pthread->state != PS_DEAD)
                                _thr_send_sig(pthread, SIGCANCEL);
                }
                THR_THREAD_UNLOCK(curthread, pthread);
        }
        return (ret);
}
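From the application's point of view, the cancel_pending flag and SIGCANCEL delivery above surface as ordinary deferred cancellation. A hedged usage sketch (worker and cleanup are hypothetical names): the target thread acts on the request only at a cancellation point such as sleep(3), and pushed cleanup handlers run before it exits with PTHREAD_CANCELED.

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void
cleanup(void *arg)
{
        /* Runs when the cancellation request is acted upon. */
        puts((const char *)arg);
}

static void *
worker(void *arg)
{
        (void)arg;
        pthread_cleanup_push(cleanup, "worker: cancelled");
        for (;;)
                sleep(1);       /* sleep(3) is a cancellation point. */
        pthread_cleanup_pop(0); /* Not reached; balances the push. */
        return (NULL);
}

int
main(void)
{
        pthread_t tid;
        void *res;

        pthread_create(&tid, NULL, worker, NULL);
        sleep(1);

        /* Mark the request pending; the worker notices at a cancellation point. */
        pthread_cancel(tid);
        pthread_join(tid, &res);
        printf("joined: %s\n",
            res == PTHREAD_CANCELED ? "PTHREAD_CANCELED" : "other");
        return (0);
}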