/* The next function is similar to pthread_setcanceltype() but more
   specialized for use in the cancellable functions like write().  It
   does not need to check parameters etc.  It returns the previous
   value of the thread's cancel-handling word so the caller can restore
   it afterwards.  */
int
attribute_hidden
__pthread_enable_asynccancel (void)
{
  struct pthread *self = THREAD_SELF;
  int val = THREAD_GETMEM (self, cancelhandling);

  for (;;)
    {
      int desired = val | CANCELTYPE_BITMASK;

      /* Already in asynchronous mode: nothing to store.  */
      if (desired == val)
        break;

      /* Other bits of the word may change concurrently, so the update
         must be a compare-and-swap.  */
      int seen = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                            desired, val);
      if (__builtin_expect (seen == val, 1))
        {
          /* If cancellation is enabled and already pending, act on it
             now that the type has become asynchronous.  */
          if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (desired))
            {
              THREAD_SETMEM (self, result, PTHREAD_CANCELED);
              __do_cancel ();
            }
          break;
        }

      /* Lost the race; retry with the value the CAS actually saw.  */
      val = seen;
    }

  return val;
}
/* Record/replay-instrumented variant of __pthread_enable_asynccancel:
   switch the calling thread to asynchronous cancellation and return the
   previous cancel-handling word.  Every read and CAS of
   self->cancelhandling is funnelled through the pthread_log_* hooks so
   the observed values can be captured during a recording run and fed
   back verbatim during replay.
   NOTE(review): presumably part of a deterministic record/replay
   framework; the exact log semantics are not visible here -- confirm
   against the pthread_log_record/pthread_log_replay definitions.  */
int attribute_hidden __pthread_enable_asynccancel (void)
{
  struct pthread *self = THREAD_SELF;
  int oldval;
  if (is_recording())
    {
      /* Recording: bracket the real read with ENTER/EXIT log entries;
         the EXIT entry carries the value actually read.  */
      pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1);
      oldval = THREAD_GETMEM (self, cancelhandling);
      pthread_log_record (oldval, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0);
    }
  else if (is_replaying())
    {
      /* Replaying: do not perform the live read; take the recorded
         value from the log instead.  */
      pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling);
      oldval = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling);
    }
  else
    {
      /* Plain (uninstrumented) execution.  */
      oldval = THREAD_GETMEM (self, cancelhandling);
    }
  while (1)
    {
      int newval = oldval | CANCELTYPE_BITMASK;
      /* Asynchronous type already set: nothing to do.  */
      if (newval == oldval)
        break;
      int curval;
      if (is_recording())
        {
          /* Recording: log around the real CAS; the EXIT entry carries
             the value the CAS observed.  */
          pthread_log_record (0, PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling, 1);
          curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval);
          pthread_log_record (curval, PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling, 0);
        }
      else if (is_replaying())
        {
          /* Replaying: the CAS is not re-executed; its recorded result
             comes from the log.  NOTE(review): the live cancelhandling
             word is therefore not updated here during replay -- confirm
             the replay framework reconstructs it elsewhere.  */
          pthread_log_replay (PTHREAD_CANCELHANDLING_ENTER, (u_long) &self->cancelhandling);
          curval = pthread_log_replay (PTHREAD_CANCELHANDLING_EXIT, (u_long) &self->cancelhandling);
        }
      else
        {
          curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval);
        }
      if (__builtin_expect (curval == oldval, 1))
        {
          /* CAS succeeded.  If cancellation is enabled and already
             pending, act on it now that the type is asynchronous.  */
          if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
            {
              THREAD_SETMEM (self, result, PTHREAD_CANCELED);
              __do_cancel ();
            }
          break;
        }
      /* Prepare the next round. */
      oldval = curval;
    }
  /* Return the pre-update word so the caller can restore it later.  */
  return oldval;
}
int __pthread_setcanceltype (int type, int *oldtype) { if (type < PTHREAD_CANCEL_DEFERRED || type > PTHREAD_CANCEL_ASYNCHRONOUS) return EINVAL; #ifndef SIGCANCEL if (type == PTHREAD_CANCEL_ASYNCHRONOUS) return ENOTSUP; #endif volatile struct pthread *self = THREAD_SELF; int oldval = THREAD_GETMEM (self, cancelhandling); while (1) { int newval = (type == PTHREAD_CANCEL_ASYNCHRONOUS ? oldval | CANCELTYPE_BITMASK : oldval & ~CANCELTYPE_BITMASK); /* Store the old value. */ if (oldtype != NULL) *oldtype = ((oldval & CANCELTYPE_BITMASK) ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED); /* Avoid doing unnecessary work. The atomic operation can potentially be expensive if the memory has to be locked and remote cache lines have to be invalidated. */ if (oldval == newval) break; /* Update the cancel handling word. This has to be done atomically since other bits could be modified as well. */ int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval, oldval); if (__glibc_likely (curval == oldval)) { if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval)) { THREAD_SETMEM (self, result, PTHREAD_CANCELED); __do_cancel (); } break; } /* Prepare for the next round. */ oldval = curval; } return 0; }
/* Set the calling thread's cancellation state to STATE (enabled or
   disabled), storing the previous state in *OLDSTATE when non-NULL.
   Returns 0 on success or EINVAL for an out-of-range STATE.  */
int
attribute_protected
__pthread_setcancelstate (int state, int *oldstate)
{
  if (state < PTHREAD_CANCEL_ENABLE || state > PTHREAD_CANCEL_DISABLE)
    return EINVAL;

  volatile struct pthread *self = THREAD_SELF;
  int val = THREAD_GETMEM (self, cancelhandling);

  for (;;)
    {
      int desired = (state == PTHREAD_CANCEL_DISABLE
                     ? val | CANCELSTATE_BITMASK
                     : val & ~CANCELSTATE_BITMASK);

      /* Report the previous state before attempting the update.  */
      if (oldstate != NULL)
        *oldstate = ((val & CANCELSTATE_BITMASK)
                     ? PTHREAD_CANCEL_DISABLE
                     : PTHREAD_CANCEL_ENABLE);

      /* Avoid unnecessary work: the atomic operation can be expensive
         if the memory has to be locked and remote cache lines have to
         be invalidated.  */
      if (desired == val)
        break;

      /* Other bits of the word may change concurrently, so the update
         must be a compare-and-swap.  */
      int seen = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                            desired, val);
      if (__builtin_expect (seen == val, 1))
        {
          /* Re-enabling cancellation with an asynchronous cancel
             already pending means we must act on it immediately.  */
          if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (desired))
            __do_cancel ();
          break;
        }

      /* Lost the race; retry with the value the CAS actually saw.  */
      val = seen;
    }

  return 0;
}
/* Request cancellation of thread TH.  Atomically sets the CANCELING
   and CANCELED bits in the target's cancel-handling word; if the
   target has asynchronous cancellation enabled and not disabled, it is
   additionally signalled with SIGCANCEL via tgkill so it acts on the
   cancellation immediately.  Returns 0 on success, ESRCH for an
   invalid thread handle, or the tgkill errno on signalling failure.  */
int pthread_cancel (pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

#ifdef SHARED
  pthread_cancel_init ();
#endif

  int result = 0;
  int oldval;
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      /* Avoid doing unnecessary work.  The atomic operation can
         potentially be expensive if the memory has to be locked and
         remote cache lines have to be invalidated.  */
      if (oldval == newval)
        break;

      /* If the cancellation is handled asynchronously just send a
         signal.  We avoid this if possible since it's more
         expensive.  */
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
          /* Mark the cancellation as "in progress".  If the word
             changed underneath us, restart with a fresh read.  */
          if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                                                    oldval | CANCELING_BITMASK,
                                                    oldval))
            goto again;

#ifdef SIGCANCEL
          /* The cancellation handler will take care of marking the
             thread as canceled.  */
          pid_t pid = getpid ();

          INTERNAL_SYSCALL_DECL (err);
          int val = INTERNAL_SYSCALL_CALL (tgkill, err, pid, pd->tid,
                                           SIGCANCEL);
          if (INTERNAL_SYSCALL_ERROR_P (val, err))
            result = INTERNAL_SYSCALL_ERRNO (val, err);
#else
          /* It should be impossible to get here at all, since
             pthread_setcanceltype should never have allowed
             PTHREAD_CANCEL_ASYNCHRONOUS to be set.  */
          abort ();
#endif

          break;
        }

      /* A single-threaded process should be able to kill itself, since
         there is nothing in the POSIX specification that says that it
         cannot.  So we set multiple_threads to true so that
         cancellation points get executed.  */
      THREAD_SETMEM (THREAD_SELF, header.multiple_threads, 1);
#ifndef TLS_MULTIPLE_THREADS_IN_TCB
      __pthread_multiple_threads = *__libc_multiple_threads_ptr = 1;
#endif
    }
  /* Mark the thread as canceled.  This has to be done atomically since
     other bits could be modified as well.  On failure (concurrent
     modification) the do-while loop re-reads and retries.  */
  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
                                               oldval));

  return result;
}
/* Request cancellation of thread TH (variant with tkill fallback for
   kernels without the tgkill syscall).  Atomically sets the CANCELING
   and CANCELED bits in the target's cancel-handling word; if the
   target has asynchronous cancellation enabled and not disabled, it is
   additionally signalled with SIGCANCEL so it acts on the cancellation
   immediately.  Returns 0 on success, ESRCH for an invalid thread
   handle, or the kill syscall's errno on signalling failure.  */
int pthread_cancel ( pthread_t th)
{
  volatile struct pthread *pd = (volatile struct pthread *) th;

  /* Make sure the descriptor is valid.  */
  if (INVALID_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

#ifdef SHARED
  pthread_cancel_init ();
#endif

  int result = 0;
  int oldval;
  int newval;
  do
    {
    again:
      oldval = pd->cancelhandling;
      newval = oldval | CANCELING_BITMASK | CANCELED_BITMASK;

      /* Avoid doing unnecessary work.  The atomic operation can
         potentially be expensive if the memory has to be locked and
         remote cache lines have to be invalidated.  */
      if (oldval == newval)
        break;

      /* If the cancellation is handled asynchronously just send a
         signal.  We avoid this if possible since it's more
         expensive.  */
      if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
        {
          /* Mark the cancellation as "in progress".  If the word
             changed underneath us, restart with a fresh read.  */
          if (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling,
                                                    oldval | CANCELING_BITMASK,
                                                    oldval))
            goto again;

          /* The cancellation handler will take care of marking the
             thread as canceled.  */
          INTERNAL_SYSCALL_DECL (err);

          /* One comment: The PID field in the TCB can temporarily be
             changed (in fork).  But this must not affect this code
             here.  Since this function would have to be called while
             the thread is executing fork, it would have to happen in a
             signal handler.  But this is not allowed, pthread_cancel
             is not guaranteed to be async-safe.  */
          int val;
#if defined(__ASSUME_TGKILL) && __ASSUME_TGKILL
          val = INTERNAL_SYSCALL (tgkill, err, 3,
                                  THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                  SIGCANCEL);
#else
# ifdef __NR_tgkill
          /* Try tgkill first; fall back to tkill on kernels that
             return ENOSYS for it.  */
          val = INTERNAL_SYSCALL (tgkill, err, 3,
                                  THREAD_GETMEM (THREAD_SELF, pid), pd->tid,
                                  SIGCANCEL);
          if (INTERNAL_SYSCALL_ERROR_P (val, err)
              && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
# endif
            val = INTERNAL_SYSCALL (tkill, err, 2, pd->tid, SIGCANCEL);
#endif

          if (INTERNAL_SYSCALL_ERROR_P (val, err))
            result = INTERNAL_SYSCALL_ERRNO (val, err);

          break;
        }
    }
  /* Mark the thread as canceled.  This has to be done atomically since
     other bits could be modified as well.  On failure (concurrent
     modification) the do-while loop re-reads and retries.  */
  while (atomic_compare_and_exchange_bool_acq (&pd->cancelhandling, newval,
                                               oldval));

  return result;
}