/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid, so this test is only done when si_pid is known
         to hold the process ID.  */
      || si->si_pid != THREAD_GETMEM (THREAD_SELF, pid)
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags = THREAD_GETMEM (self, cancelhandling);
  THREAD_SETMEM (self, cancelhandling, flags & ~SETXID_BITMASK);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1);
}
/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
#ifdef __ASSUME_CORRECT_SI_PID
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  if (__builtin_expect (pid < 0, 0))
    pid = -pid;
#endif

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
#ifdef __ASSUME_CORRECT_SI_PID
      /* Kernels before 2.5.75 stored the thread ID and not the process
         ID in si_pid, so this test is only done when si_pid is known
         to hold the process ID.  */
      || si->si_pid != pid
#endif
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                        __xidcmd->id[1], __xidcmd->id[2]);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  lll_futex_wake (&self->setxid_futex, 1, LLL_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    lll_futex_wake (&__xidcmd->cntr, 1, LLL_PRIVATE);
}
/* We use the SIGSETXID signal in the setuid, setgid, etc. implementations
   to tell each thread to call the respective setxid syscall on itself.
   This is the handler.  */
static void
sighandler_setxid (int sig, siginfo_t *si, void *ctx)
{
  /* Determine the process ID.  It might be negative if the thread is
     in the middle of a fork() call.  */
  pid_t pid = THREAD_GETMEM (THREAD_SELF, pid);
  int result;
  if (__glibc_unlikely (pid < 0))
    pid = -pid;

  /* Safety check.  It would be possible to call this function for
     other signals and send a signal from another process.  This is not
     correct and might even be a security problem.  Try to catch as
     many incorrect invocations as possible.  */
  if (sig != SIGSETXID
      || si->si_pid != pid
      || si->si_code != SI_TKILL)
    return;

  INTERNAL_SYSCALL_DECL (err);
  result = INTERNAL_SYSCALL_NCS (__xidcmd->syscall_no, err, 3, __xidcmd->id[0],
                                 __xidcmd->id[1], __xidcmd->id[2]);
  int error = 0;
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    error = INTERNAL_SYSCALL_ERRNO (result, err);
  __nptl_setxid_error (__xidcmd, error);

  /* Reset the SETXID flag.  */
  struct pthread *self = THREAD_SELF;
  int flags, newval;
  do
    {
      flags = THREAD_GETMEM (self, cancelhandling);
      newval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
                                          flags & ~SETXID_BITMASK, flags);
    }
  while (flags != newval);

  /* And release the futex.  */
  self->setxid_futex = 1;
  futex_wake (&self->setxid_futex, 1, FUTEX_PRIVATE);

  if (atomic_decrement_val (&__xidcmd->cntr) == 0)
    futex_wake ((unsigned int *) &__xidcmd->cntr, 1, FUTEX_PRIVATE);
}
static int
internal_function
nscd_getpw_r (const char *key, size_t keylen, request_type type,
              struct passwd *resultbuf, char *buffer, size_t buflen,
              struct passwd **result)
{
  int gc_cycle;
  int nretries = 0;

  /* If the mapping is available, try to search there instead of
     communicating with the nscd.  */
  struct mapped_database *mapped;
  mapped = __nscd_get_map_ref (GETFDPW, "passwd", &map_handle, &gc_cycle);

 retry:;
  const char *pw_name = NULL;
  int retval = -1;
  const char *recend = (const char *) ~UINTMAX_C (0);
  pw_response_header pw_resp;

  if (mapped != NO_MAPPING)
    {
      struct datahead *found = __nscd_cache_search (type, key, keylen, mapped,
                                                    sizeof pw_resp);
      if (found != NULL)
        {
          pw_name = (const char *) (&found->data[0].pwdata + 1);
          pw_resp = found->data[0].pwdata;
          recend = (const char *) found->data + found->recsize;
          /* Now check if we can trust pw_resp fields.  If GC is
             in progress, it can contain anything.  */
          if (mapped->head->gc_cycle != gc_cycle)
            {
              retval = -2;
              goto out;
            }
        }
    }

  int sock = -1;
  if (pw_name == NULL)
    {
      sock = __nscd_open_socket (key, keylen, type, &pw_resp,
                                 sizeof (pw_resp));
      if (sock == -1)
        {
          __nss_not_use_nscd_passwd = 1;
          goto out;
        }
    }

  /* No value found so far.  */
  *result = NULL;

  if (__glibc_unlikely (pw_resp.found == -1))
    {
      /* The daemon does not cache this database.  */
      __nss_not_use_nscd_passwd = 1;
      goto out_close;
    }

  if (pw_resp.found == 1)
    {
      /* Set the information we already have.  */
      resultbuf->pw_uid = pw_resp.pw_uid;
      resultbuf->pw_gid = pw_resp.pw_gid;

      char *p = buffer;
      /* get pw_name */
      resultbuf->pw_name = p;
      p += pw_resp.pw_name_len;
      /* get pw_passwd */
      resultbuf->pw_passwd = p;
      p += pw_resp.pw_passwd_len;
      /* get pw_gecos */
      resultbuf->pw_gecos = p;
      p += pw_resp.pw_gecos_len;
      /* get pw_dir */
      resultbuf->pw_dir = p;
      p += pw_resp.pw_dir_len;
      /* get pw_shell */
      resultbuf->pw_shell = p;
      p += pw_resp.pw_shell_len;

      ssize_t total = p - buffer;
      if (__glibc_unlikely (pw_name + total > recend))
        goto out_close;
      if (__glibc_unlikely (buflen < total))
        {
          __set_errno (ERANGE);
          retval = ERANGE;
          goto out_close;
        }

      retval = 0;
      if (pw_name == NULL)
        {
          ssize_t nbytes = __readall (sock, buffer, total);

          if (__glibc_unlikely (nbytes != total))
            {
              /* Set errno to some value != ERANGE.  */
              __set_errno (ENOENT);
              retval = ENOENT;
            }
          else
            *result = resultbuf;
        }
      else
        {
          /* Copy the various strings.  */
          memcpy (resultbuf->pw_name, pw_name, total);

          /* Try to detect corrupt databases.  */
          if (resultbuf->pw_name[pw_resp.pw_name_len - 1] != '\0'
              || resultbuf->pw_passwd[pw_resp.pw_passwd_len - 1] != '\0'
              || resultbuf->pw_gecos[pw_resp.pw_gecos_len - 1] != '\0'
              || resultbuf->pw_dir[pw_resp.pw_dir_len - 1] != '\0'
              || resultbuf->pw_shell[pw_resp.pw_shell_len - 1] != '\0')
            {
              /* We cannot use the database.  */
              retval = mapped->head->gc_cycle != gc_cycle ? -2 : -1;
              goto out_close;
            }

          *result = resultbuf;
        }
    }
  else
    {
      /* Set errno to 0 to indicate no error, just no found record.  */
      __set_errno (0);
      /* Even though we have not found anything, the result is zero.  */
      retval = 0;
    }

 out_close:
  if (sock != -1)
    close_not_cancel_no_status (sock);
 out:
  if (__nscd_drop_map_ref (mapped, &gc_cycle) != 0)
    {
      /* When we come here this means there has been a GC cycle while we
         were looking for the data.  This means the data might have been
         inconsistent.  Retry if possible.  */
      if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
        {
          /* nscd is just running gc now.  Disable using the mapping.  */
          if (atomic_decrement_val (&mapped->counter) == 0)
            __nscd_unmap (mapped);
          mapped = NO_MAPPING;
        }

      if (retval != -1)
        goto retry;
    }

  return retval;
}
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem, expected;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24 || mem != 35)
    { puts ("atomic_compare_and_exchange_val_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12 || mem != 12)
    { puts ("atomic_compare_and_exchange_val_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    { puts ("atomic_compare_and_exchange_val_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1 || mem != -1)
    { puts ("atomic_compare_and_exchange_val_acq test 4 failed"); ret = 1; }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24) || mem != 35)
    { puts ("atomic_compare_and_exchange_bool_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15) || mem != 12)
    { puts ("atomic_compare_and_exchange_bool_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15) || mem != -56)
    { puts ("atomic_compare_and_exchange_bool_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0) || mem != -1)
    { puts ("atomic_compare_and_exchange_bool_acq test 4 failed"); ret = 1; }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64 || mem != 31)
    { puts ("atomic_exchange_acq test failed"); ret = 1; }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2 || mem != 13)
    { puts ("atomic_exchange_and_add test failed"); ret = 1; }
  mem = 2;
  if (atomic_exchange_and_add_acq (&mem, 11) != 2 || mem != 13)
    { puts ("atomic_exchange_and_add_acq test failed"); ret = 1; }
  mem = 2;
  if (atomic_exchange_and_add_rel (&mem, 11) != 2 || mem != 13)
    { puts ("atomic_exchange_and_add_rel test failed"); ret = 1; }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    { puts ("atomic_add test failed"); ret = 1; }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    { puts ("atomic_increment test failed"); ret = 1; }
  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    { puts ("atomic_increment_val test failed"); ret = 1; }

  mem = 0;
  if (atomic_increment_and_test (&mem) || mem != 1)
    { puts ("atomic_increment_and_test test 1 failed"); ret = 1; }
  mem = 35;
  if (atomic_increment_and_test (&mem) || mem != 36)
    { puts ("atomic_increment_and_test test 2 failed"); ret = 1; }
  mem = -1;
  if (! atomic_increment_and_test (&mem) || mem != 0)
    { puts ("atomic_increment_and_test test 3 failed"); ret = 1; }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    { puts ("atomic_decrement test failed"); ret = 1; }
  if (atomic_decrement_val (&mem) != 15)
    { puts ("atomic_decrement_val test failed"); ret = 1; }

  mem = 0;
  if (atomic_decrement_and_test (&mem) || mem != -1)
    { puts ("atomic_decrement_and_test test 1 failed"); ret = 1; }
  mem = 15;
  if (atomic_decrement_and_test (&mem) || mem != 14)
    { puts ("atomic_decrement_and_test test 2 failed"); ret = 1; }
  mem = 1;
  if (! atomic_decrement_and_test (&mem) || mem != 0)
    { puts ("atomic_decrement_and_test test 3 failed"); ret = 1; }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1 || mem != 0)
    { puts ("atomic_decrement_if_positive test 1 failed"); ret = 1; }
  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0 || mem != 0)
    { puts ("atomic_decrement_if_positive test 2 failed"); ret = 1; }
  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1 || mem != -1)
    { puts ("atomic_decrement_if_positive test 3 failed"); ret = 1; }

  mem = -12;
  if (! atomic_add_negative (&mem, 10) || mem != -2)
    { puts ("atomic_add_negative test 1 failed"); ret = 1; }
  mem = 0;
  if (atomic_add_negative (&mem, 100) || mem != 100)
    { puts ("atomic_add_negative test 2 failed"); ret = 1; }
  mem = 15;
  if (atomic_add_negative (&mem, -10) || mem != 5)
    { puts ("atomic_add_negative test 3 failed"); ret = 1; }
  mem = -12;
  if (atomic_add_negative (&mem, 14) || mem != 2)
    { puts ("atomic_add_negative test 4 failed"); ret = 1; }
  mem = 0;
  if (! atomic_add_negative (&mem, -1) || mem != -1)
    { puts ("atomic_add_negative test 5 failed"); ret = 1; }
  mem = -31;
  if (atomic_add_negative (&mem, 31) || mem != 0)
    { puts ("atomic_add_negative test 6 failed"); ret = 1; }

  mem = -34;
  if (atomic_add_zero (&mem, 31) || mem != -3)
    { puts ("atomic_add_zero test 1 failed"); ret = 1; }
  mem = -36;
  if (! atomic_add_zero (&mem, 36) || mem != 0)
    { puts ("atomic_add_zero test 2 failed"); ret = 1; }
  mem = 113;
  if (atomic_add_zero (&mem, -13) || mem != 100)
    { puts ("atomic_add_zero test 3 failed"); ret = 1; }
  mem = -18;
  if (atomic_add_zero (&mem, 20) || mem != 2)
    { puts ("atomic_add_zero test 4 failed"); ret = 1; }
  mem = 10;
  if (atomic_add_zero (&mem, -20) || mem != -10)
    { puts ("atomic_add_zero test 5 failed"); ret = 1; }
  mem = 10;
  if (! atomic_add_zero (&mem, -10) || mem != 0)
    { puts ("atomic_add_zero test 6 failed"); ret = 1; }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    { puts ("atomic_bit_set test 1 failed"); ret = 1; }
  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    { puts ("atomic_bit_set test 2 failed"); ret = 1; }
#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    { puts ("atomic_bit_set test 3 failed"); ret = 1; }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1) || mem != 2)
    { puts ("atomic_bit_test_set test 1 failed"); ret = 1; }
  mem = 8;
  if (! atomic_bit_test_set (&mem, 3) || mem != 8)
    { puts ("atomic_bit_test_set test 2 failed"); ret = 1; }
#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35) || mem != 0x800000010LL)
    { puts ("atomic_bit_test_set test 3 failed"); ret = 1; }
  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32) || mem != 0x100000000LL)
    { puts ("atomic_bit_test_set test 4 failed"); ret = 1; }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24 || mem != 35)
    { puts ("catomic_compare_and_exchange_val_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12 || mem != 12)
    { puts ("catomic_compare_and_exchange_val_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    { puts ("catomic_compare_and_exchange_val_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1 || mem != -1)
    { puts ("catomic_compare_and_exchange_val_acq test 4 failed"); ret = 1; }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24) || mem != 35)
    { puts ("catomic_compare_and_exchange_bool_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15) || mem != 12)
    { puts ("catomic_compare_and_exchange_bool_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15) || mem != -56)
    { puts ("catomic_compare_and_exchange_bool_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0) || mem != -1)
    { puts ("catomic_compare_and_exchange_bool_acq test 4 failed"); ret = 1; }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2 || mem != 13)
    { puts ("catomic_exchange_and_add test failed"); ret = 1; }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    { puts ("catomic_add test failed"); ret = 1; }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    { puts ("catomic_increment test failed"); ret = 1; }
  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    { puts ("catomic_increment_val test failed"); ret = 1; }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    { puts ("catomic_decrement test failed"); ret = 1; }
  if (catomic_decrement_val (&mem) != 15)
    { puts ("catomic_decrement_val test failed"); ret = 1; }

  /* Tests for C11-like atomics.  */
  mem = 11;
  if (atomic_load_relaxed (&mem) != 11 || atomic_load_acquire (&mem) != 11)
    { puts ("atomic_load_{relaxed,acquire} test failed"); ret = 1; }

  atomic_store_relaxed (&mem, 12);
  if (mem != 12)
    { puts ("atomic_store_relaxed test failed"); ret = 1; }
  atomic_store_release (&mem, 13);
  if (mem != 13)
    { puts ("atomic_store_release test failed"); ret = 1; }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_relaxed (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    { puts ("atomic_compare_exchange_weak_relaxed test 1 failed"); ret = 1; }
  if (atomic_compare_exchange_weak_relaxed (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    { puts ("atomic_compare_exchange_weak_relaxed test 2 failed"); ret = 1; }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_acquire (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    { puts ("atomic_compare_exchange_weak_acquire test 1 failed"); ret = 1; }
  if (atomic_compare_exchange_weak_acquire (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    { puts ("atomic_compare_exchange_weak_acquire test 2 failed"); ret = 1; }

  mem = 14;
  expected = 14;
  if (!atomic_compare_exchange_weak_release (&mem, &expected, 25)
      || mem != 25 || expected != 14)
    { puts ("atomic_compare_exchange_weak_release test 1 failed"); ret = 1; }
  if (atomic_compare_exchange_weak_release (&mem, &expected, 14)
      || mem != 25 || expected != 25)
    { puts ("atomic_compare_exchange_weak_release test 2 failed"); ret = 1; }

  mem = 23;
  if (atomic_exchange_acquire (&mem, 42) != 23 || mem != 42)
    { puts ("atomic_exchange_acquire test failed"); ret = 1; }
  mem = 23;
  if (atomic_exchange_release (&mem, 42) != 23 || mem != 42)
    { puts ("atomic_exchange_release test failed"); ret = 1; }

  mem = 23;
  if (atomic_fetch_add_relaxed (&mem, 1) != 23 || mem != 24)
    { puts ("atomic_fetch_add_relaxed test failed"); ret = 1; }
  mem = 23;
  if (atomic_fetch_add_acquire (&mem, 1) != 23 || mem != 24)
    { puts ("atomic_fetch_add_acquire test failed"); ret = 1; }
  mem = 23;
  if (atomic_fetch_add_release (&mem, 1) != 23 || mem != 24)
    { puts ("atomic_fetch_add_release test failed"); ret = 1; }
  mem = 23;
  if (atomic_fetch_add_acq_rel (&mem, 1) != 23 || mem != 24)
    { puts ("atomic_fetch_add_acq_rel test failed"); ret = 1; }

  mem = 3;
  if (atomic_fetch_and_acquire (&mem, 2) != 3 || mem != 2)
    { puts ("atomic_fetch_and_acquire test failed"); ret = 1; }

  mem = 4;
  if (atomic_fetch_or_relaxed (&mem, 2) != 4 || mem != 6)
    { puts ("atomic_fetch_or_relaxed test failed"); ret = 1; }
  mem = 4;
  if (atomic_fetch_or_acquire (&mem, 2) != 4 || mem != 6)
    { puts ("atomic_fetch_or_acquire test failed"); ret = 1; }

  /* This is a single-threaded test, so we can't test the effects of the
     fences.  */
  atomic_thread_fence_acquire ();
  atomic_thread_fence_release ();
  atomic_thread_fence_seq_cst ();

  return ret;
}
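/* Hedged companion example (not part of the original test): the
   "C11-like" glibc-internal macros exercised above mirror the standard
   <stdatomic.h> interface.  The following is a minimal standalone
   sketch of similar checks written against standard C11 atomics; the
   name c11_atomics_smoke_test is hypothetical.  */
#include <stdatomic.h>
#include <stdio.h>

static int
c11_atomics_smoke_test (void)
{
  atomic_int mem;
  int expected;
  int ret = 0;

  atomic_init (&mem, 11);
  if (atomic_load_explicit (&mem, memory_order_relaxed) != 11
      || atomic_load_explicit (&mem, memory_order_acquire) != 11)
    { puts ("C11 load test failed"); ret = 1; }

  atomic_store_explicit (&mem, 12, memory_order_relaxed);
  if (atomic_load (&mem) != 12)
    { puts ("C11 store test failed"); ret = 1; }

  /* A weak CAS may fail spuriously; on failure it writes the observed
     value back into EXPECTED, so retry in a loop.  */
  expected = 12;
  while (!atomic_compare_exchange_weak_explicit (&mem, &expected, 25,
                                                 memory_order_acquire,
                                                 memory_order_relaxed))
    ;
  if (atomic_load (&mem) != 25)
    { puts ("C11 compare_exchange_weak test failed"); ret = 1; }

  /* atomic_fetch_add returns the previous value.  */
  if (atomic_fetch_add_explicit (&mem, 1, memory_order_relaxed) != 25
      || atomic_load (&mem) != 26)
    { puts ("C11 fetch_add test failed"); ret = 1; }

  return ret;
}

int
main (void)
{
  return c11_atomics_smoke_test ();
}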
int
__nscd_getgrouplist (const char *user, gid_t group, long int *size,
                     gid_t **groupsp, long int limit)
{
  size_t userlen = strlen (user) + 1;
  int gc_cycle;
  int nretries = 0;

  /* If the mapping is available, try to search there instead of
     communicating with the nscd.  */
  struct mapped_database *mapped;
  mapped = __nscd_get_map_ref (GETFDGR, "group", &__gr_map_handle, &gc_cycle);

 retry:;
  char *respdata = NULL;
  int retval = -1;
  int sock = -1;
  initgr_response_header initgr_resp;

  if (mapped != NO_MAPPING)
    {
      struct datahead *found = __nscd_cache_search (INITGROUPS, user, userlen,
                                                    mapped,
                                                    sizeof initgr_resp);
      if (found != NULL)
        {
          respdata = (char *) (&found->data[0].initgrdata + 1);
          initgr_resp = found->data[0].initgrdata;
          char *recend = (char *) found->data + found->recsize;

          /* Now check if we can trust initgr_resp fields.  If GC is
             in progress, it can contain anything.  */
          if (mapped->head->gc_cycle != gc_cycle)
            {
              retval = -2;
              goto out;
            }

          if (respdata + initgr_resp.ngrps * sizeof (int32_t) > recend)
            goto out;
        }
    }

  /* If we do not have the cache mapped, try to get the data over the
     socket.  */
  if (respdata == NULL)
    {
      sock = __nscd_open_socket (user, userlen, INITGROUPS, &initgr_resp,
                                 sizeof (initgr_resp));
      if (sock == -1)
        {
          /* nscd not running or wrong version.  */
          __nss_not_use_nscd_group = 1;
          goto out;
        }
    }

  if (initgr_resp.found == 1)
    {
      /* The following code assumes that gid_t and int32_t are the same
         size.  This is the case for all existing implementations.  If
         this should change some code needs to be added which doesn't
         use memcpy but instead copies each array element one by one.  */
      assert (sizeof (int32_t) == sizeof (gid_t));
      assert (initgr_resp.ngrps >= 0);

      /* Make sure we have enough room.  We always count GROUP in even
         though we might not end up adding it.  */
      if (*size < initgr_resp.ngrps + 1)
        {
          gid_t *newp = realloc (*groupsp,
                                 (initgr_resp.ngrps + 1) * sizeof (gid_t));
          if (newp == NULL)
            /* We cannot increase the buffer size.  */
            goto out_close;

          *groupsp = newp;
          *size = initgr_resp.ngrps + 1;
        }

      if (respdata == NULL)
        {
          /* Read the data from the socket.  */
          if ((size_t) __readall (sock, *groupsp,
                                  initgr_resp.ngrps * sizeof (gid_t))
              == initgr_resp.ngrps * sizeof (gid_t))
            retval = initgr_resp.ngrps;
        }
      else
        {
          /* Just copy the data.  */
          retval = initgr_resp.ngrps;
          memcpy (*groupsp, respdata, retval * sizeof (gid_t));
        }
    }
  else
    {
      if (__glibc_unlikely (initgr_resp.found == -1))
        {
          /* The daemon does not cache this database.  */
          __nss_not_use_nscd_group = 1;
          goto out_close;
        }

      /* No group found yet.  */
      retval = 0;

      assert (*size >= 1);
    }

  /* Check whether GROUP is part of the mix.  If not, add it.  */
  if (retval >= 0)
    {
      int cnt;
      for (cnt = 0; cnt < retval; ++cnt)
        if ((*groupsp)[cnt] == group)
          break;

      if (cnt == retval)
        (*groupsp)[retval++] = group;
    }

 out_close:
  if (sock != -1)
    close_not_cancel_no_status (sock);
 out:
  if (__nscd_drop_map_ref (mapped, &gc_cycle) != 0)
    {
      /* When we come here this means there has been a GC cycle while we
         were looking for the data.  This means the data might have been
         inconsistent.  Retry if possible.  */
      if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
        {
          /* nscd is just running gc now.  Disable using the mapping.  */
          if (atomic_decrement_val (&mapped->counter) == 0)
            __nscd_unmap (mapped);
          mapped = NO_MAPPING;
        }

      if (retval != -1)
        goto retry;
    }

  return retval;
}
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
             struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
                        != iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
                        .msg_control = buf.bytes,
                        .msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
                                                       MSG_NOSIGNAL))
                        != keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
                        != CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
                           0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
                                                     sizeof (head), 0))
                        != sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
         thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
                           && head.timestamp + MAPPING_TIMEOUT < time (NULL),
                           0))
    goto out_close;

  size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
                 + head.data_size);
  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
        {
          /* Ugh, after all we went through the memory allocation failed.  */
          __munmap (mapping, size);
          goto out_close;
        }

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
                    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif /* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
                    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;

  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
        return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
          || (cur->head->nscd_certainly_running == 0
              && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
        cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
        {
          if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
                                0))
            cur = NO_MAPPING;
          else
            atomic_increment (&cur->counter);
        }
    }

  mapptr->lock = 0;

  return cur;
}


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
                     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
          && memcmp (key, mapped->data + here->key, keylen) == 0)
        {
          /* We found the entry.  Increment the appropriate counter.  */
          const struct datahead *dh
            = (struct datahead *) (mapped->data + here->packet);

          /* See whether we must ignore the entry or whether something
             is wrong because garbage collection is in progress.  */
          if (dh->usable
              && ((char *) dh + dh->allocsize
                  <= (char *) mapped->head + mapped->mapsize))
            return dh;
        }

      work = here->next;
    }

  return NULL;
}
int
__nscd_setnetgrent (const char *group, struct __netgrent *datap)
{
  int gc_cycle;
  int nretries = 0;
  size_t group_len = strlen (group) + 1;

  /* If the mapping is available, try to search there instead of
     communicating with the nscd.  */
  struct mapped_database *mapped;
  mapped = __nscd_get_map_ref (GETFDNETGR, "netgroup", &map_handle,
                               &gc_cycle);

 retry:;
  char *respdata = NULL;
  int retval = -1;
  netgroup_response_header netgroup_resp;

  if (mapped != NO_MAPPING)
    {
      struct datahead *found = __nscd_cache_search (GETNETGRENT, group,
                                                    group_len, mapped,
                                                    sizeof netgroup_resp);
      if (found != NULL)
        {
          respdata = (char *) (&found->data[0].netgroupdata + 1);
          netgroup_resp = found->data[0].netgroupdata;
          /* Now check if we can trust netgroup_resp fields.  If GC is
             in progress, it can contain anything.  */
          if (mapped->head->gc_cycle != gc_cycle)
            {
              retval = -2;
              goto out;
            }
        }
    }

  int sock = -1;
  if (respdata == NULL)
    {
      sock = __nscd_open_socket (group, group_len, GETNETGRENT,
                                 &netgroup_resp, sizeof (netgroup_resp));
      if (sock == -1)
        {
          /* nscd not running or wrong version.  */
          __nss_not_use_nscd_netgroup = 1;
          goto out;
        }
    }

  if (netgroup_resp.found == 1)
    {
      size_t datalen = netgroup_resp.result_len;

      /* If we do not have to read the data here it comes from the
         mapped data and does not have to be freed.  */
      if (respdata == NULL)
        {
          /* The data will come via the socket.  */
          respdata = malloc (datalen);
          if (respdata == NULL)
            goto out_close;

          if ((size_t) __readall (sock, respdata, datalen) != datalen)
            {
              free (respdata);
              goto out_close;
            }
        }

      datap->data = respdata;
      datap->data_size = datalen;
      datap->cursor = respdata;
      datap->first = 1;
      datap->nip = (service_user *) -1l;
      datap->known_groups = NULL;
      datap->needed_groups = NULL;

      retval = 1;
    }
  else
    {
      if (__glibc_unlikely (netgroup_resp.found == -1))
        {
          /* The daemon does not cache this database.  */
          __nss_not_use_nscd_netgroup = 1;
          goto out_close;
        }

      /* Set errno to 0 to indicate no error, just no found record.  */
      __set_errno (0);

      /* Even though we have not found anything, the result is zero.  */
      retval = 0;
    }

 out_close:
  if (sock != -1)
    close_not_cancel_no_status (sock);
 out:
  if (__nscd_drop_map_ref (mapped, &gc_cycle) != 0)
    {
      /* When we come here this means there has been a GC cycle while we
         were looking for the data.  This means the data might have been
         inconsistent.  Retry if possible.  */
      if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
        {
          /* nscd is just running gc now.  Disable using the mapping.  */
          if (atomic_decrement_val (&mapped->counter) == 0)
            __nscd_unmap (mapped);
          mapped = NO_MAPPING;
        }

      if (retval != -1)
        goto retry;
    }

  return retval;
}
/* Test various atomic.h macros.  */
static int
do_test (void)
{
  atomic_t mem;
  int ret = 0;

#ifdef atomic_compare_and_exchange_val_acq
  mem = 24;
  if (atomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24 || mem != 35)
    { puts ("atomic_compare_and_exchange_val_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (atomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12 || mem != 12)
    { puts ("atomic_compare_and_exchange_val_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (atomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    { puts ("atomic_compare_and_exchange_val_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (atomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1 || mem != -1)
    { puts ("atomic_compare_and_exchange_val_acq test 4 failed"); ret = 1; }
#endif

  mem = 24;
  if (atomic_compare_and_exchange_bool_acq (&mem, 35, 24) || mem != 35)
    { puts ("atomic_compare_and_exchange_bool_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 10, 15) || mem != 12)
    { puts ("atomic_compare_and_exchange_bool_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (atomic_compare_and_exchange_bool_acq (&mem, -56, -15) || mem != -56)
    { puts ("atomic_compare_and_exchange_bool_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (! atomic_compare_and_exchange_bool_acq (&mem, 17, 0) || mem != -1)
    { puts ("atomic_compare_and_exchange_bool_acq test 4 failed"); ret = 1; }

  mem = 64;
  if (atomic_exchange_acq (&mem, 31) != 64 || mem != 31)
    { puts ("atomic_exchange_acq test failed"); ret = 1; }

  mem = 2;
  if (atomic_exchange_and_add (&mem, 11) != 2 || mem != 13)
    { puts ("atomic_exchange_and_add test failed"); ret = 1; }

  mem = -21;
  atomic_add (&mem, 22);
  if (mem != 1)
    { puts ("atomic_add test failed"); ret = 1; }

  mem = -1;
  atomic_increment (&mem);
  if (mem != 0)
    { puts ("atomic_increment test failed"); ret = 1; }
  mem = 2;
  if (atomic_increment_val (&mem) != 3)
    { puts ("atomic_increment_val test failed"); ret = 1; }

  mem = 0;
  if (atomic_increment_and_test (&mem) || mem != 1)
    { puts ("atomic_increment_and_test test 1 failed"); ret = 1; }
  mem = 35;
  if (atomic_increment_and_test (&mem) || mem != 36)
    { puts ("atomic_increment_and_test test 2 failed"); ret = 1; }
  mem = -1;
  if (! atomic_increment_and_test (&mem) || mem != 0)
    { puts ("atomic_increment_and_test test 3 failed"); ret = 1; }

  mem = 17;
  atomic_decrement (&mem);
  if (mem != 16)
    { puts ("atomic_decrement test failed"); ret = 1; }
  if (atomic_decrement_val (&mem) != 15)
    { puts ("atomic_decrement_val test failed"); ret = 1; }

  mem = 0;
  if (atomic_decrement_and_test (&mem) || mem != -1)
    { puts ("atomic_decrement_and_test test 1 failed"); ret = 1; }
  mem = 15;
  if (atomic_decrement_and_test (&mem) || mem != 14)
    { puts ("atomic_decrement_and_test test 2 failed"); ret = 1; }
  mem = 1;
  if (! atomic_decrement_and_test (&mem) || mem != 0)
    { puts ("atomic_decrement_and_test test 3 failed"); ret = 1; }

  mem = 1;
  if (atomic_decrement_if_positive (&mem) != 1 || mem != 0)
    { puts ("atomic_decrement_if_positive test 1 failed"); ret = 1; }
  mem = 0;
  if (atomic_decrement_if_positive (&mem) != 0 || mem != 0)
    { puts ("atomic_decrement_if_positive test 2 failed"); ret = 1; }
  mem = -1;
  if (atomic_decrement_if_positive (&mem) != -1 || mem != -1)
    { puts ("atomic_decrement_if_positive test 3 failed"); ret = 1; }

  mem = -12;
  if (! atomic_add_negative (&mem, 10) || mem != -2)
    { puts ("atomic_add_negative test 1 failed"); ret = 1; }
  mem = 0;
  if (atomic_add_negative (&mem, 100) || mem != 100)
    { puts ("atomic_add_negative test 2 failed"); ret = 1; }
  mem = 15;
  if (atomic_add_negative (&mem, -10) || mem != 5)
    { puts ("atomic_add_negative test 3 failed"); ret = 1; }
  mem = -12;
  if (atomic_add_negative (&mem, 14) || mem != 2)
    { puts ("atomic_add_negative test 4 failed"); ret = 1; }
  mem = 0;
  if (! atomic_add_negative (&mem, -1) || mem != -1)
    { puts ("atomic_add_negative test 5 failed"); ret = 1; }
  mem = -31;
  if (atomic_add_negative (&mem, 31) || mem != 0)
    { puts ("atomic_add_negative test 6 failed"); ret = 1; }

  mem = -34;
  if (atomic_add_zero (&mem, 31) || mem != -3)
    { puts ("atomic_add_zero test 1 failed"); ret = 1; }
  mem = -36;
  if (! atomic_add_zero (&mem, 36) || mem != 0)
    { puts ("atomic_add_zero test 2 failed"); ret = 1; }
  mem = 113;
  if (atomic_add_zero (&mem, -13) || mem != 100)
    { puts ("atomic_add_zero test 3 failed"); ret = 1; }
  mem = -18;
  if (atomic_add_zero (&mem, 20) || mem != 2)
    { puts ("atomic_add_zero test 4 failed"); ret = 1; }
  mem = 10;
  if (atomic_add_zero (&mem, -20) || mem != -10)
    { puts ("atomic_add_zero test 5 failed"); ret = 1; }
  mem = 10;
  if (! atomic_add_zero (&mem, -10) || mem != 0)
    { puts ("atomic_add_zero test 6 failed"); ret = 1; }

  mem = 0;
  atomic_bit_set (&mem, 1);
  if (mem != 2)
    { puts ("atomic_bit_set test 1 failed"); ret = 1; }
  mem = 8;
  atomic_bit_set (&mem, 3);
  if (mem != 8)
    { puts ("atomic_bit_set test 2 failed"); ret = 1; }
#ifdef TEST_ATOMIC64
  mem = 16;
  atomic_bit_set (&mem, 35);
  if (mem != 0x800000010LL)
    { puts ("atomic_bit_set test 3 failed"); ret = 1; }
#endif

  mem = 0;
  if (atomic_bit_test_set (&mem, 1) || mem != 2)
    { puts ("atomic_bit_test_set test 1 failed"); ret = 1; }
  mem = 8;
  if (! atomic_bit_test_set (&mem, 3) || mem != 8)
    { puts ("atomic_bit_test_set test 2 failed"); ret = 1; }
#ifdef TEST_ATOMIC64
  mem = 16;
  if (atomic_bit_test_set (&mem, 35) || mem != 0x800000010LL)
    { puts ("atomic_bit_test_set test 3 failed"); ret = 1; }
  mem = 0x100000000LL;
  if (! atomic_bit_test_set (&mem, 32) || mem != 0x100000000LL)
    { puts ("atomic_bit_test_set test 4 failed"); ret = 1; }
#endif

#ifdef catomic_compare_and_exchange_val_acq
  mem = 24;
  if (catomic_compare_and_exchange_val_acq (&mem, 35, 24) != 24 || mem != 35)
    { puts ("catomic_compare_and_exchange_val_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (catomic_compare_and_exchange_val_acq (&mem, 10, 15) != 12 || mem != 12)
    { puts ("catomic_compare_and_exchange_val_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (catomic_compare_and_exchange_val_acq (&mem, -56, -15) != -15
      || mem != -56)
    { puts ("catomic_compare_and_exchange_val_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (catomic_compare_and_exchange_val_acq (&mem, 17, 0) != -1 || mem != -1)
    { puts ("catomic_compare_and_exchange_val_acq test 4 failed"); ret = 1; }
#endif

  mem = 24;
  if (catomic_compare_and_exchange_bool_acq (&mem, 35, 24) || mem != 35)
    { puts ("catomic_compare_and_exchange_bool_acq test 1 failed"); ret = 1; }
  mem = 12;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 10, 15) || mem != 12)
    { puts ("catomic_compare_and_exchange_bool_acq test 2 failed"); ret = 1; }
  mem = -15;
  if (catomic_compare_and_exchange_bool_acq (&mem, -56, -15) || mem != -56)
    { puts ("catomic_compare_and_exchange_bool_acq test 3 failed"); ret = 1; }
  mem = -1;
  if (! catomic_compare_and_exchange_bool_acq (&mem, 17, 0) || mem != -1)
    { puts ("catomic_compare_and_exchange_bool_acq test 4 failed"); ret = 1; }

  mem = 2;
  if (catomic_exchange_and_add (&mem, 11) != 2 || mem != 13)
    { puts ("catomic_exchange_and_add test failed"); ret = 1; }

  mem = -21;
  catomic_add (&mem, 22);
  if (mem != 1)
    { puts ("catomic_add test failed"); ret = 1; }

  mem = -1;
  catomic_increment (&mem);
  if (mem != 0)
    { puts ("catomic_increment test failed"); ret = 1; }
  mem = 2;
  if (catomic_increment_val (&mem) != 3)
    { puts ("catomic_increment_val test failed"); ret = 1; }

  mem = 17;
  catomic_decrement (&mem);
  if (mem != 16)
    { puts ("catomic_decrement test failed"); ret = 1; }
  if (catomic_decrement_val (&mem) != 15)
    { puts ("catomic_decrement_val test failed"); ret = 1; }

  return ret;
}
uint64_t
atomic_dec_64_nv (volatile uint64_t *target)
{
  return atomic_decrement_val (target) - 1;
}

unsigned long
atomic_dec_ulong_nv (volatile unsigned long *target)
{
  return atomic_decrement_val (target) - 1;
}

unsigned int
atomic_dec_uint_nv (volatile unsigned int *target)
{
  return atomic_decrement_val (target) - 1;
}

unsigned short
atomic_dec_ushort_nv (volatile unsigned short *target)
{
  return atomic_decrement_val (target) - 1;
}

unsigned char
atomic_dec_uchar_nv (volatile unsigned char *target)
{
  return atomic_decrement_val (target) - 1;
}
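/* Hedged sketch (not from the original source): the wrappers above
   follow Solaris-style *_nv ("new value") semantics, i.e. they are
   expected to return the value after the decrement.  Subtracting 1 from
   the result of atomic_decrement_val therefore assumes that, in this
   code base, the macro returns the value *before* the decrement; note
   that glibc's own atomic_decrement_val already returns the decremented
   value, as the tst-atomic checks above demonstrate.  A C11 equivalent
   that sidesteps the ambiguity, using the hypothetical name
   atomic_dec_64_nv_c11, could look like this: */
#include <stdatomic.h>
#include <stdint.h>

static uint64_t
atomic_dec_64_nv_c11 (_Atomic uint64_t *target)
{
  /* atomic_fetch_sub returns the old value; subtracting 1 yields the
     new value, matching the *_nv contract.  */
  return atomic_fetch_sub (target, 1) - 1;
}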