/* Complex exponential for long double: computes e^x for complex x.
   Follows the C99 Annex G rules for special values (infinities and
   NaNs) and scales by powers of e^t to defer overflow when the real
   part is large.  Raises FE_INVALID where Annex G requires it.  */
__complex__ long double
__cexpl (__complex__ long double x)
{
  __complex__ long double result;
  int re_class = fpclassify (__real__ x);
  int im_class = fpclassify (__imag__ x);

  if (__builtin_expect (re_class >= FP_ZERO, 1))
    {
      /* Real part is finite.  */
      if (__builtin_expect (im_class >= FP_ZERO, 1))
	{
	  /* Imaginary part is finite too:
	     e^x = e^re * (cos im + i sin im).
	     t is (roughly) the largest integer whose exponential is
	     still representable in long double.  */
	  const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
	  long double sin_im, cos_im;

	  if (__builtin_expect (im_class != FP_SUBNORMAL, 1))
	    __sincosl (__imag__ x, &sin_im, &cos_im);
	  else
	    {
	      /* For subnormal angles sin(y) == y and cos(y) == 1 to
		 full precision; avoid calling sincos.  */
	      sin_im = __imag__ x;
	      cos_im = 1.0;
	    }

	  /* Peel off up to two factors of e^t so the remaining
	     exponential argument cannot overflow prematurely.  */
	  if (__real__ x > t)
	    {
	      long double exp_t = __ieee754_expl (t);
	      __real__ x -= t;
	      sin_im *= exp_t;
	      cos_im *= exp_t;
	      if (__real__ x > t)
		{
		  __real__ x -= t;
		  sin_im *= exp_t;
		  cos_im *= exp_t;
		}
	    }

	  if (__real__ x > t)
	    {
	      /* Overflow (original real part of x > 3t).  */
	      __real__ result = LDBL_MAX * cos_im;
	      __imag__ result = LDBL_MAX * sin_im;
	    }
	  else
	    {
	      long double exp_val = __ieee754_expl (__real__ x);
	      __real__ result = exp_val * cos_im;
	      __imag__ result = exp_val * sin_im;
	    }
	}
      else
	{
	  /* Finite real part but +-inf/NaN imaginary part: the result
	     is NaN + i NaN and FE_INVALID is raised.  */
	  __real__ result = __nanl ("");
	  __imag__ result = __nanl ("");
	  feraiseexcept (FE_INVALID);
	}
    }
  else if (__builtin_expect (re_class == FP_INFINITE, 1))
    {
      /* Real part is infinite.  */
      if (__builtin_expect (im_class >= FP_ZERO, 1))
	{
	  /* Imaginary part is finite: magnitude is 0 for -inf and
	     HUGE_VALL for +inf.  */
	  long double magnitude = signbit (__real__ x) ? 0.0 : HUGE_VALL;

	  if (im_class == FP_ZERO)
	    {
	      /* Imaginary part is +-0: preserve its sign exactly.  */
	      __real__ result = magnitude;
	      __imag__ result = __imag__ x;
	    }
	  else
	    {
	      long double sin_im, cos_im;

	      if (__builtin_expect (im_class != FP_SUBNORMAL, 1))
		__sincosl (__imag__ x, &sin_im, &cos_im);
	      else
		{
		  sin_im = __imag__ x;
		  cos_im = 1.0;
		}

	      /* Only the signs of sin/cos matter here.  */
	      __real__ result = __copysignl (magnitude, cos_im);
	      __imag__ result = __copysignl (magnitude, sin_im);
	    }
	}
      else if (signbit (__real__ x) == 0)
	{
	  /* +inf real part with inf/NaN imaginary part: inf + i NaN,
	     with FE_INVALID only for the infinite case.  */
	  __real__ result = HUGE_VALL;
	  __imag__ result = __nanl ("");
	  if (im_class == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
      else
	{
	  /* -inf real part: the magnitude collapses to zero; the
	     imaginary zero keeps the sign of the input.  */
	  __real__ result = 0.0;
	  __imag__ result = __copysignl (0.0, __imag__ x);
	}
    }
  else
    {
      /* Real part is NaN: the result is NaN + i NaN; raise
	 FE_INVALID unless both parts were already NaN.  */
      __real__ result = __nanl ("");
      __imag__ result = __nanl ("");
      if (re_class != FP_NAN || im_class != FP_NAN)
	feraiseexcept (FE_INVALID);
    }

  return result;
}
/* Obtain a writable buffer of up to audioBuffer->frameCount frames from
   the shared control block (cblk).  On entry the track lock (mLock) is
   taken for the whole call.  If no frames are available the function
   waits on cblk->cv for up to waitCount periods of waitTimeMs
   (waitCount < 0 selects the cblk's own bufferTimeoutMs); it returns
   NO_MORE_BUFFERS when the track is not active, WOULD_BLOCK when
   waitCount hits zero, TIMED_OUT when the wait budget is exhausted, or
   STOPPED when the track was stopped while waiting.  On timeout with
   pending data it kicks AudioFlinger via mAudioTrack->start() and, if
   the binder died (DEAD_OBJECT) or CBLK_INVALID is set, falls into
   restoreTrack_l() through the create_new_track label.  On success it
   fills audioBuffer (frameCount/size/format/raw pointer into the shared
   ring at cblk->user) and returns NO_ERROR (or STOPPED if the track
   went inactive).
   NOTE(review): `goto create_new_track` jumps into the body of a nested
   if deep inside the wait loop, and cblk->lock is acquired/released in
   several interleavings with mLock -- this control flow is deliberate
   (see issue #1617140 referenced below) but fragile; verify against the
   platform history before restructuring.  */
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount) { AutoMutex lock(mLock); int active; status_t result = NO_ERROR; audio_track_cblk_t* cblk = mCblk; uint32_t framesReq = audioBuffer->frameCount; uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS; audioBuffer->frameCount = 0; audioBuffer->size = 0; uint32_t framesAvail = cblk->framesAvailable(); cblk->lock.lock(); if (cblk->flags & CBLK_INVALID_MSK) { goto create_new_track; } cblk->lock.unlock(); if (framesAvail == 0) { cblk->lock.lock(); goto start_loop_here; while (framesAvail == 0) { active = mActive; if (UNLIKELY(!active)) { LOGV("Not active and NO_MORE_BUFFERS"); cblk->lock.unlock(); return NO_MORE_BUFFERS; } if (UNLIKELY(!waitCount)) { cblk->lock.unlock(); return WOULD_BLOCK; } if (!(cblk->flags & CBLK_INVALID_MSK)) { mLock.unlock(); result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs)); cblk->lock.unlock(); mLock.lock(); if (mActive == 0) { return status_t(STOPPED); } cblk->lock.lock(); } if (cblk->flags & CBLK_INVALID_MSK) { goto create_new_track; } if (__builtin_expect(result!=NO_ERROR, false)) { cblk->waitTimeMs += waitTimeMs; if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) { // timing out when a loop has been set and we have already written upto loop end // is a normal condition: no need to wake AudioFlinger up. if (cblk->user < cblk->loopEnd) { LOGW( "obtainBuffer timed out (is the CPU pegged?) 
%p " "user=%08x, server=%08x", this, cblk->user, cblk->server); //unlock cblk mutex before calling mAudioTrack->start() (see issue #1617140) cblk->lock.unlock(); result = mAudioTrack->start(); cblk->lock.lock(); if (result == DEAD_OBJECT) { android_atomic_or(CBLK_INVALID_ON, &cblk->flags); create_new_track: result = restoreTrack_l(cblk, false); } if (result != NO_ERROR) { LOGW("obtainBuffer create Track error %d", result); cblk->lock.unlock(); return result; } } cblk->waitTimeMs = 0; } if (--waitCount == 0) { cblk->lock.unlock(); return TIMED_OUT; } } // read the server count again start_loop_here: framesAvail = cblk->framesAvailable_l(); } cblk->lock.unlock(); } // restart track if it was disabled by audioflinger due to previous underrun if (mActive && (cblk->flags & CBLK_DISABLED_MSK)) { android_atomic_and(~CBLK_DISABLED_ON, &cblk->flags); LOGW("obtainBuffer() track %p disabled, restarting", this); mAudioTrack->start(); } cblk->waitTimeMs = 0; if (framesReq > framesAvail) { framesReq = framesAvail; } uint32_t u = cblk->user; uint32_t bufferEnd = cblk->userBase + cblk->frameCount; if (u + framesReq > bufferEnd) { framesReq = bufferEnd - u; } audioBuffer->flags = mMuted ? Buffer::MUTE : 0; audioBuffer->channelCount = mChannelCount; audioBuffer->frameCount = framesReq; audioBuffer->size = framesReq * cblk->frameSize; if (audio_is_linear_pcm(mFormat)) { audioBuffer->format = AUDIO_FORMAT_PCM_16_BIT; } else { audioBuffer->format = mFormat; } audioBuffer->raw = (int8_t *)cblk->buffer(u); active = mActive; return active ? status_t(NO_ERROR) : status_t(STOPPED); }
/* Core fnmatch matcher (instantiated for single-byte and wide-char
   variants via the CHAR/UCHAR/L_/FOLD macros).  Matches STRING
   (ending at STRING_END) against PATTERN, honoring FNM_* flags:
   '?' matches one character, '*' any run, '[...]' bracket expressions
   including POSIX character classes [:class:], equivalence classes
   [=c=] and collating symbols [.sym.], plus ksh-style extended
   patterns when FNM_EXTMATCH is set.  Returns 0 on match,
   FNM_NOMATCH otherwise.
   FIX(review): the collating-symbol code adjusted idx for 4-byte
   alignment with 'idx = (idx + 3) & ~4'.  '& ~4' clears bit 2 and is
   not an alignment mask at all (e.g. idx==1 gives 0); the parallel
   wide-char branch in this same function correctly uses '& ~3'.  The
   three bad masks below are changed to '& ~3'.  */
static int internal_function FCT (const CHAR *pattern, const CHAR *string, const CHAR *string_end, bool no_leading_period, int flags) { register const CHAR *p = pattern, *n = string; register UCHAR c; #ifdef _LIBC # if WIDE_CHAR_VERSION const char *collseq = (const char *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQWC); # else const UCHAR *collseq = (const UCHAR *) _NL_CURRENT(LC_COLLATE, _NL_COLLATE_COLLSEQMB); # endif #endif while ((c = *p++) != L_('\0')) { bool new_no_leading_period = false; c = FOLD (c); switch (c) { case L_('?'): if (__builtin_expect (flags & FNM_EXTMATCH, 0) && *p == '(') { int res; res = EXT (c, p, n, string_end, no_leading_period, flags); if (res != -1) return res; } if (n == string_end) return FNM_NOMATCH; else if (*n == L_('/') && (flags & FNM_FILE_NAME)) return FNM_NOMATCH; else if (*n == L_('.') && no_leading_period) return FNM_NOMATCH; break; case L_('\\'): if (!(flags & FNM_NOESCAPE)) { c = *p++; if (c == L_('\0')) /* Trailing \ loses. */ return FNM_NOMATCH; c = FOLD (c); } if (n == string_end || FOLD ((UCHAR) *n) != c) return FNM_NOMATCH; break; case L_('*'): if (__builtin_expect (flags & FNM_EXTMATCH, 0) && *p == '(') { int res; res = EXT (c, p, n, string_end, no_leading_period, flags); if (res != -1) return res; } if (n != string_end && *n == L_('.') && no_leading_period) return FNM_NOMATCH; for (c = *p++; c == L_('?') || c == L_('*'); c = *p++) { if (*p == L_('(') && (flags & FNM_EXTMATCH) != 0) { const CHAR *endp = END (p); if (endp != p) { /* This is a pattern. Skip over it. */ p = endp; continue; } } if (c == L_('?')) { /* A ? needs to match one character. */ if (n == string_end) /* There isn't another character; no match. */ return FNM_NOMATCH; else if (*n == L_('/') && __builtin_expect (flags & FNM_FILE_NAME, 0)) /* A slash does not match a wildcard under FNM_FILE_NAME. */ return FNM_NOMATCH; else /* One character of the string is consumed in matching this ? wildcard, so *??? 
won't match if there are less than three characters. */ ++n; } } if (c == L_('\0')) /* The wildcard(s) is/are the last element of the pattern. If the name is a file name and contains another slash this means it cannot match, unless the FNM_LEADING_DIR flag is set. */ { int result = (flags & FNM_FILE_NAME) == 0 ? 0 : FNM_NOMATCH; if (flags & FNM_FILE_NAME) { if (flags & FNM_LEADING_DIR) result = 0; else { if (MEMCHR (n, L_('/'), string_end - n) == NULL) result = 0; } } return result; } else { const CHAR *endp; endp = MEMCHR (n, (flags & FNM_FILE_NAME) ? L_('/') : L_('\0'), string_end - n); if (endp == NULL) endp = string_end; if (c == L_('[') || (__builtin_expect (flags & FNM_EXTMATCH, 0) != 0 && (c == L_('@') || c == L_('+') || c == L_('!')) && *p == L_('('))) { int flags2 = ((flags & FNM_FILE_NAME) ? flags : (flags & ~FNM_PERIOD)); bool no_leading_period2 = no_leading_period; for (--p; n < endp; ++n, no_leading_period2 = false) if (FCT (p, n, string_end, no_leading_period2, flags2) == 0) return 0; } else if (c == L_('/') && (flags & FNM_FILE_NAME)) { while (n < string_end && *n != L_('/')) ++n; if (n < string_end && *n == L_('/') && (FCT (p, n + 1, string_end, flags & FNM_PERIOD, flags) == 0)) return 0; } else { int flags2 = ((flags & FNM_FILE_NAME) ? flags : (flags & ~FNM_PERIOD)); int no_leading_period2 = no_leading_period; if (c == L_('\\') && !(flags & FNM_NOESCAPE)) c = *p; c = FOLD (c); for (--p; n < endp; ++n, no_leading_period2 = false) if (FOLD ((UCHAR) *n) == c && (FCT (p, n, string_end, no_leading_period2, flags2) == 0)) return 0; } } /* If we come here no match is possible with the wildcard. */ return FNM_NOMATCH; case L_('['): { /* Nonzero if the sense of the character class is inverted. */ register bool not; CHAR cold; UCHAR fn; if (posixly_correct == 0) posixly_correct = getenv ("POSIXLY_CORRECT") != NULL ? 
1 : -1; if (n == string_end) return FNM_NOMATCH; if (*n == L_('.') && no_leading_period) return FNM_NOMATCH; if (*n == L_('/') && (flags & FNM_FILE_NAME)) /* `/' cannot be matched. */ return FNM_NOMATCH; not = (*p == L_('!') || (posixly_correct < 0 && *p == L_('^'))); if (not) ++p; fn = FOLD ((UCHAR) *n); c = *p++; for (;;) { if (!(flags & FNM_NOESCAPE) && c == L_('\\')) { if (*p == L_('\0')) return FNM_NOMATCH; c = FOLD ((UCHAR) *p); ++p; if (c == fn) goto matched; } else if (c == L_('[') && *p == L_(':')) { /* Leave room for the null. */ CHAR str[CHAR_CLASS_MAX_LENGTH + 1]; size_t c1 = 0; #if defined _LIBC || WIDE_CHAR_SUPPORT wctype_t wt; #endif const CHAR *startp = p; for (;;) { if (c1 == CHAR_CLASS_MAX_LENGTH) /* The name is too long and therefore the pattern is ill-formed. */ return FNM_NOMATCH; c = *++p; if (c == L_(':') && p[1] == L_(']')) { p += 2; break; } if (c < L_('a') || c >= L_('z')) { /* This cannot possibly be a character class name. Match it as a normal range. */ p = startp; c = L_('['); goto normal_bracket; } str[c1++] = c; } str[c1] = L_('\0'); #if defined _LIBC || WIDE_CHAR_SUPPORT wt = IS_CHAR_CLASS (str); if (wt == 0) /* Invalid character class name. */ return FNM_NOMATCH; # if defined _LIBC && ! WIDE_CHAR_VERSION /* The following code is glibc specific but does there a good job in speeding up the code since we can avoid the btowc() call. 
*/ if (_ISCTYPE ((UCHAR) *n, wt)) goto matched; # else if (ISWCTYPE (BTOWC ((UCHAR) *n), wt)) goto matched; # endif #else if ((STREQ (str, L_("alnum")) && ISALNUM ((UCHAR) *n)) || (STREQ (str, L_("alpha")) && ISALPHA ((UCHAR) *n)) || (STREQ (str, L_("blank")) && ISBLANK ((UCHAR) *n)) || (STREQ (str, L_("cntrl")) && ISCNTRL ((UCHAR) *n)) || (STREQ (str, L_("digit")) && ISDIGIT ((UCHAR) *n)) || (STREQ (str, L_("graph")) && ISGRAPH ((UCHAR) *n)) || (STREQ (str, L_("lower")) && ISLOWER ((UCHAR) *n)) || (STREQ (str, L_("print")) && ISPRINT ((UCHAR) *n)) || (STREQ (str, L_("punct")) && ISPUNCT ((UCHAR) *n)) || (STREQ (str, L_("space")) && ISSPACE ((UCHAR) *n)) || (STREQ (str, L_("upper")) && ISUPPER ((UCHAR) *n)) || (STREQ (str, L_("xdigit")) && ISXDIGIT ((UCHAR) *n))) goto matched; #endif c = *p++; } #ifdef _LIBC else if (c == L_('[') && *p == L_('=')) { UCHAR str[1]; uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); const CHAR *startp = p; c = *++p; if (c == L_('\0')) { p = startp; c = L_('['); goto normal_bracket; } str[0] = c; c = *++p; if (c != L_('=') || p[1] != L_(']')) { p = startp; c = L_('['); goto normal_bracket; } p += 2; if (nrules == 0) { if ((UCHAR) *n == str[0]) goto matched; } else { const int32_t *table; # if WIDE_CHAR_VERSION const int32_t *weights; const int32_t *extra; # else const unsigned char *weights; const unsigned char *extra; # endif const int32_t *indirect; int32_t idx; const UCHAR *cp = (const UCHAR *) str; /* This #include defines a local function! 
*/ # if WIDE_CHAR_VERSION # include <locale/weightwc.h> # else # include <locale/weight.h> # endif # if WIDE_CHAR_VERSION table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEWC); weights = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTWC); extra = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAWC); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTWC); # else table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_TABLEMB); weights = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_WEIGHTMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_EXTRAMB); indirect = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_INDIRECTMB); # endif idx = findidx (&cp); if (idx != 0) { /* We found a table entry. Now see whether the character we are currently at has the same equivalance class value. */ int len = weights[idx]; int32_t idx2; const UCHAR *np = (const UCHAR *) n; idx2 = findidx (&np); if (idx2 != 0 && len == weights[idx2]) { int cnt = 0; while (cnt < len && (weights[idx + 1 + cnt] == weights[idx2 + 1 + cnt])) ++cnt; if (cnt == len) goto matched; } } } c = *p++; } #endif else if (c == L_('\0')) /* [ (unterminated) loses. */ return FNM_NOMATCH; else { bool is_range = false; #ifdef _LIBC bool is_seqval = false; if (c == L_('[') && *p == L_('.')) { uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); const CHAR *startp = p; size_t c1 = 0; while (1) { c = *++p; if (c == L_('.') && p[1] == L_(']')) { p += 2; break; } if (c == '\0') return FNM_NOMATCH; ++c1; } /* We have to handling the symbols differently in ranges since then the collation sequence is important. */ is_range = *p == L_('-') && p[1] != L_('\0'); if (nrules == 0) { /* There are no names defined in the collation data. Therefore we only accept the trivial names consisting of the character itself. 
*/ if (c1 != 1) return FNM_NOMATCH; if (!is_range && *n == startp[1]) goto matched; cold = startp[1]; c = *p++; } else { int32_t table_size; const int32_t *symb_table; # ifdef WIDE_CHAR_VERSION char str[c1]; size_t strcnt; # else # define str (startp + 1) # endif const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; # ifdef WIDE_CHAR_VERSION /* We have to convert the name to a single-byte string. This is possible since the names consist of ASCII characters and the internal representation is UCS4. */ for (strcnt = 0; strcnt < c1; ++strcnt) str[strcnt] = startp[1 + strcnt]; # endif table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && c1 == extra[symb_table[2 * elem + 1]] && memcmp (str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] != 0) { /* Compare the byte sequence but only if this is not part of a range. */ # ifdef WIDE_CHAR_VERSION int32_t *wextra; idx += 1 + extra[idx]; /* Adjust for the alignment. */ idx = (idx + 3) & ~3; wextra = (int32_t *) &extra[idx + 4]; # endif if (! is_range) { # ifdef WIDE_CHAR_VERSION for (c1 = 0; (int32_t) c1 < wextra[idx]; ++c1) if (n[c1] != wextra[1 + c1]) break; if ((int32_t) c1 == wextra[idx]) goto matched; # else for (c1 = 0; c1 < extra[idx]; ++c1) if (n[c1] != extra[1 + c1]) break; if (c1 == extra[idx]) goto matched; # endif } /* Get the collation sequence value. 
*/ is_seqval = true; # ifdef WIDE_CHAR_VERSION cold = wextra[1 + wextra[idx]]; # else /* Adjust for the alignment. */ idx += 1 + extra[idx]; idx = (idx + 3) & ~3; /* fix: was '& ~4', which is not a 4-byte alignment mask */ cold = *((int32_t *) &extra[idx]); # endif c = *p++; } else if (c1 == 1) { /* No valid character. Match it as a single byte. */ if (!is_range && *n == str[0]) goto matched; cold = str[0]; c = *p++; } else return FNM_NOMATCH; } } else # undef str #endif { c = FOLD (c); normal_bracket: /* We have to handling the symbols differently in ranges since then the collation sequence is important. */ is_range = (*p == L_('-') && p[1] != L_('\0') && p[1] != L_(']')); if (!is_range && c == fn) goto matched; cold = c; c = *p++; } if (c == L_('-') && *p != L_(']')) { #if _LIBC /* We have to find the collation sequence value for C. Collation sequence is nothing we can regularly access. The sequence value is defined by the order in which the definitions of the collation values for the various characters appear in the source file. A strange concept, nowhere documented. */ uint32_t fcollseq; uint32_t lcollseq; UCHAR cend = *p++; # ifdef WIDE_CHAR_VERSION /* Search in the `names' array for the characters. */ fcollseq = __collseq_table_lookup (collseq, fn); if (fcollseq == ~((uint32_t) 0)) /* XXX We don't know anything about the character we are supposed to match. This means we are failing. */ goto range_not_matched; if (is_seqval) lcollseq = cold; else lcollseq = __collseq_table_lookup (collseq, cold); # else fcollseq = collseq[fn]; lcollseq = is_seqval ? cold : collseq[(UCHAR) cold]; # endif is_seqval = false; if (cend == L_('[') && *p == L_('.')) { uint32_t nrules = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_NRULES); const CHAR *startp = p; size_t c1 = 0; while (1) { c = *++p; if (c == L_('.') && p[1] == L_(']')) { p += 2; break; } if (c == '\0') return FNM_NOMATCH; ++c1; } if (nrules == 0) { /* There are no names defined in the collation data. Therefore we only accept the trivial names consisting of the character itself. 
*/ if (c1 != 1) return FNM_NOMATCH; cend = startp[1]; } else { int32_t table_size; const int32_t *symb_table; # ifdef WIDE_CHAR_VERSION char str[c1]; size_t strcnt; # else # define str (startp + 1) # endif const unsigned char *extra; int32_t idx; int32_t elem; int32_t second; int32_t hash; # ifdef WIDE_CHAR_VERSION /* We have to convert the name to a single-byte string. This is possible since the names consist of ASCII characters and the internal representation is UCS4. */ for (strcnt = 0; strcnt < c1; ++strcnt) str[strcnt] = startp[1 + strcnt]; # endif table_size = _NL_CURRENT_WORD (LC_COLLATE, _NL_COLLATE_SYMB_HASH_SIZEMB); symb_table = (const int32_t *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_TABLEMB); extra = (const unsigned char *) _NL_CURRENT (LC_COLLATE, _NL_COLLATE_SYMB_EXTRAMB); /* Locate the character in the hashing table. */ hash = elem_hash (str, c1); idx = 0; elem = hash % table_size; second = hash % (table_size - 2); while (symb_table[2 * elem] != 0) { /* First compare the hashing value. */ if (symb_table[2 * elem] == hash && (c1 == extra[symb_table[2 * elem + 1]]) && memcmp (str, &extra[symb_table[2 * elem + 1] + 1], c1) == 0) { /* Yep, this is the entry. */ idx = symb_table[2 * elem + 1]; idx += 1 + extra[idx]; break; } /* Next entry. */ elem += second; } if (symb_table[2 * elem] != 0) { /* Compare the byte sequence but only if this is not part of a range. */ # ifdef WIDE_CHAR_VERSION int32_t *wextra; idx += 1 + extra[idx]; /* Adjust for the alignment. */ idx = (idx + 3) & ~3; /* fix: was '& ~4' */ wextra = (int32_t *) &extra[idx + 4]; # endif /* Get the collation sequence value. */ is_seqval = true; # ifdef WIDE_CHAR_VERSION cend = wextra[1 + wextra[idx]]; # else /* Adjust for the alignment. 
*/ idx += 1 + extra[idx]; idx = (idx + 3) & ~3; /* fix: was '& ~4' */ cend = *((int32_t *) &extra[idx]); # endif } /* NOTE(review): dead branch -- the enclosing if already required symb_table[2 * elem] != 0, so this can never fire; the parallel code above uses plain (c1 == 1).  Left unchanged to preserve behavior. */ else if (symb_table[2 * elem] != 0 && c1 == 1) { cend = str[0]; c = *p++; } else return FNM_NOMATCH; } # undef str } else { if (!(flags & FNM_NOESCAPE) && cend == L_('\\')) cend = *p++; if (cend == L_('\0')) return FNM_NOMATCH; cend = FOLD (cend); } /* XXX It is not entirely clear to me how to handle characters which are not mentioned in the collation specification. */ if ( # ifdef WIDE_CHAR_VERSION lcollseq == 0xffffffff || # endif lcollseq <= fcollseq) { /* We have to look at the upper bound. */ uint32_t hcollseq; if (is_seqval) hcollseq = cend; else { # ifdef WIDE_CHAR_VERSION hcollseq = __collseq_table_lookup (collseq, cend); if (hcollseq == ~((uint32_t) 0)) { /* Hum, no information about the upper bound. The matching succeeds if the lower bound is matched exactly. */ if (lcollseq != fcollseq) goto range_not_matched; goto matched; } # else hcollseq = collseq[cend]; # endif } if (lcollseq <= hcollseq && fcollseq <= hcollseq) goto matched; } # ifdef WIDE_CHAR_VERSION range_not_matched: # endif #else /* We use a boring value comparison of the character values. This is better than comparing using `strcoll' since the latter would have surprising and sometimes fatal consequences. */ UCHAR cend = *p++; if (!(flags & FNM_NOESCAPE) && cend == L_('\\')) cend = *p++; if (cend == L_('\0')) return FNM_NOMATCH; /* It is a range. */ if (cold <= fn && fn <= cend) goto matched; #endif c = *p++; } } if (c == L_(']')) break; } if (!not) return FNM_NOMATCH; break; matched: /* Skip the rest of the [...] that already matched. */ do { ignore_next: c = *p++; if (c == L_('\0')) /* [... (unterminated) loses. */ return FNM_NOMATCH; if (!(flags & FNM_NOESCAPE) && c == L_('\\')) { if (*p == L_('\0')) return FNM_NOMATCH; /* XXX 1003.2d11 is unclear if this is right. 
*/ ++p; } else if (c == L_('[') && *p == L_(':')) { int c1 = 0; const CHAR *startp = p; while (1) { c = *++p; if (++c1 == CHAR_CLASS_MAX_LENGTH) return FNM_NOMATCH; if (*p == L_(':') && p[1] == L_(']')) break; if (c < L_('a') || c >= L_('z')) { p = startp; goto ignore_next; } } p += 2; c = *p++; } else if (c == L_('[') && *p == L_('=')) { c = *++p; if (c == L_('\0')) return FNM_NOMATCH; c = *++p; if (c != L_('=') || p[1] != L_(']')) return FNM_NOMATCH; p += 2; c = *p++; } else if (c == L_('[') && *p == L_('.')) { ++p; while (1) { c = *++p; if (c == '\0') return FNM_NOMATCH; if (*p == L_('.') && p[1] == L_(']')) break; } p += 2; c = *p++; } } while (c != L_(']')); if (not) return FNM_NOMATCH; } break; case L_('+'): case L_('@'): case L_('!'): if (__builtin_expect (flags & FNM_EXTMATCH, 0) && *p == '(') { int res; res = EXT (c, p, n, string_end, no_leading_period, flags); if (res != -1) return res; } goto normal_match; case L_('/'): if (NO_LEADING_PERIOD (flags)) { if (n == string_end || c != (UCHAR) *n) return FNM_NOMATCH; new_no_leading_period = true; break; } /* FALLTHROUGH */ default: normal_match: if (n == string_end || c != FOLD ((UCHAR) *n)) return FNM_NOMATCH; } no_leading_period = new_no_leading_period; ++n; } if (n == string_end) return 0; if ((flags & FNM_LEADING_DIR) && n != string_end && *n == L_('/')) /* The FNM_LEADING_DIR flag says that "foo*" matches "foobar/frobozz". */ return 0; return FNM_NOMATCH; }
/* NOTE(review): return value is the number of seconds until the next
   entry expires (24h when the table is unused or nothing is pending).
   fd != -1 means this pass answers an INVALIDATE request: a 0 reply is
   written even when nothing had to be pruned.  The two-phase design
   below (mark without the lock, then trywrlock/wrlock to unlink) is the
   whole point of the mark[] array -- keep the phases separate.  The
   mark array lives on the stack via alloca only when it fits in
   MAX_STACK_USE; otherwise it is xcalloc'd and freed at the end.  */
/* Walk through the table and remove all entries which lifetime ended. We have a problem here. To actually remove the entries we must get the write-lock. But since we want to keep the time we have the lock as short as possible we cannot simply acquire the lock when we start looking for timedout entries. Therefore we do it in two stages: first we look for entries which must be invalidated and remember them. Then we get the lock and actually remove them. This is complicated by the way we have to free the data structures since some hash table entries share the same data. */ time_t prune_cache (struct database_dyn *table, time_t now, int fd) { size_t cnt = table->head->module; /* If this table is not actually used don't do anything. */ if (cnt == 0) { if (fd != -1) { /* Reply to the INVALIDATE initiator. */ int32_t resp = 0; writeall (fd, &resp, sizeof (resp)); } /* No need to do this again anytime soon. */ return 24 * 60 * 60; } /* If we check for the modification of the underlying file we invalidate the entries also in this case. */ if (table->check_file && now != LONG_MAX) { struct traced_file *runp = table->traced_files; while (runp != NULL) { #ifdef HAVE_INOTIFY if (runp->inotify_descr == -1) #endif { struct stat64 st; if (stat64 (runp->fname, &st) < 0) { char buf[128]; /* We cannot stat() the file, disable file checking if the file does not exist. */ dbg_log (_("cannot stat() file `%s': %s"), runp->fname, strerror_r (errno, buf, sizeof (buf))); if (errno == ENOENT) table->check_file = 0; } else { if (st.st_mtime != table->file_mtime) { /* The file changed. Invalidate all entries. */ now = LONG_MAX; table->file_mtime = st.st_mtime; } } } runp = runp->next; } } /* We run through the table and find values which are not valid anymore. Note that for the initial step, finding the entries to be removed, we don't need to get any lock. It is at all timed assured that the linked lists are set up correctly and that no second thread prunes the cache. 
*/ bool *mark; size_t memory_needed = cnt * sizeof (bool); bool mark_use_alloca; if (__glibc_likely (memory_needed <= MAX_STACK_USE)) { mark = alloca (cnt * sizeof (bool)); memset (mark, '\0', memory_needed); mark_use_alloca = true; } else { mark = xcalloc (1, memory_needed); mark_use_alloca = false; } size_t first = cnt + 1; size_t last = 0; char *const data = table->data; bool any = false; if (__glibc_unlikely (debug_level > 2)) dbg_log (_("pruning %s cache; time %ld"), dbnames[table - dbs], (long int) now); #define NO_TIMEOUT LONG_MAX time_t next_timeout = NO_TIMEOUT; do { ref_t run = table->head->array[--cnt]; while (run != ENDREF) { struct hashentry *runp = (struct hashentry *) (data + run); struct datahead *dh = (struct datahead *) (data + runp->packet); /* Some debug support. */ if (__glibc_unlikely (debug_level > 2)) { char buf[INET6_ADDRSTRLEN]; const char *str; if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6) { inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6, data + runp->key, buf, sizeof (buf)); str = buf; } else str = data + runp->key; dbg_log (_("considering %s entry \"%s\", timeout %" PRIu64), serv2str[runp->type], str, dh->timeout); } /* Check whether the entry timed out. */ if (dh->timeout < now) { /* This hash bucket could contain entries which need to be looked at. */ mark[cnt] = true; first = MIN (first, cnt); last = MAX (last, cnt); /* We only have to look at the data of the first entries since the count information is kept in the data part which is shared. */ if (runp->first) { /* At this point there are two choices: we reload the value or we discard it. Do not change NRELOADS if we never not reload the record. */ if ((reload_count != UINT_MAX && __builtin_expect (dh->nreloads >= reload_count, 0)) /* We always remove negative entries. */ || dh->notfound /* Discard everything if the user explicitly requests it. */ || now == LONG_MAX) { /* Remove the value. 
*/ dh->usable = false; /* We definitely have some garbage entries now. */ any = true; } else { /* Reload the value. We do this only for the initially used key, not the additionally added derived value. */ assert (runp->type < LASTREQ && readdfcts[runp->type] != NULL); time_t timeout = readdfcts[runp->type] (table, runp, dh); next_timeout = MIN (next_timeout, timeout); /* If the entry has been replaced, we might need cleanup. */ any |= !dh->usable; } } } else { assert (dh->usable); next_timeout = MIN (next_timeout, dh->timeout); } run = runp->next; } } while (cnt > 0); if (__glibc_unlikely (fd != -1)) { /* Reply to the INVALIDATE initiator that the cache has been invalidated. */ int32_t resp = 0; writeall (fd, &resp, sizeof (resp)); } if (first <= last) { struct hashentry *head = NULL; /* Now we have to get the write lock since we are about to modify the table. */ if (__glibc_unlikely (pthread_rwlock_trywrlock (&table->lock) != 0)) { ++table->head->wrlockdelayed; pthread_rwlock_wrlock (&table->lock); } while (first <= last) { if (mark[first]) { ref_t *old = &table->head->array[first]; ref_t run = table->head->array[first]; assert (run != ENDREF); do { struct hashentry *runp = (struct hashentry *) (data + run); struct datahead *dh = (struct datahead *) (data + runp->packet); if (! dh->usable) { /* We need the list only for debugging but it is more costly to avoid creating the list than doing it. */ runp->dellist = head; head = runp; /* No need for an atomic operation, we have the write lock. */ --table->head->nentries; run = *old = runp->next; } else { old = &runp->next; run = runp->next; } } while (run != ENDREF); } ++first; } /* It's all done. */ pthread_rwlock_unlock (&table->lock); /* Make sure the data is saved to disk. */ if (table->persistent) msync (table->head, data + table->head->first_free - (char *) table->head, MS_ASYNC); /* One extra pass if we do debugging. 
*/ if (__glibc_unlikely (debug_level > 0)) { struct hashentry *runp = head; while (runp != NULL) { char buf[INET6_ADDRSTRLEN]; const char *str; if (runp->type == GETHOSTBYADDR || runp->type == GETHOSTBYADDRv6) { inet_ntop (runp->type == GETHOSTBYADDR ? AF_INET : AF_INET6, data + runp->key, buf, sizeof (buf)); str = buf; } else str = data + runp->key; dbg_log ("remove %s entry \"%s\"", serv2str[runp->type], str); runp = runp->dellist; } } } if (__glibc_unlikely (! mark_use_alloca)) free (mark); /* Run garbage collection if any entry has been removed or replaced. */ if (any) gc (table); /* If there is no entry in the database and we therefore have no new timeout value, tell the caller to wake up in 24 hours. */ return next_timeout == NO_TIMEOUT ? 24 * 60 * 60 : next_timeout - now; }
/* NOTE(review): the whole update runs under LOCK/UNLOCK.  The linear
   scan counts entries in `size' until it finds "NAME=" or the end; on
   the add path, realloc is applied to last_environ (the array this
   module itself allocated), and if some other code installed a
   different __environ in the meantime, that array's pointers are copied
   into the new one (the strings themselves are never freed -- values
   may be shared via the KNOWN_VALUE/STORE_VALUE tree under
   USE_TSEARCH).  Returns 0 on success, -1 on allocation failure
   (ENOMEM is set explicitly only on the malloc path).  */
/* This function is used by `setenv' and `putenv'. The difference between the two functions is that for the former must create a new string which is then placed in the environment, while the argument of `putenv' must be used directly. This is all complicated by the fact that we try to reuse values once generated for a `setenv' call since we can never free the strings. */ static int __add_to_environ (const char *name, const char *value, const char *combined, int replace) { register char **ep; register size_t size; const size_t namelen = strlen (name); const size_t vallen = value != NULL ? strlen (value) + 1 : 0; LOCK; /* We have to get the pointer now that we have the lock and not earlier since another thread might have created a new environment. */ ep = __environ; size = 0; if (ep != NULL) { for (; *ep != NULL; ++ep) if (!strncmp (*ep, name, namelen) && (*ep)[namelen] == '=') break; else ++size; } if (ep == NULL || __builtin_expect (*ep == NULL, 1)) { char **new_environ; /* We allocated this space; we can extend it. */ new_environ = (char **) realloc (last_environ, (size + 2) * sizeof (char *)); if (new_environ == NULL) { UNLOCK; return -1; } /* If the whole entry is given add it. */ if (combined != NULL) /* We must not add the string to the search tree since it belongs to the user. */ new_environ[size] = (char *) combined; else { /* See whether the value is already known. 
*/ #ifdef USE_TSEARCH # ifdef __GNUC__ char new_value[namelen + 1 + vallen]; # else char *new_value = (char *) alloca (namelen + 1 + vallen); # endif # ifdef _LIBC __mempcpy (__mempcpy (__mempcpy (new_value, name, namelen), "=", 1), value, vallen); # else memcpy (new_value, name, namelen); new_value[namelen] = '='; memcpy (&new_value[namelen + 1], value, vallen); # endif new_environ[size] = KNOWN_VALUE (new_value); if (__builtin_expect (new_environ[size] == NULL, 1)) #endif { new_environ[size] = (char *) malloc (namelen + 1 + vallen); if (__builtin_expect (new_environ[size] == NULL, 0)) { __set_errno (ENOMEM); UNLOCK; return -1; } #ifdef USE_TSEARCH memcpy (new_environ[size], new_value, namelen + 1 + vallen); #else memcpy (new_environ[size], name, namelen); new_environ[size][namelen] = '='; memcpy (&new_environ[size][namelen + 1], value, vallen); #endif /* And save the value now. We cannot do this when we remove the string since then we cannot decide whether it is a user string or not. */ STORE_VALUE (new_environ[size]); } } if (__environ != last_environ) memcpy ((char *) new_environ, (char *) __environ, size * sizeof (char *)); new_environ[size + 1] = NULL; last_environ = __environ = new_environ; } else if (replace) { char *np; /* Use the user string if given. 
*/ if (combined != NULL) np = (char *) combined; else { #ifdef USE_TSEARCH # ifdef __GNUC__ char new_value[namelen + 1 + vallen]; # else char *new_value = (char *) alloca (namelen + 1 + vallen); # endif # ifdef _LIBC __mempcpy (__mempcpy (__mempcpy (new_value, name, namelen), "=", 1), value, vallen); # else memcpy (new_value, name, namelen); new_value[namelen] = '='; memcpy (&new_value[namelen + 1], value, vallen); # endif np = KNOWN_VALUE (new_value); if (__builtin_expect (np == NULL, 1)) #endif { np = malloc (namelen + 1 + vallen); if (__builtin_expect (np == NULL, 0)) { UNLOCK; return -1; } #ifdef USE_TSEARCH memcpy (np, new_value, namelen + 1 + vallen); #else memcpy (np, name, namelen); np[namelen] = '='; memcpy (&np[namelen + 1], value, vallen); #endif /* And remember the value. */ STORE_VALUE (np); } } *ep = np; } UNLOCK; return 0; }
/* __ieee754_lgammaf_r (x, signgamp)
   Single-precision log(|Gamma(x)|); the sign of Gamma(x) (+1 or -1) is
   stored in *SIGNGAMP.  Classic SunPro/fdlibm algorithm: the argument
   is reduced into small intervals around the minima of lgamma and
   evaluated with minimax polynomials; large arguments use Stirling's
   formula; negative arguments use the reflection formula
     lgamma(-x) = log(pi/|x*sin(pi*x)|) - lgamma(x).  */
float
__ieee754_lgammaf_r(float x, int *signgamp)
{
	float t,y,z,nadj,p,p1,p2,p3,q,r,w;
	int i,hx,ix;

	GET_FLOAT_WORD(hx,x);

    /* purge off +-inf, NaN, +-0, and negative arguments */
	*signgamp = 1;
	ix = hx&0x7fffffff;
	if(__builtin_expect(ix>=0x7f800000, 0)) return x*x; /* inf/NaN propagate */
	if(__builtin_expect(ix==0, 0))
	  {
	    /* lgamma(+-0) = +inf; the sign of Gamma follows the sign of x.  */
	    if (hx < 0)
	      *signgamp = -1;
	    return one/fabsf(x);
	  }
	if(__builtin_expect(ix<0x1c800000, 0)) {
	    /* |x|<2**-70, return -log(|x|) */
	    if(hx<0) {
		*signgamp = -1;
		return -__ieee754_logf(-x);
	    } else return -__ieee754_logf(x);
	}
	if(hx<0) {
	    if(ix>=0x4b000000) 	/* |x|>=2**23, must be -integer */
		return x/zero;	/* raises divide-by-zero; result is +inf */
	    t = sin_pif(x);
	    if(t==zero) return one/fabsf(t); /* -integer */
	    /* Reflection: nadj is added back in at the very end.  */
	    nadj = __ieee754_logf(pi/fabsf(t*x));
	    if(t<zero) *signgamp = -1;
	    x = -x;
	}

    /* purge off 1 and 2 */
	if (ix==0x3f800000||ix==0x40000000) r = 0;
    /* for x < 2.0 */
	else if(ix<0x40000000) {
	    if(ix<=0x3f666666) { 	/* lgamma(x) = lgamma(x+1)-log(x) */
		r = -__ieee754_logf(x);
		if(ix>=0x3f3b4a20) {y = one-x; i= 0;}
		else if(ix>=0x3e6d3308) {y= x-(tc-one); i=1;}
		else {y = x; i=2;}
	    } else {
		r = zero;
		/* Select the interval (and polynomial set) y falls in.  */
		if(ix>=0x3fdda618) {y=(float)2.0-x;i=0;} /* [1.7316,2] */
		else if(ix>=0x3F9da620) {y=x-tc;i=1;} /* [1.23,1.73] */
		else {y=x-one;i=2;}
	    }
	    switch(i) {
	      case 0:
		z = y*y;
		p1 = a0+z*(a2+z*(a4+z*(a6+z*(a8+z*a10))));
		p2 = z*(a1+z*(a3+z*(a5+z*(a7+z*(a9+z*a11)))));
		p  = y*p1+p2;
		r  += (p-(float)0.5*y); break;
	      case 1:
		z = y*y;
		w = z*y;
		p1 = t0+w*(t3+w*(t6+w*(t9 +w*t12)));	/* parallel comp */
		p2 = t1+w*(t4+w*(t7+w*(t10+w*t13)));
		p3 = t2+w*(t5+w*(t8+w*(t11+w*t14)));
		p  = z*p1-(tt-w*(p2+y*p3));
		r += (tf + p); break;
	      case 2:
		p1 = y*(u0+y*(u1+y*(u2+y*(u3+y*(u4+y*u5)))));
		p2 = one+y*(v1+y*(v2+y*(v3+y*(v4+y*v5))));
		r += (-(float)0.5*y + p1/p2);
	    }
	}
	else if(ix<0x41000000) { 			/* x < 8.0 */
	    i = (int)x;
	    t = zero;
	    y = x-(float)i;
	    p = y*(s0+y*(s1+y*(s2+y*(s3+y*(s4+y*(s5+y*s6))))));
	    q = one+y*(r1+y*(r2+y*(r3+y*(r4+y*(r5+y*r6)))));
	    r = half*y+p/q;
	    z = one;	/* lgamma(1+s) = log(s) + lgamma(s) */
	    /* Peel off integer factors: Gamma(x) = (x-1)(x-2)...Gamma(y+2).  */
	    switch(i) {
	    case 7: z *= (y+(float)6.0);	/* FALLTHRU */
	    case 6: z *= (y+(float)5.0);	/* FALLTHRU */
	    case 5: z *= (y+(float)4.0);	/* FALLTHRU */
	    case 4: z *= (y+(float)3.0);	/* FALLTHRU */
	    case 3: z *= (y+(float)2.0);
		    r += __ieee754_logf(z); break;
	    }
    /* 8.0 <= x < 2**58 */
	} else if (ix < 0x5c800000) {
	    /* Stirling's formula with a correction polynomial in 1/x.  */
	    t = __ieee754_logf(x);
	    z = one/x;
	    y = z*z;
	    w = w0+z*(w1+y*(w2+y*(w3+y*(w4+y*(w5+y*w6)))));
	    r = (x-half)*(t-one)+w;
	} else
    /* 2**58 <= x <= inf */
	    r =  x*(__ieee754_logf(x)-one);
	if(hx<0) r = nadj - r;
	return r;
}
/* Open a directory stream on NAME.  Returns the handle produced by
   __alloc_dir, or NULL with errno set on failure.  */
DIR *
__opendir (const char *name)
{
  struct stat64 statbuf;
  struct stat64 *statp = NULL;

  if (__builtin_expect (name[0], '\1') == '\0')
    {
      /* POSIX.1-1990 says an empty name gets ENOENT;
	 but `open' might like it fine.  */
      __set_errno (ENOENT);
      return NULL;
    }

#ifdef O_DIRECTORY
  /* Test whether O_DIRECTORY works.  */
  if (o_directory_works == 0)
    tryopen_o_directory ();

  /* We can skip the expensive `stat' call if O_DIRECTORY works.  */
  if (o_directory_works < 0)
#endif
    {
      /* We first have to check whether the name is for a directory.  We
	 cannot do this after the open() call since the open/close operation
	 performed on, say, a tape device might have undesirable effects.  */
      if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0)
	return NULL;
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	  return NULL;
	}
    }

  int flags = O_RDONLY|O_NDELAY|EXTRA_FLAGS|O_LARGEFILE;
#ifdef O_CLOEXEC
  flags |= O_CLOEXEC;
#endif
  int fd = open_not_cancel_2 (name, flags);
  if (__builtin_expect (fd, 0) < 0)
    return NULL;

#ifdef O_DIRECTORY
  if (o_directory_works <= 0)
#endif
    {
      /* Now make sure this really is a directory and nothing changed since
	 the `stat' call.  */
      if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0)
	goto lose;
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	lose:
	  close_not_cancel_no_status (fd);
	  return NULL;
	}
      /* Hand the stat result on so __alloc_dir can size its buffer.  */
      statp = &statbuf;
    }

  return __alloc_dir (fd, true, statp);
}
/* Return the address of the calling thread's instance of the emulated
   TLS variable described by OBJ, allocating it on first use.  */
void *
__emutls_get_address (struct __emutls_object *obj)
{
  /* Single-threaded program: the control object itself caches the
     (unique) instance pointer.  */
  if (! __gthread_active_p ())
    {
      if (__builtin_expect (obj->loc.ptr == NULL, 0))
	obj->loc.ptr = emutls_alloc (obj);
      return obj->loc.ptr;
    }

#ifndef __GTHREADS
  abort ();
#else
  pointer offset = obj->loc.offset;

  if (__builtin_expect (offset == 0, 0))
    {
      /* First use of this variable in any thread: assign it a 1-based
	 slot index, double-checked under the mutex.  */
      static __gthread_once_t once = __GTHREAD_ONCE_INIT;
      __gthread_once (&once, emutls_init);
      __gthread_mutex_lock (&emutls_mutex);
      offset = obj->loc.offset;
      if (offset == 0)
	{
	  offset = ++emutls_size;
	  obj->loc.offset = offset;
	}
      __gthread_mutex_unlock (&emutls_mutex);
    }

  struct __emutls_array *arr = __gthread_getspecific (emutls_key);
  if (__builtin_expect (arr == NULL, 0))
    {
      /* No per-thread array yet: allocate one with some headroom; the
	 extra element covers the size member preceding data[].  */
      pointer size = offset + 32;
      arr = calloc (size + 1, sizeof (void *));
      if (arr == NULL)
	abort ();
      arr->size = size;
      __gthread_setspecific (emutls_key, (void *) arr);
    }
  else if (__builtin_expect (offset > arr->size, 0))
    {
      /* Array too small: grow (at least doubling) and zero the newly
	 added slots; realloc does not initialize them.  */
      pointer orig_size = arr->size;
      pointer size = orig_size * 2;
      if (offset > size)
	size = offset + 32;
      arr = realloc (arr, (size + 1) * sizeof (void *));
      if (arr == NULL)
	abort ();
      arr->size = size;
      memset (arr->data + orig_size, 0,
	      (size - orig_size) * sizeof (void *));
      __gthread_setspecific (emutls_key, (void *) arr);
    }

  /* Slot indices are 1-based; 0 means "not yet assigned".  */
  void *ret = arr->data[offset - 1];
  if (__builtin_expect (ret == NULL, 0))
    {
      ret = emutls_alloc (obj);
      arr->data[offset - 1] = ret;
    }
  return ret;
#endif
}
int gconv_init (struct __gconv_step *step) { /* Determine which direction. */ struct utf16_data *new_data; enum direction dir = illegal_dir; enum variant var = illegal_var; int result; if (__strcasecmp (step->__from_name, "UTF-16//") == 0) { dir = from_utf16; var = UTF_16; } else if (__strcasecmp (step->__to_name, "UTF-16//") == 0) { dir = to_utf16; var = UTF_16; } else if (__strcasecmp (step->__from_name, "UTF-16BE//") == 0) { dir = from_utf16; var = UTF_16BE; } else if (__strcasecmp (step->__to_name, "UTF-16BE//") == 0) { dir = to_utf16; var = UTF_16BE; } else if (__strcasecmp (step->__from_name, "UTF-16LE//") == 0) { dir = from_utf16; var = UTF_16LE; } else if (__strcasecmp (step->__to_name, "UTF-16LE//") == 0) { dir = to_utf16; var = UTF_16LE; } result = __GCONV_NOCONV; if (__builtin_expect (dir, to_utf16) != illegal_dir) { new_data = (struct utf16_data *) malloc (sizeof (struct utf16_data)); result = __GCONV_NOMEM; if (new_data != NULL) { new_data->dir = dir; new_data->var = var; step->__data = new_data; if (dir == from_utf16) { step->__min_needed_from = MIN_NEEDED_FROM; step->__max_needed_from = MAX_NEEDED_FROM; step->__min_needed_to = MIN_NEEDED_TO; step->__max_needed_to = MIN_NEEDED_TO; } else { step->__min_needed_from = MIN_NEEDED_TO; step->__max_needed_from = MIN_NEEDED_TO; step->__min_needed_to = MIN_NEEDED_FROM; step->__max_needed_to = MAX_NEEDED_FROM; } step->__stateful = 0; result = __GCONV_OK; } } return result; }
/* Wait for the thread THREADID to terminate, giving up once the
   absolute time ABSTIME has passed.  On success the thread's return
   value is stored in *THREAD_RETURN (if non-NULL) and its TCB is
   freed.  Returns 0 or an error code (ESRCH, EINVAL, EDEADLK, or the
   result of the timed wait, e.g. a timeout).  */
int
pthread_timedjoin_np (
     pthread_t threadid,
     void **thread_return,
     const struct timespec *abstime)
{
  struct pthread *self;
  struct pthread *pd = (struct pthread *) threadid;
  int result;

  /* Make sure the descriptor is valid.  */
  if (INVALID_NOT_TERMINATED_TD_P (pd))
    /* Not a valid thread handle.  */
    return ESRCH;

  /* Is the thread joinable?.  */
  if (IS_DETACHED (pd))
    /* We cannot wait for the thread.  */
    return EINVAL;

  self = THREAD_SELF;
  if (pd == self || self->joinid == pd)
    /* This is a deadlock situation.  The threads are waiting for each
       other to finish.  Note that this is a "may" error.  To be 100%
       sure we catch this error we would have to lock the data
       structures but it is not necessary.  In the unlikely case that
       two threads are really caught in this situation they will
       deadlock.  It is the programmer's problem to figure this
       out.  */
    return EDEADLK;

  /* Wait for the thread to finish.  If it is already locked something
     is wrong.  There can only be one waiter.  */
  if (__builtin_expect (atomic_compare_and_exchange_bool_acq (&pd->joinid,
							      self, NULL), 0))
    /* There is already somebody waiting for the thread.  */
    return EINVAL;

  /* During the wait we change to asynchronous cancellation.  If we
     are cancelled the thread we are waiting for must be marked as
     un-wait-ed for again.  */
  pthread_cleanup_push (cleanup, &pd->joinid);

  /* Switch to asynchronous cancellation.  */
  int oldtype = CANCEL_ASYNC ();

  /* Wait for the child.  */
  result = lll_timedwait_tid (pd->tid, abstime);

  /* Restore cancellation mode.  */
  CANCEL_RESET (oldtype);

  /* Remove the handler.  */
  pthread_cleanup_pop (0);

  /* We might have timed out.  */
  if (result == 0)
    {
      /* Store the return value if the caller is interested.  */
      if (thread_return != NULL)
	*thread_return = pd->result;

      /* Free the TCB.  */
      __free_tcb (pd);
    }
  else
    /* The wait failed (e.g. timed out): release our claim so another
       thread may attempt the join later.  */
    pd->joinid = NULL;

  return result;
}
/* Set timer TIMERID to VALUE, returning old value in OVALUE.  */
int
timer_settime (timer_t timerid, int flags, const struct itimerspec *value,
	       struct itimerspec *ovalue)
{
  struct timer_node *timer;
  struct thread_node *thread = NULL;
  struct timespec now;
  int have_now = 0, need_wakeup = 0;
  int retval = -1;

  timer = timer_id2ptr (timerid);
  if (timer == NULL)
    {
      __set_errno (EINVAL);
      goto bail;
    }

  /* Reject nanosecond fields outside [0, 10^9).  */
  if (value->it_interval.tv_nsec < 0
      || value->it_interval.tv_nsec >= 1000000000
      || value->it_value.tv_nsec < 0
      || value->it_value.tv_nsec >= 1000000000)
    {
      __set_errno (EINVAL);
      goto bail;
    }

  /* Will need to know current time since this is a relative timer;
     might as well make the system call outside of the lock now!  */

  if ((flags & TIMER_ABSTIME) == 0)
    {
      clock_gettime (timer->clock, &now);
      have_now = 1;
    }

  pthread_mutex_lock (&__timer_mutex);
  timer_addref (timer);

  /* One final check of timer validity; this one is possible only
     while we hold the mutex, because it accesses the inuse flag.  */

  if (! timer_valid(timer))
    {
      __set_errno (EINVAL);
      goto unlock_bail;
    }

  if (ovalue != NULL)
    {
      ovalue->it_interval = timer->value.it_interval;

      if (timer->armed)
	{
	  if (! have_now)
	    {
	      /* Absolute timer: we still need the current time to
		 report the remaining interval; fetch it with the
		 mutex dropped.  NOTE(review): a second timer_addref
		 is taken here but only one timer_delref happens at
		 unlock_bail -- confirm timer_addref semantics.  */
	      pthread_mutex_unlock (&__timer_mutex);
	      clock_gettime (timer->clock, &now);
	      have_now = 1;
	      pthread_mutex_lock (&__timer_mutex);
	      timer_addref (timer);
	    }

	  timespec_sub (&ovalue->it_value, &timer->expirytime, &now);
	}
      else
	{
	  ovalue->it_value.tv_sec = 0;
	  ovalue->it_value.tv_nsec = 0;
	}
    }

  timer->value = *value;

  list_unlink_ip (&timer->links);
  timer->armed = 0;

  thread = timer->thread;

  /* A value of { 0, 0 } causes the timer to be stopped.  */
  if (value->it_value.tv_sec != 0
      || __builtin_expect (value->it_value.tv_nsec != 0, 1))
    {
      if ((flags & TIMER_ABSTIME) != 0)
	/* The user specified the expiration time.  */
	timer->expirytime = value->it_value;
      else
	timespec_add (&timer->expirytime, &now, &value->it_value);

      /* Only need to wake up the thread if timer is inserted
	 at the head of the queue.  */
      if (thread != NULL)
	need_wakeup = __timer_thread_queue_timer (thread, timer);
      timer->armed = 1;
    }

  retval = 0;

unlock_bail:
  timer_delref (timer);
  pthread_mutex_unlock (&__timer_mutex);

bail:
  /* Wake the service thread outside the lock.  */
  if (thread != NULL && need_wakeup)
    __timer_thread_wakeup (thread);

  return retval;
}
/* Return the base-10 logarithm of the complex value X:
   log10(|x|) + i * log10(e) * atan2(Im x, Re x).  The modulus is
   computed carefully (scaling, log1p-based expansions near |x| == 1)
   to avoid overflow, underflow and cancellation.  */
__complex__ long double
__clog10l (__complex__ long double x)
{
  __complex__ long double result;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  if (__builtin_expect (rcls == FP_ZERO && icls == FP_ZERO, 0))
    {
      /* Real and imaginary part are 0.0.  */
      __imag__ result = signbit (__real__ x) ? M_PIl : 0.0;
      __imag__ result = __copysignl (__imag__ result, __imag__ x);
      /* Yes, the following line raises an exception.  */
      __real__ result = -1.0 / fabsl (__real__ x);
    }
  else if (__builtin_expect (rcls != FP_NAN && icls != FP_NAN, 1))
    {
      /* Neither real nor imaginary part is NaN.  */
      long double absx = fabsl (__real__ x), absy = fabsl (__imag__ x);
      int scale = 0;

      /* Order the parts so that absx >= absy.  */
      if (absx < absy)
	{
	  long double t = absx;
	  absx = absy;
	  absy = t;
	}

      if (absx > LDBL_MAX / 2.0L)
	{
	  /* Scale down to keep hypot from overflowing; absy is dropped
	     entirely when it would vanish after scaling anyway.  */
	  scale = -1;
	  absx = __scalbnl (absx, scale);
	  absy = (absy >= LDBL_MIN * 2.0L ? __scalbnl (absy, scale) : 0.0L);
	}
      else if (absx < LDBL_MIN && absy < LDBL_MIN)
	{
	  /* Both parts tiny: scale up to avoid underflow.  */
	  scale = LDBL_MANT_DIG;
	  absx = __scalbnl (absx, scale);
	  absy = __scalbnl (absy, scale);
	}

      if (absx == 1.0L && scale == 0)
	{
	  /* |x| ~ 1: use log(1+y^2)/2; a two-term series when y^2 is
	     too small for log1p to see past the rounding.  */
	  long double absy2 = absy * absy;
	  if (absy2 <= LDBL_MIN * 2.0L * M_LN10l)
	    __real__ result
	      = (absy2 / 2.0L - absy2 * absy2 / 4.0L) * M_LOG10El;
	  else
	    __real__ result = __log1pl (absy2) * (M_LOG10El / 2.0L);
	}
      else if (absx > 1.0L && absx < 2.0L && absy < 1.0L && scale == 0)
	{
	  /* d2m1 = x^2 + y^2 - 1 computed without cancellation; the y^2
	     term is negligible below LDBL_EPSILON.  */
	  long double d2m1 = (absx - 1.0L) * (absx + 1.0L);
	  if (absy >= LDBL_EPSILON)
	    d2m1 += absy * absy;
	  __real__ result = __log1pl (d2m1) * (M_LOG10El / 2.0L);
	}
      else if (absx < 1.0L
	       && absx >= 0.75L
	       && absy < LDBL_EPSILON / 2.0L
	       && scale == 0)
	{
	  long double d2m1 = (absx - 1.0L) * (absx + 1.0L);
	  __real__ result = __log1pl (d2m1) * (M_LOG10El / 2.0L);
	}
      else if (absx < 1.0L && (absx >= 0.75L || absy >= 0.5L) && scale == 0)
	{
	  /* Accurate x^2 + y^2 - 1 via the dedicated helper.  */
	  long double d2m1 = __x2y2m1l (absx, absy);
	  __real__ result = __log1pl (d2m1) * (M_LOG10El / 2.0L);
	}
      else
	{
	  /* General case; undo the scaling, which multiplied the
	     modulus by 2^scale, by subtracting scale*log10(2).  */
	  long double d = __ieee754_hypotl (absx, absy);
	  __real__ result = __ieee754_log10l (d) - scale * M_LOG10_2l;
	}

      __imag__ result = M_LOG10El * __ieee754_atan2l (__imag__ x, __real__ x);
    }
  else
    {
      __imag__ result = __nanl ("");
      if (rcls == FP_INFINITE || icls == FP_INFINITE)
	/* Real or imaginary part is infinite.  */
	__real__ result = HUGE_VALL;
      else
	__real__ result = __nanl ("");
    }

  return result;
}
/* Compute the IEEE remainder of x/y as remainder() does, and store in
   *QUO a value with the sign of x/y whose magnitude gives the low
   three bits of the rounded integral quotient.  */
double
__remquo (double x, double y, int *quo)
{
  int64_t hx, hy;
  uint64_t sx, qs;
  int cquo;

  EXTRACT_WORDS64 (hx, x);
  EXTRACT_WORDS64 (hy, y);
  /* sx: sign bit of x; qs: sign of the quotient x/y.  */
  sx = hx & UINT64_C(0x8000000000000000);
  qs = sx ^ (hy & UINT64_C(0x8000000000000000));
  hy &= UINT64_C(0x7fffffffffffffff);
  hx &= UINT64_C(0x7fffffffffffffff);

  /* Purge off exception values.  */
  if (__glibc_unlikely (hy == 0))
    return (x * y) / (x * y); /* y = 0 */
  if (__builtin_expect (hx >= UINT64_C(0x7ff0000000000000) /* x not finite */
			|| hy > UINT64_C(0x7ff0000000000000), 0))/* y is NaN */
    return (x * y) / (x * y);

  /* Reduce |x| below 8|y| first -- but only when 8*y cannot
     overflow.  */
  if (hy <= UINT64_C(0x7fbfffffffffffff))
    x = __ieee754_fmod (x, 8 * y);	/* now x < 8y */

  if (__glibc_unlikely (hx == hy))
    {
      /* |x| == |y|: remainder is a signed zero, quotient is +-1.  */
      *quo = qs ? -1 : 1;
      return zero * x;
    }

  /* Continue with |x| and |y|.  */
  INSERT_WORDS64 (x, hx);
  INSERT_WORDS64 (y, hy);
  cquo = 0;

  if (x >= 4 * y)
    {
      x -= 4 * y;
      cquo += 4;
    }
  if (x >= 2 * y)
    {
      x -= 2 * y;
      cquo += 2;
    }

  if (hy < UINT64_C(0x0020000000000000))
    {
      /* y is so small that 0.5*y could underflow; compare via x+x
	 instead (round-to-nearest on the final quotient bit).  */
      if (x + x > y)
	{
	  x -= y;
	  ++cquo;
	  if (x + x >= y)
	    {
	      x -= y;
	      ++cquo;
	    }
	}
    }
  else
    {
      double y_half = 0.5 * y;
      if (x > y_half)
	{
	  x -= y;
	  ++cquo;
	  if (x >= y_half)
	    {
	      x -= y;
	      ++cquo;
	    }
	}
    }

  *quo = qs ? -cquo : cquo;

  /* Restore the sign of x onto the remainder.  */
  if (sx)
    x = -x;
  return x;
}
/* One-time initialization of the malloc subsystem: set up the main
   arena, thread support and fork handlers, then apply the MALLOC_*
   environment tunables.  Idempotent: returns immediately once
   __malloc_initialized is no longer negative.  */
static void
ptmalloc_init (void)
{
#if __STD_C
  const char* s;
#else
  char* s;
#endif
  int secure = 0;

  if(__malloc_initialized >= 0) return;
  __malloc_initialized = 0;

#ifdef _LIBC
# if defined SHARED && !USE___THREAD
  /* ptmalloc_init_minimal may already have been called via
     __libc_malloc_pthread_startup, above.  */
  if (mp_.pagesize == 0)
# endif
#endif
    ptmalloc_init_minimal();

#ifndef NO_THREADS
# if defined _LIBC
  /* We know __pthread_initialize_minimal has already been called,
     and that is enough.  */
#  define NO_STARTER
# endif
# ifndef NO_STARTER
  /* With some threads implementations, creating thread-specific data
     or initializing a mutex may call malloc() itself.  Provide a
     simple starter version (realloc() won't work).  */
  save_malloc_hook = __malloc_hook;
  save_memalign_hook = __memalign_hook;
  save_free_hook = __free_hook;
  __malloc_hook = malloc_starter;
  __memalign_hook = memalign_starter;
  __free_hook = free_starter;
#  ifdef _LIBC
  /* Initialize the pthreads interface.  */
  if (__pthread_initialize != NULL)
    __pthread_initialize();
#  endif /* !defined _LIBC */
# endif /* !defined NO_STARTER */
#endif /* !defined NO_THREADS */
  mutex_init(&main_arena.mutex);
  main_arena.next = &main_arena;

#if defined _LIBC && defined SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
	  && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  mutex_init(&list_lock);
  tsd_key_create(&arena_key, NULL);
  tsd_setspecific(arena_key, (Void_t *)&main_arena);
  thread_atfork(ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
#ifndef NO_THREADS
# ifndef NO_STARTER
  /* Restore the real hooks now that thread setup is finished.  */
  __malloc_hook = save_malloc_hook;
  __memalign_hook = save_memalign_hook;
  __free_hook = save_free_hook;
# else
#  undef NO_STARTER
# endif
#endif
#ifdef _LIBC
  /* Scan the environment once for MALLOC_* tunables.  The size-related
     options are ignored for secure (set-uid) programs.  */
  secure = __libc_enable_secure;
  s = NULL;
  if (__builtin_expect (_environ != NULL, 1))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
			       0))
	{
	  size_t len = strcspn (envline, "=");

	  if (envline[len] != '=')
	    /* This is a "MALLOC_" variable at the end of the string
	       without a '=' character.  Ignore it since otherwise we
	       will access invalid memory below.  */
	    continue;

	  /* Dispatch on the length of the part before the '='.  */
	  switch (len)
	    {
	    case 6:
	      if (memcmp (envline, "CHECK_", 6) == 0)
		s = &envline[7];
	      break;
	    case 8:
	      if (! secure)
		{
		  if (memcmp (envline, "TOP_PAD_", 8) == 0)
		    mALLOPt(M_TOP_PAD, atoi(&envline[9]));
		  else if (memcmp (envline, "PERTURB_", 8) == 0)
		    mALLOPt(M_PERTURB, atoi(&envline[9]));
		}
	      break;
	    case 9:
	      if (! secure)
		{
		  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
		    mALLOPt(M_MMAP_MAX, atoi(&envline[10]));
#ifdef PER_THREAD
		  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
		    mALLOPt(M_ARENA_MAX, atoi(&envline[10]));
#endif
		}
	      break;
#ifdef PER_THREAD
	    case 10:
	      if (! secure)
		{
		  if (memcmp (envline, "ARENA_TEST", 10) == 0)
		    mALLOPt(M_ARENA_TEST, atoi(&envline[11]));
		}
	      break;
#endif
	    case 15:
	      if (! secure)
		{
		  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
		    mALLOPt(M_TRIM_THRESHOLD, atoi(&envline[16]));
		  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
		    mALLOPt(M_MMAP_THRESHOLD, atoi(&envline[16]));
		}
	      break;
	    default:
	      break;
	    }
	}
    }
#else
  if (! secure)
    {
      if((s = getenv("MALLOC_TRIM_THRESHOLD_")))
	mALLOPt(M_TRIM_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_TOP_PAD_")))
	mALLOPt(M_TOP_PAD, atoi(s));
      if((s = getenv("MALLOC_PERTURB_")))
	mALLOPt(M_PERTURB, atoi(s));
      if((s = getenv("MALLOC_MMAP_THRESHOLD_")))
	mALLOPt(M_MMAP_THRESHOLD, atoi(s));
      if((s = getenv("MALLOC_MMAP_MAX_")))
	mALLOPt(M_MMAP_MAX, atoi(s));
    }
  s = getenv("MALLOC_CHECK_");
#endif
  if(s && s[0]) {
    /* MALLOC_CHECK_ is read even for secure programs (case 6 above
       has no secure guard); enable the checking hooks when the
       resulting action is nonzero.  */
    mALLOPt(M_CHECK_ACTION, (int)(s[0] - '0'));
    if (check_action != 0)
      __malloc_check_init();
  }
  void (*hook) (void) = force_reg (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)();
  __malloc_initialized = 1;
}
// Apply one ELF REL-format relocation.  |sym_addr| is the resolved
// symbol address (0 when unresolved); |resolved| reports whether the
// symbol lookup succeeded and is only consulted for R_MIPS_REL32.
// Returns false and sets |error| for forbidden or unknown relocation
// types.
bool ElfRelocations::ApplyRelReloc(const ELF::Rel* rel,
                                   ELF::Addr sym_addr,
                                   bool resolved CRAZY_UNUSED,
                                   Error* error) {
  const ELF::Word rel_type = ELF_R_TYPE(rel->r_info);
  const ELF::Word CRAZY_UNUSED rel_symbol = ELF_R_SYM(rel->r_info);

  // The address to patch is the stored offset adjusted by the load bias.
  const ELF::Addr reloc = static_cast<ELF::Addr>(rel->r_offset + load_bias_);

  RLOG(" rel reloc=%p offset=%p type=%d\n", reloc, rel->r_offset, rel_type);

  // Apply the relocation.
  ELF::Addr* CRAZY_UNUSED target = reinterpret_cast<ELF::Addr*>(reloc);
  switch (rel_type) {
#ifdef __arm__
    case R_ARM_JUMP_SLOT:
      RLOG(" R_ARM_JUMP_SLOT target=%p addr=%p\n", target, sym_addr);
      *target = sym_addr;
      break;
    case R_ARM_GLOB_DAT:
      RLOG(" R_ARM_GLOB_DAT target=%p addr=%p\n", target, sym_addr);
      *target = sym_addr;
      break;
    case R_ARM_ABS32:
      RLOG(" R_ARM_ABS32 target=%p (%p) addr=%p\n", target, *target,
           sym_addr);
      // REL format: the implicit addend is the prior contents of *target.
      *target += sym_addr;
      break;
    case R_ARM_REL32:
      RLOG(" R_ARM_REL32 target=%p (%p) addr=%p offset=%p\n", target,
           *target, sym_addr, rel->r_offset);
      // PC-relative: subtract the place's unrelocated offset.
      *target += sym_addr - rel->r_offset;
      break;
    case R_ARM_RELATIVE:
      RLOG(" R_ARM_RELATIVE target=%p (%p) bias=%p\n", target, *target,
           load_bias_);
      if (__builtin_expect(rel_symbol, 0)) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target += load_bias_;
      break;
    case R_ARM_COPY:
      // NOTE: These relocations are forbidden in shared libraries.
      // The Android linker has special code to deal with this, which
      // is not needed here.
      RLOG(" R_ARM_COPY\n");
      *error = "Invalid R_ARM_COPY relocation in shared library";
      return false;
#endif  // __arm__
#ifdef __i386__
    case R_386_JMP_SLOT:
      *target = sym_addr;
      break;
    case R_386_GLOB_DAT:
      *target = sym_addr;
      break;
    case R_386_RELATIVE:
      if (rel_symbol) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target += load_bias_;
      break;
    case R_386_32:
      *target += sym_addr;
      break;
    case R_386_PC32:
      // PC-relative against the relocated place.
      *target += (sym_addr - reloc);
      break;
#endif  // __i386__
#ifdef __mips__
    case R_MIPS_REL32:
      // Fall back to the load bias when the symbol was not resolved.
      if (resolved)
        *target += sym_addr;
      else
        *target += load_bias_;
      break;
#endif  // __mips__
    default:
      error->Format("Invalid relocation type (%d)", rel_type);
      return false;
  }

  return true;
}
ucs4le_internal_loop_unaligned (struct __gconv_step *step, struct __gconv_step_data *step_data, const unsigned char **inptrp, const unsigned char *inend, unsigned char **outptrp, unsigned char *outend, size_t *irreversible) { int flags = step_data->__flags; const unsigned char *inptr = *inptrp; unsigned char *outptr = *outptrp; size_t n_convert = MIN (inend - inptr, outend - outptr) / 4; int result; size_t cnt; for (cnt = 0; cnt < n_convert; ++cnt, inptr += 4) { if (__builtin_expect (inptr[3] > 0x80, 0)) { /* The value is too large. We don't try transliteration here since this is not an error because of the lack of possibilities to represent the result. This is a genuine bug in the input since UCS4 does not allow such values. */ if (irreversible == NULL) /* We are transliterating, don't try to correct anything. */ return __GCONV_ILLEGAL_INPUT; if (flags & __GCONV_IGNORE_ERRORS) { /* Just ignore this character. */ ++*irreversible; continue; } *inptrp = inptr; *outptrp = outptr; return __GCONV_ILLEGAL_INPUT; } # if __BYTE_ORDER == __BIG_ENDIAN outptr[3] = inptr[0]; outptr[2] = inptr[1]; outptr[1] = inptr[2]; outptr[0] = inptr[3]; # else outptr[0] = inptr[0]; outptr[1] = inptr[1]; outptr[2] = inptr[2]; outptr[3] = inptr[3]; # endif outptr += 4; } *inptrp = inptr; *outptrp = outptr; /* Determine the status. */ if (*inptrp == inend) result = __GCONV_EMPTY_INPUT; else if (*inptrp + 4 > inend) result = __GCONV_INCOMPLETE_INPUT; else { assert (*outptrp + 4 > outend); result = __GCONV_FULL_OUTPUT; } return result; }
/* Return the complex sine of X in long double precision:
     csin(x + iy) = sin(x) cosh(y) + i cos(x) sinh(y).
   Large |Im x| is handled by building e^|y|/2 in factors of e^t with
   t = (LDBL_MAX_EXP - 1) * ln 2 so intermediate results do not
   overflow prematurely; special values follow the usual C Annex G
   rules.  */
__complex__ long double
__csinl (__complex__ long double x)
{
  __complex__ long double retval;
  int negate = signbit (__real__ x);
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  /* Work with |Re x|; sine is odd, the sign is restored at the end.  */
  __real__ x = fabsl (__real__ x);

  if (__builtin_expect (icls >= FP_ZERO, 1))
    {
      /* Imaginary part is finite.  */
      if (__builtin_expect (rcls >= FP_ZERO, 1))
	{
	  /* Real part is finite.  */
	  const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
	  long double sinix, cosix;

	  if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
	    {
	      __sincosl (__real__ x, &sinix, &cosix);
	    }
	  else
	    {
	      /* For subnormal x, sin(x) == x and cos(x) == 1 to full
		 precision; avoid spurious underflow in __sincosl.  */
	      sinix = __real__ x;
	      cosix = 1.0;
	    }

	  if (fabsl (__imag__ x) > t)
	    {
	      /* cosh(y), sinh(y) ~ e^|y|/2 here; peel off factors of
		 e^t so each multiply stays finite.  */
	      long double exp_t = __ieee754_expl (t);
	      long double ix = fabsl (__imag__ x);
	      if (signbit (__imag__ x))
		cosix = -cosix;
	      ix -= t;
	      sinix *= exp_t / 2.0L;
	      cosix *= exp_t / 2.0L;
	      if (ix > t)
		{
		  ix -= t;
		  sinix *= exp_t;
		  cosix *= exp_t;
		}
	      if (ix > t)
		{
		  /* Overflow (original imaginary part of x > 3t).  */
		  __real__ retval = LDBL_MAX * sinix;
		  __imag__ retval = LDBL_MAX * cosix;
		}
	      else
		{
		  long double exp_val = __ieee754_expl (ix);
		  __real__ retval = exp_val * sinix;
		  __imag__ retval = exp_val * cosix;
		}
	    }
	  else
	    {
	      __real__ retval = __ieee754_coshl (__imag__ x) * sinix;
	      __imag__ retval = __ieee754_sinhl (__imag__ x) * cosix;
	    }

	  if (negate)
	    __real__ retval = -__real__ retval;

	  /* Force the underflow exception when a component is tiny;
	     the volatile keeps the multiply from being optimized out.  */
	  if (fabsl (__real__ retval) < LDBL_MIN)
	    {
	      volatile long double force_underflow
		= __real__ retval * __real__ retval;
	      (void) force_underflow;
	    }
	  if (fabsl (__imag__ retval) < LDBL_MIN)
	    {
	      volatile long double force_underflow
		= __imag__ retval * __imag__ retval;
	      (void) force_underflow;
	    }
	}
      else
	{
	  /* Real part is infinite or NaN.  */
	  if (icls == FP_ZERO)
	    {
	      /* Imaginary part is 0.0.  */
	      __real__ retval = __nanl ("");
	      __imag__ retval = __imag__ x;

	      if (rcls == FP_INFINITE)
		feraiseexcept (FE_INVALID);
	    }
	  else
	    {
	      __real__ retval = __nanl ("");
	      __imag__ retval = __nanl ("");

	      feraiseexcept (FE_INVALID);
	    }
	}
    }
  else if (icls == FP_INFINITE)
    {
      /* Imaginary part is infinite.  */
      if (rcls == FP_ZERO)
	{
	  /* Real part is 0.0.  */
	  __real__ retval = __copysignl (0.0, negate ? -1.0 : 1.0);
	  __imag__ retval = __imag__ x;
	}
      else if (rcls > FP_ZERO)
	{
	  /* Real part is finite.  */
	  long double sinix, cosix;

	  if (__builtin_expect (rcls != FP_SUBNORMAL, 1))
	    {
	      __sincosl (__real__ x, &sinix, &cosix);
	    }
	  else
	    {
	      sinix = __real__ x;
	      cosix = 1.0;
	    }

	  __real__ retval = __copysignl (HUGE_VALL, sinix);
	  __imag__ retval = __copysignl (HUGE_VALL, cosix);

	  if (negate)
	    __real__ retval = -__real__ retval;
	  if (signbit (__imag__ x))
	    __imag__ retval = -__imag__ retval;
	}
      else
	{
	  /* The addition raises the invalid exception.  */
	  __real__ retval = __nanl ("");
	  __imag__ retval = HUGE_VALL;

	  if (rcls == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
    }
  else
    {
      /* Imaginary part is NaN.  */
      if (rcls == FP_ZERO)
	__real__ retval = __copysignl (0.0, negate ? -1.0 : 1.0);
      else
	__real__ retval = __nanl ("");
      __imag__ retval = __nanl ("");
    }

  return retval;
}
/* Change the owner/group of FILE, interpreted relative to the
   directory referred to by FD, like fchownat(2).  FLAG may contain
   AT_SYMLINK_NOFOLLOW.  Emulated on top of chown/lchown by rewriting a
   relative FILE as /proc/self/fd/FD/FILE.  Returns 0 or -1 with errno
   set.  */
int
fchownat (int fd, const char *file, uid_t owner, gid_t group, int flag)
{
  if (flag & ~AT_SYMLINK_NOFOLLOW)
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  if (fd != AT_FDCWD && file[0] != '/')
    {
      /* Build "/proc/self/fd/<fd>/<file>" on the stack.  */
      size_t filelen = strlen (file);
      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.  A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  int result;
  INTERNAL_SYSCALL_DECL (err);

#if __ASSUME_32BITUIDS > 0
  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown32, err, 3, CHECK_STRING (file), owner,
			       group);
  else
    result = INTERNAL_SYSCALL (chown32, err, 3, CHECK_STRING (file), owner,
			       group);
#else
# ifdef __NR_chown32
  /* Try the 32-bit-UID syscalls first, remembering in
     __libc_missing_32bit_uids when the kernel lacks them.  */
  if (__libc_missing_32bit_uids <= 0)
    {
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lchown32, err, 3, CHECK_STRING (file),
				   owner, group);
      else
	result = INTERNAL_SYSCALL (chown32, err, 3, CHECK_STRING (file),
				   owner, group);

      if (__builtin_expect (!INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	return result;
      if (INTERNAL_SYSCALL_ERRNO (result, err) != ENOSYS)
	goto fail;

      __libc_missing_32bit_uids = 1;
    }
# endif /* __NR_chown32 */

  /* The legacy syscalls take 16-bit IDs; reject values they cannot
     represent (-1U is reserved).  */
  if (((owner + 1) > (gid_t) ((__kernel_uid_t) -1U))
      || ((group + 1) > (gid_t) ((__kernel_gid_t) -1U)))
    {
      __set_errno (EINVAL);
      return -1;
    }

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lchown, err, 3, CHECK_STRING (file), owner,
			       group);
  else
    result = INTERNAL_SYSCALL (chown, err, 3, CHECK_STRING (file), owner,
			       group);
#endif

  if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 0))
    {
    fail:
      /* Translate the kernel error; the helper presumably adjusts it
	 for a bad descriptor when BUF was used -- see its definition.  */
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
}
/* Open a directory stream on NAME. */ DIR * __opendir (const char *name) { DIR *dirp; struct stat64 statbuf; int fd; size_t allocation; int save_errno; if (__builtin_expect (name[0], '\1') == '\0') { /* POSIX.1-1990 says an empty name gets ENOENT; but `open' might like it fine. */ __set_errno (ENOENT); return NULL; } #ifdef O_DIRECTORY /* Test whether O_DIRECTORY works. */ if (o_directory_works == 0) tryopen_o_directory (); /* We can skip the expensive `stat' call if O_DIRECTORY works. */ if (o_directory_works < 0) #endif { /* We first have to check whether the name is for a directory. We cannot do this after the open() call since the open/close operation performed on, say, a tape device might have undesirable effects. */ if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0) return NULL; if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0)) { __set_errno (ENOTDIR); return NULL; } } fd = __open64 (name, O_RDONLY|O_NDELAY|EXTRA_FLAGS); if (__builtin_expect (fd, 0) < 0) return NULL; /* Now make sure this really is a directory and nothing changed since the `stat' call. We do not have to perform the test for the descriptor being associated with a directory if we know the O_DIRECTORY flag is honored by the kernel. */ if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0) goto lose; #ifdef O_DIRECTORY if (o_directory_works <= 0) #endif { if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0)) { save_errno = ENOTDIR; goto lose; } } if (__builtin_expect (__fcntl (fd, F_SETFD, FD_CLOEXEC), 0) < 0) goto lose; #ifdef _STATBUF_ST_BLKSIZE if (__builtin_expect ((size_t) statbuf.st_blksize < sizeof (struct dirent), 0)) allocation = sizeof (struct dirent); else allocation = statbuf.st_blksize; #else allocation = (BUFSIZ < sizeof (struct dirent) ? sizeof (struct dirent) : BUFSIZ); #endif dirp = (DIR *) calloc (1, sizeof (DIR) + allocation); /* Zero-fill. 
*/ if (dirp == NULL) lose: { save_errno = errno; (void) __close (fd); __set_errno (save_errno); return NULL; } dirp->data = (char *) (dirp + 1); dirp->allocation = allocation; dirp->fd = fd; __libc_lock_init (dirp->lock); return dirp; }
/* Try to acquire *FUTEX by eliding the lock with a hardware transaction
   (System z transactional execution).  Returns 0 on success, non-zero if
   the lock is busy.  *ADAPT_COUNT throttles further elision attempts
   after busy locks or persistent aborts.  */
int
__lll_trylock_elision (int *futex, short *adapt_count)
{
  /* Emit directives so the assembler accepts the TX instructions in
     either machine mode; the "memory" clobber keeps memory accesses from
     being reordered across this point.  */
  __asm__ volatile (".machinemode \"zarch_nohighgprs\"\n\t"
		    ".machine \"all\""
		    : : : "memory");

  /* Implement POSIX semantics by forbidding nesting elided trylocks.
     Sorry.  After the abort the code is re-executed non transactional
     and if the lock was already locked return an error.  */
  if (__builtin_tx_nesting_depth () > 0)
    {
      /* Note that this abort may terminate an outermost transaction that
	 was created outside glibc.  This persistently aborts the current
	 transactions to force them to use the default lock instead of
	 retrying transactions until their try_tbegin is zero.  */
      __builtin_tabort (_HTM_FIRST_USER_ABORT_CODE | 1);
    }

  /* Only try a transaction if it's worth it.  */
  if (*adapt_count <= 0)
    {
      unsigned status;

      if (__builtin_expect
	  ((status = __builtin_tbegin ((void *)0)) == _HTM_TBEGIN_STARTED, 1))
	{
	  /* We are now inside the transaction.  An unlocked futex means
	     we hold the lock by virtue of the transaction itself.  */
	  if (*futex == 0)
	    return 0;
	  /* Lock was busy.  Fall back to normal locking.  */
	  /* Since we are in a non-nested transaction there is no need to
	     abort, which is expensive.  */
	  __builtin_tend ();
	  /* Note: Changing the adapt_count here might abort a transaction
	     on a different cpu, but that could happen anyway when the
	     futex is acquired, so there's no need to check the nesting
	     depth here.  */
	  if (aconf.skip_lock_busy > 0)
	    *adapt_count = aconf.skip_lock_busy;
	}
      else
	{
	  if (status != _HTM_TBEGIN_TRANSIENT)
	    {
	      /* A persistent abort (cc 1 or 3) indicates that a retry is
		 probably futile.  Use the normal locking now and for the
		 next couple of calls.
		 Be careful to avoid writing to the lock.  */
	      if (aconf.skip_trylock_internal_abort > 0)
		*adapt_count = aconf.skip_trylock_internal_abort;
	    }
	}
      /* Could do some retries here.  */
    }
  else
    {
      /* Lost updates are possible, but harmless.  Due to races this might
	 lead to *adapt_count becoming less than zero.  */
      (*adapt_count)--;
    }

  /* Fall back to the ordinary (non-elided) trylock.  */
  return lll_trylock (*futex);
}
__complex__ double __cexp (__complex__ double x) { __complex__ double retval; int rcls = fpclassify (__real__ x); int icls = fpclassify (__imag__ x); if (__builtin_expect (rcls >= FP_ZERO, 1)) { /* Real part is finite. */ if (__builtin_expect (icls >= FP_ZERO, 1)) { /* Imaginary part is finite. */ double exp_val = __ieee754_exp (__real__ x); double sinix, cosix; __sincos (__imag__ x, &sinix, &cosix); if (isfinite (exp_val)) { __real__ retval = exp_val * cosix; __imag__ retval = exp_val * sinix; } else { __real__ retval = __copysign (exp_val, cosix); __imag__ retval = __copysign (exp_val, sinix); } } else { /* If the imaginary part is +-inf or NaN and the real part is not +-inf the result is NaN + iNaN. */ __real__ retval = __nan (""); __imag__ retval = __nan (""); feraiseexcept (FE_INVALID); } } else if (__builtin_expect (rcls == FP_INFINITE, 1)) { /* Real part is infinite. */ if (__builtin_expect (icls >= FP_ZERO, 1)) { /* Imaginary part is finite. */ double value = signbit (__real__ x) ? 0.0 : HUGE_VAL; if (icls == FP_ZERO) { /* Imaginary part is 0.0. */ __real__ retval = value; __imag__ retval = __imag__ x; } else { double sinix, cosix; __sincos (__imag__ x, &sinix, &cosix); __real__ retval = __copysign (value, cosix); __imag__ retval = __copysign (value, sinix); } } else if (signbit (__real__ x) == 0) { __real__ retval = HUGE_VAL; __imag__ retval = __nan (""); if (icls == FP_INFINITE) feraiseexcept (FE_INVALID); } else { __real__ retval = 0.0; __imag__ retval = __copysign (0.0, __imag__ x); } } else { /* If the real part is NaN the result is NaN + iNaN. */ __real__ retval = __nan (""); __imag__ retval = __nan (""); if (rcls != FP_NAN || icls != FP_NAN) feraiseexcept (FE_INVALID); } return retval; }
/* Get information about the file NAME relative to FD in ST.  */
int
__fxstatat (int vers, int fd, const char *file, struct stat *st, int flag)
{
  int result;
  INTERNAL_SYSCALL_DECL (err);
  struct stat64 st64;

#ifdef __NR_fstatat64
  /* Fast path: use the fstatat64 syscall when the kernel has it.  */
# ifndef __ASSUME_ATFCTS
  /* __have_atfcts < 0 records that a previous call returned ENOSYS.  */
  if (__have_atfcts >= 0)
# endif
    {
      result = INTERNAL_SYSCALL (fstatat64, err, 4, fd, file, &st64, flag);
# ifndef __ASSUME_ATFCTS
      if (__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1)
	  && INTERNAL_SYSCALL_ERRNO (result, err) == ENOSYS)
	/* Kernel too old; remember that and fall through to emulation.  */
	__have_atfcts = -1;
      else
# endif
	if (!__builtin_expect (INTERNAL_SYSCALL_ERROR_P (result, err), 1))
	  /* Convert the stat64 result into the layout VERS requests.  */
	  return __xstat32_conv (vers, &st64, st);
	else
	  {
	    __set_errno (INTERNAL_SYSCALL_ERRNO (result, err));
	    return -1;
	  }
    }
#endif

#ifndef __ASSUME_ATFCTS
  /* Emulation path: only AT_SYMLINK_NOFOLLOW is supported.  */
  if (__glibc_unlikely (flag & ~AT_SYMLINK_NOFOLLOW))
    {
      __set_errno (EINVAL);
      return -1;
    }

  char *buf = NULL;

  /* A relative name with FD != AT_FDCWD is rewritten as a path through
     /proc/self/fd/<fd>/<file>.  */
  if (fd != AT_FDCWD && file[0] != '/')
    {
      size_t filelen = strlen (file);
      if (__glibc_unlikely (filelen == 0))
	{
	  __set_errno (ENOENT);
	  return -1;
	}

      static const char procfd[] = "/proc/self/fd/%d/%s";
      /* Buffer for the path name we are going to use.  It consists of
	 - the string /proc/self/fd/
	 - the file descriptor number
	 - the file name provided.
	 The final NUL is included in the sizeof.  A bit of overhead
	 due to the format elements compensates for possible negative
	 numbers.  */
      size_t buflen = sizeof (procfd) + sizeof (int) * 3 + filelen;
      buf = alloca (buflen);

      __snprintf (buf, buflen, procfd, fd, file);
      file = buf;
    }

  if (vers == _STAT_VER_KERNEL)
    {
      /* Caller wants the raw kernel layout; stat/lstat can write into
	 *ST directly with no conversion.  */
      if (flag & AT_SYMLINK_NOFOLLOW)
	result = INTERNAL_SYSCALL (lstat, err, 2, file,
				   (struct kernel_stat *) st);
      else
	result = INTERNAL_SYSCALL (stat, err, 2, file,
				   (struct kernel_stat *) st);
      goto out;
    }

  if (flag & AT_SYMLINK_NOFOLLOW)
    result = INTERNAL_SYSCALL (lstat64, err, 2, file, &st64);
  else
    result = INTERNAL_SYSCALL (stat64, err, 2, file, &st64);

  if (__glibc_likely (!INTERNAL_SYSCALL_ERROR_P (result, err)))
    return __xstat32_conv (vers, &st64, st);

 out:
  if (__glibc_unlikely (INTERNAL_SYSCALL_ERROR_P (result, err)))
    {
      /* NOTE(review): __atfct_seterrno presumably remaps errors from the
	 /proc-based path (e.g. for a bad FD) -- confirm in its definition.  */
      __atfct_seterrno (INTERNAL_SYSCALL_ERRNO (result, err), fd, buf);
      result = -1;
    }

  return result;
#endif
}
/**
 * TML requires this to be called after every read
 */
inline void afterread_TML(TxThread* tx)
{
    CFENCE;
    // If the global timestamp no longer matches our start time, a writer
    // committed after this transaction began, so the value just read may
    // be inconsistent: abort the transaction.
    const bool conflict =
        __builtin_expect(timestamp.val != tx->start_time, false);
    if (conflict)
        tx->tmabort(tx);
}
/* One-time initialization of LinuxThreads state for the initial (main)
   thread: record the stack bottom, install the shared signal handlers,
   block the restart signal and register the process-exit cleanup.
   Safe to call more than once; repeat calls return immediately.  */
static void pthread_initialize(void)
{
  struct sigaction sa;
  sigset_t mask;

  /* If already done (e.g. by a constructor called earlier!), bail out */
  if (__pthread_initial_thread_bos != NULL) return;
#ifdef TEST_FOR_COMPARE_AND_SWAP
  /* Test if compare-and-swap is available */
  __pthread_has_cas = compare_and_swap_is_available();
#endif
#ifdef FLOATING_STACKS
  /* We don't need to know the bottom of the stack.  Give the pointer some
     value to signal that initialization happened.  */
  __pthread_initial_thread_bos = (void *) -1l;
#else
  /* Determine stack size limits .  */
  __pthread_init_max_stacksize ();
# ifdef _STACK_GROWS_UP
  /* The initial thread already has all the stack it needs */
  __pthread_initial_thread_bos = (char *)
    ((long)CURRENT_STACK_FRAME &~ (STACK_SIZE - 1));
# else
  /* For the initial stack, reserve at least STACK_SIZE bytes of stack
     below the current stack address, and align that on a
     STACK_SIZE boundary. */
  __pthread_initial_thread_bos =
    (char *)(((long)CURRENT_STACK_FRAME - 2 * STACK_SIZE) & ~(STACK_SIZE - 1));
# endif
#endif
  /* Update the descriptor for the initial thread. */
  __pthread_initial_thread.p_pid = __getpid();
  /* Likewise for the resolver state _res. */
  __pthread_initial_thread.p_resp = &_res;
#ifdef __SIGRTMIN
  /* Initialize real-time signals. */
  init_rtsigs ();
#endif
  /* Setup signal handlers for the initial thread.
     Since signal handlers are shared between threads, these settings
     will be inherited by all other threads. */
  sa.sa_handler = pthread_handle_sigrestart;
  sigemptyset(&sa.sa_mask);
  sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_restart, &sa, NULL);
  sa.sa_handler = pthread_handle_sigcancel;
  /* sa_mask (empty) and sa_flags (0) are deliberately reused from the
     sigaction call above.  */
  // sa.sa_flags = 0;
  __libc_sigaction(__pthread_sig_cancel, &sa, NULL);
  if (__pthread_sig_debug > 0) {
    sa.sa_handler = pthread_handle_sigdebug;
    sigemptyset(&sa.sa_mask);
    /* sa_flags is still 0 from above.  */
    // sa.sa_flags = 0;
    __libc_sigaction(__pthread_sig_debug, &sa, NULL);
  }
  /* Initially, block __pthread_sig_restart. Will be unblocked on demand. */
  sigemptyset(&mask);
  sigaddset(&mask, __pthread_sig_restart);
  sigprocmask(SIG_BLOCK, &mask, NULL);
  /* Register an exit function to kill all other threads. */
  /* Do it early so that user-registered atexit functions are called
     before pthread_*exit_process. */
#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_process, NULL,
		  __dso_handle);
  else
#endif
    on_exit (pthread_onexit_process, NULL);
  /* How many processors.  */
  __pthread_smp_kernel = is_smp_system ();
}
/* Round the IBM long double X (represented as a pair of doubles
   XH + XL) to the nearest long integer, honoring the current rounding
   mode.  Out-of-range values and NaN saturate and raise FE_INVALID.  */
long
__lrintl (long double x)
{
  double xh, xl;
  long res, hi, lo;
  int save_round;

  /* Split X into its high and low double halves.  */
  ldbl_unpack (x, &xh, &xl);

  /* Limit the range of values handled by the conversion to long.
     We do this because we aren't sure whether that conversion properly
     raises FE_INVALID.  */
  if (
#if __LONG_MAX__ == 2147483647
      __builtin_expect
      ((__builtin_fabs (xh) <= (double) __LONG_MAX__ + 2), 1)
#else
      __builtin_expect
      ((__builtin_fabs (xh) <= -(double) (-__LONG_MAX__ - 1)), 1)
#endif
      /* NOTE(review): the conditional below is empty and its test is
	 always true; it looks like leftover or garbled preprocessor
	 text -- confirm against upstream.  */
#if !defined (FE_INVALID) || 1
#endif
      )
    {
      /* Despite the name, this holds the current rounding mode, which
	 drives the final adjustment switch below.  */
      save_round = __fegetround ();

#if __LONG_MAX__ == 2147483647
      /* Convert via long long so values just past the 32-bit range
	 saturate instead of invoking undefined behavior.  */
      long long llhi = (long long) xh;
      if (llhi != (long) llhi)
	hi = llhi < 0 ? -__LONG_MAX__ - 1 : __LONG_MAX__;
      else
	hi = llhi;
      xh -= hi;
#else
      if (__glibc_unlikely ((xh == -(double) (-__LONG_MAX__ - 1))))
	{
	  /* When XH is 9223372036854775808.0, converting to long long will
	     overflow, resulting in an invalid operation.  However, XL
	     might be negative and of sufficient magnitude that the overall
	     long double is in fact in range.  Avoid raising an exception.
	     In any case we need to convert this value specially, because
	     the converted value is not exactly represented as a double
	     thus subtracting HI from XH suffers rounding error.  */
	  hi = __LONG_MAX__;
	  xh = 1.0;
	}
      else
	{
	  hi = (long) xh;
	  xh -= hi;
	}
#endif
      ldbl_canonicalize (&xh, &xl);

      lo = (long) xh;

      /* Peg at max/min values, assuming that the above conversions do so.
	 Strictly speaking, we can return anything for values that overflow,
	 but this is more useful.  */
      res = hi + lo;

      /* This is just sign(hi) == sign(lo) && sign(res) != sign(hi).  */
      if (__glibc_unlikely (((~(hi ^ lo) & (res ^ hi)) < 0)))
	goto overflow;

      xh -= lo;
      ldbl_canonicalize (&xh, &xl);

      hi = res;
      /* XH + XL is now the remaining fraction; round it according to the
	 current mode, adjusting RES by at most one.  */
      switch (save_round)
	{
	case FE_TONEAREST:
	  /* Round to nearest, ties to even (the xl checks resolve exact
	     halves that the double pair cannot express in XH alone).  */
	  if (fabs (xh) < 0.5
	      || (fabs (xh) == 0.5
		  && ((xh > 0.0 && xl < 0.0)
		      || (xh < 0.0 && xl > 0.0)
		      || (xl == 0.0 && (res & 1) == 0))))
	    return res;

	  if (xh < 0.0)
	    res -= 1;
	  else
	    res += 1;
	  break;

	case FE_TOWARDZERO:
	  /* The truncating conversions above may have rounded away from
	     zero; step back toward zero if a fraction remains.  */
	  if (res > 0 && (xh < 0.0 || (xh == 0.0 && xl < 0.0)))
	    res -= 1;
	  else if (res < 0 && (xh > 0.0 || (xh == 0.0 && xl > 0.0)))
	    res += 1;
	  return res;
	  break;

	case FE_UPWARD:
	  if (xh > 0.0 || (xh == 0.0 && xl > 0.0))
	    res += 1;
	  break;

	case FE_DOWNWARD:
	  if (xh < 0.0 || (xh == 0.0 && xl < 0.0))
	    res -= 1;
	  break;
	}

      /* Check the +/-1 adjustment itself for overflow.  */
      if (__glibc_unlikely (((~(hi ^ (res - hi)) & (res ^ hi)) < 0)))
	goto overflow;

      return res;
    }
  else
    {
      /* Out of range (or NaN): saturate to LONG_MAX/LONG_MIN, 0 for NaN,
	 and raise FE_INVALID below.  */
      if (xh > 0.0)
	hi = __LONG_MAX__;
      else if (xh < 0.0)
	hi = -__LONG_MAX__ - 1;
      else
	/* Nan */
	hi = 0;
    }

overflow:
#ifdef FE_INVALID
  feraiseexcept (FE_INVALID);
#endif

  return hi;
}
/* Start the LinuxThreads manager thread: allocate its stack, create the
   request pipe, clone the manager and, if event reporting is enabled,
   report its creation to the debugger/thread-db interface.  Returns 0
   on success, -1 on failure.  */
int __pthread_initialize_manager(void)
{
  int manager_pipe[2];
  int pid;
  struct pthread_request request;

#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
		  __dso_handle);
#endif

  if (__pthread_max_stacksize == 0)
    __pthread_init_max_stacksize ();
  /* If basic initialization not done yet (e.g. we're called from a
     constructor run before our constructor), do it now */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL) return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
  /* Setup pipe to communicate with thread manager */
  if (__libc_pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);
    return -1;
  }
  /* Start the thread manager */
  pid = 0;
  /* PID stays 0 unless the event-reporting path below already cloned
     the manager.  */
  if (__builtin_expect (__pthread_initial_thread.p_report_events, 0))
    {
      /* It's a bit more complicated.  We have to report the creation of
	 the manager thread.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
	  != 0)
	{
	  /* Hold the manager's lock while filling in its event data so
	     the new thread cannot run past it prematurely.  */
	  __pthread_lock(__pthread_manager_thread.p_lock, NULL);

#ifdef NEED_SEPARATE_REGISTER_STACK
	  pid = __clone2(__pthread_manager_event,
			 (void **) __pthread_manager_thread_bos,
			 THREAD_MANAGER_STACK_SIZE,
			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			 (void *)(long)manager_pipe[0]);
#elif _STACK_GROWS_UP
	  pid = __clone(__pthread_manager_event,
			(void **) __pthread_manager_thread_bos,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			(void *)(long)manager_pipe[0]);
#else
	  pid = __clone(__pthread_manager_event,
			(void **) __pthread_manager_thread_tos,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			(void *)(long)manager_pipe[0]);
#endif

	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
		 the newly created thread's data structure.  We cannot let
		 the new thread do this since we don't know whether it was
		 already scheduled when we send the event.  */
	      __pthread_manager_thread.p_eventbuf.eventdata =
		&__pthread_manager_thread;
	      __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = &__pthread_manager_thread;
	      __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
	      __pthread_manager_thread.p_pid = pid;

	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();
	    }

	  /* Now restart the thread.  */
	  __pthread_unlock(__pthread_manager_thread.p_lock);
	}
    }

  if (__builtin_expect (pid, 0) == 0)
    {
      /* Normal (non-event-reporting) clone of the manager.  */
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
		     THREAD_MANAGER_STACK_SIZE,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		     (void *)(long)manager_pipe[0]);
#elif _STACK_GROWS_UP
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		    (void *)(long)manager_pipe[0]);
#else
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		    (void *)(long)manager_pipe[0]);
#endif
    }
  if (__builtin_expect (pid, 0) == -1)
    {
      free(__pthread_manager_thread_bos);
      __libc_close(manager_pipe[0]);
      __libc_close(manager_pipe[1]);
      return -1;
    }
  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */
  __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
  __pthread_manager_thread.p_pid = pid;
  /* Make gdb aware of new thread manager */
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
    {
      raise(__pthread_sig_debug);
      /* We suspend ourself and gdb will wake us up when it is
	 ready to handle us. */
      __pthread_wait_for_restart_signal(thread_self());
    }
  /* Synchronize debugging of the thread manager */
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
				  (char *) &request, sizeof(request)));
  return 0;
}
/* Decide how many threads a new parallel region should use.  SPECIFIED
   is the num_threads clause value (0 when absent); COUNT is the section
   count when known (0 otherwise).  The request is bounded by nesting
   rules, nthreads_var, dynamic-thread limits and the global thread
   limit.  */
unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thread = gomp_thread();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long remaining;

  icv = gomp_icv (false);

  if (specified == 1)
    return 1;
  else if (thread->ts.active_level >= 1 && !icv->nest_var)
    /* Nested parallelism disabled: inner regions run with one thread.  */
    return 1;
  else if (thread->ts.active_level >= gomp_max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
	max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
	max_num_threads = count;
    }

  /* ULONG_MAX stands for infinity.  */
  if (__builtin_expect (gomp_thread_limit_var == ULONG_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* Claim threads from the global remaining-threads pool.  The current
     thread is not counted against the pool, hence the +1/-1 offsets.  */
#ifdef HAVE_SYNC_BUILTINS
  /* Lock-free reservation: retry until the compare-and-swap observes an
     unchanged counter.  */
  do
    {
      remaining = gomp_remaining_threads_count;
      num_threads = max_num_threads;
      if (num_threads > remaining)
	num_threads = remaining + 1;
    }
  while (__sync_val_compare_and_swap (&gomp_remaining_threads_count,
				      remaining, remaining - num_threads + 1)
	 != remaining);
#else
  gomp_mutex_lock (&gomp_remaining_threads_lock);
  num_threads = max_num_threads;
  remaining = gomp_remaining_threads_count;
  if (num_threads > remaining)
    num_threads = remaining + 1;
  gomp_remaining_threads_count -= num_threads - 1;
  gomp_mutex_unlock (&gomp_remaining_threads_lock);
#endif

  return num_threads;
}
// Apply one RELA relocation entry.  The patch address is the entry's
// r_offset plus the library load bias; |sym_addr| is the resolved symbol
// address (how it was resolved is indicated by |resolved|, unused here).
// Returns false and sets |error| for invalid or unsupported types.
bool ElfRelocations::ApplyRelaReloc(const ELF::Rela* rela,
                                    ELF::Addr sym_addr,
                                    bool resolved CRAZY_UNUSED,
                                    Error* error) {
  const ELF::Word rela_type = ELF_R_TYPE(rela->r_info);
  const ELF::Word CRAZY_UNUSED rela_symbol = ELF_R_SYM(rela->r_info);
  const ELF::Sword CRAZY_UNUSED addend = rela->r_addend;

  // Address in memory that the relocation patches.
  const ELF::Addr reloc = static_cast<ELF::Addr>(rela->r_offset + load_bias_);

  RLOG(" rela reloc=%p offset=%p type=%d addend=%p\n",
       reloc,
       rela->r_offset,
       rela_type,
       addend);

  // Apply the relocation.
  ELF::Addr* CRAZY_UNUSED target = reinterpret_cast<ELF::Addr*>(reloc);
  switch (rela_type) {
#ifdef __aarch64__
    case R_AARCH64_JUMP_SLOT:
      // PLT slot: store the absolute symbol address (plus addend).
      RLOG(" R_AARCH64_JUMP_SLOT target=%p addr=%p\n",
           target,
           sym_addr + addend);
      *target = sym_addr + addend;
      break;
    case R_AARCH64_GLOB_DAT:
      // GOT entry: store the absolute symbol address (plus addend).
      RLOG(" R_AARCH64_GLOB_DAT target=%p addr=%p\n",
           target,
           sym_addr + addend);
      *target = sym_addr + addend;
      break;
    case R_AARCH64_ABS64:
      // Absolute 64-bit relocation: add symbol+addend to the existing
      // value at the target.
      RLOG(" R_AARCH64_ABS64 target=%p (%p) addr=%p\n",
           target,
           *target,
           sym_addr + addend);
      *target += sym_addr + addend;
      break;
    case R_AARCH64_RELATIVE:
      // Load-bias-relative relocation; must not reference a symbol.
      RLOG(" R_AARCH64_RELATIVE target=%p (%p) bias=%p\n",
           target,
           *target,
           load_bias_ + addend);
      if (__builtin_expect(rela_symbol, 0)) {
        *error = "Invalid relative relocation with symbol";
        return false;
      }
      *target = load_bias_ + addend;
      break;
    case R_AARCH64_COPY:
      // NOTE: These relocations are forbidden in shared libraries.
      RLOG(" R_AARCH64_COPY\n");
      *error = "Invalid R_AARCH64_COPY relocation in shared library";
      return false;
#endif  // __aarch64__
    default:
      error->Format("Invalid relocation type (%d)", rela_type);
      return false;
  }

  return true;
}
// Wait (up to |waitCount| wait periods; negative means wait forever)
// until the record thread has filled at least one frame, then hand the
// caller a region of the shared control-block buffer.  Returns NO_ERROR
// or STOPPED with a buffer, or NO_MORE_BUFFERS / WOULD_BLOCK /
// TIMED_OUT / an error without one.
status_t AudioRecord::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    AutoMutex lock(mLock);
    int active;
    status_t result = NO_ERROR;
    audio_track_cblk_t* cblk = mCblk;
    uint32_t framesReq = audioBuffer->frameCount;
    uint32_t waitTimeMs = (waitCount < 0) ? cblk->bufferTimeoutMs : WAIT_PERIOD_MS;

    audioBuffer->frameCount = 0;
    audioBuffer->size = 0;

    uint32_t framesReady = cblk->framesReady();

    if (framesReady == 0) {
        cblk->lock.lock();
        // Jump into the loop so framesReady is re-read under cblk->lock
        // before the first wait.
        goto start_loop_here;
        while (framesReady == 0) {
            active = mActive;
            if (UNLIKELY(!active)) {
                cblk->lock.unlock();
                return NO_MORE_BUFFERS;
            }
            if (UNLIKELY(!waitCount)) {
                cblk->lock.unlock();
                return WOULD_BLOCK;
            }
            if (!(cblk->flags & CBLK_INVALID_MSK)) {
                // Release mLock while sleeping so other API calls can
                // proceed; reacquire in mLock -> cblk->lock order.
                mLock.unlock();
                result = cblk->cv.waitRelative(cblk->lock, milliseconds(waitTimeMs));
                cblk->lock.unlock();
                mLock.lock();
                if (mActive == 0) {
                    return status_t(STOPPED);
                }
                cblk->lock.lock();
            }
            if (cblk->flags & CBLK_INVALID_MSK) {
                // Server died/invalidated the track: recreate it.
                goto create_new_record;
            }
            if (__builtin_expect(result!=NO_ERROR, false)) {
                cblk->waitTimeMs += waitTimeMs;
                if (cblk->waitTimeMs >= cblk->bufferTimeoutMs) {
                    LOGW( "obtainBuffer timed out (is the CPU pegged?) "
                          "user=%08x, server=%08x", cblk->user, cblk->server);
                    cblk->lock.unlock();
                    result = mAudioRecord->start();
                    cblk->lock.lock();
                    if (result == DEAD_OBJECT) {
                        android_atomic_or(CBLK_INVALID_ON, &cblk->flags);
create_new_record:
                        result = AudioRecord::restoreRecord_l(cblk);
                    }
                    if (result != NO_ERROR) {
                        LOGW("obtainBuffer create Track error %d", result);
                        cblk->lock.unlock();
                        return result;
                    }
                    cblk->waitTimeMs = 0;
                }
                if (--waitCount == 0) {
                    cblk->lock.unlock();
                    return TIMED_OUT;
                }
            }
            // read the server count again
start_loop_here:
            framesReady = cblk->framesReady();
        }
        cblk->lock.unlock();
    }

    cblk->waitTimeMs = 0;

    if (framesReq > framesReady) {
        framesReq = framesReady;
    }

    uint32_t u = cblk->user;
    uint32_t bufferEnd = cblk->userBase + cblk->frameCount;

    // Clamp the request so it does not wrap past the end of the
    // circular buffer; the caller gets the contiguous part only.
    if (u + framesReq > bufferEnd) {
        framesReq = bufferEnd - u;
    }

    audioBuffer->flags = 0;
    audioBuffer->channelCount= mChannelCount;
    audioBuffer->format = mFormat;
    audioBuffer->frameCount = framesReq;
    audioBuffer->size = framesReq*cblk->frameSize;
    audioBuffer->raw = (int8_t*)cblk->buffer(u);
    active = mActive;
    return active ? status_t(NO_ERROR) : status_t(STOPPED);
}
/* Ping every usable inet/UDP endpoint of the servers in BIND with a
   NULLPROC RPC and record the first (i.e. fastest) server to answer in
   BIND->server_used / BIND->current_ep.  Returns 1 when a server was
   found, -1 on error or when no server replied within TIMEOUT.  */
static long int
__nis_findfastest_with_timeout (dir_binding *bind,
				const struct timeval *timeout)
{
  static const struct timeval TIMEOUT00 = { 0, 0 };
  struct findserv_req *pings;
  struct sockaddr_in sin, saved_sin;
  int found = -1;
  u_int32_t xid_seed;
  int sock, dontblock = 1;
  CLIENT *clnt;
  u_long i, j, pings_count, pings_max, fastest = -1;
  struct cu_data *cu;

  pings_max = bind->server_len * 2;	/* Reserve a little bit more memory
					   for multihomed hosts */
  pings_count = 0;
  pings = malloc (sizeof (struct findserv_req) * pings_max);
  xid_seed = (u_int32_t) (time (NULL) ^ getpid ());

  if (__builtin_expect (pings == NULL, 0))
    return -1;

  memset (&sin, '\0', sizeof (sin));
  sin.sin_family = AF_INET;
  /* Collect one ping slot per "inet" endpoint that is UDP-capable
     (proto empty or starting with '-').  */
  for (i = 0; i < bind->server_len; i++)
    for (j = 0; j < bind->server_val[i].ep.ep_len; ++j)
      if (strcmp (bind->server_val[i].ep.ep_val[j].family, "inet") == 0)
	if ((bind->server_val[i].ep.ep_val[j].proto == NULL) ||
	    (bind->server_val[i].ep.ep_val[j].proto[0] == '-') ||
	    (bind->server_val[i].ep.ep_val[j].proto[0] == '\0'))
	  {
	    sin.sin_addr.s_addr =
	      inetstr2int (bind->server_val[i].ep.ep_val[j].uaddr);
	    if (sin.sin_addr.s_addr == 0)
	      continue;
	    sin.sin_port = htons (__pmap_getnisport (&sin, NIS_PROG,
						     NIS_VERSION,
						     IPPROTO_UDP));
	    if (sin.sin_port == 0)
	      continue;

	    if (pings_count >= pings_max)
	      {
		/* More endpoints than expected: grow the array.  */
		struct findserv_req *new_pings;

		pings_max += 10;
		new_pings = realloc (pings, sizeof (struct findserv_req) *
				     pings_max);
		if (__builtin_expect (new_pings == NULL, 0))
		  {
		    free (pings);
		    return -1;
		  }
		pings = new_pings;
	      }
	    memcpy ((char *) &pings[pings_count].sin, (char *) &sin,
		    sizeof (sin));
	    memcpy ((char *)&saved_sin, (char *)&sin, sizeof(sin));
	    /* Give each ping a distinct XID so the reply identifies
	       which server answered.  */
	    pings[pings_count].xid = xid_seed + pings_count;
	    pings[pings_count].server_nr = i;
	    pings[pings_count].server_ep = j;
	    ++pings_count;
	  }

  /* Make sure at least one server was assigned */
  if (pings_count == 0)
    {
      free (pings);
      return -1;
    }

  /* Create RPC handle */
  /* NOTE(review): the socket() result is not checked here; a failure is
     only caught indirectly when clntudp_create returns NULL -- confirm
     that clntudp_create handles a negative descriptor.  */
  sock = socket (AF_INET, SOCK_DGRAM, IPPROTO_UDP);
  clnt = clntudp_create (&saved_sin, NIS_PROG, NIS_VERSION, *timeout,
			 &sock);
  if (clnt == NULL)
    {
      close (sock);
      free (pings);
      return -1;
    }
  auth_destroy (clnt->cl_auth);
  clnt->cl_auth = authunix_create_default ();
  cu = (struct cu_data *) clnt->cl_private;
  /* Non-blocking socket: clnt_call for the sends returns immediately.  */
  ioctl (sock, FIONBIO, &dontblock);
  /* Send to all servers the NULLPROC */
  for (i = 0; i < pings_count; ++i)
    {
      /* clntudp_call() will increment, subtract one */
      *((u_int32_t *) (cu->cu_outbuf)) = pings[i].xid - 1;
      memcpy ((char *) &cu->cu_raddr, (char *) &pings[i].sin,
	      sizeof (struct sockaddr_in));
      /* Transmit to NULLPROC, return immediately. */
      clnt_call (clnt, NULLPROC,
		 (xdrproc_t) xdr_void, (caddr_t) 0,
		 (xdrproc_t) xdr_void, (caddr_t) 0, TIMEOUT00);
    }
  while (found == -1)
    {
      /* Receive reply from NULLPROC asynchronously. Note null inproc. */
      int rc = clnt_call (clnt, NULLPROC,
			  (xdrproc_t) NULL, (caddr_t) 0,
			  (xdrproc_t) xdr_void, (caddr_t) 0,
			  *timeout);
      if (RPC_SUCCESS == rc)
	{
	  u_int32_t val;
	  /* Recover the ping index of the answering server from the
	     reply's XID.  */
	  memcpy (&val, cu->cu_inbuf, sizeof (u_int32_t));
	  fastest = val - xid_seed;
	  if (fastest < pings_count)
	    {
	      bind->server_used = pings[fastest].server_nr;
	      bind->current_ep = pings[fastest].server_ep;
	      found = 1;
	    }
	}
      else
	{
	  /* clnt_perror(clnt, "__nis_findfastest"); */
	  break;
	}
    }

  auth_destroy (clnt->cl_auth);
  clnt_destroy (clnt);
  close (sock);
  free (pings);

  return found;
}