Example #1
static const double huge = 1e300;

double
__ieee754_atanh (double x)
{
  double xa = fabs (x);
  double t;
  if (isless (xa, 0.5))
    {
      if (__glibc_unlikely (xa < 0x1.0p-28))
	{
	  math_force_eval (huge + x);
	  return x;
	}

      t = xa + xa;
      t = 0.5 * __log1p (t + t * xa / (1.0 - xa));
    }
  else if (__glibc_likely (isless (xa, 1.0)))
    t = 0.5 * __log1p ((xa + xa) / (1.0 - xa));
  else
    {
      if (isgreater (xa, 1.0))
	return (x - x) / (x - x);

      return x / 0.0;
    }

  return __copysign (t, x);
}
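Both branches above reduce atanh to log1p. A minimal standalone sketch (plain ISO C with <math.h>, not glibc internals) that checks the identity atanh(x) = 0.5 * log1p(2x / (1 - x)) against the public libm entry point:

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double x = 0.75;
  /* Same reduction as the mid-range branch above.  */
  double via_log1p = 0.5 * log1p ((x + x) / (1.0 - x));
  printf ("atanh(%g) = %.17g, via log1p = %.17g\n", x, atanh (x), via_log1p);
  return 0;
}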
Example #2
/* Reserve storage for the data of the file associated with FD.  */
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
#ifdef __NR_fallocate
# ifndef __ASSUME_FALLOCATE
  if (__glibc_likely (__have_fallocate >= 0))
# endif
    {
      INTERNAL_SYSCALL_DECL (err);
# ifdef INTERNAL_SYSCALL_TYPES
      int res = INTERNAL_SYSCALL_TYPES (fallocate, err, 4, int, fd,
					int, 0, off_t, offset,
					off_t, len);
# else
      int res = INTERNAL_SYSCALL (fallocate, err, 4, fd, 0, offset, len);
# endif

      if (! INTERNAL_SYSCALL_ERROR_P (res, err))
	return 0;

# ifndef __ASSUME_FALLOCATE
      if (__glibc_unlikely (INTERNAL_SYSCALL_ERRNO (res, err) == ENOSYS))
	__have_fallocate = -1;
      else
# endif
	if (INTERNAL_SYSCALL_ERRNO (res, err) != EOPNOTSUPP)
	  return INTERNAL_SYSCALL_ERRNO (res, err);
    }
#endif

  /* The syscall is unavailable or unsupported here; fall back to the
     generic emulation.  */
  return internal_fallocate (fd, offset, len);
}
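A minimal caller-side sketch of the public interface implemented above (hypothetical demo path; note that posix_fallocate reports errors through its return value, not errno):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main (void)
{
  int fd = open ("/tmp/fallocate-demo", O_RDWR | O_CREAT, 0600);
  if (fd < 0)
    return 1;
  /* Reserve 1 MiB starting at offset 0.  */
  int err = posix_fallocate (fd, 0, 1024 * 1024);
  if (err != 0)
    fprintf (stderr, "posix_fallocate failed: %d\n", err);
  close (fd);
  return err != 0;
}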
Example #3
int
accept4 (int fd, __SOCKADDR_ARG addr, socklen_t *addr_len, int flags)
{
  if (__glibc_likely (have_accept4 >= 0))
    {
      int ret = SOCKETCALL_CANCEL (accept4, fd, addr.__sockaddr__, addr_len,
				   flags);
      /* The kernel returns -EINVAL for unknown socket operations.
	 We need to convert that error to an ENOSYS error.  */
      if (__builtin_expect (ret < 0, 0)
	  && have_accept4 == 0
	  && errno == EINVAL)
	{
	  /* Try another call, this time with the FLAGS parameter
	     cleared and an invalid file descriptor.  This call will not
	     cause any harm and it will return immediately.  */
	  ret = SOCKETCALL_CANCEL (invalid, -1);
	  if (errno == EINVAL)
	    {
	      have_accept4 = -1;
	      __set_errno (ENOSYS);
	    }
	  else
	    {
	      have_accept4 = 1;
	      __set_errno (EINVAL);
	    }
	  return -1;
	}
      return ret;
    }
  __set_errno (ENOSYS);
  return -1;
}
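A caller-side sketch (assumes _GNU_SOURCE): the flag bits let the caller skip separate fcntl calls, and on kernels where the probe above fails the call returns -1 with errno set to ENOSYS.

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/socket.h>

int
accept_nonblocking (int listen_fd)
{
  /* Accept a connection with the new descriptor already marked
     non-blocking and close-on-exec.  */
  return accept4 (listen_fd, NULL, NULL, SOCK_NONBLOCK | SOCK_CLOEXEC);
}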
Example #4
static const float huge = 1e30;

float
__ieee754_atanhf (float x)
{
  float xa = fabsf (x);
  float t;
  if (isless (xa, 0.5f))
    {
      if (__glibc_unlikely (xa < 0x1.0p-28f))
	{
	  math_force_eval (huge + x);
	  math_check_force_underflow (x);
	  return x;
	}

      t = xa + xa;
      t = 0.5f * __log1pf (t + t * xa / (1.0f - xa));
    }
  else if (__glibc_likely (isless (xa, 1.0f)))
    t = 0.5f * __log1pf ((xa + xa) / (1.0f - xa));
  else
    {
      if (isgreater (xa, 1.0f))
	return (x - x) / (x - x);

      return x / 0.0f;
    }

  return __copysignf (t, x);
}
Example #5
bool
__libc_scratch_buffer_grow (struct scratch_buffer *buffer)
{
  void *new_ptr;
  size_t new_length = buffer->length * 2;

  /* Discard old buffer.  */
  scratch_buffer_free (buffer);

  /* Check for overflow.  */
  if (__glibc_likely (new_length >= buffer->length))
    new_ptr = malloc (new_length);
  else
    {
      __set_errno (ENOMEM);
      new_ptr = NULL;
    }

  if (__glibc_unlikely (new_ptr == NULL))
    {
      /* Buffer must remain valid to free.  */
      scratch_buffer_init (buffer);
      return false;
    }

  /* Install new heap-based buffer.  */
  buffer->data = new_ptr;
  buffer->length = new_length;
  return true;
}
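A usage sketch of the scratch_buffer API this function backs, assuming glibc's internal <scratch_buffer.h> (its inline scratch_buffer_grow wrapper forwards to __libc_scratch_buffer_grow):

#include <scratch_buffer.h>
#include <stdbool.h>
#include <stddef.h>

bool
with_buffer (size_t needed)
{
  struct scratch_buffer buf;
  scratch_buffer_init (&buf);          /* Starts with the on-stack space.  */
  while (buf.length < needed)
    if (!scratch_buffer_grow (&buf))
      /* On failure the buffer has already been freed and reset.  */
      return false;
  /* ... use buf.data ...  */
  scratch_buffer_free (&buf);
  return true;
}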
Example #6
long double
__ieee754_exp2l (long double x)
{
  if (__glibc_likely (isless (x, (long double) LDBL_MAX_EXP)))
    {
      if (__builtin_expect (isgreaterequal (x, (long double) (LDBL_MIN_EXP
							      - LDBL_MANT_DIG
							      - 1)), 1))
	{
	  int intx = (int) x;
	  long double fractx = x - intx;
	  long double result;
	  if (fabsl (fractx) < LDBL_EPSILON / 4.0L)
	    result = __scalbnl (1.0L + fractx, intx);
	  else
	    result = __scalbnl (__ieee754_expl (M_LN2l * fractx), intx);
	  math_check_force_underflow_nonneg (result);
	  return result;
	}
      else
	{
	  /* Underflow or exact zero.  */
	  if (isinf (x))
	    return 0;
	  else
	    return LDBL_MIN * LDBL_MIN;
	}
    }
  else
    /* Infinity, NaN or overflow.  */
    return LDBL_MAX * x;
}
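The core of the fast path splits x into integer and fractional parts and rescales with scalbn. A standalone sketch of the same decomposition using public libm calls (M_LN2 from <math.h>):

#include <math.h>
#include <stdio.h>

int
main (void)
{
  double x = 12.34;
  int intx = (int) x;
  double frac = x - intx;
  /* exp2(x) == scalbn(exp(ln(2) * frac), intx) for the split above.  */
  double via_exp = scalbn (exp (M_LN2 * frac), intx);
  printf ("exp2(%g) = %.17g, via exp = %.17g\n", x, exp2 (x), via_exp);
  return 0;
}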
Example #7
enum nss_status
_nss_nis_setnetgrent (const char *group, struct __netgrent *netgrp)
{
  int len;
  enum nss_status status;

  status = NSS_STATUS_SUCCESS;

  if (__glibc_unlikely (group == NULL || group[0] == '\0'))
    return NSS_STATUS_UNAVAIL;

  char *domain;
  if (__glibc_unlikely (yp_get_default_domain (&domain)))
    return NSS_STATUS_UNAVAIL;

  status = yperr2nss (yp_match (domain, "netgroup", group, strlen (group),
				&netgrp->data, &len));
  if (__glibc_likely (status == NSS_STATUS_SUCCESS))
    {
      /* Our implementation of yp_match already allocates a buffer
	 which is one byte larger than the value in LEN specifies
	 and the last byte is filled with NUL.  So we can simply
	 use that buffer.  */
      assert (len >= 0);
      assert (malloc_usable_size (netgrp->data) >= len + 1);
      assert (netgrp->data[len] == '\0');

      netgrp->data_size = len;
      netgrp->cursor = netgrp->data;
    }

  return status;
}
Example #8
static inline __attribute__((always_inline)) pid_t
really_getpid (pid_t oldval)
{
  if (__glibc_likely (oldval == 0))
    {
      pid_t selftid = THREAD_GETMEM (THREAD_SELF, tid);
      if (__glibc_likely (selftid != 0))
	return selftid;
    }

  INTERNAL_SYSCALL_DECL (err);
  pid_t result = INTERNAL_SYSCALL (getpid, err, 0);

  /* We do not set the PID field in the TID here since we might be
     called from a signal handler while the thread executes fork.  */
  if (oldval == 0)
    THREAD_SETMEM (THREAD_SELF, tid, result);
  return result;
}
Example #9
int
__pthread_once (pthread_once_t *once_control, void (*init_routine) (void))
{
  /* Fast path.  See __pthread_once_slow.  */
  int val;
  val = atomic_load_acquire (once_control);
  if (__glibc_likely ((val & __PTHREAD_ONCE_DONE) != 0))
    return 0;
  else
    return __pthread_once_slow (once_control, init_routine);
}
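A caller-side sketch of the contract: once init_routine has completed, every later call takes only the acquire-load fast path above.

#include <pthread.h>

static pthread_once_t once = PTHREAD_ONCE_INIT;

static void
init (void)
{
  /* One-time initialization goes here.  */
}

void
ensure_initialized (void)
{
  /* Safe to call concurrently from any number of threads.  */
  pthread_once (&once, init);
}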
Example #10
static bool_t
xdr_endpoint (XDR *xdrs, endpoint *objp)
{
  bool_t res = xdr_string (xdrs, &objp->uaddr, ~0);
  if (__builtin_expect (res, TRUE))
    {
      res = xdr_string (xdrs, &objp->family, ~0);
      if (__glibc_likely (res))
	res = xdr_string (xdrs, &objp->proto, ~0);
    }
  return res;
}
Example #11
int
__pthread_setcanceltype (int type, int *oldtype)
{
  if (type < PTHREAD_CANCEL_DEFERRED || type > PTHREAD_CANCEL_ASYNCHRONOUS)
    return EINVAL;

#ifndef SIGCANCEL
  if (type == PTHREAD_CANCEL_ASYNCHRONOUS)
    return ENOTSUP;
#endif

  volatile struct pthread *self = THREAD_SELF;

  int oldval = THREAD_GETMEM (self, cancelhandling);
  while (1)
    {
      int newval = (type == PTHREAD_CANCEL_ASYNCHRONOUS
		    ? oldval | CANCELTYPE_BITMASK
		    : oldval & ~CANCELTYPE_BITMASK);

      /* Store the old value.  */
      if (oldtype != NULL)
	*oldtype = ((oldval & CANCELTYPE_BITMASK)
		    ? PTHREAD_CANCEL_ASYNCHRONOUS : PTHREAD_CANCEL_DEFERRED);

      /* Avoid doing unnecessary work.  The atomic operation can
	 potentially be expensive if the memory has to be locked and
	 remote cache lines have to be invalidated.  */
      if (oldval == newval)
	break;

      /* Update the cancel handling word.  This has to be done
	 atomically since other bits could be modified as well.  */
      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (__glibc_likely (curval == oldval))
	{
	  if (CANCEL_ENABLED_AND_CANCELED_AND_ASYNCHRONOUS (newval))
	    {
	      THREAD_SETMEM (self, result, PTHREAD_CANCELED);
	      __do_cancel ();
	    }

	  break;
	}

      /* Prepare for the next round.  */
      oldval = curval;
    }

  return 0;
}
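A usage sketch of the public interface: enable asynchronous cancellation around a compute-only region, then restore the previous type.

#include <pthread.h>

void
compute_region (void)
{
  int oldtype;
  pthread_setcanceltype (PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
  /* ... work that is safe to cancel at any instruction ...  */
  pthread_setcanceltype (oldtype, NULL);
}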
Example #12
/* Convert multiple precision number *X into double precision number *Y.  The
   result is correctly rounded to the nearest/even.  */
void
__mp_dbl (const mp_no *x, double *y, int p)
{
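  /* X and EX are mpa.h accessor macros: X expands to x->d (the digit
     array) and EX to x->e (the exponent).  */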
  if (X[0] == 0)
    {
      *y = 0;
      return;
    }

  if (__glibc_likely (EX > -42 || (EX == -42 && X[1] >= TWO10)))
    norm (x, y, p);
  else
    denorm (x, y, p);
}
Example #13
/* Thread-local buffer reused by subsequent calls.  */
static __thread char *buf;

char *
strerror (int errnum)
{
  char *ret = __strerror_r (errnum, NULL, 0);
  int saved_errno;

  if (__glibc_likely (ret != NULL))
    return ret;
  saved_errno = errno;
  if (buf == NULL)
    buf = malloc (1024);
  __set_errno (saved_errno);
  if (buf == NULL)
    return _("Unknown error");
  return __strerror_r (errnum, buf, 1024);
}
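A thread-safe caller-side alternative to the static buffer above: with _POSIX_C_SOURCE >= 200112L (and no _GNU_SOURCE) glibc exposes the XSI strerror_r, which fills a caller-supplied buffer and returns 0 on success.

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <string.h>

void
print_error (int errnum)
{
  char msgbuf[1024];
  if (strerror_r (errnum, msgbuf, sizeof msgbuf) == 0)
    fprintf (stderr, "error: %s\n", msgbuf);
}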
Example #14
static inline int
realtime_getres (struct timespec *res)
{
  long int clk_tck = sysconf (_SC_CLK_TCK);

  if (__glibc_likely (clk_tck != -1))
    {
      /* This implementation assumes that the realtime clock has a
	 resolution higher than 1 second.  This is the case for any
	 reasonable implementation.  */
      res->tv_sec = 0;
      res->tv_nsec = 1000000000 / clk_tck;
      return 0;
    }

  return -1;
}
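This helper backs CLOCK_REALTIME in clock_getres; a caller-side sketch:

#include <stdio.h>
#include <time.h>

int
main (void)
{
  struct timespec res;
  if (clock_getres (CLOCK_REALTIME, &res) == 0)
    printf ("realtime clock resolution: %ld ns\n", res.tv_nsec);
  return 0;
}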
Example #15
void
__attribute_noinline__
pthread_cancel_init (void)
{
  void *resume;
  void *personality;
  void *forcedunwind;
  void *getcfa;
  void *handle;

  if (__glibc_likely (libgcc_s_handle != NULL))
    {
      /* Force gcc to reload all values.  */
      asm volatile ("" ::: "memory");
      return;
    }

  handle = __libc_dlopen (LIBGCC_S_SO);

  if (handle == NULL
      || (resume = __libc_dlsym (handle, "_Unwind_Resume")) == NULL
      || (personality = __libc_dlsym (handle, "__gcc_personality_v0")) == NULL
      || (forcedunwind = __libc_dlsym (handle, "_Unwind_ForcedUnwind"))
	 == NULL
      || (getcfa = __libc_dlsym (handle, "_Unwind_GetCFA")) == NULL
#ifdef ARCH_CANCEL_INIT
      || ARCH_CANCEL_INIT (handle)
#endif
      )
    __libc_fatal (LIBGCC_S_SO " must be installed for pthread_cancel to work\n");

  PTR_MANGLE (resume);
  __libgcc_s_resume = resume;
  PTR_MANGLE (personality);
  libgcc_s_personality = personality;
  PTR_MANGLE (forcedunwind);
  libgcc_s_forcedunwind = forcedunwind;
  PTR_MANGLE (getcfa);
  libgcc_s_getcfa = getcfa;
  /* Make sure libgcc_s_handle is written last.  Otherwise,
     pthread_cancel_init might return early even when the pointer the
     caller is interested in is not initialized yet.  */
  atomic_write_barrier ();
  libgcc_s_handle = handle;
}
Example #16
int
pthread_key_delete (pthread_key_t key)
{
  int result = EINVAL;

  if (__glibc_likely (key < PTHREAD_KEYS_MAX))
    {
      unsigned int seq = __pthread_keys[key].seq;

      if (__builtin_expect (! KEY_UNUSED (seq), 1)
	  && ! atomic_compare_and_exchange_bool_acq (&__pthread_keys[key].seq,
						     seq + 1, seq))
	/* We deleted a valid key.  */
	result = 0;
    }

  return result;
}
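A caller-side sketch of the lifecycle being managed; deleting a key bumps its sequence number so the slot can be handed out again by a later pthread_key_create.

#include <pthread.h>

int
key_lifecycle (void)
{
  pthread_key_t key;
  if (pthread_key_create (&key, NULL) != 0)
    return -1;
  pthread_setspecific (key, "per-thread value");
  return pthread_key_delete (key);
}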
Example #17
    http_server scheduler::get_http_server(int port,
            std::function<void(int, const std::string&, const std::string&, std::map<std::string, std::string>&, char*, size_t)> f)
    {
        auto sch_impl = std::dynamic_pointer_cast<scheduler_impl>(m_impl);
        auto& impl = sch_impl->m_util_impls[HTTP_SVR];
        if (!impl) {
            SOCK_TYPE type = (port >= 0) ? NORM_TRANS : UNIX_DOMAIN;
            auto http_impl = std::make_shared<http_impl_t<tcp_server_impl>>(type);
            // Create the TCP server's listening socket.  It accepts connection requests from any IP address and listens on the given port.
            http_impl->fd = http_impl->conn_sock.create(PRT_TCP, USR_SERVER, nullptr, (uint16_t)port);

            if (__glibc_likely(http_impl->fd > 0)) {
                http_impl->m_util.m_app_prt = PRT_HTTP;
                http_impl->m_util.m_sch = this;
                http_impl->m_util.m_protocol_hook = [this](int fd, char* data, size_t len) {
                    auto this_sch_impl = std::dynamic_pointer_cast<scheduler_impl>(m_impl);
                    if (fd < 0 || fd >= this_sch_impl->m_ev_array.size() || !this_sch_impl->m_ev_array[fd])
                        return -(int)len;

                    auto conn = std::dynamic_pointer_cast<http_conn_t<tcp_server_conn>>(this_sch_impl->m_ev_array[fd]);
                    return http_parser(false, conn, data, len);
                };

                http_impl->m_util.m_f = [this](int fd, const std::string& ip_addr, uint16_t port, char *data, size_t len) {
                    auto this_sch_impl = std::dynamic_pointer_cast<scheduler_impl>(m_impl);
                    auto& this_http_svr = this_sch_impl->m_util_impls[HTTP_SVR];
                    auto this_http_impl = std::dynamic_pointer_cast<http_impl_t<tcp_server_impl>>(this_http_svr);
                    tcp_callback_for_http<std::shared_ptr<http_impl_t<tcp_server_impl>>, tcp_server_conn>(
                            false, this_http_impl, fd, data, len);
                };
                http_impl->m_http_svr = std::move(f);

                http_impl->sch_impl = sch_impl;
                http_impl->f = std::bind(&http_impl_t<tcp_server_impl>::tcp_server_callback, http_impl.get(), _1);
                sch_impl->add_event(http_impl);
                impl = http_impl;
            }
        }

        http_server obj;
        obj.m_impl = impl;
        return obj;
    }
Example #18
int
__pthread_mutex_lock (pthread_mutex_t *mutex)
{
  assert (sizeof (mutex->__size) >= sizeof (mutex->__data));

  unsigned int type = PTHREAD_MUTEX_TYPE_ELISION (mutex);

  LIBC_PROBE (mutex_entry, 1, mutex);

  if (__builtin_expect (type & ~(PTHREAD_MUTEX_KIND_MASK_NP
				 | PTHREAD_MUTEX_ELISION_FLAGS_NP), 0))
    return __pthread_mutex_lock_full (mutex);

  if (__glibc_likely (type == PTHREAD_MUTEX_TIMED_NP))
    {
      FORCE_ELISION (mutex, goto elision);
    simple:
      /* Normal mutex.  */
      LLL_MUTEX_LOCK (mutex);
      assert (mutex->__data.__owner == 0);
    }
Example #19
bool
__libc_scratch_buffer_grow_preserve (struct scratch_buffer *buffer)
{
  size_t new_length = 2 * buffer->length;
  void *new_ptr;

  if (buffer->data == buffer->__space)
    {
      /* Move buffer to the heap.  No overflow is possible because
	 buffer->length describes a small buffer on the stack.  */
      new_ptr = malloc (new_length);
      if (new_ptr == NULL)
	return false;
      memcpy (new_ptr, buffer->__space, buffer->length);
    }
  else
    {
      /* Buffer was already on the heap.  Check for overflow.  */
      if (__glibc_likely (new_length >= buffer->length))
	new_ptr = realloc (buffer->data, new_length);
      else
	{
	  __set_errno (ENOMEM);
	  new_ptr = NULL;
	}

      if (__glibc_unlikely (new_ptr == NULL))
	{
	  /* Deallocate, but buffer must remain valid to free.  */
	  free (buffer->data);
	  scratch_buffer_init (buffer);
	  return false;
	}
    }

  /* Install new heap-based buffer.  */
  buffer->data = new_ptr;
  buffer->length = new_length;
  return true;
}
Example #20
void
internal_function attribute_hidden
__pthread_disable_asynccancel (int oldtype)
{
  /* If asynchronous cancellation was enabled before we do not have
     anything to do.  */
  if (oldtype & CANCELTYPE_BITMASK)
    return;

  struct pthread *self = THREAD_SELF;
  int newval;

  int oldval = THREAD_GETMEM (self, cancelhandling);

  while (1)
    {
      newval = oldval & ~CANCELTYPE_BITMASK;

      int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling, newval,
					      oldval);
      if (__glibc_likely (curval == oldval))
	break;

      /* Prepare the next round.  */
      oldval = curval;
    }

  /* We cannot return when we are being canceled.  Upon return the
     thread might do things which would have to be undone.  The
     following loop should loop until the cancellation signal is
     delivered.  */
  while (__builtin_expect ((newval & (CANCELING_BITMASK | CANCELED_BITMASK))
			   == CANCELING_BITMASK, 0))
    {
      lll_futex_wait (&self->cancelhandling, newval, LLL_PRIVATE);
      newval = THREAD_GETMEM (self, cancelhandling);
    }
}
Example #21
int
__lll_timedwait_tid (int *tidp, const struct timespec *abstime)
{
  /* Reject invalid timeouts.  */
  if (__glibc_unlikely (abstime->tv_nsec < 0)
      || __glibc_unlikely (abstime->tv_nsec >= 1000000000))
    return EINVAL;

  /* Repeat until thread terminated.  */
  int tid;
  while ((tid = atomic_load_relaxed (tidp)) != 0)
    {
      /* See exit-thread.h for details.  */
      if (tid == NACL_EXITING_TID)
	/* The thread should now be in the process of exiting, so it will
	   finish quickly enough that the timeout doesn't matter.  If any
	   thread ever stays in this state for long, there is something
	   catastrophically wrong.  */
	atomic_spin_nop ();
      else
	{
	  assert (tid > 0);

	  /* If *FUTEX == TID, wait until woken or timeout.  */
	  int err = __nacl_irt_futex.futex_wait_abs ((volatile int *) tidp,
						     tid, abstime);
	  if (err != 0)
	    {
	      if (__glibc_likely (err == ETIMEDOUT))
		return err;
	      assert (err == EAGAIN);
	    }
	}
    }

  return 0;
}
Example #22
void
__cleanup_fct_attribute
__pthread_register_cancel_defer (__pthread_unwind_buf_t *buf)
{
  struct pthread_unwind_buf *ibuf = (struct pthread_unwind_buf *) buf;
  struct pthread *self = THREAD_SELF;

  /* Store old info.  */
  ibuf->priv.data.prev = THREAD_GETMEM (self, cleanup_jmp_buf);
  ibuf->priv.data.cleanup = THREAD_GETMEM (self, cleanup);

  int cancelhandling = THREAD_GETMEM (self, cancelhandling);

  /* Disable asynchronous cancellation for now.  */
  if (__glibc_unlikely (cancelhandling & CANCELTYPE_BITMASK))
    while (1)
      {
	int curval = THREAD_ATOMIC_CMPXCHG_VAL (self, cancelhandling,
						cancelhandling
						& ~CANCELTYPE_BITMASK,
						cancelhandling);
	if (__glibc_likely (curval == cancelhandling))
	  /* Successfully replaced the value.  */
	  break;

	/* Prepare for the next round.  */
	cancelhandling = curval;
      }

  ibuf->priv.data.canceltype = (cancelhandling & CANCELTYPE_BITMASK
				? PTHREAD_CANCEL_ASYNCHRONOUS
				: PTHREAD_CANCEL_DEFERRED);

  /* Store the new cleanup handler info.  */
  THREAD_SETMEM (self, cleanup_jmp_buf, (struct pthread_unwind_buf *) buf);
}
Example #23
int
__open_catalog (const char *cat_name, const char *nlspath, const char *env_var,
		__nl_catd catalog)
{
  int fd = -1;
  struct stat64 st;
  int swapping;
  size_t cnt;
  size_t max_offset;
  size_t tab_size;
  const char *lastp;
  int result = -1;
  char *buf = NULL;

  if (strchr (cat_name, '/') != NULL || nlspath == NULL)
    fd = open_not_cancel_2 (cat_name, O_RDONLY);
  else
    {
      const char *run_nlspath = nlspath;
#define ENOUGH(n)							      \
  if (__glibc_unlikely (bufact + (n) >= bufmax))			      \
    {									      \
      char *old_buf = buf;						      \
      bufmax += (bufmax < 256 + (n)) ? 256 + (n) : bufmax;		      \
      buf = realloc (buf, bufmax);					      \
      if (__glibc_unlikely (buf == NULL))				      \
	{								      \
	  free (old_buf);						      \
	  return -1;							      \
	}								      \
    }

      /* The RUN_NLSPATH variable contains a colon separated list of
	 descriptions where we expect to find catalogs.  We have to
	 recognize certain % substitutions and stop when we found the
	 first existing file.  */
      size_t bufact;
      size_t bufmax = 0;
      size_t len;

      fd = -1;
      while (*run_nlspath != '\0')
	{
	  bufact = 0;

	  if (*run_nlspath == ':')
	    {
	      /* Leading colon or adjacent colons - treat same as %N.  */
	      len = strlen (cat_name);
	      ENOUGH (len);
	      memcpy (&buf[bufact], cat_name, len);
	      bufact += len;
	    }
	  else
	    while (*run_nlspath != ':' && *run_nlspath != '\0')
	      if (*run_nlspath == '%')
		{
		  const char *tmp;

		  ++run_nlspath;	/* We have seen the `%'.  */
		  switch (*run_nlspath++)
		    {
		    case 'N':
		      /* Use the catalog name.  */
		      len = strlen (cat_name);
		      ENOUGH (len);
		      memcpy (&buf[bufact], cat_name, len);
		      bufact += len;
		      break;
		    case 'L':
		      /* Use the current locale category value.  */
		      len = strlen (env_var);
		      ENOUGH (len);
		      memcpy (&buf[bufact], env_var, len);
		      bufact += len;
		      break;
		    case 'l':
		      /* Use language element of locale category value.  */
		      tmp = env_var;
		      do
			{
			  ENOUGH (1);
			  buf[bufact++] = *tmp++;
			}
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      break;
		    case 't':
		      /* Use territory element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      if (*tmp == '_')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0' && *tmp != '.');
			}
		      break;
		    case 'c':
		      /* Use code set element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '.');
		      if (*tmp == '.')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0');
			}
		      break;
		    case '%':
		      ENOUGH (1);
		      buf[bufact++] = '%';
		      break;
		    default:
		      /* Unknown variable: ignore this path element.  */
		      bufact = 0;
		      while (*run_nlspath != '\0' && *run_nlspath != ':')
			++run_nlspath;
		      break;
		    }
		}
	      else
		{
		  ENOUGH (1);
		  buf[bufact++] = *run_nlspath++;
		}

	  ENOUGH (1);
	  buf[bufact] = '\0';

	  if (bufact != 0)
	    {
	      fd = open_not_cancel_2 (buf, O_RDONLY);
	      if (fd >= 0)
		break;
	    }

	  ++run_nlspath;
	}
    }

  /* Avoid dealing with directories and block devices */
  if (__builtin_expect (fd, 0) < 0)
    {
      free (buf);
      return -1;
    }

  if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st), 0) < 0)
    goto close_unlock_return;

  if (__builtin_expect (!S_ISREG (st.st_mode), 0)
      || (size_t) st.st_size < sizeof (struct catalog_obj))
    {
      /* `errno' is not set correctly but the file is not usable.
	 Use a reasonable error value.  */
      __set_errno (EINVAL);
      goto close_unlock_return;
    }

  catalog->file_size = st.st_size;
#ifdef _POSIX_MAPPED_FILES
# ifndef MAP_COPY
    /* Linux seems to lack read-only copy-on-write.  */
#  define MAP_COPY MAP_PRIVATE
# endif
# ifndef MAP_FILE
    /* Some systems do not have this flag; it is superfluous.  */
#  define MAP_FILE 0
# endif
  catalog->file_ptr =
    (struct catalog_obj *) __mmap (NULL, st.st_size, PROT_READ,
				   MAP_FILE|MAP_COPY, fd, 0);
  if (__builtin_expect (catalog->file_ptr != (struct catalog_obj *) MAP_FAILED,
			1))
    /* Tell the world we managed to mmap the file.  */
    catalog->status = mmapped;
  else
#endif /* _POSIX_MAPPED_FILES */
    {
      /* mmap failed perhaps because the system call is not
	 implemented.  Try to load the file.  */
      size_t todo;
      catalog->file_ptr = malloc (st.st_size);
      if (catalog->file_ptr == NULL)
	goto close_unlock_return;

      todo = st.st_size;
      /* Read in a loop, handling partial reads.  */
      do
	{
	  size_t now = read_not_cancel (fd, (((char *) catalog->file_ptr)
					     + (st.st_size - todo)), todo);
	  if (now == 0 || now == (size_t) -1)
	    {
#ifdef EINTR
	      if (now == (size_t) -1 && errno == EINTR)
		continue;
#endif
	      free ((void *) catalog->file_ptr);
	      goto close_unlock_return;
	    }
	  todo -= now;
	}
      while (todo > 0);
      catalog->status = malloced;
    }

  /* Determine whether the file is a catalog file and if yes whether
     it is written using the correct byte order.  Else we have to swap
     the values.  */
  if (__glibc_likely (catalog->file_ptr->magic == CATGETS_MAGIC))
    swapping = 0;
  else if (catalog->file_ptr->magic == SWAPU32 (CATGETS_MAGIC))
    swapping = 1;
  else
    {
    invalid_file:
      /* Invalid file.  Free the resources and mark catalog as not
	 usable.  */
#ifdef _POSIX_MAPPED_FILES
      if (catalog->status == mmapped)
	__munmap ((void *) catalog->file_ptr, catalog->file_size);
      else
#endif	/* _POSIX_MAPPED_FILES */
	free (catalog->file_ptr);
      goto close_unlock_return;
    }

#define SWAP(x) (swapping ? SWAPU32 (x) : (x))

  /* Get dimensions of the used hashing table.  */
  catalog->plane_size = SWAP (catalog->file_ptr->plane_size);
  catalog->plane_depth = SWAP (catalog->file_ptr->plane_depth);

  /* The file contains two versions of the pointer tables.  Pick the
     right one for the local byte order.  */
#if __BYTE_ORDER == __LITTLE_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[0];
#elif __BYTE_ORDER == __BIG_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[catalog->plane_size
						  * catalog->plane_depth
						  * 3];
#else
# error Cannot handle __BYTE_ORDER byte order
#endif

  /* The rest of the file contains all the strings.  They are
     addressed relative to the position of the first string.  */
  catalog->strings =
    (const char *) &catalog->file_ptr->name_ptr[catalog->plane_size
					       * catalog->plane_depth * 3 * 2];

  /* Determine the largest string offset mentioned in the table.  */
  max_offset = 0;
  tab_size = 3 * catalog->plane_size * catalog->plane_depth;
  for (cnt = 2; cnt < tab_size; cnt += 3)
    if (catalog->name_ptr[cnt] > max_offset)
      max_offset = catalog->name_ptr[cnt];

  /* Now we can check whether the file is large enough to contain the
     tables it says it contains.  */
  if ((size_t) st.st_size
      <= (sizeof (struct catalog_obj) + 2 * tab_size + max_offset))
    /* The last string is not contained in the file.  */
    goto invalid_file;

  lastp = catalog->strings + max_offset;
  max_offset = (st.st_size
		- sizeof (struct catalog_obj) + 2 * tab_size + max_offset);
  while (*lastp != '\0')
    {
      if (--max_offset == 0)
	goto invalid_file;
      ++lastp;
    }

  /* We succeeded.  */
  result = 0;

  /* Release the lock again.  */
 close_unlock_return:
  close_not_cancel_no_status (fd);
  free (buf);

  return result;
}
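A caller-side sketch of the public catalog API that __open_catalog serves; the NLSPATH %N/%L substitutions handled above decide which file gets opened.

#include <nl_types.h>
#include <stdio.h>

int
main (void)
{
  nl_catd cat = catopen ("example", NL_CAT_LOCALE);
  if (cat != (nl_catd) -1)
    {
      /* Set 1, message 1, with a default string as fallback.  */
      puts (catgets (cat, 1, 1, "Hello, world"));
      catclose (cat);
    }
  return 0;
}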
Example #24
int
gai_suspend (const struct gaicb *const list[], int ent,
	     const struct timespec *timeout)
{
  struct waitlist waitlist[ent];
  struct requestlist *requestlist[ent];
#ifndef DONT_NEED_GAI_MISC_COND
  pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
#endif
  int cnt;
  unsigned int cntr = 1;
  int none = 1;
  int result;

  /* Request the mutex.  */
  pthread_mutex_lock (&__gai_requests_mutex);

  /* There is not yet a finished request.  Signal the request that
     we are working for it.  */
  for (cnt = 0; cnt < ent; ++cnt)
    if (list[cnt] != NULL && list[cnt]->__return == EAI_INPROGRESS)
      {
	requestlist[cnt] = __gai_find_request (list[cnt]);

	if (requestlist[cnt] != NULL)
	  {
#ifndef DONT_NEED_GAI_MISC_COND
	    waitlist[cnt].cond = &cond;
#endif
	    waitlist[cnt].next = requestlist[cnt]->waiting;
	    waitlist[cnt].counterp = &cntr;
	    waitlist[cnt].sigevp = NULL;
	    waitlist[cnt].caller_pid = 0;	/* Not needed.  */
	    requestlist[cnt]->waiting = &waitlist[cnt];
	    none = 0;
	  }
      }

  if (none)
    {
      if (cnt < ent)
	/* There is an entry which is finished.  */
	result = 0;
      else
	result = EAI_ALLDONE;
    }
  else
    {
      /* There is no request done but some are still being worked on.  */
      int oldstate;

      /* Since `pthread_cond_wait'/`pthread_cond_timedwait' are cancelation
	 points we must be careful.  We added entries to the waiting lists
	 which we must remove.  So defer cancelation for now.  */
      pthread_setcancelstate (PTHREAD_CANCEL_DISABLE, &oldstate);

#ifdef DONT_NEED_GAI_MISC_COND
      result = 0;
      GAI_MISC_WAIT (result, cntr, timeout, 1);
#else
      if (timeout == NULL)
	result = pthread_cond_wait (&cond, &__gai_requests_mutex);
      else
	{
	  /* We have to convert the relative timeout value into an
	     absolute time value which pthread_cond_timedwait expects.  */
	  struct timeval now;
	  struct timespec abstime;

	  __gettimeofday (&now, NULL);
	  abstime.tv_nsec = timeout->tv_nsec + now.tv_usec * 1000;
	  abstime.tv_sec = timeout->tv_sec + now.tv_sec;
	  if (abstime.tv_nsec >= 1000000000)
	    {
	      abstime.tv_nsec -= 1000000000;
	      abstime.tv_sec += 1;
	    }

	  result = pthread_cond_timedwait (&cond, &__gai_requests_mutex,
					   &abstime);
	}
#endif

      /* Now remove the entry in the waiting list for all requests
	 which didn't terminate.  */
      for (cnt = 0; cnt < ent; ++cnt)
	if (list[cnt] != NULL && list[cnt]->__return == EAI_INPROGRESS
	    && requestlist[cnt] != NULL)
	  {
	    struct waitlist **listp = &requestlist[cnt]->waiting;

	    /* There is the chance that we cannot find our entry anymore.
	       This could happen if the request terminated and restarted
	       again.  */
	    while (*listp != NULL && *listp != &waitlist[cnt])
	      listp = &(*listp)->next;

	    if (*listp != NULL)
	      *listp = (*listp)->next;
	  }

      /* Now it's time to restore the cancelation state.  */
      pthread_setcancelstate (oldstate, NULL);

#ifndef DONT_NEED_GAI_MISC_COND
      /* Release the conditional variable.  */
      if (pthread_cond_destroy (&cond) != 0)
	/* This must never happen.  */
	abort ();
#endif

      if (result != 0)
	{
	  /* An error occurred.  Possibly it's EINTR.  We have to translate
	     the timeout error report of `pthread_cond_timedwait' to the
	     form expected from `gai_suspend'.  */
	  if (__glibc_likely (result == ETIMEDOUT))
	    result = EAI_AGAIN;
	  else if (result == EINTR)
	    result = EAI_INTR;
	  else
	    result = EAI_SYSTEM;
	}
    }

  /* Release the mutex.  */
  pthread_mutex_unlock (&__gai_requests_mutex);

  return result;
}
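A caller-side sketch of the asynchronous resolver interface this function belongs to (glibc's libanl; link with -lanl and assume _GNU_SOURCE):

#define _GNU_SOURCE
#include <netdb.h>
#include <time.h>

int
resolve_with_timeout (const char *host)
{
  /* The gaicb must stay alive until the request completes, even if we
     stop waiting for it, hence static in this sketch.  */
  static struct gaicb req;
  req.ar_name = host;
  struct gaicb *list[] = { &req };
  struct timespec timeout = { .tv_sec = 5 };

  if (getaddrinfo_a (GAI_NOWAIT, list, 1, NULL) != 0)
    return -1;
  /* Returns 0 on completion, EAI_AGAIN on timeout, or EAI_INTR on a
     signal, exactly the translation done above.  */
  return gai_suspend ((const struct gaicb *const *) list, 1, &timeout);
}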
Example #25
__complex__ long double
__clogl (__complex__ long double x)
{
  __complex__ long double result;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
    {
      /* Real and imaginary part are 0.0.  */
      __imag__ result = signbit (__real__ x) ? M_PIl : 0.0;
      __imag__ result = __copysignl (__imag__ result, __imag__ x);
      /* Yes, the following line raises an exception.  */
      __real__ result = -1.0 / fabsl (__real__ x);
    }
  else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
    {
      /* Neither real nor imaginary part is NaN.  */
      long double absx = fabsl (__real__ x), absy = fabsl (__imag__ x);
      int scale = 0;

      if (absx < absy)
	{
	  long double t = absx;
	  absx = absy;
	  absy = t;
	}

      if (absx > LDBL_MAX / 2.0L)
	{
	  scale = -1;
	  absx = __scalbnl (absx, scale);
	  absy = (absy >= LDBL_MIN * 2.0L ? __scalbnl (absy, scale) : 0.0L);
	}
      else if (absx < LDBL_MIN && absy < LDBL_MIN)
	{
	  scale = LDBL_MANT_DIG;
	  absx = __scalbnl (absx, scale);
	  absy = __scalbnl (absy, scale);
	}

      if (absx == 1.0L && scale == 0)
	{
	  __real__ result = __log1pl (absy * absy) / 2.0L;
	  math_check_force_underflow_nonneg (__real__ result);
	}
      else if (absx > 1.0L && absx < 2.0L && absy < 1.0L && scale == 0)
	{
	  long double d2m1 = (absx - 1.0L) * (absx + 1.0L);
	  if (absy >= LDBL_EPSILON)
	    d2m1 += absy * absy;
	  __real__ result = __log1pl (d2m1) / 2.0L;
	}
      else if (absx < 1.0L
	       && absx >= 0.5L
	       && absy < LDBL_EPSILON / 2.0L
	       && scale == 0)
	{
	  long double d2m1 = (absx - 1.0L) * (absx + 1.0L);
	  __real__ result = __log1pl (d2m1) / 2.0L;
	}
      else if (absx < 1.0L
	       && absx >= 0.5L
	       && scale == 0
	       && absx * absx + absy * absy >= 0.5L)
	{
	  long double d2m1 = __x2y2m1l (absx, absy);
	  __real__ result = __log1pl (d2m1) / 2.0L;
	}
      else
	{
	  long double d = __ieee754_hypotl (absx, absy);
	  __real__ result = __ieee754_logl (d) - scale * M_LN2l;
	}

      __imag__ result = __ieee754_atan2l (__imag__ x, __real__ x);
    }
  else
    {
      __imag__ result = __nanl ("");
      if (rcls == FP_INFINITE || icls == FP_INFINITE)
	/* Real or imaginary part is infinite.  */
	__real__ result = HUGE_VALL;
      else
	__real__ result = __nanl ("");
    }

  return result;
}
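Away from the scaled and near-one special cases above, the computation reduces to log(hypot(x, y)) + i*atan2(y, x). A standalone sketch comparing that naive form with the public clog:

#include <complex.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double complex z = 3.0 + 4.0 * I;
  double complex naive = log (hypot (creal (z), cimag (z)))
                         + atan2 (cimag (z), creal (z)) * I;
  printf ("clog = %g%+gi, naive = %g%+gi\n",
          creal (clog (z)), cimag (clog (z)),
          creal (naive), cimag (naive));
  return 0;
}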
Example #26
__complex__ double
__ccosh (__complex__ double x)
{
  __complex__ double retval;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  if (__glibc_likely (rcls >= FP_ZERO))
    {
      /* Real part is finite.  */
      if (__glibc_likely (icls >= FP_ZERO))
	{
	  /* Imaginary part is finite.  */
	  const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2);
	  double sinix, cosix;

	  if (__glibc_likely (fabs (__imag__ x) > DBL_MIN))
	    {
	      __sincos (__imag__ x, &sinix, &cosix);
	    }
	  else
	    {
	      sinix = __imag__ x;
	      cosix = 1.0;
	    }

	  if (fabs (__real__ x) > t)
	    {
	      double exp_t = __ieee754_exp (t);
	      double rx = fabs (__real__ x);
	      if (signbit (__real__ x))
		sinix = -sinix;
	      rx -= t;
	      sinix *= exp_t / 2.0;
	      cosix *= exp_t / 2.0;
	      if (rx > t)
		{
		  rx -= t;
		  sinix *= exp_t;
		  cosix *= exp_t;
		}
	      if (rx > t)
		{
		  /* Overflow (original real part of x > 3t).  */
		  __real__ retval = DBL_MAX * cosix;
		  __imag__ retval = DBL_MAX * sinix;
		}
	      else
		{
		  double exp_val = __ieee754_exp (rx);
		  __real__ retval = exp_val * cosix;
		  __imag__ retval = exp_val * sinix;
		}
	    }
	  else
	    {
	      __real__ retval = __ieee754_cosh (__real__ x) * cosix;
	      __imag__ retval = __ieee754_sinh (__real__ x) * sinix;
	    }

	  if (fabs (__real__ retval) < DBL_MIN)
	    {
	      volatile double force_underflow
		= __real__ retval * __real__ retval;
	      (void) force_underflow;
	    }
	  if (fabs (__imag__ retval) < DBL_MIN)
	    {
	      volatile double force_underflow
		= __imag__ retval * __imag__ retval;
	      (void) force_underflow;
	    }
	}
      else
	{
	  __imag__ retval = __real__ x == 0.0 ? 0.0 : __nan ("");
	  __real__ retval = __nan ("") + __nan ("");

	  if (icls == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
    }
  else if (rcls == FP_INFINITE)
    {
      /* Real part is infinite.  */
      if (__glibc_likely (icls > FP_ZERO))
	{
	  /* Imaginary part is finite.  */
	  double sinix, cosix;

	  if (__glibc_likely (fabs (__imag__ x) > DBL_MIN))
	    {
	      __sincos (__imag__ x, &sinix, &cosix);
	    }
	  else
	    {
	      sinix = __imag__ x;
	      cosix = 1.0;
	    }

	  __real__ retval = __copysign (HUGE_VAL, cosix);
	  __imag__ retval = (__copysign (HUGE_VAL, sinix)
			     * __copysign (1.0, __real__ x));
	}
      else if (icls == FP_ZERO)
	{
	  /* Imaginary part is 0.0.  */
	  __real__ retval = HUGE_VAL;
	  __imag__ retval = __imag__ x * __copysign (1.0, __real__ x);
	}
      else
	{
	  /* The addition raises the invalid exception.  */
	  __real__ retval = HUGE_VAL;
	  __imag__ retval = __nan ("") + __nan ("");

	  if (icls == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
    }
  else
    {
      __real__ retval = __nan ("");
      __imag__ retval = __imag__ x == 0.0 ? __imag__ x : __nan ("");
    }

  return retval;
}
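In the finite/finite branch the code above evaluates cosh(x + iy) = cosh(x)cos(y) + i*sinh(x)sin(y). A standalone check against the public ccosh:

#include <complex.h>
#include <math.h>
#include <stdio.h>

int
main (void)
{
  double xr = 0.5, xi = 1.25;
  double complex via_identity = cosh (xr) * cos (xi)
                                + sinh (xr) * sin (xi) * I;
  double complex z = ccosh (xr + xi * I);
  printf ("ccosh = %g%+gi, identity = %g%+gi\n",
          creal (z), cimag (z), creal (via_identity), cimag (via_identity));
  return 0;
}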
Example #27
static void
ptmalloc_init (void)
{
  if (__malloc_initialized >= 0)
    return;

  __malloc_initialized = 0;

#ifdef SHARED
  /* In case this libc copy is in a non-default namespace, never use brk.
     Likewise if dlopened from statically linked program.  */
  Dl_info di;
  struct link_map *l;

  if (_dl_open_hook != NULL
      || (_dl_addr (ptmalloc_init, &di, &l, NULL) != 0
          && l->l_ns != LM_ID_BASE))
    __morecore = __failing_morecore;
#endif

  tsd_key_create (&arena_key, NULL);
  tsd_setspecific (arena_key, (void *) &main_arena);
  thread_atfork (ptmalloc_lock_all, ptmalloc_unlock_all, ptmalloc_unlock_all2);
  const char *s = NULL;
  if (__glibc_likely (_environ != NULL))
    {
      char **runp = _environ;
      char *envline;

      while (__builtin_expect ((envline = next_env_entry (&runp)) != NULL,
                               0))
        {
          size_t len = strcspn (envline, "=");

          if (envline[len] != '=')
            /* This is a "MALLOC_" variable at the end of the string
               without a '=' character.  Ignore it since otherwise we
               will access invalid memory below.  */
            continue;

          switch (len)
            {
            case 6:
              if (memcmp (envline, "CHECK_", 6) == 0)
                s = &envline[7];
              break;
            case 8:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TOP_PAD_", 8) == 0)
                    __libc_mallopt (M_TOP_PAD, atoi (&envline[9]));
                  else if (memcmp (envline, "PERTURB_", 8) == 0)
                    __libc_mallopt (M_PERTURB, atoi (&envline[9]));
                }
              break;
            case 9:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "MMAP_MAX_", 9) == 0)
                    __libc_mallopt (M_MMAP_MAX, atoi (&envline[10]));
                  else if (memcmp (envline, "ARENA_MAX", 9) == 0)
                    __libc_mallopt (M_ARENA_MAX, atoi (&envline[10]));
                }
              break;
            case 10:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "ARENA_TEST", 10) == 0)
                    __libc_mallopt (M_ARENA_TEST, atoi (&envline[11]));
                }
              break;
            case 15:
              if (!__builtin_expect (internal_libc_enable_secure, 0))
                {
                  if (memcmp (envline, "TRIM_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_TRIM_THRESHOLD, atoi (&envline[16]));
                  else if (memcmp (envline, "MMAP_THRESHOLD_", 15) == 0)
                    __libc_mallopt (M_MMAP_THRESHOLD, atoi (&envline[16]));
                }
              break;
            default:
              break;
            }
        }
    }
  /* if (s && s[0])
    {
      __libc_mallopt (M_CHECK_ACTION, (int) (s[0] - '0'));
      if (check_action != 0)
        __malloc_check_init ();
    }
  void (*hook) (void) = atomic_forced_read (__malloc_initialize_hook);
  if (hook != NULL)
    (*hook)(); */
  __malloc_initialized = 1;
}
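The MALLOC_* environment variables parsed above map onto mallopt parameters; a caller-side sketch of the programmatic equivalent:

#include <malloc.h>

void
tune_malloc (void)
{
  mallopt (M_MMAP_THRESHOLD, 256 * 1024);  /* MALLOC_MMAP_THRESHOLD_  */
  mallopt (M_TRIM_THRESHOLD, 512 * 1024);  /* MALLOC_TRIM_THRESHOLD_  */
}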
Example #28
__complex__ double
__ctanh (__complex__ double x)
{
  __complex__ double res;

  if (__glibc_unlikely (!isfinite (__real__ x) || !isfinite (__imag__ x)))
    {
      if (isinf (__real__ x))
	{
	  __real__ res = __copysign (1.0, __real__ x);
	  if (isfinite (__imag__ x) && fabs (__imag__ x) > 1.0)
	    {
	      double sinix, cosix;
	      __sincos (__imag__ x, &sinix, &cosix);
	      __imag__ res = __copysign (0.0, sinix * cosix);
	    }
	  else
	    __imag__ res = __copysign (0.0, __imag__ x);
	}
      else if (__imag__ x == 0.0)
	{
	  res = x;
	}
      else
	{
	  __real__ res = __nan ("");
	  __imag__ res = __nan ("");

	  if (isinf (__imag__ x))
	    feraiseexcept (FE_INVALID);
	}
    }
  else
    {
      double sinix, cosix;
      double den;
      const int t = (int) ((DBL_MAX_EXP - 1) * M_LN2 / 2);

      /* tanh(x+iy) = (sinh(2x) + i*sin(2y))/(cosh(2x) + cos(2y))
	 = (sinh(x)*cosh(x) + i*sin(y)*cos(y))/(sinh(x)^2 + cos(y)^2).  */

      if (__glibc_likely (fabs (__imag__ x) > DBL_MIN))
	{
	  __sincos (__imag__ x, &sinix, &cosix);
	}
      else
	{
	  sinix = __imag__ x;
	  cosix = 1.0;
	}

      if (fabs (__real__ x) > t)
	{
	  /* Avoid intermediate overflow when the imaginary part of
	     the result may be subnormal.  Ignoring negligible terms,
	     the real part is +/- 1, the imaginary part is
	     sin(y)*cos(y)/sinh(x)^2 = 4*sin(y)*cos(y)/exp(2x).  */
	  double exp_2t = __ieee754_exp (2 * t);

	  __real__ res = __copysign (1.0, __real__ x);
	  __imag__ res = 4 * sinix * cosix;
	  __real__ x = fabs (__real__ x);
	  __real__ x -= t;
	  __imag__ res /= exp_2t;
	  if (__real__ x > t)
	    {
	      /* Underflow (original real part of x has absolute value
		 > 2t).  */
	      __imag__ res /= exp_2t;
	    }
	  else
	    __imag__ res /= __ieee754_exp (2 * __real__ x);
	}
      else
	{
	  double sinhrx, coshrx;
	  if (fabs (__real__ x) > DBL_MIN)
	    {
	      sinhrx = __ieee754_sinh (__real__ x);
	      coshrx = __ieee754_cosh (__real__ x);
	    }
	  else
	    {
	      sinhrx = __real__ x;
	      coshrx = 1.0;
	    }

	  if (fabs (sinhrx) > fabs (cosix) * DBL_EPSILON)
	    den = sinhrx * sinhrx + cosix * cosix;
	  else
	    den = cosix * cosix;
	  __real__ res = sinhrx * coshrx / den;
	  __imag__ res = sinix * cosix / den;
	}
      math_check_force_underflow_complex (res);
    }

  return res;
}
Example #29
__complex__ long double
__ccoshl (__complex__ long double x)
{
  __complex__ long double retval;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  if (__glibc_likely (rcls >= FP_ZERO))
    {
      /* Real part is finite.  */
      if (__glibc_likely (icls >= FP_ZERO))
	{
	  /* Imaginary part is finite.  */
	  const int t = (int) ((LDBL_MAX_EXP - 1) * M_LN2l);
	  long double sinix, cosix;

	  if (__glibc_likely (fabsl (__imag__ x) > LDBL_MIN))
	    {
	      __sincosl (__imag__ x, &sinix, &cosix);
	    }
	  else
	    {
	      sinix = __imag__ x;
	      cosix = 1.0;
	    }

	  if (fabsl (__real__ x) > t)
	    {
	      long double exp_t = __ieee754_expl (t);
	      long double rx = fabsl (__real__ x);
	      if (signbit (__real__ x))
		sinix = -sinix;
	      rx -= t;
	      sinix *= exp_t / 2.0L;
	      cosix *= exp_t / 2.0L;
	      if (rx > t)
		{
		  rx -= t;
		  sinix *= exp_t;
		  cosix *= exp_t;
		}
	      if (rx > t)
		{
		  /* Overflow (original real part of x > 3t).  */
		  __real__ retval = LDBL_MAX * cosix;
		  __imag__ retval = LDBL_MAX * sinix;
		}
	      else
		{
		  long double exp_val = __ieee754_expl (rx);
		  __real__ retval = exp_val * cosix;
		  __imag__ retval = exp_val * sinix;
		}
	    }
	  else
	    {
	      __real__ retval = __ieee754_coshl (__real__ x) * cosix;
	      __imag__ retval = __ieee754_sinhl (__real__ x) * sinix;
	    }

	  math_check_force_underflow_complex (retval);
	}
      else
	{
	  __imag__ retval = __real__ x == 0.0 ? 0.0 : __nanl ("");
	  __real__ retval = __nanl ("") + __nanl ("");

	  if (icls == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
    }
  else if (rcls == FP_INFINITE)
    {
      /* Real part is infinite.  */
      if (__glibc_likely (icls > FP_ZERO))
	{
	  /* Imaginary part is finite.  */
	  long double sinix, cosix;

	  if (__glibc_likely (fabsl (__imag__ x) > LDBL_MIN))
	    {
	      __sincosl (__imag__ x, &sinix, &cosix);
	    }
	  else
	    {
	      sinix = __imag__ x;
	      cosix = 1.0;
	    }

	  __real__ retval = __copysignl (HUGE_VALL, cosix);
	  __imag__ retval = (__copysignl (HUGE_VALL, sinix)
			     * __copysignl (1.0, __real__ x));
	}
      else if (icls == FP_ZERO)
	{
	  /* Imaginary part is 0.0.  */
	  __real__ retval = HUGE_VALL;
	  __imag__ retval = __imag__ x * __copysignl (1.0, __real__ x);
	}
      else
	{
	  /* The addition raises the invalid exception.  */
	  __real__ retval = HUGE_VALL;
	  __imag__ retval = __nanl ("") + __nanl ("");

	  if (icls == FP_INFINITE)
	    feraiseexcept (FE_INVALID);
	}
    }
  else
    {
      __real__ retval = __nanl ("");
      __imag__ retval = __imag__ x == 0.0 ? __imag__ x : __nanl ("");
    }

  return retval;
}
Example #30
__complex__ float
__clog10f (__complex__ float x)
{
  __complex__ float result;
  int rcls = fpclassify (__real__ x);
  int icls = fpclassify (__imag__ x);

  if (__glibc_unlikely (rcls == FP_ZERO && icls == FP_ZERO))
    {
      /* Real and imaginary part are 0.0.  */
      __imag__ result = signbit (__real__ x) ? M_PI_LOG10Ef : 0.0;
      __imag__ result = __copysignf (__imag__ result, __imag__ x);
      /* Yes, the following line raises an exception.  */
      __real__ result = -1.0 / fabsf (__real__ x);
    }
  else if (__glibc_likely (rcls != FP_NAN && icls != FP_NAN))
    {
      /* Neither real nor imaginary part is NaN.  */
      float absx = fabsf (__real__ x), absy = fabsf (__imag__ x);
      int scale = 0;

      if (absx < absy)
	{
	  float t = absx;
	  absx = absy;
	  absy = t;
	}

      if (absx > FLT_MAX / 2.0f)
	{
	  scale = -1;
	  absx = __scalbnf (absx, scale);
	  absy = (absy >= FLT_MIN * 2.0f ? __scalbnf (absy, scale) : 0.0f);
	}
      else if (absx < FLT_MIN && absy < FLT_MIN)
	{
	  scale = FLT_MANT_DIG;
	  absx = __scalbnf (absx, scale);
	  absy = __scalbnf (absy, scale);
	}

      if (absx == 1.0f && scale == 0)
	{
	  float absy2 = absy * absy;
	  if (absy2 <= FLT_MIN * 2.0f * (float) M_LN10)
	    {
	      float force_underflow = absy2 * absy2;
	      __real__ result = absy2 * ((float) M_LOG10E / 2.0f);
	      math_force_eval (force_underflow);
	    }
	  else
	    __real__ result = __log1pf (absy2) * ((float) M_LOG10E / 2.0f);
	}
      else if (absx > 1.0f && absx < 2.0f && absy < 1.0f && scale == 0)
	{
	  float d2m1 = (absx - 1.0f) * (absx + 1.0f);
	  if (absy >= FLT_EPSILON)
	    d2m1 += absy * absy;
	  __real__ result = __log1pf (d2m1) * ((float) M_LOG10E / 2.0f);
	}
      else if (absx < 1.0f
	       && absx >= 0.75f
	       && absy < FLT_EPSILON / 2.0f
	       && scale == 0)
	{
	  float d2m1 = (absx - 1.0f) * (absx + 1.0f);
	  __real__ result = __log1pf (d2m1) * ((float) M_LOG10E / 2.0f);
	}
      else if (absx < 1.0f && (absx >= 0.75f || absy >= 0.5f) && scale == 0)
	{
	  float d2m1 = __x2y2m1f (absx, absy);
	  __real__ result = __log1pf (d2m1) * ((float) M_LOG10E / 2.0f);
	}
      else
	{
	  float d = __ieee754_hypotf (absx, absy);
	  __real__ result = __ieee754_log10f (d) - scale * M_LOG10_2f;
	}

      __imag__ result = M_LOG10E * __ieee754_atan2f (__imag__ x, __real__ x);
    }
  else
    {
      __imag__ result = __nanf ("");
      if (rcls == FP_INFINITE || icls == FP_INFINITE)
	/* Real or imaginary part is infinite.  */
	__real__ result = HUGE_VALF;
      else
	__real__ result = __nanf ("");
    }

  return result;
}