/* Dynamically load code from a file.  One day NaCl might provide a
   syscall offering this functionality without needing to make a
   copy of the code.  offset and size do not need to be page-aligned. */
int nacl_dyncode_map (int fd, void *dest, size_t offset, size_t size)
{
  size_t alignment_padding = offset & (getpagesize() - 1);
  uint8_t *mapping;
  if (alignment_padding == 0 && (size & (getpagesize() - 1)) == 0) {
    /* First try mmap using PROT_EXEC directly. */
    mapping = __mmap(dest, size, PROT_READ | PROT_EXEC,
                     MAP_PRIVATE | MAP_FIXED, fd, offset);
    if (mapping == dest) {
      return 0;
    } else if (mapping != MAP_FAILED) {
      /* Mapped to an unexpected location.  Unmap and fall back. */
      __munmap(mapping, size);
    }
  }
  mapping = __mmap (NULL, size + alignment_padding,
                    PROT_READ, MAP_PRIVATE, fd,
                    offset - alignment_padding);
  if (mapping == MAP_FAILED)
    return -1;
  int result = __nacl_dyncode_create (dest, mapping + alignment_padding, size);

  /* Tell Valgrind about this mapping. */
  __nacl_dyncode_map_for_valgrind (dest, size, offset, mapping);
  /* Unmap the whole temporary mapping, including the alignment padding.  */
  int munmap_result = __munmap (mapping, size + alignment_padding);
  if (result != 0 || munmap_result != 0)
    return -1;
  return 0;
}
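A minimal caller sketch (with hypothetical names) showing how the function above might be used: since offset and size need not be page-aligned, a loader can pass a segment's file offset and size straight through and rely on the internal fallback path.

/* Hypothetical caller sketch -- load_code_segment and its parameters
   are illustrative, not part of the original source.  */
static int load_code_segment (int fd, void *code_dest,
                              size_t file_offset, size_t seg_size)
{
  /* nacl_dyncode_map handles any sub-page alignment internally,
     first trying a direct PROT_EXEC mmap when everything is
     page-aligned, then falling back to copying via dyncode_create.  */
  return nacl_dyncode_map (fd, code_dest, file_offset, seg_size);
}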
Example #2
/* Release the resources used for the loaded message catalog.  */
int
catclose (nl_catd catalog_desc)
{
  __nl_catd catalog;

  /* Be generous if a catalog which failed to open is used.  */
  if (catalog_desc == (nl_catd) -1)
    {
      __set_errno (EBADF);
      return -1;
    }

  catalog = (__nl_catd) catalog_desc;

#ifdef _POSIX_MAPPED_FILES
  if (catalog->status == mmapped)
    __munmap ((void *) catalog->file_ptr, catalog->file_size);
  else
#endif	/* _POSIX_MAPPED_FILES */
    if (catalog->status == malloced)
      free ((void *) catalog->file_ptr);
    else
      {
	__set_errno (EBADF);
	return -1;
      }

  free ((void *) catalog);

  return 0;
}
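For context, a minimal sketch of the standard catgets life cycle that ends in the catclose above; the catalog name and set/message numbers are made up for illustration.

#include <nl_types.h>
#include <stdio.h>

int main (void)
{
  /* Open a message catalog; "example" is an illustrative name.  */
  nl_catd cat = catopen ("example", NL_CAT_LOCALE);

  /* catgets returns the supplied default string on any error, which
     is why catclose above is "generous" about cat == (nl_catd) -1.  */
  puts (catgets (cat, 1, 1, "hello (default)"));

  if (cat != (nl_catd) -1)
    catclose (cat);
  return 0;
}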
void
__nscd_unmap (struct mapped_database *mapped)
{
  assert (mapped->counter == 0);
  __munmap ((void *) mapped->head, mapped->mapsize);
  free (mapped);
}
int munmap(void *start, size_t length) {
#ifndef LIBC_STATIC
  if (munmap_bt != NULL)
    munmap_bt(start);
#endif
  return __munmap(start, length);
}
/* From S11 onward, munmap's addr is void *.  */
int
munmap(void *addr, size_t len)
{
#if !defined(HAVE___MUNMAP) && \
    !defined(HAVE_SYSCALL) && defined(HAVE_DLSYM)
    static int (*realmunmap)(void*, size_t);
#endif

    opal_mem_hooks_release_hook(addr, len, 0);

#if defined(HAVE___MUNMAP)
    return __munmap(addr, len);
#elif defined(HAVE_SYSCALL)
    return syscall(SYS_munmap, addr, len);
#elif defined(HAVE_DLSYM)
    if (NULL == realmunmap) {
        union {
            int (*munmap_fp)(void*, size_t);
            void *munmap_p;
        } tmp;

        tmp.munmap_p = dlsym(RTLD_NEXT, "munmap");
        realmunmap = tmp.munmap_fp;
    }
    return realmunmap(addr, len);
#else
    #error "Can not determine how to call munmap"
#endif
}
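The HAVE_DLSYM branch above is the standard symbol-interposition pattern. A self-contained sketch of the same technique, suitable for an LD_PRELOAD shared object (the stderr logging is illustrative); build with something like gcc -shared -fPIC -o interpose.so interpose.c -ldl.

#define _GNU_SOURCE             /* for RTLD_NEXT */
#include <dlfcn.h>
#include <stdio.h>
#include <stddef.h>

/* Interposer sketch using the same dlsym(RTLD_NEXT) trick as the
   HAVE_DLSYM branch above; the union sidesteps the object-pointer to
   function-pointer conversion that ISO C disallows.  */
int munmap (void *addr, size_t len)
{
  static int (*real_munmap) (void *, size_t);

  if (real_munmap == NULL)
    {
      union { int (*fp) (void *, size_t); void *p; } tmp;
      tmp.p = dlsym (RTLD_NEXT, "munmap");
      real_munmap = tmp.fp;
    }

  fprintf (stderr, "munmap(%p, %zu)\n", addr, len);
  return real_munmap (addr, len);
}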
Example #6
int munmap(void *start, size_t length) {
#if (!defined LIBC_STATIC) && (defined _MMAP_MTK_DEBUG_)
    if (recordMunmapFunc != NULL)
        recordMunmapFunc(start, length);
#endif
    return __munmap(start, length);
}
Example #7
File: pr63527.c  Project: 0day-ci/gcc
void
_dl_unload_cache (void)
{
  if (cache != ((void *)0) && cache != (struct cache_file *) -1)
    {
      __munmap (cache, cachesize);
      cache = ((void *)0) ;
    }
}
Example #8
/* If the system does not support MAP_COPY we cannot leave the file open
   all the time since this would create problems when the file is replaced.
   Therefore we provide this function to close the file and open it again
   once needed.  */
void
_dl_unload_cache (void)
{
  if (cache != NULL && cache != (struct cache_file *) -1)
    {
      __munmap (cache, cachesize);
      cache = NULL;
    }
}
Example #9
int
_IO_file_close_mmap (_IO_FILE *fp)
{
  /* In addition to closing the file descriptor we have to unmap the file.  */
  (void) __munmap (fp->_IO_buf_base, fp->_IO_buf_end - fp->_IO_buf_base);
  fp->_IO_buf_base = fp->_IO_buf_end = NULL;
  /* Cancelling close should be avoided if possible since it leaves an
     unrecoverable state behind.  */
  return close_not_cancel (fp->_fileno);
}
void *nacl_dyncode_alloc_fixed (void *dest, size_t code_size, size_t data_size,
                                size_t data_offset)
{
  /* TODO(eaeltsin): these alignment requirements are probably overly
     strict.  If so, support the unaligned case.  */
  assert (dest == round_up_to_pagesize (dest));
  assert (data_offset == round_up_to_pagesize (data_offset));

  nacl_dyncode_alloc_init ();

  if (nacl_next_code > dest)
    {
      return NULL;
    }
  nacl_next_code = dest;

  code_size = round_up_to_pagesize (code_size);
  data_size = round_up_to_pagesize (data_size);

  if (data_size != 0)
    {
      size_t last_offset = nacl_next_data - nacl_next_code;
      if (data_offset > last_offset)
        {
          /* Leaves unused space in the data area. */
          nacl_next_data += data_offset - last_offset;
        }
      else if (data_offset < last_offset)
        {
          /* Cannot move code. */
          return NULL;
        }
      assert (nacl_next_code + data_offset == nacl_next_data);

      /* Check whether the data space is available and reserve it.
         MAP_FIXED cannot be used because it overwrites existing mappings.
         Instead, fail if returned value is different from address hint.  */
      void *mapped = __mmap (nacl_next_data, data_size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mapped == MAP_FAILED)
        {
          return NULL;
        }
      if (mapped != nacl_next_data)
        {
          __munmap (nacl_next_data, data_size);
          return NULL;
        }
    }

  nacl_next_data += data_size;
  nacl_next_code += code_size;
  return dest;
}
Example #11
int
munmap(void *addr, size_t len)
{
    if (__syscall_logger) {
        __syscall_logger(stack_logging_type_vm_deallocate, (uintptr_t)mach_task_self(), (uintptr_t)addr, len, 0, 0);
    }
    
    int result = __munmap(addr, len);

    return result;
}
Example #12
File: assert.c  Project: Boshin/workspace
void
__assert_fail_base (const char *fmt, const char *assertion, const char *file,
		    unsigned int line, const char *function)
{
  char *str;

#ifdef FATAL_PREPARE
  FATAL_PREPARE;
#endif

  int total;
  if (__asprintf (&str, fmt,
		  __progname, __progname[0] ? ": " : "",
		  file, line,
		  function ? function : "", function ? ": " : "",
		  assertion, &total) >= 0)
    {
      /* Print the message.  */
      (void) __fxprintf (NULL, "%s", str);
      (void) fflush (stderr);

      total = (total + 1 + GLRO(dl_pagesize) - 1) & ~(GLRO(dl_pagesize) - 1);
      struct abort_msg_s *buf = __mmap (NULL, total, PROT_READ | PROT_WRITE,
					MAP_ANON | MAP_PRIVATE, -1, 0);
      if (__builtin_expect (buf != MAP_FAILED, 1))
	{
	  buf->size = total;
	  strcpy (buf->msg, str);

	  /* We have to free the old buffer since the application might
	     catch the SIGABRT signal.  */
	  struct abort_msg_s *old = atomic_exchange_acq (&__abort_msg, buf);

	  if (old != NULL)
	    __munmap (old, old->size);
	}

      free (str);
    }
  else
    {
      /* At least print a minimal message.  */
      static const char errstr[] = "Unexpected error.\n";
      __libc_write (STDERR_FILENO, errstr, sizeof (errstr) - 1);
    }

  abort ();
}
/* Allocate space for code and data simultaneously.
   This is a simple allocator that doesn't know how to deallocate.  */
void *nacl_dyncode_alloc (size_t code_size, size_t data_size,
                          size_t data_offset)
{
  assert (data_offset == round_up_to_pagesize (data_offset));

  nacl_dyncode_alloc_init ();

  code_size = round_up_to_pagesize (code_size);
  data_size = round_up_to_pagesize (data_size);

  if (data_size != 0)
    {
      size_t last_offset = nacl_next_data - nacl_next_code;
      if (data_offset > last_offset)
        {
          /* Leaves unused space in the data area. */
          nacl_next_data += data_offset - last_offset;
        }
      else if (data_offset < last_offset)
        {
          /* Leaves unused space in the code area. */
          nacl_next_code += last_offset - data_offset;
        }
      assert (nacl_next_code + data_offset == nacl_next_data);

      /* Check whether the data space is available and reserve it.
         MAP_FIXED cannot be used because it overwrites existing mappings.
         Instead, fail if returned value is different from address hint.
         TODO(mseaborn): Retry on failure or avoid failure by
         reserving a big chunk of address space at startup. */
      void *mapped = __mmap (nacl_next_data, data_size, PROT_NONE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (mapped == MAP_FAILED)
        {
          return NULL;
        }
      if (mapped != nacl_next_data)
        {
          __munmap (nacl_next_data, data_size);
          return NULL;
        }
    }

  void *code_addr = nacl_next_code;
  nacl_next_data += data_size;
  nacl_next_code += code_size;
  return code_addr;
}
Example #14
File: munmap.c  Project: CptFrazz/xnu
int
munmap(void *addr, size_t len)
{
	size_t	offset;

	if (len == 0) {
		/*
		 * Standard compliance now requires the system to return EINVAL
		 * for munmap(addr, 0).  Return success now to maintain
		 * backwards compatibility.
		 */
		return 0;
	}
	/*
	 * Page-align "addr" since the system now requires it
	 * for standards compliance.
	 * Update "len" to reflect the adjustment and still cover the same area.
	 */
	offset = ((uintptr_t) addr) & PAGE_MASK;
	addr = (void *) (((uintptr_t) addr) & ~PAGE_MASK);
	len += offset;
	return __munmap(addr, len);
}
Example #15
const char *
internal_function
_dl_load_cache_lookup (const char *name)
{
  int left, right, middle;
  int cmpres;
  const char *cache_data;
  uint32_t cache_data_size;
  const char *best;

  /* Print a message if the loading of libs is traced.  */
  if (__builtin_expect (GLRO_dl_debug_mask & DL_DEBUG_LIBS, 0))
    _dl_debug_printf (" search cache=%s\n", LD_SO_CACHE);

  if (cache == NULL)
    {
      /* Read the contents of the file.  */
      void *file = _dl_sysdep_read_whole_file (LD_SO_CACHE, &cachesize,
					       PROT_READ);

      /* We can handle three different cache file formats here:
	 - the old libc5/glibc2.0/2.1 format
	 - the old format with the new format in it
	 - only the new format
	 The following checks if the cache contains any of these formats.  */
      if (file != MAP_FAILED && cachesize > sizeof *cache
	  && memcmp (file, CACHEMAGIC, sizeof CACHEMAGIC - 1) == 0)
	{
	  size_t offset;
	  /* Looks ok.  */
	  cache = file;

	  /* Check for new version.  */
	  offset = ALIGN_CACHE (sizeof (struct cache_file)
				+ cache->nlibs * sizeof (struct file_entry));

	  cache_new = (struct cache_file_new *) ((void *) cache + offset);
	  if (cachesize < (offset + sizeof (struct cache_file_new))
	      || memcmp (cache_new->magic, CACHEMAGIC_VERSION_NEW,
			 sizeof CACHEMAGIC_VERSION_NEW - 1) != 0)
	    cache_new = (void *) -1;
	}
      else if (file != MAP_FAILED && cachesize > sizeof *cache_new
	       && memcmp (file, CACHEMAGIC_VERSION_NEW,
			  sizeof CACHEMAGIC_VERSION_NEW - 1) == 0)
	{
	  cache_new = file;
	  cache = file;
	}
      else
	{
	  if (file != MAP_FAILED)
	    __munmap (file, cachesize);
	  cache = (void *) -1;
	}

      assert (cache != NULL);
    }

  if (cache == (void *) -1)
    /* Previously looked for the cache file and didn't find it.  */
    return NULL;

  best = NULL;

  if (cache_new != (void *) -1)
    {
      uint64_t platform;
      int disable_hwcap = 0;

      /* This is where the strings start.  */
      cache_data = (const char *) cache_new;

      /* Now we can compute how large the string table is.  */
      cache_data_size = (const char *) cache + cachesize - cache_data;

      platform = _dl_string_platform (GLRO(dl_platform));
      if (platform != (uint64_t) -1)
	platform = 1ULL << platform;

      if (__access ("/etc/ld.so.nohwcap", F_OK) == 0)
	disable_hwcap = 1;

#define _DL_HWCAP_TLS_MASK (1LL << 63)
      uint64_t hwcap_exclude = ~((GLRO(dl_hwcap) & GLRO(dl_hwcap_mask))
				 | _DL_HWCAP_PLATFORM | _DL_HWCAP_TLS_MASK);

      /* Only accept hwcap if it's for the right platform.  */
#define HWCAP_CHECK \
      if (lib->hwcap & hwcap_exclude)					      \
	continue;							      \
      if (GLRO(dl_osversion) && lib->osversion > GLRO(dl_osversion))	      \
	continue;							      \
      if (disable_hwcap && lib->hwcap != 0)				      \
	continue;							      \
      if (_DL_PLATFORMS_COUNT						      \
	  && (lib->hwcap & _DL_HWCAP_PLATFORM) != 0			      \
	  && (lib->hwcap & _DL_HWCAP_PLATFORM) != platform)		      \
	continue
      SEARCH_CACHE (cache_new);
    }
  else
    {
      /* This is where the strings start.  */
      cache_data = (const char *) &cache->libs[cache->nlibs];

      /* Now we can compute how large the string table is.  */
      cache_data_size = (const char *) cache + cachesize - cache_data;

#undef HWCAP_CHECK
#define HWCAP_CHECK do {} while (0)
      SEARCH_CACHE (cache);
    }

  /* Print our result if wanted.  */
  if (__builtin_expect (GLRO_dl_debug_mask & DL_DEBUG_LIBS, 0)
      && best != NULL)
    _dl_debug_printf ("  trying file=%s\n", best);

  return best;
}
Example #16
int
__open_catalog (const char *cat_name, const char *nlspath, const char *env_var,
		__nl_catd catalog)
{
  int fd = -1;
  struct stat64 st;
  int swapping;
  size_t cnt;
  size_t max_offset;
  size_t tab_size;
  const char *lastp;
  int result = -1;
  char *buf = NULL;

  if (strchr (cat_name, '/') != NULL || nlspath == NULL)
    fd = open_not_cancel_2 (cat_name, O_RDONLY);
  else
    {
      const char *run_nlspath = nlspath;
#define ENOUGH(n)							      \
  if (__glibc_unlikely (bufact + (n) >= bufmax))			      \
    {									      \
      char *old_buf = buf;						      \
      bufmax += (bufmax < 256 + (n)) ? 256 + (n) : bufmax;		      \
      buf = realloc (buf, bufmax);					      \
      if (__glibc_unlikely (buf == NULL))				      \
	{								      \
	  free (old_buf);						      \
	  return -1;							      \
	}								      \
    }

      /* The RUN_NLSPATH variable contains a colon separated list of
	 descriptions where we expect to find catalogs.  We have to
	 recognize certain % substitutions and stop when we found the
	 first existing file.  */
      size_t bufact;
      size_t bufmax = 0;
      size_t len;

      fd = -1;
      while (*run_nlspath != '\0')
	{
	  bufact = 0;

	  if (*run_nlspath == ':')
	    {
	      /* Leading colon or adjacent colons - treat same as %N.  */
	      len = strlen (cat_name);
	      ENOUGH (len);
	      memcpy (&buf[bufact], cat_name, len);
	      bufact += len;
	    }
	  else
	    while (*run_nlspath != ':' && *run_nlspath != '\0')
	      if (*run_nlspath == '%')
		{
		  const char *tmp;

		  ++run_nlspath;	/* We have seen the `%'.  */
		  switch (*run_nlspath++)
		    {
		    case 'N':
		      /* Use the catalog name.  */
		      len = strlen (cat_name);
		      ENOUGH (len);
		      memcpy (&buf[bufact], cat_name, len);
		      bufact += len;
		      break;
		    case 'L':
		      /* Use the current locale category value.  */
		      len = strlen (env_var);
		      ENOUGH (len);
		      memcpy (&buf[bufact], env_var, len);
		      bufact += len;
		      break;
		    case 'l':
		      /* Use language element of locale category value.  */
		      tmp = env_var;
		      do
			{
			  ENOUGH (1);
			  buf[bufact++] = *tmp++;
			}
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      break;
		    case 't':
		      /* Use territory element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      if (*tmp == '_')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0' && *tmp != '.');
			}
		      break;
		    case 'c':
		      /* Use code set element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '.');
		      if (*tmp == '.')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0');
			}
		      break;
		    case '%':
		      ENOUGH (1);
		      buf[bufact++] = '%';
		      break;
		    default:
		      /* Unknown variable: ignore this path element.  */
		      bufact = 0;
		      while (*run_nlspath != '\0' && *run_nlspath != ':')
			++run_nlspath;
		      break;
		    }
		}
	      else
		{
		  ENOUGH (1);
		  buf[bufact++] = *run_nlspath++;
		}

	  ENOUGH (1);
	  buf[bufact] = '\0';

	  if (bufact != 0)
	    {
	      fd = open_not_cancel_2 (buf, O_RDONLY);
	      if (fd >= 0)
		break;
	    }

	  ++run_nlspath;
	}
    }

  /* Avoid dealing with directories and block devices */
  if (__builtin_expect (fd, 0) < 0)
    {
      free (buf);
      return -1;
    }

  if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st), 0) < 0)
    goto close_unlock_return;

  if (__builtin_expect (!S_ISREG (st.st_mode), 0)
      || (size_t) st.st_size < sizeof (struct catalog_obj))
    {
      /* `errno' is not set correctly but the file is not usable.
	 Use a reasonable error value.  */
      __set_errno (EINVAL);
      goto close_unlock_return;
    }

  catalog->file_size = st.st_size;
#ifdef _POSIX_MAPPED_FILES
# ifndef MAP_COPY
    /* Linux seems to lack read-only copy-on-write.  */
#  define MAP_COPY MAP_PRIVATE
# endif
# ifndef MAP_FILE
    /* Some systems do not have this flag; it is superfluous.  */
#  define MAP_FILE 0
# endif
  catalog->file_ptr =
    (struct catalog_obj *) __mmap (NULL, st.st_size, PROT_READ,
				   MAP_FILE|MAP_COPY, fd, 0);
  if (__builtin_expect (catalog->file_ptr != (struct catalog_obj *) MAP_FAILED,
			1))
    /* Tell the world we managed to mmap the file.  */
    catalog->status = mmapped;
  else
#endif /* _POSIX_MAPPED_FILES */
    {
      /* mmap failed perhaps because the system call is not
	 implemented.  Try to load the file.  */
      size_t todo;
      catalog->file_ptr = malloc (st.st_size);
      if (catalog->file_ptr == NULL)
	goto close_unlock_return;

      todo = st.st_size;
      /* Read safely, handling partial reads.  */
      do
	{
	  size_t now = read_not_cancel (fd, (((char *) catalog->file_ptr)
					     + (st.st_size - todo)), todo);
	  if (now == 0 || now == (size_t) -1)
	    {
#ifdef EINTR
	      if (now == (size_t) -1 && errno == EINTR)
		continue;
#endif
	      free ((void *) catalog->file_ptr);
	      goto close_unlock_return;
	    }
	  todo -= now;
	}
      while (todo > 0);
      catalog->status = malloced;
    }

  /* Determine whether the file is a catalog file and if yes whether
     it is written using the correct byte order.  Else we have to swap
     the values.  */
  if (__glibc_likely (catalog->file_ptr->magic == CATGETS_MAGIC))
    swapping = 0;
  else if (catalog->file_ptr->magic == SWAPU32 (CATGETS_MAGIC))
    swapping = 1;
  else
    {
    invalid_file:
      /* Invalid file.  Free the resources and mark catalog as not
	 usable.  */
#ifdef _POSIX_MAPPED_FILES
      if (catalog->status == mmapped)
	__munmap ((void *) catalog->file_ptr, catalog->file_size);
      else
#endif	/* _POSIX_MAPPED_FILES */
	free (catalog->file_ptr);
      goto close_unlock_return;
    }

#define SWAP(x) (swapping ? SWAPU32 (x) : (x))

  /* Get dimensions of the used hashing table.  */
  catalog->plane_size = SWAP (catalog->file_ptr->plane_size);
  catalog->plane_depth = SWAP (catalog->file_ptr->plane_depth);

  /* The file contains two versions of the pointer tables.  Pick the
     right one for the local byte order.  */
#if __BYTE_ORDER == __LITTLE_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[0];
#elif __BYTE_ORDER == __BIG_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[catalog->plane_size
						  * catalog->plane_depth
						  * 3];
#else
# error Cannot handle __BYTE_ORDER byte order
#endif

  /* The rest of the file contains all the strings.  They are
     addressed relative to the position of the first string.  */
  catalog->strings =
    (const char *) &catalog->file_ptr->name_ptr[catalog->plane_size
					       * catalog->plane_depth * 3 * 2];

  /* Determine the largest string offset mentioned in the table.  */
  max_offset = 0;
  tab_size = 3 * catalog->plane_size * catalog->plane_depth;
  for (cnt = 2; cnt < tab_size; cnt += 3)
    if (catalog->name_ptr[cnt] > max_offset)
      max_offset = catalog->name_ptr[cnt];

  /* Now we can check whether the file is large enough to contain the
     tables it says it contains.  */
  if ((size_t) st.st_size
      <= (sizeof (struct catalog_obj) + 2 * tab_size + max_offset))
    /* The last string is not contained in the file.  */
    goto invalid_file;

  lastp = catalog->strings + max_offset;
  max_offset = (st.st_size
		- sizeof (struct catalog_obj) + 2 * tab_size + max_offset);
  while (*lastp != '\0')
    {
      if (--max_offset == 0)
	goto invalid_file;
      ++lastp;
    }

  /* We succeeded.  */
  result = 0;

  /* Release the lock again.  */
 close_unlock_return:
  close_not_cancel_no_status (fd);
  free (buf);

  return result;
}
Example #17
static void
decide_maybe_mmap (_IO_FILE *fp)
{
  /* We use the file in read-only mode.  This could mean we can
     mmap the file and use it without any copying.  But not all
     file descriptors are for mmap-able objects and on 32-bit
     machines we don't want to map files which are too large since
     this would require too much virtual memory.  */
  struct stat64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024)
      /* Sanity check.  */
      && (fp->_offset == _IO_pos_BAD || fp->_offset <= st.st_size))
    {
      /* Try to map the file.  */
      void *p;

      p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED, fp->_fileno, 0);
      if (p != MAP_FAILED)
	{
	  /* OK, we managed to map the file.  Set the buffer up and use a
	     special jump table with simplified underflow functions which
	     never tries to read anything from the file.  */

	  if (__lseek64 (fp->_fileno, st.st_size, SEEK_SET) != st.st_size)
	    {
	      (void) __munmap (p, st.st_size);
	      fp->_offset = _IO_pos_BAD;
	    }
	  else
	    {
	      _IO_setb (fp, p, (char *) p + st.st_size, 0);

	      if (fp->_offset == _IO_pos_BAD)
		fp->_offset = 0;

	      _IO_setg (fp, p, p + fp->_offset, p + st.st_size);
	      fp->_offset = st.st_size;

	      if (fp->_mode <= 0)
		_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps_mmap;
	      else
		_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps_mmap;
	      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps_mmap;

	      return;
	    }
	}
    }

  /* We couldn't use mmap, so revert to the vanilla file operations.  */

  if (fp->_mode <= 0)
    _IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
  else
    _IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
  fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;
}
Example #18
/* Guts of underflow callback if we mmap the file.  This stats the file and
   updates the stream state to match.  In the normal case we return zero.
   If the file is no longer eligible for mmap, its jump tables are reset to
   the vanilla ones and we return nonzero.  */
static int
mmap_remap_check (_IO_FILE *fp)
{
  struct stat64 st;

  if (_IO_SYSSTAT (fp, &st) == 0
      && S_ISREG (st.st_mode) && st.st_size != 0
      /* Limit the file size to 1MB for 32-bit machines.  */
      && (sizeof (ptrdiff_t) > 4 || st.st_size < 1*1024*1024))
    {
      const size_t pagesize = __getpagesize ();
# define ROUNDED(x)	(((x) + pagesize - 1) & ~(pagesize - 1))
      if (ROUNDED (st.st_size) < ROUNDED (fp->_IO_buf_end
					  - fp->_IO_buf_base))
	{
	  /* We can trim off some pages past the end of the file.  */
	  (void) __munmap (fp->_IO_buf_base + ROUNDED (st.st_size),
			   ROUNDED (fp->_IO_buf_end - fp->_IO_buf_base)
			   - ROUNDED (st.st_size));
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else if (ROUNDED (st.st_size) > ROUNDED (fp->_IO_buf_end
					       - fp->_IO_buf_base))
	{
	  /* The file added some pages.  We need to remap it.  */
	  void *p;
#ifdef _G_HAVE_MREMAP
	  p = __mremap (fp->_IO_buf_base, ROUNDED (fp->_IO_buf_end
						   - fp->_IO_buf_base),
			ROUNDED (st.st_size), MREMAP_MAYMOVE);
	  if (p == MAP_FAILED)
	    {
	      (void) __munmap (fp->_IO_buf_base,
			       fp->_IO_buf_end - fp->_IO_buf_base);
	      goto punt;
	    }
#else
	  (void) __munmap (fp->_IO_buf_base,
			   fp->_IO_buf_end - fp->_IO_buf_base);
	  p = __mmap64 (NULL, st.st_size, PROT_READ, MAP_SHARED,
			fp->_fileno, 0);
	  if (p == MAP_FAILED)
	    goto punt;
#endif
	  fp->_IO_buf_base = p;
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
      else
	{
	  /* The number of pages didn't change.  */
	  fp->_IO_buf_end = fp->_IO_buf_base + st.st_size;
	}
# undef ROUNDED

      fp->_offset -= fp->_IO_read_end - fp->_IO_read_ptr;
      _IO_setg (fp, fp->_IO_buf_base,
		fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base
		? fp->_IO_buf_base + fp->_offset : fp->_IO_buf_end,
		fp->_IO_buf_end);

      /* If we are already positioned at or past the end of the file, don't
	 change the current offset.  If not, seek past what we have mapped,
	 mimicking the position left by a normal underflow reading into its
	 buffer until EOF.  */

      if (fp->_offset < fp->_IO_buf_end - fp->_IO_buf_base)
	{
	  if (__lseek64 (fp->_fileno, fp->_IO_buf_end - fp->_IO_buf_base,
			 SEEK_SET)
	      != fp->_IO_buf_end - fp->_IO_buf_base)
	    fp->_flags |= _IO_ERR_SEEN;
	  else
	    fp->_offset = fp->_IO_buf_end - fp->_IO_buf_base;
	}

      return 0;
    }
  else
    {
      /* Life is no longer good for mmap.  Punt it.  */
      (void) __munmap (fp->_IO_buf_base,
		       fp->_IO_buf_end - fp->_IO_buf_base);
    punt:
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
      _IO_setg (fp, NULL, NULL, NULL);
      if (fp->_mode <= 0)
	_IO_JUMPS_FILE_plus (fp) = &_IO_file_jumps;
      else
	_IO_JUMPS_FILE_plus (fp) = &_IO_wfile_jumps;
      fp->_wide_data->_wide_vtable = &_IO_wfile_jumps;

      return 1;
    }
}
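The _G_HAVE_MREMAP branch above grows the mapping in place when possible. A minimal sketch of that call in isolation (sizes are expected to be page-rounded, as with ROUNDED above); mremap is Linux-specific and needs _GNU_SOURCE.

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

/* Grow (or shrink) an existing mapping, letting the kernel move it
   if it cannot be resized in place.  Returns NULL on failure.  */
static void *resize_mapping (void *old, size_t old_size, size_t new_size)
{
  void *p = mremap (old, old_size, new_size, MREMAP_MAYMOVE);
  return p == MAP_FAILED ? NULL : p;
}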
Example #19
int munmap_no_record(void *start, size_t length)
{
	return __munmap(start, length);

}
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
			!= iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
						       MSG_NOSIGNAL))
			!= keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
			!= CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head), 0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
						     sizeof (head), 0))
			!= sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
	 thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
			   && head.timestamp + MAPPING_TIMEOUT < time (NULL),
			   0))
    goto out_close;

  size_t size = (sizeof (head) + roundup (head.module * sizeof (ref_t), ALIGN)
		 + head.data_size);

  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	{
	  /* Ugh, after all we went through the memory allocation failed.  */
	  __munmap (mapping, size);
	  goto out_close;
	}

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
		    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}
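get_mapping above receives a descriptor over a UNIX-domain socket via SCM_RIGHTS. For reference, a sketch of the matching send side (as an nscd-like server would perform it); the one-byte payload is illustrative.

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

/* Pass file descriptor FD across the connected AF_UNIX socket SOCK,
   mirroring the cmsg layout that get_mapping above expects.  */
static int send_fd (int sock, int fd)
{
  char payload = 0;
  struct iovec iov = { .iov_base = &payload, .iov_len = 1 };
  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                        .msg_control = buf.bytes,
                        .msg_controllen = sizeof (buf) };

  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));
  memcpy (CMSG_DATA (cmsg), &fd, sizeof (int));
  msg.msg_controllen = cmsg->cmsg_len;

  return sendmsg (sock, &msg, 0) == 1 ? 0 : -1;
}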


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;
  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
	return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
	cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
	{
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_increment (&cur->counter);
	}
    }

  mapptr->lock = 0;

  return cur;
}


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
	  && memcmp (key, mapped->data + here->key, keylen) == 0)
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  const struct datahead *dh
	    = (struct datahead *) (mapped->data + here->packet);

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable && ((char *) dh + dh->allocsize
			     <= (char *) mapped->head + mapped->mapsize))
	    return dh;
	}

      work = here->next;
    }

  return NULL;
}
Example #21
int
internal_function
__gconv_load_cache (void)
{
#if 0
  int fd;
  struct stat64 st;
  struct gconvcache_header *header;
#endif

  /* We cannot use the cache if the GCONV_PATH environment variable is
     set.  */
//  __gconv_path_envvar = getenv ("GCONV_PATH");
//  if (__gconv_path_envvar != NULL)
    return -1;

#if 0
  /* See whether the cache file exists.  */
  fd = __open (GCONV_MODULES_CACHE, O_RDONLY);
  if (__builtin_expect (fd, 0) == -1)
    /* Not available.  */
    return -1;

  /* Get information about the file.  */
  if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st), 0) < 0
      /* We do not have to start looking at the file if it cannot contain
	 at least the cache header.  */
      || (size_t) st.st_size < sizeof (struct gconvcache_header))
    {
    close_and_exit:
      __close (fd);
      return -1;
    }

  /* Make the file content available.  */
  cache_size = st.st_size;
#ifdef _POSIX_MAPPED_FILES
  gconv_cache = __mmap (NULL, cache_size, PROT_READ, MAP_SHARED, fd, 0);
  if (__builtin_expect (gconv_cache == MAP_FAILED, 0))
#endif
    {
      size_t already_read;

      gconv_cache = malloc (cache_size);
      if (gconv_cache == NULL)
	goto close_and_exit;

      already_read = 0;
      do
	{
	  ssize_t n = __read (fd, (char *) gconv_cache + already_read,
			      cache_size - already_read);
	  if (__builtin_expect (n, 0) == -1)
	    {
	      free (gconv_cache);
	      gconv_cache = NULL;
	      goto close_and_exit;
	    }

	  already_read += n;
	}
      while (already_read < cache_size);

      cache_malloced = 1;
    }

  /* We don't need the file descriptor anymore.  */
  __close (fd);

  /* Check the consistency.  */
  header = (struct gconvcache_header *) gconv_cache;
  if (__builtin_expect (header->magic, GCONVCACHE_MAGIC) != GCONVCACHE_MAGIC
      || __builtin_expect (header->string_offset >= cache_size, 0)
      || __builtin_expect (header->hash_offset >= cache_size, 0)
      || __builtin_expect (header->hash_size == 0, 0)
      || __builtin_expect ((header->hash_offset
			    + header->hash_size * sizeof (struct hash_entry))
			   > cache_size, 0)
      || __builtin_expect (header->module_offset >= cache_size, 0)
      || __builtin_expect (header->otherconv_offset > cache_size, 0))
    {
      if (cache_malloced)
	{
	  free (gconv_cache);
	  cache_malloced = 0;
	}
#ifdef _POSIX_MAPPED_FILES
      else
	__munmap (gconv_cache, cache_size);
#endif
      gconv_cache = NULL;

      return -1;
    }

  /* That worked.  */
  return 0;
#endif
}
Example #22
File: arena.c  Project: Xilinx/eglibc
static heap_info *
internal_function
new_heap(size_t size, size_t top_pad)
{
  size_t page_mask = GLRO(dl_pagesize) - 1;
  char *p1, *p2;
  unsigned long ul;
  heap_info *h;

  if(size+top_pad < HEAP_MIN_SIZE)
    size = HEAP_MIN_SIZE;
  else if(size+top_pad <= HEAP_MAX_SIZE)
    size += top_pad;
  else if(size > HEAP_MAX_SIZE)
    return 0;
  else
    size = HEAP_MAX_SIZE;
  size = (size + page_mask) & ~page_mask;

  /* A memory region aligned to a multiple of HEAP_MAX_SIZE is needed.
     No swap space needs to be reserved for the following large
     mapping (on Linux, this is the case for all non-writable mappings
     anyway). */
  p2 = MAP_FAILED;
  if(aligned_heap_area) {
    p2 = (char *)MMAP(aligned_heap_area, HEAP_MAX_SIZE, PROT_NONE,
		      MAP_NORESERVE);
    aligned_heap_area = NULL;
    if (p2 != MAP_FAILED && ((unsigned long)p2 & (HEAP_MAX_SIZE-1))) {
      __munmap(p2, HEAP_MAX_SIZE);
      p2 = MAP_FAILED;
    }
  }
  if(p2 == MAP_FAILED) {
    p1 = (char *)MMAP(0, HEAP_MAX_SIZE<<1, PROT_NONE, MAP_NORESERVE);
    if(p1 != MAP_FAILED) {
      p2 = (char *)(((unsigned long)p1 + (HEAP_MAX_SIZE-1))
		    & ~(HEAP_MAX_SIZE-1));
      ul = p2 - p1;
      if (ul)
	__munmap(p1, ul);
      else
	aligned_heap_area = p2 + HEAP_MAX_SIZE;
      __munmap(p2 + HEAP_MAX_SIZE, HEAP_MAX_SIZE - ul);
    } else {
      /* Try to take the chance that an allocation of only HEAP_MAX_SIZE
	 is already aligned. */
      p2 = (char *)MMAP(0, HEAP_MAX_SIZE, PROT_NONE, MAP_NORESERVE);
      if(p2 == MAP_FAILED)
	return 0;
      if((unsigned long)p2 & (HEAP_MAX_SIZE-1)) {
	__munmap(p2, HEAP_MAX_SIZE);
	return 0;
      }
    }
  }
  if(__mprotect(p2, size, PROT_READ|PROT_WRITE) != 0) {
    __munmap(p2, HEAP_MAX_SIZE);
    return 0;
  }
  h = (heap_info *)p2;
  h->size = size;
  h->mprotect_size = size;
  THREAD_STAT(stat_n_heaps++);
  return h;
}
Example #23
char *dcngettext(const char *domainname, const char *msgid1, const char *msgid2, unsigned long int n, int category)
{
	static struct msgcat *volatile cats;
	struct msgcat *p;
	struct __locale_struct *loc = CURRENT_LOCALE;
	struct __locale_map *lm;
	const char *dirname, *locname, *catname;
	size_t dirlen, loclen, catlen, domlen;

	if (!domainname) domainname = __gettextdomain();

	domlen = strlen(domainname);
	if (domlen > NAME_MAX) goto notrans;

	dirname = gettextdir(domainname, &dirlen);
	if (!dirname) goto notrans;

	switch (category) {
	case LC_MESSAGES:
		locname = loc->messages_name;
		if (!*locname) goto notrans;
		break;
	case LC_TIME:
	case LC_MONETARY:
	case LC_COLLATE:
		lm = loc->cat[category-2];
		if (!lm) goto notrans;
		locname = lm->name;
		break;
	default:
notrans:
		return (char *) ((n == 1) ? msgid1 : msgid2);
	}

	catname = catnames[category-2];
	catlen = catlens[category-2];
	loclen = strlen(locname);

	size_t namelen = dirlen+1 + loclen+1 + catlen+1 + domlen+3;
	char name[namelen+1], *s = name;

	memcpy(s, dirname, dirlen);
	s[dirlen] = '/';
	s += dirlen + 1;
	memcpy(s, locname, loclen);
	s[loclen] = '/';
	s += loclen + 1;
	memcpy(s, catname, catlen);
	s[catlen] = '/';
	s += catlen + 1;
	memcpy(s, domainname, domlen);
	s[domlen] = '.';
	s[domlen+1] = 'm';
	s[domlen+2] = 'o';
	s[domlen+3] = 0;

	for (p=cats; p; p=p->next)
		if (!strcmp(p->name, name))
			break;

	if (!p) {
		void *old_cats;
		size_t map_size;
		const void *map = __map_file(name, &map_size);
		if (!map) goto notrans;
		p = malloc(sizeof *p + namelen + 1);
		if (!p) {
			__munmap((void *)map, map_size);
			goto notrans;
		}
		p->map = map;
		p->map_size = map_size;
		memcpy(p->name, name, namelen+1);
		do {
			old_cats = cats;
			p->next = old_cats;
		} while (a_cas_p(&cats, old_cats, p) != old_cats);
	}

	const char *trans = __mo_lookup(p->map, p->map_size, msgid1);
	if (!trans) goto notrans;

	/* Non-plural-processing gettext forms pass a null pointer as
	 * msgid2 to request that dcngettext suppress plural processing. */
	if (!msgid2) return (char *)trans;

	if (!p->plural_rule) {
		const char *rule = "n!=1;";
		unsigned long np = 2;
		const char *r = __mo_lookup(p->map, p->map_size, "");
		char *z;
		while (r && strncmp(r, "Plural-Forms:", 13)) {
			z = strchr(r, '\n');
			r = z ? z+1 : 0;
		}
		if (r) {
			r += 13;
			while (isspace(*r)) r++;
			if (!strncmp(r, "nplurals=", 9)) {
				np = strtoul(r+9, &z, 10);
				r = z;
			}
			while (*r && *r != ';') r++;
			if (*r) {
				r++;
				while (isspace(*r)) r++;
				if (!strncmp(r, "plural=", 7))
					rule = r+7;
			}
		}
		a_store(&p->nplurals, np);
		a_cas_p(&p->plural_rule, 0, (void *)rule);
	}
	if (p->nplurals) {
		unsigned long plural = __pleval(p->plural_rule, n);
		if (plural > p->nplurals) goto notrans;
		while (plural--) {
			size_t rem = p->map_size - (trans - (char *)p->map);
			size_t l = strnlen(trans, rem);
			if (l+1 >= rem)
				goto notrans;
			trans += l+1;
		}
	}
	return (char *)trans;
}
Example #24
File: spawni.c  Project: doniexun/glibc
/* Spawn a new process executing PATH with the attributes described in *ATTRP.
   Before running the process perform the actions described in FILE-ACTIONS. */
static int
__spawnix (pid_t * pid, const char *file,
	   const posix_spawn_file_actions_t * file_actions,
	   const posix_spawnattr_t * attrp, char *const argv[],
	   char *const envp[], int xflags,
	   int (*exec) (const char *, char *const *, char *const *))
{
  pid_t new_pid;
  struct posix_spawn_args args;
  int ec;

  if (__pipe2 (args.pipe, O_CLOEXEC))
    return errno;

  /* To avoid imposing hard limits on posix_spawn{p}, the total number of
     arguments is first calculated so that a region large enough to hold
     them all can be mmap'd.  */
  ptrdiff_t argc = 0;
  /* Linux allows at most max (0x7FFFFFFF, 1/4 of the stack size) arguments
     in an execve call.  We limit it to INT_MAX minus one due to the
     compatibility code that may execute a shell script
     (maybe_script_execute), which constructs another argument list with
     one additional argument.  */
  ptrdiff_t limit = INT_MAX - 1;
  while (argv[argc++] != NULL)
    if (argc == limit)
      {
	errno = E2BIG;
	return errno;
      }

  int prot = (PROT_READ | PROT_WRITE
	     | ((GL (dl_stack_flags) & PF_X) ? PROT_EXEC : 0));

  /* Add a slack area for child's stack.  */
  size_t argv_size = (argc * sizeof (void *)) + 512;
  size_t stack_size = ALIGN_UP (argv_size, GLRO(dl_pagesize));
  void *stack = __mmap (NULL, stack_size, prot,
			MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0);
  if (__glibc_unlikely (stack == MAP_FAILED))
    {
      close_not_cancel (args.pipe[0]);
      close_not_cancel (args.pipe[1]);
      return errno;
    }

  /* Disable asynchronous cancellation.  */
  int cs = LIBC_CANCEL_ASYNC ();

  args.file = file;
  args.exec = exec;
  args.fa = file_actions;
  args.attr = attrp ? attrp : &(const posix_spawnattr_t) { 0 };
  args.argv = argv;
  args.argc = argc;
  args.envp = envp;
  args.xflags = xflags;

  __sigprocmask (SIG_BLOCK, &SIGALL_SET, &args.oldmask);

  /* The clone flags used will create a new child that runs in the same
     memory space (CLONE_VM), and the execution of the calling thread is
     suspended until the child calls execve or _exit.  These conditions
     are signaled below either by a pipe write (_exit with SPAWN_ERROR)
     or by a successful execve.
     Also, since the calling thread's execution is suspended, there is no
     need for CLONE_SETTLS.  Although parent and child share the same TLS
     namespace, there will be no concurrent access to TLS variables
     (errno for instance).  */
  new_pid = CLONE (__spawni_child, STACK (stack, stack_size), stack_size,
		   CLONE_VM | CLONE_VFORK | SIGCHLD, &args);

  close_not_cancel (args.pipe[1]);

  if (new_pid > 0)
    {
      if (__read (args.pipe[0], &ec, sizeof ec) != sizeof ec)
	ec = 0;
      else
	__waitpid (new_pid, NULL, 0);
    }
  else
    ec = -new_pid;

  __munmap (stack, stack_size);

  close_not_cancel (args.pipe[0]);

  if (!ec && new_pid)
    *pid = new_pid;

  __sigprocmask (SIG_SETMASK, &args.oldmask, 0);

  LIBC_CANCEL_RESET (cs);

  return ec;
}

/* Spawn a new process executing PATH with the attributes described in *ATTRP.
   Before running the process perform the actions described in FILE-ACTIONS. */
int
__spawni (pid_t * pid, const char *file,
	  const posix_spawn_file_actions_t * acts,
	  const posix_spawnattr_t * attrp, char *const argv[],
	  char *const envp[], int xflags)
{
  return __spawnix (pid, file, acts, attrp, argv, envp, xflags,
		    xflags & SPAWN_XFLAGS_USE_PATH ? __execvpe : __execve);
}