unsigned int if_nametoindex (const char *ifname) { #ifndef SIOCGIFINDEX __set_errno (ENOSYS); return 0; #else struct ifreq ifr; int fd = __opensock (); if (fd < 0) return 0; strncpy (ifr.ifr_name, ifname, sizeof (ifr.ifr_name)); if (__ioctl (fd, SIOCGIFINDEX, &ifr) < 0) { int saved_errno = errno; close_not_cancel_no_status (fd); if (saved_errno == EINVAL) __set_errno (ENOSYS); return 0; } close_not_cancel_no_status (fd); return ifr.ifr_ifindex; #endif }
/* On a fatal malloc error, write a backtrace and the process memory
   map to descriptor FD.  Only acts when DO_ABORT > 1 and the error
   message was already WRITTEN.  */
static void
backtrace_and_maps (int do_abort, bool written, int fd)
{
  if (do_abort > 1 && written)
    {
      void *addrs[64];
#define naddrs (sizeof (addrs) / sizeof (addrs[0]))
      int n = __backtrace (addrs, naddrs);
      /* The two innermost frames are this machinery itself; only emit
	 anything if there is more than that.  */
      if (n > 2)
	{
#define strnsize(str) str, strlen (str)
#define writestr(str) write_not_cancel (fd, str)
	  writestr (strnsize ("======= Backtrace: =========\n"));
	  /* Skip the innermost frame (this function).  */
	  __backtrace_symbols_fd (addrs + 1, n - 1, fd);

	  writestr (strnsize ("======= Memory map: ========\n"));
	  int fd2 = open_not_cancel_2 ("/proc/self/maps", O_RDONLY);
	  /* BUGFIX: previously the open result was not checked and a
	     failed open led to close(-1).  */
	  if (fd2 != -1)
	    {
	      char buf[1024];
	      ssize_t n2;
	      while ((n2 = read_not_cancel (fd2, buf, sizeof (buf))) > 0)
		if (write_not_cancel (fd, buf, n2) != n2)
		  break;
	      close_not_cancel_no_status (fd2);
	    }
	}
    }
}
/* Translate the interface index IFINDEX into its name, storing it in
   IFNAME (at least IFNAMSIZ bytes).  Returns IFNAME, or NULL with
   errno set.  */
char *
__if_indextoname (unsigned int ifindex, char *ifname)
{
  /* Ask the kernel directly through SIOCGIFNAME (present since Linux
     2.1.50) rather than searching an interface list.  */
  struct ifreq ifr;
  int status;
  int fd = __opensock ();

  if (fd < 0)
    return NULL;

  ifr.ifr_ifindex = ifindex;
  status = __ioctl (fd, SIOCGIFNAME, &ifr);
  close_not_cancel_no_status (fd);

  if (status >= 0)
    return strncpy (ifname, ifr.ifr_name, IFNAMSIZ);

  /* The kernel reports an unknown index as ENODEV; POSIX requires
     ENXIO.  */
  if (errno == ENODEV)
    __set_errno (ENXIO);
  return NULL;
}
/* Build a DIR structure around the already-open descriptor FD.  STATP
   is the fstat result for FD and is used to size the read buffer.  On
   failure NULL is returned and, if CLOSE_FD, FD is closed.
   NOTE(review): the return type is not visible in this chunk; the
   function yields a DIR pointer.  */
internal_function
__alloc_dir (int fd, bool close_fd, const struct stat64 *statp)
{
  /* We always have to set the close-on-exit flag if the user provided
     the file descriptor.  Otherwise only if we have no working
     O_CLOEXEC support.  */
#ifdef O_CLOEXEC
  if (! close_fd || ! check_have_o_cloexec (fd))
#endif
    {
      if (__builtin_expect (__fcntl (fd, F_SETFD, FD_CLOEXEC), 0) < 0)
	goto lose;
    }

  /* The buffer must hold at least one dirent64 record.  */
  const size_t default_allocation = (BUFSIZ < sizeof (struct dirent64)
				     ? sizeof (struct dirent64) : BUFSIZ);
  size_t allocation;
#ifdef _STATBUF_ST_BLKSIZE
  /* Prefer the filesystem's block size when it is plausibly large.  */
  if (__builtin_expect ((size_t) statp->st_blksize >= sizeof (struct dirent64),
			1))
    allocation = statp->st_blksize;
  else
#endif
    allocation = default_allocation;

  DIR *dirp = (DIR *) malloc (sizeof (DIR) + allocation);
  if (dirp == NULL)
    {
#ifdef _STATBUF_ST_BLKSIZE
      /* Retry with the smaller default size before giving up.  */
      if (allocation == statp->st_blksize
	  && allocation != default_allocation)
	{
	  allocation = default_allocation;
	  dirp = (DIR *) malloc (sizeof (DIR) + allocation);
	}
      if (dirp == NULL)
#endif
      lose:
	{
	  if (close_fd)
	    {
	      /* Preserve the errno of the original failure across the
		 close.  */
	      int save_errno = errno;
	      close_not_cancel_no_status (fd);
	      __set_errno (save_errno);
	    }
	  return NULL;
	}
    }

  dirp->fd = fd;
#ifndef NOT_IN_libc
  __libc_lock_init (dirp->lock);
#endif
  dirp->allocation = allocation;
  dirp->size = 0;
  dirp->offset = 0;
  dirp->filepos = 0;

  return dirp;
}
/* Close all file descriptors except the one specified.  */
static void
close_all_fds (void)
{
  DIR *dir = __opendir ("/proc/self/fd");
  if (dir != NULL)
    {
      struct dirent64 *d;
      /* Each entry in /proc/self/fd is named after an open descriptor
	 number.  */
      while ((d = __readdir64 (dir)) != NULL)
	if (isdigit (d->d_name[0]))
	  {
	    char *endp;
	    long int fd = strtol (d->d_name, &endp, 10);
	    /* Keep the pty descriptor and the directory stream we are
	       iterating over.  */
	    if (*endp == '\0' && fd != PTY_FILENO && fd != dirfd (dir))
	      close_not_cancel_no_status (fd);
	  }

      __closedir (dir);

      /* Re-populate the standard descriptors from /dev/null.  The
	 asserts rely on 0 and 1 being the lowest free descriptors
	 after the loop above closed everything else.  */
      int nullfd = open_not_cancel_2 (_PATH_DEVNULL, O_RDONLY);
      assert (nullfd == STDIN_FILENO);
      nullfd = open_not_cancel_2 (_PATH_DEVNULL, O_WRONLY);
      assert (nullfd == STDOUT_FILENO);
      __dup2 (STDOUT_FILENO, STDERR_FILENO);
    }
}
/* Open a non-blocking UNIX-domain socket connected to the nscd
   daemon.  Returns the descriptor, or -1 on failure.  */
static int
open_socket (void)
{
  struct sockaddr_un addr;
  int sock = __socket (PF_UNIX, SOCK_STREAM, 0);

  if (sock < 0)
    return -1;

  /* Switch to non-blocking mode so neither connect nor the later I/O
     can hang indefinitely.  */
  int flags = __fcntl (sock, F_GETFL);
  if (flags != -1)
    __fcntl (sock, F_SETFL, flags | O_NONBLOCK);

  addr.sun_family = AF_UNIX;
  strcpy (addr.sun_path, _PATH_NSCDSOCKET);
  if (__connect (sock, (struct sockaddr *) &addr, sizeof (addr)) < 0
      && errno != EINPROGRESS)
    goto fail;

  struct pollfd pfd[1];
  pfd[0].fd = sock;
  pfd[0].events = POLLOUT | POLLERR | POLLHUP;
  if (__poll (pfd, 1, 5 * 1000) > 0)
    /* Success.  We do not check for success of the connect call here.
       If it failed, the following operations will fail.  */
    return sock;

 fail:
  close_not_cancel_no_status (sock);
  return -1;
}
/* Open the database stored in FILE. If succesful, store either a pointer to the mapped file or a file handle for the file in H and return NSS_STATUS_SUCCESS. On failure, return the appropriate lookup status. */ enum nss_status internal_setent (const char *file, struct nss_db_map *mapping) { enum nss_status status = NSS_STATUS_UNAVAIL; int mode = O_RDONLY | O_LARGEFILE; #ifdef O_CLOEXEC mode |= O_CLOEXEC; #endif int fd = open_not_cancel_2 (file, mode); if (fd != -1) { struct nss_db_header header; if (read (fd, &header, sizeof (header)) == sizeof (header)) { mapping->header = mmap (NULL, header.allocate, PROT_READ, MAP_PRIVATE, fd, 0); mapping->len = header.allocate; if (mapping->header != MAP_FAILED) status = NSS_STATUS_SUCCESS; else if (errno == ENOMEM) status = NSS_STATUS_TRYAGAIN; } close_not_cancel_no_status (fd); } return status; }
/* Open a directory stream on NAME.  */
DIR *
__opendir (const char *name)
{
  struct stat64 statbuf;

  /* The '\1' second argument to __builtin_expect marks the empty-name
     case as unlikely.  */
  if (__builtin_expect (name[0], '\1') == '\0')
    {
      /* POSIX.1-1990 says an empty name gets ENOENT;
	 but `open' might like it fine.  */
      __set_errno (ENOENT);
      return NULL;
    }

#ifdef O_DIRECTORY
  /* Test whether O_DIRECTORY works.  */
  if (o_directory_works == 0)
    tryopen_o_directory ();

  /* We can skip the expensive `stat' call if O_DIRECTORY works.  */
  if (o_directory_works < 0)
#endif
    {
      /* We first have to check whether the name is for a directory.  We
	 cannot do this after the open() call since the open/close operation
	 performed on, say, a tape device might have undesirable effects.  */
      if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0)
	return NULL;
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	  return NULL;
	}
    }

  int fd = open_not_cancel_2 (name, O_RDONLY|O_NDELAY|EXTRA_FLAGS|O_LARGEFILE);
  if (__builtin_expect (fd, 0) < 0)
    return NULL;

  /* Now make sure this really is a directory and nothing changed since
     the `stat' call.  We do not have to perform the test for the
     descriptor being associated with a directory if we know the
     O_DIRECTORY flag is honored by the kernel.  */
  if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0)
    goto lose;
#ifdef O_DIRECTORY
  if (o_directory_works <= 0)
#endif
    {
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	lose:
	  close_not_cancel_no_status (fd);
	  return NULL;
	}
    }

  /* __alloc_dir takes ownership of FD and closes it on failure.  */
  return __alloc_dir (fd, true, &statbuf);
}
/* Build a DIR structure around the open descriptor FD.  FLAGS are the
   open(2) flags that were used; STATP is either NULL or the fstat
   result used to size the read buffer.  On failure NULL is returned
   and, if CLOSE_FD, FD is closed.  NOTE(review): the return type is
   not visible in this chunk; the function yields a DIR pointer.  */
internal_function
__alloc_dir (int fd, bool close_fd, int flags, const struct stat64 *statp)
{
  /* We always have to set the close-on-exit flag if the user provided
     the file descriptor.  Otherwise only if we have no working
     O_CLOEXEC support.  */
#ifdef O_CLOEXEC
  if ((! close_fd && (flags & O_CLOEXEC) == 0)
      || ! check_have_o_cloexec (fd))
#endif
    {
      if (__builtin_expect (__fcntl (fd, F_SETFD, FD_CLOEXEC), 0) < 0)
	goto lose;
    }

  /* Preferred buffer size; never smaller than one dirent64 record.  */
  const size_t default_allocation = (4 * BUFSIZ < sizeof (struct dirent64)
				     ? sizeof (struct dirent64) : 4 * BUFSIZ);
  /* Fallback size used when the larger allocation fails.  */
  const size_t small_allocation = (BUFSIZ < sizeof (struct dirent64)
				   ? sizeof (struct dirent64) : BUFSIZ);
  size_t allocation = default_allocation;
#ifdef _STATBUF_ST_BLKSIZE
  /* Increase allocation if requested, but not if the value appears to
     be bogus.  */
  if (statp != NULL)
    allocation = MIN (MAX ((size_t) statp->st_blksize, default_allocation),
		      MAX_DIR_BUFFER_SIZE);
#endif

  DIR *dirp = (DIR *) malloc (sizeof (DIR) + allocation);
  if (dirp == NULL)
    {
      /* Retry once with the small buffer before failing.  */
      allocation = small_allocation;
      dirp = (DIR *) malloc (sizeof (DIR) + allocation);

      if (dirp == NULL)
      lose:
	{
	  if (close_fd)
	    {
	      /* Preserve the original failure's errno across close.  */
	      int save_errno = errno;
	      close_not_cancel_no_status (fd);
	      __set_errno (save_errno);
	    }
	  return NULL;
	}
    }

  dirp->fd = fd;
#if IS_IN (libc)
  __libc_lock_init (dirp->lock);
#endif
  dirp->allocation = allocation;
  dirp->size = 0;
  dirp->offset = 0;
  dirp->filepos = 0;
  dirp->errcode = 0;

  return dirp;
}
/* Return the number of processors listed in /proc/stat, falling back
   to /proc/cpuinfo.  Returns 1 when neither file can be read.  */
int
__get_nprocs ()
{
  /* XXX Here will come a test for the new system call.  */

  /* Use a large stack buffer only when the remaining stack allows.  */
  const size_t buffer_size = __libc_use_alloca (8192) ? 8192 : 512;
  char *buffer = alloca (buffer_size);
  char *buffer_end = buffer + buffer_size;
  char *cp = buffer_end;
  char *re = buffer_end;
  int result = 1;

#ifdef O_CLOEXEC
  const int flags = O_RDONLY | O_CLOEXEC;
#else
  const int flags = O_RDONLY;
#endif
  /* The /proc/stat format is more uniform, use it by default.  */
  int fd = open_not_cancel_2 ("/proc/stat", flags);
  if (fd != -1)
    {
      result = 0;

      char *l;
      while ((l = next_line (fd, buffer, &cp, &re, buffer_end)) != NULL)
	/* The current format of /proc/stat has all the cpu* entries
	   at the front.  We assume here that stays this way.  */
	if (strncmp (l, "cpu", 3) != 0)
	  break;
	else if (isdigit (l[3]))
	  /* Only the per-cpu "cpuN" lines count; the aggregate "cpu"
	     line does not.  */
	  ++result;

      close_not_cancel_no_status (fd);
    }
  else
    {
      fd = open_not_cancel_2 ("/proc/cpuinfo", flags);
      if (fd != -1)
	{
	  GET_NPROCS_PARSER (fd, buffer, cp, re, buffer_end, result);
	  close_not_cancel_no_status (fd);
	}
    }

  return result;
}
/* Close the cached utmp descriptor, if any, and mark it unused.  */
void end(void)
{
	int fd;

	__UCLIBC_MUTEX_LOCK(utmplock);
	fd = static_fd;
	static_fd = -1;
	if (fd >= 0)
		close_not_cancel_no_status(fd);
	__UCLIBC_MUTEX_UNLOCK(utmplock);
}
/* Detach the process from the controlling terminal and run it in the
   background.  Unless NOCHDIR, chdir to "/"; unless NOCLOSE, redirect
   stdin/stdout/stderr to /dev/null.  Returns 0 on success, -1 with
   errno set on failure.  */
int daemon(int nochdir, int noclose)
{
	int fd;

	if (fork_parent() == -1)
		return -1;

	if (setsid() == -1)
		return -1;

	if (!nochdir)
		chdir("/");

	if (!noclose) {
		struct STAT st;

		if ((fd = open_not_cancel(_PATH_DEVNULL, O_RDWR, 0)) != -1
			&& (__builtin_expect (FSTAT (fd, &st), 0) == 0)) {
			/* Refuse anything masquerading as /dev/null that is
			   not a character device.  */
			if (__builtin_expect (S_ISCHR (st.st_mode), 1) != 0) {
				dup2(fd, STDIN_FILENO);
				dup2(fd, STDOUT_FILENO);
				dup2(fd, STDERR_FILENO);
				if (fd > 2)
					close(fd);
			} else {
				/* We must set an errno value since no
				   function call actually failed.  */
				close_not_cancel_no_status (fd);
				__set_errno (ENODEV);
				return -1;
			}
		} else {
			/* BUGFIX: previously close(-1) was attempted when
			   the open itself had failed; only close a valid
			   descriptor (the fstat-failure case).  */
			if (fd != -1)
				close_not_cancel_no_status (fd);
			return -1;
		}
	}
	return 0;
}
/* Append the record LUTMP to WTMP_FILE, holding an advisory lock
   around the write.  Errors are silently ignored, matching the
   historical interface.  */
static void __updwtmp(const char *wtmp_file, const struct utmp *lutmp)
{
	int fd;

	fd = open_not_cancel_2(wtmp_file, O_APPEND | O_WRONLY);
	if (fd >= 0) {
		if (lockf(fd, F_LOCK, 0) == 0) {
			write_not_cancel(fd, lutmp, sizeof(struct utmp));
			lockf(fd, F_ULOCK, 0);
		}
		/* BUGFIX: the descriptor used to be closed only on the
		 * lockf success path, leaking it whenever the lock could
		 * not be taken.  Close it unconditionally.  */
		close_not_cancel_no_status(fd);
	}
}
/* Probe whether the kernel honors O_DIRECTORY, recording the verdict
   in o_directory_works (positive: works, negative: does not).  The
   caller's errno is preserved.  */
static void
tryopen_o_directory (void)
{
  int save_errno = errno;
  int fd = open_not_cancel_2 ("/dev/null", O_RDONLY|O_NDELAY|O_DIRECTORY);

  if (fd >= 0)
    {
      /* Opening a non-directory with O_DIRECTORY succeeded, so the
	 kernel ignores the flag.  */
      close_not_cancel_no_status (fd);
      o_directory_works = -1;
    }
  else
    /* ENOTDIR proves the flag is honored; any other error tells us
       nothing, so assume it is not.  */
    o_directory_works = errno != ENOTDIR ? -1 : 1;

  __set_errno (save_errno);
}
/* Switch the utmp machinery to operate on NEW_FILE (NULL keeps the
   current file) and drop any cached descriptor.  Always returns 0.  */
int name(const char *new_file)
{
	__UCLIBC_MUTEX_LOCK(utmplock);
	if (new_file != NULL) {
		/* BUGFIX: duplicate NEW_FILE before freeing the old name.
		 * NEW_FILE may alias CURRENT_FILE (a caller passing the
		 * current name back in), so freeing first would make
		 * strdup read freed memory.  */
		char *copy = strdup(new_file);
		if (current_file != default_file)
			free((char *)current_file);
		if (copy != NULL)
			current_file = copy;
		else
			/* We should probably whine about out-of-memory
			 * errors here... Instead just reset to the default */
			current_file = default_file;
	}
	/* A cached descriptor refers to the previous file; drop it.  */
	if (static_fd >= 0) {
		close_not_cancel_no_status(static_fd);
		static_fd = -1;
	}
	__UCLIBC_MUTEX_UNLOCK(utmplock);
	return 0;	/* or maybe return -(current_file != new_file)? */
}
/* Get file-specific information about FILE. */ long int __pathconf (const char *file, int name) { struct statfs fsbuf; int fd; int flags; switch (name) { case _PC_LINK_MAX: return __statfs_link_max (__statfs (file, &fsbuf), &fsbuf, file, -1); case _PC_FILESIZEBITS: return __statfs_filesize_max (__statfs (file, &fsbuf), &fsbuf); case _PC_2_SYMLINKS: return __statfs_symlinks (__statfs (file, &fsbuf), &fsbuf); case _PC_CHOWN_RESTRICTED: return __statfs_chown_restricted (__statfs (file, &fsbuf), &fsbuf); case _PC_PIPE_BUF: flags = O_RDONLY|O_NONBLOCK|O_NOCTTY; #ifdef O_CLOEXEC flags |= O_CLOEXEC; #endif fd = open_not_cancel_2 (file, flags); if (fd >= 0) { long int r = __fcntl (fd, F_GETPIPE_SZ); close_not_cancel_no_status (fd); if (r > 0) return r; } /* FALLTHROUGH */ default: return posix_pathconf (file, name); } }
int pthread_getname_np (pthread_t th, char *buf, size_t len) { const struct pthread *pd = (const struct pthread *) th; /* Unfortunately the kernel headers do not export the TASK_COMM_LEN macro. So we have to define it here. */ #define TASK_COMM_LEN 16 if (len < TASK_COMM_LEN) return ERANGE; if (pd == THREAD_SELF) return prctl (PR_GET_NAME, buf) ? errno : 0; #define FMT "/proc/self/task/%u/comm" char fname[sizeof (FMT) + 8]; sprintf (fname, FMT, (unsigned int) pd->tid); int fd = open_not_cancel_2 (fname, O_RDONLY); if (fd == -1) return errno; int res = 0; ssize_t n = TEMP_FAILURE_RETRY (read_not_cancel (fd, buf, len)); if (n < 0) res = errno; else { if (buf[n - 1] == '\n') buf[n - 1] = '\0'; else if (n == len) res = ERANGE; else buf[n] = '\0'; } close_not_cancel_no_status (fd); return res; }
static DIR * opendir_tail (int fd) { if (__glibc_unlikely (fd < 0)) return NULL; /* Now make sure this really is a directory and nothing changed since the `stat' call. The S_ISDIR check is superfluous if O_DIRECTORY works, but it's cheap and we need the stat call for st_blksize anyway. */ struct stat64 statbuf; if (__glibc_unlikely (__fxstat64 (_STAT_VER, fd, &statbuf) < 0)) goto lose; if (__glibc_unlikely (! S_ISDIR (statbuf.st_mode))) { __set_errno (ENOTDIR); lose: close_not_cancel_no_status (fd); return NULL; } return __alloc_dir (fd, true, 0, &statbuf); }
/* Build a DIR structure around the open descriptor FD.  STATP is the
   fstat result used to size the read buffer.  On failure NULL is
   returned and, if CLOSE_FD, FD is closed.  NOTE(review): the return
   type is not visible in this chunk; the function yields a DIR
   pointer.  */
internal_function
__alloc_dir (int fd, bool close_fd, const struct stat64 *statp)
{
  if (__builtin_expect (__fcntl (fd, F_SETFD, FD_CLOEXEC), 0) < 0)
    goto lose;

  size_t allocation;
#ifdef _STATBUF_ST_BLKSIZE
  /* Prefer the filesystem block size when it can hold a record.  */
  if (__builtin_expect ((size_t) statp->st_blksize >= sizeof (struct dirent64),
			1))
    allocation = statp->st_blksize;
  else
#endif
    allocation = (BUFSIZ < sizeof (struct dirent64)
		  ? sizeof (struct dirent64) : BUFSIZ);

  /* Pad so the dirent buffer that follows the DIR header is aligned
     for struct dirent64.  */
  const int pad = -sizeof (DIR) % __alignof__ (struct dirent64);

  DIR *dirp = (DIR *) malloc (sizeof (DIR) + allocation + pad);
  if (dirp == NULL)
  lose:
    {
      if (close_fd)
	{
	  /* Preserve the errno of the original failure across the
	     close.  */
	  int save_errno = errno;
	  close_not_cancel_no_status (fd);
	  __set_errno (save_errno);
	}
      return NULL;
    }
  memset (dirp, '\0', sizeof (DIR));
  dirp->data = (char *) (dirp + 1) + pad;
  dirp->allocation = allocation;
  dirp->fd = fd;

  __libc_lock_init (dirp->lock);

  return dirp;
}
/* Used like: return __statfs_chown_restricted (__statfs (name, &buf), &buf);*/
long int
__statfs_chown_restricted (int result, const struct statfs *fsbuf)
{
  if (result < 0)
    {
      if (errno == ENOSYS)
	/* statfs is unavailable; report the default value.  */
	return 1;

      /* Some other error occurred.  */
      return -1;
    }

  long int retval = 1;

  if (fsbuf->f_type == XFS_SUPER_MAGIC)
    {
      /* Read the value from /proc/sys/fs/xfs/restrict_chown.  If we
	 cannot read it, default to assuming the restriction is in
	 place.  */
      int fd = open_not_cancel_2 ("/proc/sys/fs/xfs/restrict_chown",
				  O_RDONLY);
      if (fd != -1)
	{
	  char buf[2];
	  if (TEMP_FAILURE_RETRY (read_not_cancel (fd, buf, 2)) == 2
	      && buf[0] >= '0' && buf[0] <= '1')
	    retval = buf[0] - '0';

	  close_not_cancel_no_status (fd);
	}
    }

  return retval;
}
/* Store at most NELEM load averages from /proc/loadavg into LOADAVG.
   Returns the number of samples stored, or -1 on failure.  */
int
getloadavg (double loadavg[], int nelem)
{
  char buf[65];

  int fd = open_not_cancel_2 ("/proc/loadavg", O_RDONLY);
  if (fd < 0)
    return -1;

  ssize_t nread = read_not_cancel (fd, buf, sizeof buf - 1);
  close_not_cancel_no_status (fd);
  if (nread <= 0)
    return -1;
  /* Terminate the buffer, overwriting the last character read
     (normally the trailing newline).  */
  buf[nread - 1] = '\0';

  if (nelem > 3)
    nelem = 3;

  char *p = buf;
  int i;
  for (i = 0; i < nelem; ++i)
    {
      char *endp;
      /* Parse in the C locale so the decimal point is always '.'.  */
      loadavg[i] = __strtod_l (p, &endp, _nl_C_locobj_ptr);
      if (endp == p)
	/* This should not happen.  The format of /proc/loadavg
	   must have changed.  Don't return with what we have,
	   signal an error.  */
	return -1;
      p = endp;
    }

  return i;
}
/* Create a socket connected to a name.  Send the request described by
   TYPE/KEY/KEYLEN to nscd and read RESPONSELEN bytes of reply into
   RESPONSE.  Returns the connected socket on success, -1 otherwise
   (with the caller's errno preserved).  */
int
__nscd_open_socket (const char *key, size_t keylen, request_type type,
		    void *response, size_t responselen)
{
  int saved_errno = errno;

  int sock = open_socket ();
  if (sock >= 0)
    {
      request_header req;
      req.version = NSCD_VERSION;
      req.type = type;
      req.key_len = keylen;

      /* Send header and key in a single writev.  */
      struct iovec vec[2] =
	{
	  { .iov_base = &req, .iov_len = sizeof (request_header) },
	  { .iov_base = (void *) key, .iov_len = keylen }
	};

      ssize_t nbytes = TEMP_FAILURE_RETRY (__writev (sock, vec, 2));
      if (nbytes == (ssize_t) (sizeof (request_header) + keylen)
	  /* Wait for data.  */
	  && wait_on_socket (sock) > 0)
	{
	  nbytes = TEMP_FAILURE_RETRY (__read (sock, response, responselen));
	  if (nbytes == (ssize_t) responselen)
	    return sock;
	}

      close_not_cancel_no_status (sock);
    }

  __set_errno (saved_errno);
  return -1;
}
/* Like __opendir, but NAME is resolved relative to the descriptor DFD
   (AT_FDCWD for the current directory).  NOTE(review): the return
   type is not visible in this chunk; a DIR pointer or NULL is
   returned.  */
internal_function
__opendirat (int dfd, const char *name)
{
  struct stat64 statbuf;
  struct stat64 *statp = NULL;

  /* The '\1' second argument marks the empty-name case unlikely.  */
  if (__builtin_expect (name[0], '\1') == '\0')
    {
      /* POSIX.1-1990 says an empty name gets ENOENT;
	 but `open' might like it fine.  */
      __set_errno (ENOENT);
      return NULL;
    }

#ifdef O_DIRECTORY
  /* Test whether O_DIRECTORY works.  */
  if (o_directory_works == 0)
    tryopen_o_directory ();

  /* We can skip the expensive `stat' call if O_DIRECTORY works.  */
  if (o_directory_works < 0)
#endif
    {
      /* We first have to check whether the name is for a directory.  We
	 cannot do this after the open() call since the open/close operation
	 performed on, say, a tape device might have undesirable effects.  */
      if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0)
	return NULL;
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	  return NULL;
	}
    }

  int flags = O_RDONLY|O_NDELAY|EXTRA_FLAGS|O_LARGEFILE;
#ifdef O_CLOEXEC
  flags |= O_CLOEXEC;
#endif
  int fd;
#ifdef IS_IN_rtld
  /* The dynamic linker only supports the current-directory case.  */
  assert (dfd == AT_FDCWD);
  fd = open_not_cancel_2 (name, flags);
#else
  fd = openat_not_cancel_3 (dfd, name, flags);
#endif
  if (__builtin_expect (fd, 0) < 0)
    return NULL;

#ifdef O_DIRECTORY
  if (o_directory_works <= 0)
#endif
    {
      /* Now make sure this really is a directory and nothing changed since
	 the `stat' call.  */
      if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0)
	goto lose;
      if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0))
	{
	  __set_errno (ENOTDIR);
	lose:
	  close_not_cancel_no_status (fd);
	  return NULL;
	}
      statp = &statbuf;
    }

  /* __alloc_dir takes ownership of FD and closes it on failure.  */
  return __alloc_dir (fd, true, 0, statp);
}
/* Open the message catalog CAT_NAME.  NLSPATH is the colon-separated
   search path; ENV_VAR is the locale category value used for the
   %L/%l/%t/%c substitutions.  On success the catalog file is mapped
   (or read) into CATALOG and 0 is returned; -1 on any failure.  */
int
__open_catalog (const char *cat_name, const char *nlspath, const char *env_var,
		__nl_catd catalog)
{
  int fd = -1;
  struct stat64 st;
  int swapping;
  size_t cnt;
  size_t max_offset;
  size_t tab_size;
  const char *lastp;
  int result = -1;
  char *buf = NULL;

  /* Names containing a slash bypass the NLSPATH search entirely.  */
  if (strchr (cat_name, '/') != NULL || nlspath == NULL)
    fd = open_not_cancel_2 (cat_name, O_RDONLY);
  else
    {
      const char *run_nlspath = nlspath;
      /* Grow BUF so N more bytes fit; on allocation failure BUF is
	 freed and the whole function bails out with -1.  */
#define ENOUGH(n)							      \
  if (__glibc_unlikely (bufact + (n) >= bufmax))			      \
    {									      \
      char *old_buf = buf;						      \
      bufmax += (bufmax < 256 + (n)) ? 256 + (n) : bufmax;		      \
      buf = realloc (buf, bufmax);					      \
      if (__glibc_unlikely (buf == NULL))				      \
	{								      \
	  free (old_buf);						      \
	  return -1;							      \
	}								      \
    }

      /* The RUN_NLSPATH variable contains a colon separated list of
	 descriptions where we expect to find catalogs.  We have to
	 recognize certain % substitutions and stop when we found the
	 first existing file.  */
      size_t bufact;
      size_t bufmax = 0;
      size_t len;

      fd = -1;
      while (*run_nlspath != '\0')
	{
	  bufact = 0;

	  if (*run_nlspath == ':')
	    {
	      /* Leading colon or adjacent colons - treat same as %N.  */
	      len = strlen (cat_name);
	      ENOUGH (len);
	      memcpy (&buf[bufact], cat_name, len);
	      bufact += len;
	    }
	  else
	    while (*run_nlspath != ':' && *run_nlspath != '\0')
	      if (*run_nlspath == '%')
		{
		  const char *tmp;

		  ++run_nlspath;	/* We have seen the `%'.  */
		  switch (*run_nlspath++)
		    {
		    case 'N':
		      /* Use the catalog name.  */
		      len = strlen (cat_name);
		      ENOUGH (len);
		      memcpy (&buf[bufact], cat_name, len);
		      bufact += len;
		      break;
		    case 'L':
		      /* Use the current locale category value.  */
		      len = strlen (env_var);
		      ENOUGH (len);
		      memcpy (&buf[bufact], env_var, len);
		      bufact += len;
		      break;
		    case 'l':
		      /* Use language element of locale category value.  */
		      tmp = env_var;
		      do
			{
			  ENOUGH (1);
			  buf[bufact++] = *tmp++;
			}
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      break;
		    case 't':
		      /* Use territory element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '_' && *tmp != '.');
		      if (*tmp == '_')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0' && *tmp != '.');
			}
		      break;
		    case 'c':
		      /* Use code set element of locale category value.  */
		      tmp = env_var;
		      do
			++tmp;
		      while (*tmp != '\0' && *tmp != '.');
		      if (*tmp == '.')
			{
			  ++tmp;
			  do
			    {
			      ENOUGH (1);
			      buf[bufact++] = *tmp++;
			    }
			  while (*tmp != '\0');
			}
		      break;
		    case '%':
		      /* A literal percent sign.  */
		      ENOUGH (1);
		      buf[bufact++] = '%';
		      break;
		    default:
		      /* Unknown variable: ignore this path element.  */
		      bufact = 0;
		      while (*run_nlspath != '\0' && *run_nlspath != ':')
			++run_nlspath;
		      break;
		    }
		}
	      else
		{
		  /* Ordinary character: copy it verbatim.  */
		  ENOUGH (1);
		  buf[bufact++] = *run_nlspath++;
		}

	  ENOUGH (1);
	  buf[bufact] = '\0';

	  if (bufact != 0)
	    {
	      fd = open_not_cancel_2 (buf, O_RDONLY);
	      if (fd >= 0)
		break;
	    }

	  ++run_nlspath;
	}
    }

  /* Avoid dealing with directories and block devices */
  if (__builtin_expect (fd, 0) < 0)
    {
      free (buf);
      return -1;
    }

  if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &st), 0) < 0)
    goto close_unlock_return;

  if (__builtin_expect (!S_ISREG (st.st_mode), 0)
      || (size_t) st.st_size < sizeof (struct catalog_obj))
    {
      /* `errno' is not set correctly but the file is not usable.
	 Use a reasonable error value.  */
      __set_errno (EINVAL);
      goto close_unlock_return;
    }

  catalog->file_size = st.st_size;
#ifdef _POSIX_MAPPED_FILES
# ifndef MAP_COPY
  /* Linux seems to lack read-only copy-on-write.  */
#  define MAP_COPY MAP_PRIVATE
# endif
# ifndef MAP_FILE
  /* Some systems do not have this flag; it is superfluous.  */
#  define MAP_FILE 0
# endif
  catalog->file_ptr =
    (struct catalog_obj *) __mmap (NULL, st.st_size, PROT_READ,
				   MAP_FILE|MAP_COPY, fd, 0);
  if (__builtin_expect (catalog->file_ptr != (struct catalog_obj *) MAP_FAILED,
			1))
    /* Tell the world we managed to mmap the file.  */
    catalog->status = mmapped;
  else
#endif	/* _POSIX_MAPPED_FILES */
    {
      /* mmap failed perhaps because the system call is not
	 implemented.  Try to load the file.  */
      size_t todo;
      catalog->file_ptr = malloc (st.st_size);
      if (catalog->file_ptr == NULL)
	goto close_unlock_return;

      todo = st.st_size;
      /* Save read, handle partial reads.  */
      do
	{
	  size_t now = read_not_cancel (fd, (((char *) catalog->file_ptr)
					     + (st.st_size - todo)), todo);
	  if (now == 0 || now == (size_t) -1)
	    {
#ifdef EINTR
	      if (now == (size_t) -1 && errno == EINTR)
		continue;
#endif
	      free ((void *) catalog->file_ptr);
	      goto close_unlock_return;
	    }
	  todo -= now;
	}
      while (todo > 0);
      catalog->status = malloced;
    }

  /* Determine whether the file is a catalog file and if yes whether
     it is written using the correct byte order.  Else we have to swap
     the values.  */
  if (__glibc_likely (catalog->file_ptr->magic == CATGETS_MAGIC))
    swapping = 0;
  else if (catalog->file_ptr->magic == SWAPU32 (CATGETS_MAGIC))
    swapping = 1;
  else
    {
    invalid_file:
      /* Invalid file.  Free the resources and mark catalog as not
	 usable.  */
#ifdef _POSIX_MAPPED_FILES
      if (catalog->status == mmapped)
	__munmap ((void *) catalog->file_ptr, catalog->file_size);
      else
#endif	/* _POSIX_MAPPED_FILES */
	free (catalog->file_ptr);
      goto close_unlock_return;
    }

  /* Apply byte swapping only when the catalog's order differs.  */
#define SWAP(x) (swapping ? SWAPU32 (x) : (x))

  /* Get dimensions of the used hashing table.  */
  catalog->plane_size = SWAP (catalog->file_ptr->plane_size);
  catalog->plane_depth = SWAP (catalog->file_ptr->plane_depth);

  /* The file contains two versions of the pointer tables.  Pick the
     right one for the local byte order.  */
#if __BYTE_ORDER == __LITTLE_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[0];
#elif __BYTE_ORDER == __BIG_ENDIAN
  catalog->name_ptr = &catalog->file_ptr->name_ptr[catalog->plane_size
						   * catalog->plane_depth
						   * 3];
#else
# error Cannot handle __BYTE_ORDER byte order
#endif

  /* The rest of the file contains all the strings.  They are
     addressed relative to the position of the first string.  */
  catalog->strings =
    (const char *) &catalog->file_ptr->name_ptr[catalog->plane_size
						* catalog->plane_depth
						* 3 * 2];

  /* Determine the largest string offset mentioned in the table.  */
  max_offset = 0;
  tab_size = 3 * catalog->plane_size * catalog->plane_depth;
  for (cnt = 2; cnt < tab_size; cnt += 3)
    if (catalog->name_ptr[cnt] > max_offset)
      max_offset = catalog->name_ptr[cnt];

  /* Now we can check whether the file is large enough to contain the
     tables it says it contains.  */
  if ((size_t) st.st_size
      <= (sizeof (struct catalog_obj) + 2 * tab_size + max_offset))
    /* The last string is not contained in the file.  */
    goto invalid_file;

  /* Walk the last string to verify it is NUL-terminated within the
     file.  */
  lastp = catalog->strings + max_offset;
  max_offset = (st.st_size
		- sizeof (struct catalog_obj) + 2 * tab_size + max_offset);
  while (*lastp != '\0')
    {
      if (--max_offset == 0)
	goto invalid_file;
      ++lastp;
    }

  /* We succeeded.  */
  result = 0;

  /* Release the lock again.  */
 close_unlock_return:
  close_not_cancel_no_status (fd);
  free (buf);

  return result;
}
/* Look up a passwd entry via nscd, first through the shared memory
   cache mapping and otherwise over the daemon's socket.  On success
   the entry is assembled in RESULTBUF/BUFFER and *RESULT points at
   it.  Returns 0 on success/not-found, an errno value on error, and
   -1/-2 internally to signal "retry without/with mapping".  */
static int internal_function
nscd_getpw_r (const char *key, size_t keylen, request_type type,
	      struct passwd *resultbuf, char *buffer, size_t buflen,
	      struct passwd **result)
{
  int gc_cycle;
  int nretries = 0;

  /* If the mapping is available, try to search there instead of
     communicating with the nscd.  */
  struct mapped_database *mapped;
  mapped = __nscd_get_map_ref (GETFDPW, "passwd", &map_handle, &gc_cycle);

 retry:;
  const char *pw_name = NULL;
  int retval = -1;
  /* Sentinel: no bound on the record end unless a cache hit sets it.  */
  const char *recend = (const char *) ~UINTMAX_C (0);
  pw_response_header pw_resp;

  if (mapped != NO_MAPPING)
    {
      struct datahead *found = __nscd_cache_search (type, key, keylen, mapped,
						    sizeof pw_resp);
      if (found != NULL)
	{
	  /* The string data follows the fixed-size response header in
	     the cache record.  */
	  pw_name = (const char *) (&found->data[0].pwdata + 1);
	  pw_resp = found->data[0].pwdata;
	  recend = (const char *) found->data + found->recsize;
	  /* Now check if we can trust pw_resp fields.  If GC is
	     in progress, it can contain anything.  */
	  if (mapped->head->gc_cycle != gc_cycle)
	    {
	      retval = -2;
	      goto out;
	    }
	}
    }

  int sock = -1;
  if (pw_name == NULL)
    {
      /* Cache miss: ask the daemon directly.  */
      sock = __nscd_open_socket (key, keylen, type, &pw_resp,
				 sizeof (pw_resp));
      if (sock == -1)
	{
	  __nss_not_use_nscd_passwd = 1;
	  goto out;
	}
    }

  /* No value found so far.  */
  *result = NULL;

  if (__glibc_unlikely (pw_resp.found == -1))
    {
      /* The daemon does not cache this database.  */
      __nss_not_use_nscd_passwd = 1;
      goto out_close;
    }

  if (pw_resp.found == 1)
    {
      /* Set the information we already have.  */
      resultbuf->pw_uid = pw_resp.pw_uid;
      resultbuf->pw_gid = pw_resp.pw_gid;

      /* Lay out the five strings consecutively in BUFFER using the
	 lengths from the response header.  */
      char *p = buffer;
      /* get pw_name */
      resultbuf->pw_name = p;
      p += pw_resp.pw_name_len;
      /* get pw_passwd */
      resultbuf->pw_passwd = p;
      p += pw_resp.pw_passwd_len;
      /* get pw_gecos */
      resultbuf->pw_gecos = p;
      p += pw_resp.pw_gecos_len;
      /* get pw_dir */
      resultbuf->pw_dir = p;
      p += pw_resp.pw_dir_len;
      /* get pw_pshell */
      resultbuf->pw_shell = p;
      p += pw_resp.pw_shell_len;

      ssize_t total = p - buffer;
      /* Reject cache records whose string data would run past the end
	 of the record.  */
      if (__glibc_unlikely (pw_name + total > recend))
	goto out_close;
      if (__glibc_unlikely (buflen < total))
	{
	  __set_errno (ERANGE);
	  retval = ERANGE;
	  goto out_close;
	}

      retval = 0;
      if (pw_name == NULL)
	{
	  /* Read the string data over the socket.  */
	  ssize_t nbytes = __readall (sock, buffer, total);

	  if (__glibc_unlikely (nbytes != total))
	    {
	      /* The `errno' to some value != ERANGE.  */
	      __set_errno (ENOENT);
	      retval = ENOENT;
	    }
	  else
	    *result = resultbuf;
	}
      else
	{
	  /* Copy the various strings.  */
	  memcpy (resultbuf->pw_name, pw_name, total);

	  /* Try to detect corrupt databases.  Every string must be
	     NUL-terminated at its declared length.  */
	  if (resultbuf->pw_name[pw_resp.pw_name_len - 1] != '\0'
	      || resultbuf->pw_passwd[pw_resp.pw_passwd_len - 1] != '\0'
	      || resultbuf->pw_gecos[pw_resp.pw_gecos_len - 1] != '\0'
	      || resultbuf->pw_dir[pw_resp.pw_dir_len - 1] != '\0'
	      || resultbuf->pw_shell[pw_resp.pw_shell_len - 1] != '\0')
	    {
	      /* We cannot use the database.  */
	      retval = mapped->head->gc_cycle != gc_cycle ? -2 : -1;
	      goto out_close;
	    }

	  *result = resultbuf;
	}
    }
  else
    {
      /* Set errno to 0 to indicate no error, just no found record.  */
      __set_errno (0);
      /* Even though we have not found anything, the result is zero.  */
      retval = 0;
    }

 out_close:
  if (sock != -1)
    close_not_cancel_no_status (sock);
 out:
  if (__nscd_drop_map_ref (mapped, &gc_cycle) != 0)
    {
      /* When we come here this means there has been a GC cycle while we
	 were looking for the data.  This means the data might have been
	 inconsistent.  Retry if possible.  */
      if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
	{
	  /* nscd is just running gc now.  Disable using the mapping.  */
	  if (atomic_decrement_val (&mapped->counter) == 0)
	    __nscd_unmap (mapped);
	  mapped = NO_MAPPING;
	}

      if (retval != -1)
	goto retry;
    }

  return retval;
}
/* Fetch USER's supplementary group list through nscd, from the shared
   cache mapping when possible and otherwise over the socket.  *GROUPSP
   (of capacity *SIZE, possibly grown here) receives the gids; GROUP is
   appended if missing.  Returns the number of gids stored, 0 when the
   user is unknown, or -1 on failure.  NOTE(review): the LIMIT
   parameter is not consulted in this chunk.  */
int
__nscd_getgrouplist (const char *user, gid_t group, long int *size,
		     gid_t **groupsp, long int limit)
{
  size_t userlen = strlen (user) + 1;
  int gc_cycle;
  int nretries = 0;

  /* If the mapping is available, try to search there instead of
     communicating with the nscd.  */
  struct mapped_database *mapped;
  mapped = __nscd_get_map_ref (GETFDGR, "group", &__gr_map_handle,
			       &gc_cycle);

 retry:;
  char *respdata = NULL;
  int retval = -1;
  int sock = -1;
  initgr_response_header initgr_resp;

  if (mapped != NO_MAPPING)
    {
      struct datahead *found = __nscd_cache_search (INITGROUPS, user,
						    userlen, mapped,
						    sizeof initgr_resp);
      if (found != NULL)
	{
	  /* The gid array follows the fixed-size response header in
	     the cache record.  */
	  respdata = (char *) (&found->data[0].initgrdata + 1);
	  initgr_resp = found->data[0].initgrdata;
	  char *recend = (char *) found->data + found->recsize;

	  /* Now check if we can trust initgr_resp fields.  If GC is
	     in progress, it can contain anything.  */
	  if (mapped->head->gc_cycle != gc_cycle)
	    {
	      retval = -2;
	      goto out;
	    }

	  /* Reject a record whose gid array would run past its end.  */
	  if (respdata + initgr_resp.ngrps * sizeof (int32_t) > recend)
	    goto out;
	}
    }

  /* If we do not have the cache mapped, try to get the data over the
     socket.  */
  if (respdata == NULL)
    {
      sock = __nscd_open_socket (user, userlen, INITGROUPS, &initgr_resp,
				 sizeof (initgr_resp));
      if (sock == -1)
	{
	  /* nscd not running or wrong version.  */
	  __nss_not_use_nscd_group = 1;
	  goto out;
	}
    }

  if (initgr_resp.found == 1)
    {
      /* The following code assumes that gid_t and int32_t are the
	 same size.  This is the case for all existing implementations.
	 If this should change some code needs to be added which
	 doesn't use memcpy but instead copies each array element one
	 by one.  */
      assert (sizeof (int32_t) == sizeof (gid_t));
      assert (initgr_resp.ngrps >= 0);

      /* Make sure we have enough room.  We always count GROUP in even
	 though we might not end up adding it.  */
      if (*size < initgr_resp.ngrps + 1)
	{
	  gid_t *newp = realloc (*groupsp,
				 (initgr_resp.ngrps + 1) * sizeof (gid_t));
	  if (newp == NULL)
	    /* We cannot increase the buffer size.  */
	    goto out_close;

	  *groupsp = newp;
	  *size = initgr_resp.ngrps + 1;
	}

      if (respdata == NULL)
	{
	  /* Read the data from the socket.  */
	  if ((size_t) __readall (sock, *groupsp,
				  initgr_resp.ngrps * sizeof (gid_t))
	      == initgr_resp.ngrps * sizeof (gid_t))
	    retval = initgr_resp.ngrps;
	}
      else
	{
	  /* Just copy the data.  */
	  retval = initgr_resp.ngrps;
	  memcpy (*groupsp, respdata, retval * sizeof (gid_t));
	}
    }
  else
    {
      if (__glibc_unlikely (initgr_resp.found == -1))
	{
	  /* The daemon does not cache this database.  */
	  __nss_not_use_nscd_group = 1;
	  goto out_close;
	}

      /* No group found yet.  */
      retval = 0;

      assert (*size >= 1);
    }

  /* Check whether GROUP is part of the mix.  If not, add it.  */
  if (retval >= 0)
    {
      int cnt;
      for (cnt = 0; cnt < retval; ++cnt)
	if ((*groupsp)[cnt] == group)
	  break;

      if (cnt == retval)
	(*groupsp)[retval++] = group;
    }

 out_close:
  if (sock != -1)
    close_not_cancel_no_status (sock);
 out:
  if (__nscd_drop_map_ref (mapped, &gc_cycle) != 0)
    {
      /* When we come here this means there has been a GC cycle while we
	 were looking for the data.  This means the data might have been
	 inconsistent.  Retry if possible.  */
      if ((gc_cycle & 1) != 0 || ++nretries == 5 || retval == -1)
	{
	  /* nscd is just running gc now.  Disable using the mapping.  */
	  if (atomic_decrement_val (&mapped->counter) == 0)
	    __nscd_unmap (mapped);
	  mapped = NO_MAPPING;
	}

      if (retval != -1)
	goto retry;
    }

  return retval;
}
int __get_nprocs (void) { static int cached_result = -1; static time_t timestamp; time_t now = time (NULL); time_t prev = timestamp; atomic_read_barrier (); if (now == prev && cached_result > -1) return cached_result; /* XXX Here will come a test for the new system call. */ const size_t buffer_size = __libc_use_alloca (8192) ? 8192 : 512; char *buffer = alloca (buffer_size); char *buffer_end = buffer + buffer_size; char *cp = buffer_end; char *re = buffer_end; const int flags = O_RDONLY | O_CLOEXEC; int fd = open_not_cancel_2 ("/sys/devices/system/cpu/online", flags); char *l; int result = 0; if (fd != -1) { l = next_line (fd, buffer, &cp, &re, buffer_end); if (l != NULL) do { char *endp; unsigned long int n = strtoul (l, &endp, 10); if (l == endp) { result = 0; break; } unsigned long int m = n; if (*endp == '-') { l = endp + 1; m = strtoul (l, &endp, 10); if (l == endp) { result = 0; break; } } result += m - n + 1; l = endp; while (l < re && isspace (*l)) ++l; } while (l < re); close_not_cancel_no_status (fd); if (result > 0) goto out; } cp = buffer_end; re = buffer_end; result = 1; /* The /proc/stat format is more uniform, use it by default. */ fd = open_not_cancel_2 ("/proc/stat", flags); if (fd != -1) { result = 0; while ((l = next_line (fd, buffer, &cp, &re, buffer_end)) != NULL) /* The current format of /proc/stat has all the cpu* entries at the front. We assume here that stays this way. */ if (strncmp (l, "cpu", 3) != 0) break; else if (isdigit (l[3])) ++result; close_not_cancel_no_status (fd); } else { fd = open_not_cancel_2 ("/proc/cpuinfo", flags); if (fd != -1) { GET_NPROCS_PARSER (fd, buffer, cp, re, buffer_end, result); close_not_cancel_no_status (fd); } } out: cached_result = result; atomic_write_barrier (); timestamp = now; return result; }
/* Open a directory stream on NAME. */ DIR * __opendir (const char *name) { DIR *dirp; struct stat64 statbuf; int fd; size_t allocation; int save_errno; if (__builtin_expect (name[0], '\1') == '\0') { /* POSIX.1-1990 says an empty name gets ENOENT; but `open' might like it fine. */ __set_errno (ENOENT); return NULL; } #ifdef O_DIRECTORY /* Test whether O_DIRECTORY works. */ if (o_directory_works == 0) tryopen_o_directory (); /* We can skip the expensive `stat' call if O_DIRECTORY works. */ if (o_directory_works < 0) #endif { /* We first have to check whether the name is for a directory. We cannot do this after the open() call since the open/close operation performed on, say, a tape device might have undesirable effects. */ if (__builtin_expect (__xstat64 (_STAT_VER, name, &statbuf), 0) < 0) return NULL; if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0)) { __set_errno (ENOTDIR); return NULL; } } fd = open_not_cancel_2 (name, O_RDONLY|O_NDELAY|EXTRA_FLAGS|O_LARGEFILE); if (__builtin_expect (fd, 0) < 0) return NULL; /* Now make sure this really is a directory and nothing changed since the `stat' call. We do not have to perform the test for the descriptor being associated with a directory if we know the O_DIRECTORY flag is honored by the kernel. */ if (__builtin_expect (__fxstat64 (_STAT_VER, fd, &statbuf), 0) < 0) goto lose; #ifdef O_DIRECTORY if (o_directory_works <= 0) #endif { if (__builtin_expect (! S_ISDIR (statbuf.st_mode), 0)) { save_errno = ENOTDIR; goto lose; } } if (__builtin_expect (__fcntl (fd, F_SETFD, FD_CLOEXEC), 0) < 0) goto lose; #ifdef _STATBUF_ST_BLKSIZE if (__builtin_expect ((size_t) statbuf.st_blksize >= sizeof (struct dirent64), 1)) allocation = statbuf.st_blksize; else #endif allocation = (BUFSIZ < sizeof (struct dirent64) ? 
sizeof (struct dirent64) : BUFSIZ); const int pad = -sizeof (DIR) % __alignof__ (struct dirent64); dirp = (DIR *) malloc (sizeof (DIR) + allocation + pad); if (dirp == NULL) lose: { save_errno = errno; close_not_cancel_no_status (fd); __set_errno (save_errno); return NULL; } memset (dirp, '\0', sizeof (DIR)); dirp->data = (char *) (dirp + 1) + pad; dirp->allocation = allocation; dirp->fd = fd; __libc_lock_init (dirp->lock); return dirp; }
static struct if_nameindex * if_nameindex_ioctl (void) { int fd = __opensock (); struct ifconf ifc; unsigned int nifs, i; int rq_len; struct if_nameindex *idx = NULL; # define RQ_IFS 4 if (fd < 0) return NULL; ifc.ifc_buf = NULL; /* We may be able to get the needed buffer size directly, rather than guessing. */ if (! old_siocgifconf) { ifc.ifc_buf = NULL; ifc.ifc_len = 0; if (__ioctl (fd, SIOCGIFCONF, &ifc) < 0 || ifc.ifc_len == 0) { # if __ASSUME_SIOCGIFNAME == 0 old_siocgifconf = 1; # endif rq_len = RQ_IFS * sizeof (struct ifreq); } else rq_len = ifc.ifc_len; } else rq_len = RQ_IFS * sizeof (struct ifreq); /* Read all the interfaces out of the kernel. */ ifc.ifc_buf = alloca (rq_len); ifc.ifc_len = rq_len; while (1) { if (__ioctl (fd, SIOCGIFCONF, &ifc) < 0) { close_not_cancel_no_status (fd); return NULL; } if (ifc.ifc_len < rq_len || ! old_siocgifconf) break; ifc.ifc_buf = extend_alloca (ifc.ifc_buf, rq_len, 2 * rq_len); ifc.ifc_len = rq_len; } nifs = ifc.ifc_len / sizeof (struct ifreq); idx = malloc ((nifs + 1) * sizeof (struct if_nameindex)); if (idx == NULL) { close_not_cancel_no_status (fd); __set_errno (ENOBUFS); return NULL; } for (i = 0; i < nifs; ++i) { struct ifreq *ifr = &ifc.ifc_req[i]; idx[i].if_name = __strdup (ifr->ifr_name); if (idx[i].if_name == NULL || __ioctl (fd, SIOCGIFINDEX, ifr) < 0) { int saved_errno = errno; unsigned int j; for (j = 0; j < i; ++j) free (idx[j].if_name); free (idx); close_not_cancel_no_status (fd); if (saved_errno == EINVAL) saved_errno = ENOSYS; else if (saved_errno == ENOMEM) saved_errno = ENOBUFS; __set_errno (saved_errno); return NULL; } idx[i].if_index = ifr->ifr_ifindex; } idx[i].if_index = 0; idx[i].if_name = NULL; close_not_cancel_no_status (fd); return idx; }
void internal_function __protocol_available (int *have_inet, int *have_inet6) { int fd = __opensock (); unsigned int nifs; int rq_len; struct ifconf ifc; # define RQ_IFS 4 /* Wirst case assumption. */ *have_inet = 0; *have_inet6 = 0; if (fd < 0) /* We cannot open the socket. No networking at all? */ return; /* We may be able to get the needed buffer size directly, rather than guessing. */ if (! old_siocgifconf) { ifc.ifc_buf = NULL; ifc.ifc_len = 0; if (__ioctl (fd, SIOCGIFCONF, &ifc) < 0 || ifc.ifc_len == 0) { # if __ASSUME_SIOCGIFNAME == 0 old_siocgifconf = 1; # endif rq_len = RQ_IFS * sizeof (struct ifreq); } else rq_len = ifc.ifc_len; } else rq_len = RQ_IFS * sizeof (struct ifreq); /* Read all the interfaces out of the kernel. */ do { ifc.ifc_buf = alloca (ifc.ifc_len = rq_len); if (__ioctl (fd, SIOCGIFCONF, &ifc) < 0) { close_not_cancel_no_status (fd); return; } rq_len *= 2; } while (ifc.ifc_len == rq_len && old_siocgifconf); nifs = ifc.ifc_len / sizeof (struct ifreq); /* Go through all the interfaces and get the address. */ while (nifs-- > 0) if (__ioctl (fd, SIOCGIFADDR, &ifc.ifc_req[nifs]) >= 0) { /* We successfully got information about this interface. Now test whether it is an IPv4 or IPv6 address. */ if (ifc.ifc_req[nifs].ifr_addr.sa_family == AF_INET) *have_inet = 1; else if (ifc.ifc_req[nifs].ifr_addr.sa_family == AF_INET6) *have_inet6 = 1; /* Note, this is & not &&. It works since the values are always 0 or 1. */ if (*have_inet & *have_inet6) /* We can stop early. */ break; } close_not_cancel_no_status (fd); }