ssize_t
__pread_chk (int fd, void *buf, size_t nbytes, off_t offset, size_t buflen)
{
  if (nbytes > buflen)
    __chk_fail ();

  return __pread (fd, buf, nbytes, offset);
}
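/* Illustrative sketch (not part of the original source): under
   _FORTIFY_SOURCE, a pread call on a buffer whose size the compiler
   can prove via __builtin_object_size is redirected through
   __pread_chk, so an over-long read aborts in __chk_fail instead of
   overflowing the buffer.  The function below mimics that expansion
   by hand; read_header is a hypothetical caller.  */
static ssize_t
read_header (int fd)
{
  char hdr[128];

  /* Roughly what the fortified pread expands to: the compile-time
     object size of HDR is passed as the BUFLEN check argument.  */
  return __pread_chk (fd, hdr, sizeof (hdr), 0,
		      __builtin_object_size (hdr, 0));
}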
/* Try to get a file descriptor for the shared memory segment
   containing the database.  */
static struct mapped_database *
get_mapping (request_type type, const char *key,
	     struct mapped_database **mappedp)
{
  struct mapped_database *result = NO_MAPPING;
#ifdef SCM_RIGHTS
  const size_t keylen = strlen (key) + 1;
  char resdata[keylen];
  int saved_errno = errno;

  int mapfd = -1;

  /* Send the request.  */
  struct iovec iov[2];
  request_header req;

  int sock = open_socket ();
  if (sock < 0)
    goto out;

  req.version = NSCD_VERSION;
  req.type = type;
  req.key_len = keylen;

  iov[0].iov_base = &req;
  iov[0].iov_len = sizeof (req);
  iov[1].iov_base = (void *) key;
  iov[1].iov_len = keylen;

  if (__builtin_expect (TEMP_FAILURE_RETRY (__writev (sock, iov, 2))
			!= iov[0].iov_len + iov[1].iov_len, 0))
    /* We cannot even write the request.  */
    goto out_close2;

  /* Room for the data sent along with the file descriptor.  We expect
     the key name back.  */
  iov[0].iov_base = resdata;
  iov[0].iov_len = keylen;

  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));

  /* This access is well-aligned since BUF is correctly aligned for an
     int and CMSG_DATA preserves this alignment.  */
  *(int *) CMSG_DATA (cmsg) = -1;

  msg.msg_controllen = cmsg->cmsg_len;

  if (wait_on_socket (sock) <= 0)
    goto out_close2;

# ifndef MSG_NOSIGNAL
#  define MSG_NOSIGNAL 0
# endif
  if (__builtin_expect (TEMP_FAILURE_RETRY (__recvmsg (sock, &msg,
						       MSG_NOSIGNAL))
			!= keylen, 0))
    goto out_close2;

  mapfd = *(int *) CMSG_DATA (cmsg);

  if (__builtin_expect (CMSG_FIRSTHDR (&msg)->cmsg_len
			!= CMSG_LEN (sizeof (int)), 0))
    goto out_close;

  struct stat64 st;
  if (__builtin_expect (strcmp (resdata, key) != 0, 0)
      || __builtin_expect (fstat64 (mapfd, &st) != 0, 0)
      || __builtin_expect (st.st_size < sizeof (struct database_pers_head),
			   0))
    goto out_close;

  struct database_pers_head head;
  if (__builtin_expect (TEMP_FAILURE_RETRY (__pread (mapfd, &head,
						     sizeof (head), 0))
			!= sizeof (head), 0))
    goto out_close;

  if (__builtin_expect (head.version != DB_VERSION, 0)
      || __builtin_expect (head.header_size != sizeof (head), 0)
      /* This really should not happen but who knows, maybe the update
	 thread got stuck.  */
      || __builtin_expect (! head.nscd_certainly_running
			   && head.timestamp + MAPPING_TIMEOUT < time (NULL),
			   0))
    goto out_close;

  size_t size = (sizeof (head)
		 + roundup (head.module * sizeof (ref_t), ALIGN)
		 + head.data_size);

  if (__builtin_expect (st.st_size < size, 0))
    goto out_close;

  /* The file is large enough, map it now.  */
  void *mapping = __mmap (NULL, size, PROT_READ, MAP_SHARED, mapfd, 0);
  if (__builtin_expect (mapping != MAP_FAILED, 1))
    {
      /* Allocate a record for the mapping.  */
      struct mapped_database *newp = malloc (sizeof (*newp));
      if (newp == NULL)
	{
	  /* Ugh, after all we went through the memory allocation
	     failed.  */
	  __munmap (mapping, size);
	  goto out_close;
	}

      newp->head = mapping;
      newp->data = ((char *) mapping + head.header_size
		    + roundup (head.module * sizeof (ref_t), ALIGN));
      newp->mapsize = size;
      /* Set counter to 1 to show it is usable.  */
      newp->counter = 1;

      result = newp;
    }

 out_close:
  __close (mapfd);
 out_close2:
  __close (sock);
 out:
  __set_errno (saved_errno);
#endif	/* SCM_RIGHTS */

  struct mapped_database *oldval = *mappedp;
  *mappedp = result;

  if (oldval != NULL && atomic_decrement_val (&oldval->counter) == 0)
    __nscd_unmap (oldval);

  return result;
}


struct mapped_database *
__nscd_get_map_ref (request_type type, const char *name,
		    struct locked_map_ptr *mapptr, int *gc_cyclep)
{
  struct mapped_database *cur = mapptr->mapped;

  if (cur == NO_MAPPING)
    return cur;

  int cnt = 0;
  while (atomic_compare_and_exchange_val_acq (&mapptr->lock, 1, 0) != 0)
    {
      // XXX Best number of rounds?
      if (++cnt > 5)
	return NO_MAPPING;

      atomic_delay ();
    }

  cur = mapptr->mapped;

  if (__builtin_expect (cur != NO_MAPPING, 1))
    {
      /* If not mapped or timestamp not updated, request new map.  */
      if (cur == NULL
	  || (cur->head->nscd_certainly_running == 0
	      && cur->head->timestamp + MAPPING_TIMEOUT < time (NULL)))
	cur = get_mapping (type, name, &mapptr->mapped);

      if (__builtin_expect (cur != NO_MAPPING, 1))
	{
	  if (__builtin_expect (((*gc_cyclep = cur->head->gc_cycle) & 1) != 0,
				0))
	    cur = NO_MAPPING;
	  else
	    atomic_increment (&cur->counter);
	}
    }

  mapptr->lock = 0;

  return cur;
}


const struct datahead *
__nscd_cache_search (request_type type, const char *key, size_t keylen,
		     const struct mapped_database *mapped)
{
  unsigned long int hash = __nis_hash (key, keylen) % mapped->head->module;

  ref_t work = mapped->head->array[hash];
  while (work != ENDREF)
    {
      struct hashentry *here = (struct hashentry *) (mapped->data + work);

      if (type == here->type && keylen == here->len
	  && memcmp (key, mapped->data + here->key, keylen) == 0)
	{
	  /* We found the entry.  Increment the appropriate counter.  */
	  const struct datahead *dh
	    = (struct datahead *) (mapped->data + here->packet);

	  /* See whether we must ignore the entry or whether something
	     is wrong because garbage collection is in progress.  */
	  if (dh->usable
	      && ((char *) dh + dh->allocsize
		  <= (char *) mapped->head + mapped->mapsize))
	    return dh;
	}

      work = here->next;
    }

  return NULL;
}
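/* Illustrative sketch (not part of the original source): the sending
   side of the SCM_RIGHTS handshake that get_mapping above expects.
   The daemon echoes the key back in the data part and attaches the
   descriptor of the shared memory segment as ancillary data.
   send_mapping_fd and its parameter names are hypothetical; the real
   nscd daemon code is not shown here.  */
static int
send_mapping_fd (int sock, const char *key, int mapfd)
{
  struct iovec iov = { .iov_base = (void *) key,
		       .iov_len = strlen (key) + 1 };
  union
  {
    struct cmsghdr hdr;
    char bytes[CMSG_SPACE (sizeof (int))];
  } buf;
  struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			.msg_control = buf.bytes,
			.msg_controllen = sizeof (buf) };
  struct cmsghdr *cmsg = CMSG_FIRSTHDR (&msg);

  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  cmsg->cmsg_len = CMSG_LEN (sizeof (int));
  /* Copy the descriptor into the ancillary data; the kernel installs
     a duplicate of it in the receiving process.  */
  memcpy (CMSG_DATA (cmsg), &mapfd, sizeof (int));
  msg.msg_controllen = cmsg->cmsg_len;

  return TEMP_FAILURE_RETRY (sendmsg (sock, &msg, MSG_NOSIGNAL)) < 0 ? -1 : 0;
}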
int
posix_fallocate (int fd, __off_t offset, __off_t len)
{
  struct stat64 st;

  if (offset < 0 || len < 0)
    return EINVAL;

  /* Perform overflow check.  The outer cast relies on a GCC
     extension.  */
  if ((__off_t) ((uint64_t) offset + (uint64_t) len) < 0)
    return EFBIG;

  /* pwrite below will not do the right thing in O_APPEND mode.  */
  {
    int flags = __fcntl (fd, F_GETFL, 0);
    if (flags < 0 || (flags & O_APPEND) != 0)
      return EBADF;
  }

  /* We have to make sure that this is really a regular file.  */
  if (__fxstat64 (_STAT_VER, fd, &st) != 0)
    return EBADF;
  if (S_ISFIFO (st.st_mode))
    return ESPIPE;
  if (! S_ISREG (st.st_mode))
    return ENODEV;

  if (len == 0)
    {
      /* This is racy, but there is no good way to satisfy a
	 zero-length allocation request.  */
      if (st.st_size < offset)
	{
	  int ret = __ftruncate (fd, offset);

	  if (ret != 0)
	    ret = errno;
	  return ret;
	}
      return 0;
    }

  /* Minimize data transfer for network file systems, by issuing
     single-byte write requests spaced by the file system block size.
     (Most local file systems have fallocate support, so this fallback
     code is not used there.)  */
  unsigned increment;
  {
    struct statfs64 f;

    if (__fstatfs64 (fd, &f) != 0)
      return errno;
    if (f.f_bsize == 0)
      increment = 512;
    else if (f.f_bsize < 4096)
      increment = f.f_bsize;
    else
      /* NFS does not propagate the block size of the underlying
	 storage and may report a much larger value which would still
	 leave holes after the loop below, so we cap the increment at
	 4096.  */
      increment = 4096;
  }

  /* Write a null byte to every block.  This is racy; we currently
     lack a better option.  Compare-and-swap against a file mapping
     might avoid additional local races, but requires interposition
     of a signal handler to catch SIGBUS.  */
  for (offset += (len - 1) % increment; len > 0; offset += increment)
    {
      len -= increment;

      if (offset < st.st_size)
	{
	  unsigned char c;
	  ssize_t rsize = __pread (fd, &c, 1, offset);

	  if (rsize < 0)
	    return errno;
	  /* If there is a non-zero byte, the block must have been
	     allocated already.  */
	  else if (rsize == 1 && c != 0)
	    continue;
	}

      if (__pwrite (fd, "", 1, offset) != 1)
	return errno;
    }

  return 0;
}
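/* Illustrative sketch (not part of the original source): a typical
   caller of posix_fallocate.  Unlike most POSIX I/O functions, it
   reports failure through its return value rather than errno.
   reserve_log_space and the 1 MiB size are hypothetical; <fcntl.h>
   and <unistd.h> are assumed to be included.  */
static int
reserve_log_space (const char *path)
{
  int fd = open (path, O_RDWR | O_CREAT, 0644);
  if (fd < 0)
    return errno;

  /* Reserve 1 MiB starting at the beginning of the file; the return
     value, not errno, carries the error code on failure.  */
  int err = posix_fallocate (fd, 0, 1024 * 1024);

  close (fd);
  return err;
}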
ssize_t
__libc_pread (int fd, void *buf, size_t count, off_t offset)
{
  /* Invoke the pread64 syscall directly instead of recursing into
     __pread.  The 32-bit OFFSET is passed as a low/high pair whose
     high word must be the sign extension of OFFSET, not zero, or
     negative offsets would be mangled; the extra 0 argument keeps the
     64-bit pair register-pair aligned on ABIs that require it.  */
  return INLINE_SYSCALL (pread64, 6, fd, buf, count, 0,
			 __LONG_LONG_PAIR (offset >> 31, offset));
}
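/* Illustrative sketch (not part of the original source): how the
   32-bit OFFSET is split for the 64-bit syscall above.  __LONG_LONG_PAIR
   orders the two words for the ABI's endianness; the high word must be
   the arithmetic sign extension so that, e.g., offset == -1 becomes the
   64-bit value 0xffffffffffffffff rather than 0x00000000ffffffff.
   split_offset is a hypothetical helper, assuming a 32-bit off_t and
   GCC's arithmetic right shift of signed values.  */
static void
split_offset (off_t offset, unsigned long *hi, unsigned long *lo)
{
  *lo = (unsigned long) offset;
  /* Shifting by 31 replicates the sign bit across the high word.  */
  *hi = (unsigned long) (offset >> 31);
}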