/* Read up to N bytes into BUF from socket FD.
   Returns the number of bytes read, or -1 (with errno set) on error.  */
ssize_t
__recv (int fd, void *buf, size_t n, int flags)
{
  error_t err;
  mach_port_t addrport;
  char *bufp = buf;
  mach_msg_type_number_t nread = n;
  mach_port_t *ports;
  mach_msg_type_number_t nports = 0;
  char *cdata = NULL;
  mach_msg_type_number_t clen = 0;

  /* The server may fill BUFP in place or replace it with an
     out-of-line buffer; it also returns the sender's address port
     and any control data.  */
  if (err = HURD_DPORT_USE (fd, __socket_recv (port, &addrport,
					       flags, &bufp, &nread,
					       &ports, &nports,
					       &cdata, &clen,
					       &flags,
					       n)))
    return __hurd_sockfail (fd, flags, err);

  /* __recv has no way to return the address or control data, so
     release them immediately.  NOTE(review): the PORTS array received
     above is never deallocated here — looks like a leak when NPORTS
     is nonzero; confirm against the io server's behavior.  */
  __mach_port_deallocate (__mach_task_self (), addrport);
  __vm_deallocate (__mach_task_self (), (vm_address_t) cdata, clen);

  if (bufp != buf)
    {
      /* Data came back out of line; copy it into the caller's buffer
	 and free the out-of-line memory.  */
      memcpy (buf, bufp, nread);
      __vm_deallocate (__mach_task_self (), (vm_address_t) bufp, nread);
    }

  return nread;
}
/* Read up to *NBYTES bytes from FD into BUF at position OFFSET
   (-1 means the current file position).  On success store the number
   of bytes actually read in *NBYTES and return 0; otherwise return an
   error code without touching *NBYTES.  */
error_t
_hurd_fd_read (struct hurd_fd *fd, void *buf, size_t *nbytes, loff_t offset)
{
  error_t err;
  char *data;
  mach_msg_type_number_t nread;

  /* RPC body run with the fd's io port (possibly routed through the
     controlling tty); DATA may be returned in place or replaced with
     an out-of-line buffer.  */
  error_t readfd (io_t port)
    {
      return __io_read (port, &data, &nread, offset, *nbytes);
    }

  data = buf;
  nread = *nbytes;
  if (err = HURD_FD_PORT_USE (fd, _hurd_ctty_input (port, ctty, readfd)))
    return err;

  if (data != buf)
    {
      /* The data was returned out of line.  */
      if (nread > *nbytes)	/* Sanity check for bogus server.  */
	{
	  __vm_deallocate (__mach_task_self (), (vm_address_t) data, nread);
	  return EGRATUITOUS;
	}
      /* Copy the data into the buffer and deallocate the out-of-line
	 memory the server handed us.  */
      memcpy (buf, data, nread);
      __vm_deallocate (__mach_task_self (), (vm_address_t) data, nread);
    }

  *nbytes = nread;
  return 0;
}
/* Make sure the cached _hurd_id data is valid: if it has been
   invalidated, release the old uid/gid arrays and rid_auth port,
   then refetch everything from the auth server.  Returns 0 on
   success or the error from the auth RPC.  */
error_t
_hurd_check_ids (void)
{
  if (! _hurd_id.valid)
    {
      /* Release one cached id set (either the `gen' or `aux' member),
	 deallocating the vm regions holding the arrays and clearing
	 the pointers/counts so a failed refetch leaves sane state.  */
      inline void dealloc (__typeof (_hurd_id.gen) *p)
	{
	  if (p->uids)
	    {
	      __vm_deallocate (__mach_task_self (),
			       (vm_address_t) p->uids,
			       p->nuids * sizeof (uid_t));
	      p->uids = NULL;
	    }
	  p->nuids = 0;
	  if (p->gids)
	    {
	      __vm_deallocate (__mach_task_self (),
			       (vm_address_t) p->gids,
			       p->ngids * sizeof (gid_t));
	      p->gids = NULL;
	    }
	  p->ngids = 0;
	}
      error_t err;

      dealloc (&_hurd_id.gen);
      dealloc (&_hurd_id.aux);

      /* Drop the stale re-authentication port, if any.  */
      if (_hurd_id.rid_auth != MACH_PORT_NULL)
	{
	  __mach_port_deallocate (__mach_task_self (), _hurd_id.rid_auth);
	  _hurd_id.rid_auth = MACH_PORT_NULL;
	}

      /* Fetch fresh effective ("gen") and auxiliary ids from the
	 auth server.  On failure _hurd_id stays invalid.  */
      if (err = __USEPORT (AUTH, __auth_getids
			   (port,
			    &_hurd_id.gen.uids, &_hurd_id.gen.nuids,
			    &_hurd_id.aux.uids, &_hurd_id.aux.nuids,
			    &_hurd_id.gen.gids, &_hurd_id.gen.ngids,
			    &_hurd_id.aux.gids, &_hurd_id.aux.ngids)))
	return err;

      _hurd_id.valid = 1;
    }

  return 0;
}
/* Get the socket option OPTNAME at protocol level LEVEL for socket FD.
   On entry *OPTLEN is the size of the buffer OPTVAL; on return it is
   the amount of data stored there.  Returns 0 or -1 with errno set.  */
/* XXX should be __getsockopt ? */
int
getsockopt (int fd, int level, int optname,
	    void *optval, socklen_t *optlen)
{
  error_t err;
  char *buf = optval;
  mach_msg_type_number_t buflen = *optlen;

  if (err = HURD_DPORT_USE (fd, __socket_getopt (port, level, optname,
						 &buf, &buflen)))
    return __hurd_dfail (fd, err);

  /* Never report more bytes than the server actually returned.  */
  if (*optlen > buflen)
    *optlen = buflen;

  if (buf != optval)
    {
      /* The value came back out of line; copy what fits into the
	 caller's buffer and free the whole out-of-line region.  */
      memcpy (optval, buf, *optlen);
      __vm_deallocate (__mach_task_self (), (vm_address_t) buf, buflen);
    }

  return 0;
}
/* msg server function: return the task's INIT_INT_MAX initial integer
   values in a freshly vm_allocate'd array whose ownership moves to the
   caller via the reply message.  AUTH must pass AUTHCHECK.  */
kern_return_t
_S_get_init_ints (mach_port_t msgport, mach_port_t auth,
		  int **values, unsigned int *nvalues)
{
  error_t err;
  unsigned int i;

  AUTHCHECK;

  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) values,
			   INIT_INT_MAX * sizeof (int), 1))
    return err;
  *nvalues = INIT_INT_MAX;

  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (err = get_int (i, &(*values)[i]))
      {
      case 0:			/* Success.  */
	break;
      case EINVAL:		/* Unknown index.  */
	(*values)[i] = 0;
	break;
      default:			/* Lossage.  */
	/* Don't hand back a partially-filled array.  */
	__vm_deallocate (__mach_task_self (),
			 (vm_address_t) *values,
			 INIT_INT_MAX * sizeof (int));
	return err;
      }

  return 0;
}
/* msg server function: return the task's _hurd_nports initial ports in
   a freshly vm_allocate'd array; the reply moves the send rights (and
   the array) to the caller.  AUTH must pass AUTHCHECK.  */
kern_return_t
_S_get_init_ports (mach_port_t msgport, mach_port_t auth,
		   mach_port_t **ports, mach_msg_type_name_t *ports_type,
		   unsigned int *nports)
{
  unsigned int i;
  error_t err;

  AUTHCHECK;

  if (err = __vm_allocate (__mach_task_self (), (vm_address_t *) ports,
			   _hurd_nports * sizeof (mach_port_t), 1))
    return err;
  *nports = _hurd_nports;

  for (i = 0; i < _hurd_nports; ++i)
    /* This function adds a new user ref for the *RESULT it gives back.
       Our reply message uses move-send rights that consumes this ref.  */
    if (err = _hurd_ports_get (i, &(*ports)[i]))
      {
	/* Died part way through.  Deallocate the ports already fetched.  */
	while (i-- > 0)
	  __mach_port_deallocate (__mach_task_self (), (*ports)[i]);
	__vm_deallocate (__mach_task_self (),
			 (vm_address_t) *ports,
			 *nports * sizeof (mach_port_t));
	return err;
      }

  *ports_type = MACH_MSG_TYPE_MOVE_SEND;
  return 0;
}
/* Close the directory stream DIRP.
   Return 0 if successful, -1 if not.  */
int
__closedir (DIR *dirp)
{
  error_t err;

  if (dirp == NULL)
    {
      errno = EINVAL;
      return -1;
    }

  __libc_lock_lock (dirp->__lock);
  /* Free the readdir buffer.  NOTE(review): the result of this
     deallocation is discarded — ERR is overwritten by the
     _hurd_fd_close result just below.  */
  err = __vm_deallocate (__mach_task_self (),
			 (vm_address_t) dirp->__data, dirp->__allocation);
  dirp->__data = NULL;
  err = _hurd_fd_close (dirp->__fd);

  if (err)
    {
      /* Unlock the DIR.  A failing closedir can be repeated (and may
	 fail again, but shouldn't deadlock).  */
      __libc_lock_unlock (dirp->__lock);
      return __hurd_fail (err);
    }

  /* Clean up the lock and free the structure.  */
  __libc_lock_fini (dirp->__lock);
  free (dirp);

  return 0;
}
/* Read the contents of the symbolic link FILE_NAME relative to FD into no
   more than LEN bytes of BUF.  The contents are not null-terminated.
   Returns the number of characters read, or -1 for errors.  */
ssize_t
readlinkat (int fd, const char *file_name, char *buf, size_t len)
{
  error_t err;
  file_t file;
  struct stat64 st;

  /* O_NOLINK opens the link object itself rather than its target.  */
  file = __file_name_lookup_at (fd, 0, file_name, O_READ | O_NOLINK, 0);
  if (file == MACH_PORT_NULL)
    return -1;

  err = __io_stat (file, &st);
  if (! err)
    if (S_ISLNK (st.st_mode))
      {
	char *rbuf = buf;

	/* The link target is the file's contents; LEN is updated to
	   the number of bytes actually read.  */
	err = __io_read (file, &rbuf, &len, 0, len);
	if (!err && rbuf != buf)
	  {
	    /* Out-of-line return: copy and release.  */
	    memcpy (buf, rbuf, len);
	    __vm_deallocate (__mach_task_self (), (vm_address_t)rbuf, len);
	  }
      }
    else
      err = EINVAL;		/* Not a symlink.  */

  __mach_port_deallocate (__mach_task_self (), file);

  return err ? __hurd_fail (err) : len;
}
/* Destroy the interval-timer helper thread and reclaim its stack.  */
inline void
kill_itimer_thread (void)
{
  /* Terminate the thread before touching its stack: it is still
     executing on the memory released below.  */
  __thread_terminate (_hurd_itimer_thread);

  __vm_deallocate (__mach_task_self (),
		   _hurd_itimer_thread_stack_base,
		   _hurd_itimer_thread_stack_size);

  /* Record that no itimer thread exists any more.  */
  _hurd_itimer_thread = MACH_PORT_NULL;
}
/* Remove the memory mapping covering ADDR .. ADDR+LEN.
   Returns 0 on success, -1 with errno set on failure.  */
int
__munmap (__ptr_t addr, size_t len)
{
  kern_return_t err = __vm_deallocate (__mach_task_self (),
				       (vm_address_t) addr,
				       (vm_size_t) len);
  if (!err)
    return 0;

  /* Mach error codes double as errno values here.  */
  errno = err;
  return -1;
}
/* Fetch the thread port for PID's user thread.  */
error_t
fetch_user_thread (task_t task, thread_t *thread)
{
  thread_t stackbuf[3];
  thread_t *list = stackbuf;
  mach_msg_type_number_t count = 3, idx;
  error_t err = __task_threads (task, &list, &count);

  if (err)
    return err;
  if (count == 0)
    return EINVAL;

  /* Assume the user thread is the first one in the list.  */
  *thread = list[0];

  /* Drop the references the kernel gave us on all the others.  */
  for (idx = 1; idx < count; ++idx)
    __mach_port_deallocate (__mach_task_self (), list[idx]);

  /* If the list came back out of line, release that memory too.  */
  if (list != stackbuf)
    __vm_deallocate (__mach_task_self (),
		     (vm_address_t) list, count * sizeof list[0]);

  return 0;
}
/* Read up to N chars into BUF from COOKIE.
   Return how many chars were read, 0 for EOF or -1 for error.  */
static ssize_t
readio (void *cookie, char *buf, size_t n)
{
  char *data = buf;
  mach_msg_type_number_t nread = n;
  error_t err = __io_read ((io_t) cookie, &data, &nread, -1, n);

  if (err)
    return __hurd_fail (err);

  /* The io server may hand the data back out of line; in that case
     copy it into the caller's buffer and free the returned region.  */
  if (data != buf)
    {
      memcpy (buf, data, nread);
      __vm_deallocate (__mach_task_self (),
		       (vm_address_t) data, (vm_size_t) nread);
    }

  return nread;
}
/* Map LEN bytes at OFFSET of the file open on FD (or anonymous memory
   for MAP_ANON) into the task at ADDR, with protection PROT and the
   semantics selected by FLAGS.  Returns the mapped address or
   (__ptr_t) -1 with errno set.  This variant accepts a non-page-aligned
   ADDR as long as it is congruent with OFFSET modulo the page size.  */
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  vm_size_t pageoff;

  mapaddr = (vm_address_t) addr;

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len,
			   !(flags & MAP_FIXED));

      if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (!err)
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err)
		 : (__ptr_t) mapaddr;
    }

  /* Split OFFSET into a page-aligned part and the offset within the
     page; the latter is re-applied to the address after mapping.  */
  pageoff = offset & (vm_page_size - 1);
  offset &= ~(vm_page_size - 1);

  if (flags & MAP_FIXED)
    {
      /* A specific address is requested.  It need not be page-aligned;
	 it just needs to be congruent with the object offset.  */
      if ((mapaddr & (vm_page_size - 1)) != pageoff)
	return (__ptr_t) (long int) __hurd_fail (EINVAL);
      else
	/* We will add back PAGEOFF after mapping.  */
	mapaddr -= pageoff;
    }

  /* Translate POSIX protection bits into Mach vm protection bits.  */
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	/* io_map returns separate read-only and read-write memory
	   objects; pick the one matching PROT and drop the other.  */
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  return (__ptr_t) (long int) __hurd_dfail (fd, err);
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL /* Not writable by mapping.  */
		     && !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write
		 the object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  ! (flags & MAP_FIXED),
		  memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
    {
      /* XXX this is not atomic as it is in unix! */
      /* The region is already allocated; deallocate it first.  */
      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
      if (! err)
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			0, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED)
			? VM_INHERIT_SHARE : VM_INHERIT_COPY);
    }

  /* vm_map took its own reference on the memory object; drop ours.  */
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  /* Adjust the mapping address for the offset-within-page.  */
  mapaddr += pageoff;

  return (__ptr_t) mapaddr;
}
/* Map LEN bytes at OFFSET of the file open on FD (or anonymous memory
   for MAP_ANON) into the task at ADDR, with protection PROT and the
   semantics selected by FLAGS.  Returns the mapped address or
   (__ptr_t) -1 with errno set.  This variant requires ADDR and OFFSET
   to be page-aligned, treats a non-MAP_FIXED ADDR as a placement hint,
   and maps PROT_NONE file mappings as well.  */
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;

  mapaddr = (vm_address_t) addr;

  /* ADDR and OFFSET must be page-aligned.  */
  if ((mapaddr & (__vm_page_size - 1)) || (offset & (__vm_page_size - 1)))
    return (__ptr_t) (long int) __hurd_fail (EINVAL);

  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len, mapaddr == 0);

      if (err == KERN_NO_SPACE)
	{
	  if (flags & MAP_FIXED)
	    {
	      /* XXX this is not atomic as it is in unix! */
	      /* The region is already allocated; deallocate it first.  */
	      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	      if (!err)
		err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
	    }
	  else if (mapaddr != 0)
	    /* The hint address was unavailable; let the kernel pick.  */
	    err = __vm_allocate (__mach_task_self (), &mapaddr, len, 1);
	}

      return err ? (__ptr_t) (long int) __hurd_fail (err)
		 : (__ptr_t) mapaddr;
    }

  /* Translate POSIX protection bits into Mach vm protection bits.  */
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;

  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
	mach_port_t robj, wobj;
	/* io_map returns separate read-only and read-write memory
	   objects; pick the one matching PROT and drop the other.  */
	if (err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj)))
	  {
	    if (err == MIG_BAD_ID || err == EOPNOTSUPP || err == ENOSYS)
	      err = ENODEV;	/* File descriptor doesn't support mmap.  */
	    return (__ptr_t) (long int) __hurd_dfail (fd, err);
	  }
	switch (prot & (PROT_READ|PROT_WRITE))
	  {
	    /* Although it apparently doesn't make sense to map a file
	       with protection set to PROT_NONE, it is actually sometimes
	       done.  In particular, that's how localedef reserves some
	       space for the locale archive file, the rationale being
	       that some implementations take into account whether the
	       mapping is anonymous or not when selecting addresses.  */
	  case PROT_NONE:
	  case PROT_READ:
	    memobj = robj;
	    if (wobj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), wobj);
	    break;
	  case PROT_WRITE:
	    memobj = wobj;
	    if (robj != MACH_PORT_NULL)
	      __mach_port_deallocate (__mach_task_self (), robj);
	    break;
	  case PROT_READ|PROT_WRITE:
	    if (robj == wobj)
	      {
		memobj = wobj;
		/* Remove extra reference.  */
		__mach_port_deallocate (__mach_task_self (), memobj);
	      }
	    else if (wobj == MACH_PORT_NULL /* Not writable by mapping.  */
		     && !(flags & MAP_SHARED))
	      /* The file can only be mapped for reading.  Since we are
		 making a private mapping, we will never try to write
		 the object anyway, so we don't care.  */
	      memobj = robj;
	    else
	      {
		__mach_port_deallocate (__mach_task_self (), wobj);
		return (__ptr_t) (long int) __hurd_fail (EACCES);
	      }
	    break;
	  default:
	    __builtin_unreachable ();
	  }
	break;
	/* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */

  err = __vm_map (__mach_task_self (),
		  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
		  mapaddr == 0, memobj, (vm_offset_t) offset,
		  ! (flags & MAP_SHARED),
		  vmprot, VM_PROT_ALL,
		  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);

  if (err == KERN_NO_SPACE)
    {
      if (flags & MAP_FIXED)
	{
	  /* XXX this is not atomic as it is in unix! */
	  /* The region is already allocated; deallocate it first.  */
	  err = __vm_deallocate (__mach_task_self (), mapaddr, len);
	  if (! err)
	    err = __vm_map (__mach_task_self (),
			    &mapaddr, (vm_size_t) len, (vm_address_t) 0,
			    0, memobj, (vm_offset_t) offset,
			    ! (flags & MAP_SHARED),
			    vmprot, VM_PROT_ALL,
			    (flags & MAP_SHARED)
			    ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
	}
      else if (mapaddr != 0)
	/* The hint address was unavailable; retry anywhere.  */
	err = __vm_map (__mach_task_self (),
			&mapaddr, (vm_size_t) len, (vm_address_t) 0,
			1, memobj, (vm_offset_t) offset,
			! (flags & MAP_SHARED),
			vmprot, VM_PROT_ALL,
			(flags & MAP_SHARED)
			? VM_INHERIT_SHARE : VM_INHERIT_COPY);
    }

  /* vm_map took its own reference on the memory object; drop ours.  */
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);

  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  return (__ptr_t) mapaddr;
}
/* Send signal SIG to process number PID.  If PID is zero,
   send SIG to all processes in the current process's process group.
   If PID is < -1, send SIG to all processes in process group - PID.  */
int
__kill (pid_t pid, int sig)
{
  int delivered = 0;		/* Set when we deliver any signal.  */
  error_t err;
  mach_port_t proc;
  struct hurd_userlink ulink;

  void kill_pid (pid_t pid)	/* Kill one PID.  */
    {
      /* SIGKILL is not delivered as a normal signal.
	 Sending SIGKILL to a process means to terminate its task.  */
      if (sig == SIGKILL)
	/* Fetch the process's task port and terminate the task.  We
	   loop in case the process execs and changes its task port.
	   If the old task port dies after we fetch it but before we
	   send the RPC, we get MACH_SEND_INVALID_DEST; if it dies
	   after we send the RPC request but before it is serviced,
	   we get MIG_SERVER_DIED.  */
	do
	  {
	    task_t refport;
	    err = __proc_pid2task (proc, pid, &refport);
	    /* Ignore zombies.  */
	    if (!err && refport != MACH_PORT_NULL)
	      {
		err = __task_terminate (refport);
		__mach_port_deallocate (__mach_task_self (), refport);
	      }
	  } while (err == MACH_SEND_INVALID_DEST
		   || err == MIG_SERVER_DIED);
      else
	{
	  error_t taskerr;
	  /* RPC body run by HURD_MSGPORT_RPC with the target's message
	     port and a reference port (its task or session port).  */
	  error_t kill_port (mach_port_t msgport, mach_port_t refport)
	    {
	      if (msgport != MACH_PORT_NULL)
		/* Send a signal message to his message port.  */
		return __msg_sig_post (msgport, sig, 0, refport);

	      /* The process has no message port.  Perhaps try direct
		 frobnication of the task.  */

	      if (taskerr)
		/* If we could not get the task port, we can do
		   nothing.  */
		return taskerr;

	      if (refport == MACH_PORT_NULL)
		/* proc_pid2task returned success with a null task port.
		   That means the process is a zombie.  Signals
		   to zombies should return success and do nothing.  */
		return 0;

	      /* For user convenience in the case of a task that has
		 not registered any message port with the proc server,
		 translate a few signals to direct task operations.  */
	      switch (sig)
		{
		  /* The only signals that really make sense for an
		     unregistered task are kill, suspend, and continue.  */
		case SIGSTOP:
		case SIGTSTP:
		  return __task_suspend (refport);
		case SIGCONT:
		  return __task_resume (refport);
		case SIGTERM:
		case SIGQUIT:
		case SIGINT:
		  return __task_terminate (refport);
		default:
		  /* We have full permission to send signals, but there
		     is no meaningful way to express this signal.  */
		  return EPERM;
		}
	    }
	  err = HURD_MSGPORT_RPC (__proc_getmsgport (proc, pid, &msgport),
				  (taskerr = __proc_pid2task (proc, pid,
							      &refport))
				  ? __proc_getsidport (proc, &refport) : 0, 1,
				  kill_port (msgport, refport));
	}
      if (! err)
	delivered = 1;
    }

  proc = _hurd_port_get (&_hurd_ports[INIT_PORT_PROC], &ulink);

  if (pid <= 0)
    {
      /* Send SIG to each process in pgrp (- PID).  */
      pid_t pidbuf[10], *pids = pidbuf;
      mach_msg_type_number_t i, npids = sizeof (pidbuf) / sizeof (pidbuf[0]);

      err = __proc_getpgrppids (proc, - pid, &pids, &npids);
      if (!err)
	{
	  for (i = 0; i < npids; ++i)
	    {
	      kill_pid (pids[i]);
	      if (err == ESRCH)
		/* The process died already.  Ignore it.  */
		err = 0;
	    }
	  /* Free the pid list if it came back out of line.  */
	  if (pids != pidbuf)
	    __vm_deallocate (__mach_task_self (),
			     (vm_address_t) pids,
			     npids * sizeof (pids[0]));
	}
    }
  else
    kill_pid (pid);

  _hurd_port_free (&_hurd_ports[INIT_PORT_PROC], &ulink, proc);

  /* If we delivered no signals, but ERR is clear, this must mean that
     every kill_pid call failed with ESRCH, meaning all the processes
     in the pgrp died between proc_getpgrppids and kill_pid; in that
     case we fail with ESRCH.  */
  return delivered ? 0 : __hurd_fail (err ?: ESRCH);
}
/* Return an array of if_nameindex structures, one for each network
   interface present, plus one indicating the end of the array.  On
   error, return NULL.  */
struct if_nameindex *
if_nameindex (void)
{
  error_t err = 0;
  char data[2048];
  file_t server;
  int fd = __opensock ();
  struct ifconf ifc;
  unsigned int nifs, i;
  struct if_nameindex *idx = NULL;

  ifc.ifc_buf = data;

  if (fd < 0)
    return NULL;

  server = _hurd_socket_server (PF_INET, 0);
  if (server == MACH_PORT_NULL)
    nifs = 0;
  else
    {
      size_t len = sizeof data;
      /* Ask the pfinet server for the whole interface list; it may
	 replace IFC.IFC_BUF with an out-of-line buffer.  */
      err = __pfinet_siocgifconf (server, -1, &ifc.ifc_buf, &len);
      if (err == MACH_SEND_INVALID_DEST || err == MIG_SERVER_DIED)
	{
	  /* On the first use of the socket server during the operation,
	     allow for the old server port dying.  */
	  server = _hurd_socket_server (PF_INET, 1);
	  if (server == MACH_PORT_NULL)
	    goto out;
	  err = __pfinet_siocgifconf (server, -1, &ifc.ifc_buf, &len);
	}
      if (err)
	goto out;

      ifc.ifc_len = len;
      nifs = len / sizeof (struct ifreq);
    }

  /* One extra slot for the all-zero terminating entry.  */
  idx = malloc ((nifs + 1) * sizeof (struct if_nameindex));
  if (idx == NULL)
    {
      err = ENOBUFS;
      goto out;
    }

  for (i = 0; i < nifs; ++i)
    {
      struct ifreq *ifr = &ifc.ifc_req[i];
      idx[i].if_name = __strdup (ifr->ifr_name);
      if (idx[i].if_name == NULL
	  || __ioctl (fd, SIOCGIFINDEX, ifr) < 0)
	{
	  unsigned int j;
	  err = errno;

	  /* Unwind: free all names built so far and the array.  */
	  for (j = 0; j < i; ++j)
	    free (idx[j].if_name);
	  free (idx);
	  idx = NULL;

	  if (err == EINVAL)
	    err = ENOSYS;
	  else if (err == ENOMEM)
	    err = ENOBUFS;
	  goto out;
	}
      idx[i].if_index = ifr->ifr_ifindex;
    }

  /* Terminating entry.  */
  idx[i].if_index = 0;
  idx[i].if_name = NULL;

 out:
  __close (fd);
  /* Free the out-of-line buffer if the server substituted one.  */
  if (data != ifc.ifc_buf)
    __vm_deallocate (__mach_task_self (), (vm_address_t) ifc.ifc_buf,
		     ifc.ifc_len);
  __set_errno (err);
  return idx;
}
/* Called by MiG to deallocate space.  */
void
__mig_deallocate (vm_address_t addr, vm_size_t size)
{
  /* There is no way to report failure to MiG, so the result of the
     deallocation is deliberately ignored.  */
  (void) __vm_deallocate (__mach_task_self (), addr, size);
}
/* Receive a message as described by MESSAGE from socket FD. Returns the number of bytes read or -1 for errors. */ ssize_t __libc_recvmsg (int fd, struct msghdr *message, int flags) { error_t err; addr_port_t aport; char *data = NULL; mach_msg_type_number_t len = 0; mach_port_t *ports; mach_msg_type_number_t nports = 0; char *cdata = NULL; mach_msg_type_number_t clen = 0; size_t amount; char *buf; int i; /* Find the total number of bytes to be read. */ amount = 0; for (i = 0; i < message->msg_iovlen; i++) { amount += message->msg_iov[i].iov_len; /* As an optimization, we set the initial values of DATA and LEN from the first non-empty iovec. This kicks-in in the case where the whole packet fits into that iovec buffer. */ if (data == NULL && message->msg_iov[i].iov_len > 0) { data = message->msg_iov[i].iov_base; len = message->msg_iov[i].iov_len; } } buf = data; if (err = HURD_DPORT_USE (fd, __socket_recv (port, &aport, flags, &data, &len, &ports, &nports, &cdata, &clen, &message->msg_flags, amount))) return __hurd_sockfail (fd, flags, err); if (message->msg_name != NULL && aport != MACH_PORT_NULL) { char *buf = message->msg_name; mach_msg_type_number_t buflen = message->msg_namelen; int type; err = __socket_whatis_address (aport, &type, &buf, &buflen); if (err == EOPNOTSUPP) /* If the protocol server can't tell us the address, just return a zero-length one. 
*/ { buf = message->msg_name; buflen = 0; err = 0; } if (err) { __mach_port_deallocate (__mach_task_self (), aport); return __hurd_sockfail (fd, flags, err); } if (message->msg_namelen > buflen) message->msg_namelen = buflen; if (buf != message->msg_name) { memcpy (message->msg_name, buf, message->msg_namelen); __vm_deallocate (__mach_task_self (), (vm_address_t) buf, buflen); } if (buflen > 0) ((struct sockaddr *) message->msg_name)->sa_family = type; } else if (message->msg_name != NULL) message->msg_namelen = 0; __mach_port_deallocate (__mach_task_self (), aport); if (buf == data) buf += len; else { /* Copy the data into MSG. */ if (len > amount) message->msg_flags |= MSG_TRUNC; else amount = len; buf = data; for (i = 0; i < message->msg_iovlen; i++) { #define min(a, b) ((a) > (b) ? (b) : (a)) size_t copy = min (message->msg_iov[i].iov_len, amount); memcpy (message->msg_iov[i].iov_base, buf, copy); buf += copy; amount -= copy; if (len == 0) break; } __vm_deallocate (__mach_task_self (), (vm_address_t) data, len); } /* Copy the control message into MSG. */ if (clen > message->msg_controllen) message->msg_flags |= MSG_CTRUNC; else message->msg_controllen = clen; memcpy (message->msg_control, cdata, message->msg_controllen); __vm_deallocate (__mach_task_self (), (vm_address_t) cdata, clen); return (buf - data); }
/* First-stage C startup: ARG0 is the first word on the stack laid out
   by exec; the argument vector, environment, and (when present) the
   Hurd startup data block follow it in memory.  Initializes the
   global argv/environ, reclaims the exec-server stack if we moved off
   it, sets up per-thread variables, and runs _hurd_init/__libc_init.  */
static void
init1 (int argc, char *arg0, ...)
{
  char **argv = &arg0;
  char **envp = &argv[argc + 1];
  struct hurd_startup_data *d;

  __libc_argc = argc;
  __libc_argv = argv;
  __environ = envp;
  while (*envp)
    ++envp;
  /* The startup data block sits just past the environment's
     terminating null pointer.  */
  d = (void *) ++envp;

  /* If we are the bootstrap task started by the kernel,
     then after the environment pointers there is no Hurd
     data block; the argument strings start there.  */
  if ((void *) d != argv[0])
    {
      _hurd_init_dtable = d->dtable;
      _hurd_init_dtablesize = d->dtablesize;

      {
	/* Check if the stack we are now on is different from
	   the one described by _hurd_stack_{base,size}.  */
	char dummy;
	const vm_address_t newsp = (vm_address_t) &dummy;

	if (d->stack_size != 0 && (newsp < d->stack_base
				   || newsp - d->stack_base > d->stack_size))
	  /* The new stack pointer does not intersect with the
	     stack the exec server set up for us, so free that stack.  */
	  __vm_deallocate (__mach_task_self (), d->stack_base,
			   d->stack_size);
      }
    }

  if (__hurd_threadvar_stack_mask == 0)
    {
      /* We are not using cthreads, so we will have just a single
	 allocated area for the per-thread variables of the main user
	 thread.  */
      unsigned long int i;
      __hurd_threadvar_stack_offset
	= (unsigned long int) malloc (__hurd_threadvar_max
				      * sizeof (unsigned long int));
      if (__hurd_threadvar_stack_offset == 0)
	__libc_fatal ("Can't allocate single-threaded per-thread variables.");
      for (i = 0; i < __hurd_threadvar_max; ++i)
	((unsigned long int *) __hurd_threadvar_stack_offset)[i] = 0;
    }

  if ((void *) d != argv[0] && (d->portarray || d->intarray))
    /* Initialize library data structures, start signal processing,
       etc.  */
    _hurd_init (d->flags, argv,
		d->portarray, d->portarraysize,
		d->intarray, d->intarraysize);

  __libc_init (argc, argv, __environ);

#ifdef USE_NONOPTION_FLAGS
  /* This is a hack to make the special getopt in GNU libc working.  */
  __getopt_clean_environment (envp);
#endif

#ifdef SHARED
  __libc_global_ctors ();
#endif

  /* Force the compiler to keep this function's address around.  */
  (void) &init1;
}
/* Read a directory entry from DIRP.  Returns a pointer into DIRP's
   internal buffer (valid until the next call), or NULL at end of
   directory or on error (errno set).  */
struct dirent64 *
__readdir64 (DIR *dirp)
{
  struct dirent64 *dp;

  if (dirp == NULL)
    {
      errno = EINVAL;
      return NULL;
    }

  __libc_lock_lock (dirp->__lock);

  do
    {
      if (dirp->__ptr - dirp->__data >= dirp->__size)
	{
	  /* We've emptied out our buffer.  Refill it.  */
	  char *data = dirp->__data;
	  int nentries;
	  error_t err;

	  if (err = HURD_FD_PORT_USE (dirp->__fd,
				      __dir_readdir (port,
						     &data, &dirp->__size,
						     dirp->__entry_ptr,
						     -1, 0, &nentries)))
	    {
	      __hurd_fail (err);
	      dp = NULL;
	      break;
	    }

	  /* DATA now corresponds to entry index DIRP->__entry_ptr.  */
	  dirp->__entry_data = dirp->__entry_ptr;

	  if (data != dirp->__data)
	    {
	      /* The data was passed out of line, so our old buffer is
		 no longer useful.  Deallocate the old buffer and reset
		 our information for the new buffer.  */
	      __vm_deallocate (__mach_task_self (),
			       (vm_address_t) dirp->__data,
			       dirp->__allocation);
	      dirp->__data = data;
	      dirp->__allocation = round_page (dirp->__size);
	    }

	  /* Reset the pointer into the buffer.  */
	  dirp->__ptr = dirp->__data;

	  if (nentries == 0)
	    {
	      /* End of file.  */
	      dp = NULL;
	      break;
	    }

	  /* We trust the filesystem to return correct data and so we
	     ignore NENTRIES.  */
	}

      dp = (struct dirent64 *) dirp->__ptr;
      dirp->__ptr += dp->d_reclen;
      ++dirp->__entry_ptr;

      /* Loop to ignore deleted files.  */
    } while (dp->d_fileno == 0);

  __libc_lock_unlock (dirp->__lock);

  return dp;
}