/* Return the list of devices in the format provided by SIOCGIFCONF
   in IFR, but don't return more than AMOUNT bytes.  An AMOUNT of
   (vm_size_t) -1 -- the bit pattern a caller passing -1 produces --
   means there is no limit.  Returns 0 or a positive errno value.  */
error_t
S_pfinet_siocgifconf (io_t port,
		      vm_size_t amount,
		      char **ifr,
		      mach_msg_type_number_t *len)
{
  error_t err = 0;
  struct ifconf ifc;

  __mutex_lock (&global_lock);

  /* Start out pointing at the caller's buffer so that the amount == 0
     case below does not hand back an uninitialized pointer.  */
  ifc.ifc_buf = *ifr;

  /* vm_size_t is unsigned, so the original `amount < 0' test could
     never be true and the "no limit" case was dead code; test for the
     all-ones value a negative argument actually arrives as.  */
  if (amount == (vm_size_t) -1)
    {
      /* Get the needed buffer length.  */
      ifc.ifc_buf = NULL;
      ifc.ifc_len = 0;
      err = dev_ifconf ((char *) &ifc);
      if (err)
	{
	  __mutex_unlock (&global_lock);
	  /* dev_ifconf reports Linux-style negative errno values.  */
	  return -err;
	}
      amount = ifc.ifc_len;
    }
  else
    ifc.ifc_len = amount;

  if (amount > 0)
    {
      /* Possibly allocate a new buffer.  */
      if (*len < amount)
	{
	  ifc.ifc_buf = (char *) mmap (0, amount, PROT_READ|PROT_WRITE,
				       MAP_ANON, 0, 0);
	  if (ifc.ifc_buf == MAP_FAILED)
	    {
	      /* The original never checked for mmap failure and would
		 have passed MAP_FAILED to dev_ifconf.  */
	      __mutex_unlock (&global_lock);
	      *len = 0;
	      return ENOMEM;
	    }
	}
      else
	ifc.ifc_buf = *ifr;

      err = dev_ifconf ((char *) &ifc);
    }

  if (err)
    {
      *len = 0;
      /* Release the buffer if we mapped a fresh one above.  */
      if (ifc.ifc_buf != *ifr)
	munmap (ifc.ifc_buf, amount);
    }
  else
    {
      *len = ifc.ifc_len;
      *ifr = ifc.ifc_buf;
    }

  __mutex_unlock (&global_lock);

  /* Negate the Linux-style error as the early return above does; the
     original returned the raw negative value here.  */
  return -err;
}
/* Fetch the real, effective, and saved-set group IDs of the calling
   process.  Returns 0 on success; on error returns -1 with errno set.  */
int
__getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid)
{
  gid_t real, eff, saved;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err == 0)
    {
      if (_hurd_id.aux.ngids >= 1)
	{
	  real = _hurd_id.aux.gids[0];
	  /* With no effective gids, fall back to the real gid; with no
	     second auxiliary gid, the saved set-gid is the real gid.  */
	  eff = (_hurd_id.gen.ngids >= 1) ? _hurd_id.gen.gids[0] : real;
	  saved = (_hurd_id.aux.ngids >= 2) ? _hurd_id.aux.gids[1] : real;
	}
      else
	/* We do not even have a real GID.  */
	err = EGRATUITOUS;
    }

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  if (err)
    return __hurd_fail (err);

  /* Store only after the lock is dropped; these pointers may fault.  */
  *rgid = real;
  *egid = eff;
  *sgid = saved;
  return 0;
}
/* Get some sockaddr type of info.  */
/* Common worker for the SIOCGIF*ADDR family of ioctls: TYPE selects
   which of the four per-device addresses reported by inquire_device
   is copied into *ADDR as a struct sockaddr_in.  NOTE(review): the
   mapping of TYPE to slots in ADDRS is assumed to follow the
   siocgif_type enum ordering -- confirm against its definition.  */
static kern_return_t
siocgifXaddr (io_t port, ifname_t ifnam, sockaddr_t *addr,
	      enum siocgif_type type)
{
  struct sock_user *user = begin_using_socket_port (port);
  error_t err = 0;
  struct device *dev;
  struct sockaddr_in *sin = (struct sockaddr_in *) addr;
  uint32_t addrs[4];

  if (!user)
    return EOPNOTSUPP;

  /* NOTE(review): get_dev is presumed to acquire global_lock, matching
     the unconditional unlock below -- verify in its definition.  */
  dev = get_dev (ifnam);
  if (!dev)
    err = ENODEV;
  else if (user->sock->sk->family != AF_INET)
    /* Only AF_INET sockets carry these addresses.  */
    err = EINVAL;
  else
    {
      sin->sin_family = AF_INET;
      inquire_device (dev, &addrs[0], &addrs[1], &addrs[2], &addrs[3]);
      sin->sin_addr.s_addr = addrs[type];
    }

  __mutex_unlock (&global_lock);
  end_using_socket_port (user);
  return err;
}
/* 16 SIOCSIFFLAGS -- Set flags of a network interface.  */
kern_return_t
S_iioctl_siocsifflags (io_t port, ifname_t ifnam, short flags)
{
  error_t err;
  struct device *dev;
  struct sock_user *const user = begin_using_socket_port (port);

  if (user == NULL)
    return EOPNOTSUPP;

  dev = get_dev (ifnam);

  if (!user->isroot)
    err = EPERM;		/* Only root may change interface flags.  */
  else if (dev == NULL)
    err = ENODEV;
  else
    {
      /* Update the generic device flags first, then let the ethernet
	 layer react to the change.  */
      err = dev_change_flags (dev, flags);
      if (err == 0)
	err = ethernet_change_flags (dev, flags);
    }

  __mutex_unlock (&global_lock);
  end_using_socket_port (user);
  return err;
}
/* Fetch the real user ID, effective user ID, and saved-set user ID,
   of the calling process.  Returns 0 on success, -1 with errno set
   on failure.  */
int
__getresuid (uid_t *ruid, uid_t *euid, uid_t *suid)
{
  error_t err;
  uid_t real, eff, saved;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (!err)
    {
      if (_hurd_id.aux.nuids < 1)
	/* We do not even have a real UID.  */
	err = EGRATUITOUS;
      else
	{
	  real = _hurd_id.aux.uids[0];
	  /* No effective uids means the real uid is effective; no
	     second auxiliary uid means the saved set-uid is the real
	     uid as well.  */
	  eff = _hurd_id.gen.nuids < 1 ? real : _hurd_id.gen.uids[0];
	  saved = _hurd_id.aux.nuids < 2 ? real : _hurd_id.aux.uids[1];
	}
    }

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  /* The original unconditionally returned __hurd_fail (err), yielding
     -1 even on success; return 0 when there was no error, matching
     __getresgid.  */
  if (err)
    return __hurd_fail (err);

  /* Store the results only after releasing the lock and leaving the
     critical section: the user pointers may fault.  */
  *ruid = real;
  *euid = eff;
  *suid = saved;
  return 0;
}
/* Get the effective group ID of the calling process. */ gid_t DEFUN_VOID(__getegid) { error_t err; gid_t egid; HURD_CRITICAL_BEGIN; __mutex_lock (&_hurd_id.lock); if (err = _hurd_check_ids ()) { errno = err; egid = -1; } else if (_hurd_id.gen.ngids >= 1) egid = _hurd_id.gen.gids[0]; else if (_hurd_id.aux.ngids >= 1) /* We have no effective gids. Return the real gid. */ egid = _hurd_id.aux.gids[0]; else { /* We do not even have a real gid. */ errno = EGRATUITOUS; egid = -1; } __mutex_unlock (&_hurd_id.lock); HURD_CRITICAL_END; return egid; }
/* Get the real user ID of the calling process.
   Returns -1 with errno set on failure.  */
uid_t
DEFUN_VOID(__getuid)
{
  uid_t result = -1;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err != 0)
    errno = err;
  else if (_hurd_id.aux.nuids >= 1)
    /* The first auxiliary uid is the real uid.  */
    result = _hurd_id.aux.uids[0];
  else
    /* We do not even have a real uid.  */
    errno = EGRATUITOUS;

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  return result;
}
/* Get the real group ID of the calling process.
   Returns -1 with errno set on failure.  */
gid_t
__getgid ()
{
  gid_t result = -1;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err != 0)
    errno = err;
  else if (_hurd_id.aux.ngids >= 1)
    /* The first auxiliary gid is the real gid.  */
    result = _hurd_id.aux.gids[0];
  else
    /* We do not even have a real gid.  */
    errno = EGRATUITOUS;

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  return result;
}
/* 51 SIOCSIFMTU -- Set mtu of a network interface.  */
error_t
S_iioctl_siocsifmtu (io_t port, ifname_t ifnam, int mtu)
{
  struct sock_user *user = begin_using_socket_port (port);
  error_t err = 0;
  struct device *dev;

  if (!user)
    return EOPNOTSUPP;

  dev = get_dev (ifnam);
  if (!user->isroot)
    err = EPERM;
  /* This must be `else if': the original used a plain `if', so a
     non-root caller with a valid device and MTU fell through into the
     final branch and changed the MTU anyway, while still being told
     EPERM.  Chain the checks as S_iioctl_siocsifflags does.  */
  else if (!dev)
    err = ENODEV;
  else if (mtu <= 0)
    err = EINVAL;
  else
    {
      /* Let the driver adjust its MTU if it knows how; otherwise just
	 store the new value.  */
      if (dev->change_mtu)
	dev->change_mtu (dev, mtu);
      else
	dev->mtu = mtu;

      notifier_call_chain (&netdev_chain, NETDEV_CHANGEMTU, dev);
    }

  __mutex_unlock (&global_lock);
  end_using_socket_port (user);
  return err;
}
/* Open a directory stream on NAME.  Returns a malloc'd DIR on
   success, or NULL with errno set on failure.  The underlying
   descriptor is marked close-on-exec.  */
DIR *
__opendir (const char *name)
{
  DIR *dirp;
  int fd;
  struct hurd_fd *d;

  if (name[0] == '\0')
    {
      /* POSIX.1-1990 says an empty name gets ENOENT;
	 but `open' might like it fine.  */
      __set_errno (ENOENT);
      return NULL;
    }

  {
    /* Append trailing slash to directory name to force ENOTDIR
       if it's not a directory.  */
    size_t len = strlen (name);
    if (name[len - 1] == '/')
      fd = __open (name, O_RDONLY);
    else
      {
	char n[len + 2];
	memcpy (n, name, len);
	n[len] = '/';
	n[len + 1] = '\0';
	fd = __open (n, O_RDONLY);
      }
  }
  if (fd < 0)
    return NULL;

  dirp = (DIR *) malloc (sizeof (DIR));
  if (dirp == NULL)
    {
      /* Don't leak the descriptor on allocation failure.
	 NOTE(review): __close may clobber malloc's ENOMEM errno --
	 confirm whether callers care.  */
      __close (fd);
      return NULL;
    }

  /* Extract the pointer to the descriptor structure.  */
  __mutex_lock (&_hurd_dtable_lock);
  d = dirp->__fd = _hurd_dtable[fd];
  __mutex_unlock (&_hurd_dtable_lock);

  /* Set the descriptor to close on exec.  */
  __spin_lock (&d->port.lock);
  d->flags |= FD_CLOEXEC;
  __spin_unlock (&d->port.lock);

  /* No directory data buffered yet.  */
  dirp->__data = dirp->__ptr = NULL;
  dirp->__entry_data = dirp->__entry_ptr = 0;
  dirp->__allocation = 0;
  dirp->__size = 0;

  __libc_lock_init (dirp->__lock);

  return dirp;
}
/* Register PREEMPTOR on the process-global chain of signal
   preemptors, giving it a chance at the signals in its set before
   normal delivery, and add those signals to the global
   preempted-signals set.  */
void
hurd_preempt_signals (struct hurd_signal_preemptor *preemptor)
{
  __mutex_lock (&_hurd_siglock);
  /* Push onto the head of the chain.  */
  preemptor->next = _hurdsig_preemptors;
  _hurdsig_preemptors = preemptor;
  _hurdsig_preempted_set |= preemptor->signals;
  __mutex_unlock (&_hurd_siglock);
}
/* Release the reference on the referenced socket.  Used as the
   cleanup routine for socket ports; ARG is a struct sock_user *.  */
void
clean_socketport (void *arg)
{
  struct sock_user *const user = arg;

  /* sock_release must run under the big pfinet lock.  */
  __mutex_lock (&global_lock);
  sock_release (user->sock);
  __mutex_unlock (&global_lock);
}
/* Store up to N of the process's supplementary group IDs in GIDSET
   and return how many were stored; with N == 0 just return the count.
   Fails with EINVAL if N is negative or smaller than the count.  */
int
__getgroups (int n, gid_t *gidset)
{
  int ngids;
  error_t err;
  void *crit;

  if (n < 0)
    return __hurd_fail (EINVAL);

  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err != 0)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  ngids = _hurd_id.gen.ngids;

  if (n == 0)
    {
      /* The caller only wants the count.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
    }
  else
    {
      /* Copy the gids onto stack storage and then release the idlock.  */
      gid_t gids[ngids];
      memcpy (gids, _hurd_id.gen.gids, sizeof gids);
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);

      /* Now that the lock is released, we can safely copy the group
	 set into the user's array, which might fault.  */
      if (ngids > n)
	return __hurd_fail (EINVAL);
      memcpy (gidset, gids, ngids * sizeof (gid_t));
    }

  return ngids;
}
/* Handle a msg_set_init_int request: set one of the process's
   integer-valued initialization settings to VALUE.  Returns 0 or
   EINVAL for an unknown WHICH.  */
static kern_return_t
set_int (int which, int value)
{
  switch (which)
    {
    case INIT_UMASK:
      _hurd_umask = value;
      return 0;

      /* These are pretty odd things to do.  But you asked for it.  */
    case INIT_SIGMASK:
      {
	/* NOTE(review): this relies on _hurd_thread_sigstate returning
	   with ss->lock held, since it is unlocked below -- confirm
	   against the version of _hurd_thread_sigstate in use.  */
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	ss->blocked = value;
	__mutex_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGPENDING:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	ss->pending = value;
	__mutex_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGIGN:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	int sig;
	const sigset_t ign = value;
	/* Ignore every signal in IGN, and revert signals no longer in
	   the set from SIG_IGN back to SIG_DFL.  */
	for (sig = 1; sig < NSIG; ++sig)
	  {
	    if (__sigismember (&ign, sig))
	      ss->actions[sig].sa_handler = SIG_IGN;
	    else if (ss->actions[sig].sa_handler == SIG_IGN)
	      ss->actions[sig].sa_handler = SIG_DFL;
	  }
	__mutex_unlock (&ss->lock);
	return 0;
      }
    default:
      return EINVAL;
    }
}
/* Return the sigstate structure for THREAD, creating and initializing
   a fresh one if none exists yet.  A THREAD of MACH_PORT_NULL creates
   the process-global sigstate, which is NOT linked onto the
   _hurd_sigstates list and is reached via _hurd_global_sigstate.  */
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;
  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      if (thread == MACH_PORT_NULL)
	{
	  /* Process-wide sigstate, use the system defaults.  */
	  default_sigaction (ss->actions);

	  /* The global sigstate is not added to the _hurd_sigstates
	     list.  It is created with _hurd_thread_sigstate (MACH_PORT_NULL)
	     but should be accessed through _hurd_global_sigstate.  */
	}
      else
	{
	  /* Use the global actions as a default for new threads.  */
	  struct hurd_sigstate *s = _hurd_global_sigstate;
	  if (s)
	    {
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);

	  ss->next = _hurd_sigstates;
	  _hurd_sigstates = ss;
	}
    }
  __mutex_unlock (&_hurd_siglock);
  return ss;
}
/* Store up to N of the process's effective uids in UIDSET and return
   how many were stored; with N == 0 just return the count available.  */
int
geteuids (int n, uid_t *uidset)
{
  int navail;
  error_t err;
  void *crit;

  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err != 0)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  navail = _hurd_id.gen.nuids;

  if (n == 0)
    {
      /* The caller only asked how many there are.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
    }
  else
    {
      /* Copy the uids onto stack storage and then release the idlock.  */
      uid_t buf[navail];
      memcpy (buf, _hurd_id.gen.uids, sizeof buf);
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);

      /* Now that the lock is released, we can safely copy the uid set
	 into the user's array, which might fault; truncate the copy
	 (and the return value) to N entries.  */
      if (navail > n)
	navail = n;
      memcpy (uidset, buf, navail * sizeof (uid_t));
    }

  return navail;
}
/* Return the sigstate structure for THREAD, creating and initializing
   a fresh one if none exists yet.  New threads inherit their
   sigaction vector from the designated signal-receiving thread.  */
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;
  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate thread sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      /* No alternate signal stack until one is installed.  */
      ss->sigaltstack.ss_flags |= SS_DISABLE;
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      /* Initialize the sigaction vector from the default signal receiving
	 thread's state, and its from the system defaults.  */
      if (thread == _hurd_sigthread)
	default_sigaction (ss->actions);
      else
	{
	  struct hurd_sigstate *s;
	  for (s = _hurd_sigstates; s != NULL; s = s->next)
	    if (s->thread == _hurd_sigthread)
	      break;
	  if (s)
	    {
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);
	}

      ss->next = _hurd_sigstates;
      _hurd_sigstates = ss;
    }
  __mutex_unlock (&_hurd_siglock);
  return ss;
}
/* Handle a msg_get_init_int request: fetch one of the process's
   integer-valued initialization settings into *VALUE.  Returns 0 or
   EINVAL for an unknown WHICH.  */
static kern_return_t
get_int (int which, int *value)
{
  switch (which)
    {
    case INIT_UMASK:
      *value = _hurd_umask;
      return 0;
    case INIT_SIGMASK:
      {
	/* NOTE(review): this relies on _hurd_thread_sigstate returning
	   with ss->lock held, since it is unlocked below -- confirm
	   against the version of _hurd_thread_sigstate in use.  */
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	*value = ss->blocked;
	__mutex_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGPENDING:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	*value = ss->pending;
	__mutex_unlock (&ss->lock);
	return 0;
      }
    case INIT_SIGIGN:
      {
	struct hurd_sigstate *ss = _hurd_thread_sigstate (_hurd_sigthread);
	sigset_t ign;
	int sig;
	/* Build the set of signals whose handler is SIG_IGN.  */
	__sigemptyset (&ign);
	for (sig = 1; sig < NSIG; ++sig)
	  if (ss->actions[sig].sa_handler == SIG_IGN)
	    __sigaddset (&ign, sig);
	__mutex_unlock (&ss->lock);
	/* NOTE(review): assumes sigset_t is assignable to int here.  */
	*value = ign;
	return 0;
      }
    default:
      return EINVAL;
    }
}
/* Remove PREEMPTOR from the global chain of signal preemptors,
   recomputing the global preempted-signals set if PREEMPTOR may have
   been the only preemptor for some of its signals.  It is a fatal
   error if PREEMPTOR is not on the chain.  */
void
hurd_unpreempt_signals (struct hurd_signal_preemptor *preemptor)
{
  struct hurd_signal_preemptor **p;
  /* Accumulates the signals preempted by entries BEFORE the one we
     are removing; if they already cover PREEMPTOR's signals, the
     global set need not change.  */
  sigset_t preempted = 0;

  __mutex_lock (&_hurd_siglock);

  p = &_hurdsig_preemptors;
  while (*p)
    if (*p == preemptor)
      {
	/* Found it; take it off the chain.  */
	*p = (*p)->next;
	if ((preemptor->signals & preempted) != preemptor->signals)
	  {
	    /* This might have been the only preemptor for some of
	       those signals, so we must collect the full mask from
	       the others.  */
	    struct hurd_signal_preemptor *pp;
	    for (pp = *p; pp; pp = pp->next)
	      preempted |= pp->signals;
	    _hurdsig_preempted_set = preempted;
	  }
	__mutex_unlock (&_hurd_siglock);
	return;
      }
    else
      {
	preempted |= (*p)->signals;
	p = &(*p)->next;
      }

  __mutex_unlock (&_hurd_siglock);

  /* Avoid deadlock during death rattle.  */
  assert (! "removing absent preemptor");
}
/* Extend the process's data space by INCREMENT.  If INCREMENT is
   negative, shrink data space by - INCREMENT.  Return the address of
   the start of the new data space, or -1 for errors.  */
void *
__sbrk (intptr_t increment)
{
  void *start;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_brk_lock);

  /* On success the old break is what we hand back.  */
  start = (void *) _hurd_brk;
  if (increment != 0 && _hurd_set_brk (_hurd_brk + increment) < 0)
    start = (void *) -1;

  __mutex_unlock (&_hurd_brk_lock);
  HURD_CRITICAL_END;

  return start;
}
/* Removes a segment attachment.  Stores its size in *SIZE and returns
   0 if found, or returns EINVAL otherwise.  */
error_t
__sysvshm_remove (void *addr, size_t *size)
{
  struct sysvshm_attach **pshm;

  __mutex_lock (&sysvshm_lock);
  for (pshm = &attach_list; *pshm != NULL; pshm = &(*pshm)->next)
    {
      struct sysvshm_attach *shm = *pshm;
      if (shm->addr == addr)
	{
	  /* Unlink the record and report its size.  */
	  *pshm = shm->next;
	  *size = shm->size;
	  __mutex_unlock (&sysvshm_lock);
	  /* The record was malloc'd by __sysvshm_add; the original
	     never freed it, leaking memory on every detach.  */
	  free (shm);
	  return 0;
	}
    }
  __mutex_unlock (&sysvshm_lock);
  return EINVAL;
}
/* Return the maximum number of file descriptors the current process
   could possibly have (until it raises the resource limit).  */
int
__getdtablesize (void)
{
  rlim_t cur;

  /* Snapshot the soft RLIMIT_NOFILE limit under its lock.  */
  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_rlimit_lock);
  cur = _hurd_rlimits[RLIMIT_NOFILE].rlim_cur;
  __mutex_unlock (&_hurd_rlimit_lock);
  HURD_CRITICAL_END;

  /* RLIM_INFINITY is not meaningful to our caller.  -1 is a good
     choice because `sysconf (_SC_OPEN_MAX)' calls us, and -1 from
     sysconf means "no determinable limit".  */
  if (cur == RLIM_INFINITY)
    return -1;
  return (int) cur;
}
/* Adds a segment attachment covering SIZE bytes at ADDR.  Returns 0,
   or errno (from malloc) on allocation failure.  */
error_t
__sysvshm_add (void *addr, size_t size)
{
  struct sysvshm_attach *rec = malloc (sizeof *rec);

  if (rec == NULL)
    return errno;

  /* Fill in the record before taking the lock; no other thread can
     see it until it is linked onto the list.  */
  rec->addr = addr;
  rec->size = size;

  __mutex_lock (&sysvshm_lock);
  rec->next = attach_list;
  attach_list = rec;
  __mutex_unlock (&sysvshm_lock);

  return 0;
}
/* 17 SIOCGIFFLAGS -- Get flags of a network interface.  */
kern_return_t
S_iioctl_siocgifflags (io_t port, char *name, short *flags)
{
  struct device *dev = get_dev (name);
  error_t err = 0;

  if (dev == NULL)
    err = ENODEV;
  else
    *flags = dev->flags;

  __mutex_unlock (&global_lock);
  return err;
}
/* Destroy a sigstate structure.  Called by libpthread just before the
   corresponding thread is terminated (the kernel thread port must
   remain valid until this function is called.)  */
void
_hurd_sigstate_delete (thread_t thread)
{
  struct hurd_sigstate *found = NULL;
  struct hurd_sigstate **link;

  __mutex_lock (&_hurd_siglock);
  /* Walk the list via link pointers so the match can be unlinked
     in place.  */
  for (link = &_hurd_sigstates; *link != NULL; link = &(*link)->next)
    if ((*link)->thread == thread)
      {
	found = *link;
	*link = found->next;
	break;
      }
  __mutex_unlock (&_hurd_siglock);

  /* Free outside the lock.  */
  if (found != NULL)
    free (found);
}
/* Mark signal SIGNO pending on *SS (or on the calling thread's
   sigstate if SS is null), recording SIGCODE and SIGERROR as its
   data, then kick the signal thread so it notices.
   NOTE(review): SS->lock appears to be held on entry -- it is
   unlocked here without ever being taken -- confirm the callers'
   locking contract.  */
void
_hurd_raise_signal (struct hurd_sigstate *ss,
		    int signo, int sigcode, int sigerror)
{
  if (ss == NULL)
    ss = _hurd_self_sigstate ();

  /* Mark SIGNO as pending to be delivered.  */
  __sigaddset (&ss->pending, signo);
  ss->pending_data[signo].code = sigcode;
  ss->pending_data[signo].error = sigerror;

  __mutex_unlock (&ss->lock);

  /* Send a message to the signal thread so it
     will wake up and check for pending signals.  */
  __sig_post (_hurd_msgport, 0, __mach_task_self ());
}
/* 100 SIOCGIFINDEX -- Get index number of a network interface.  */
error_t
S_iioctl_siocgifindex (io_t port, ifname_t ifnam, int *index)
{
  struct device *dev = get_dev (ifnam);
  error_t err = 0;

  if (dev == NULL)
    err = ENODEV;
  else
    *index = dev->ifindex;

  __mutex_unlock (&global_lock);
  return err;
}
/* 23 SIOCGIFMETRIC -- Get metric of a network interface.  */
kern_return_t
S_iioctl_siocgifmetric (io_t port, ifname_t ifnam, int *metric)
{
  struct device *dev = get_dev (ifnam);
  error_t err = 0;

  if (dev == NULL)
    err = ENODEV;
  else
    /* Interface metrics are not supported; report zero.  */
    *metric = 0;

  __mutex_unlock (&global_lock);
  return err;
}
/* Create a new session with the calling process as its leader.
   The process group IDs of the session and the calling process
   are set to the process ID of the calling process, which is returned.  */
pid_t
__setsid (void)
{
  error_t err;
  unsigned int stamp;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_dtable_lock);

  /* Record the current change-notification stamp so we can tell below
     when the signal thread has processed the resulting pgrp change.  */
  stamp = _hurd_pids_changed_stamp; /* Atomic fetch.  */

  /* Tell the proc server we want to start a new session.  */
  err = __USEPORT (PROC, __proc_setsid (port));
  if (err)
    __mutex_unlock (&_hurd_dtable_lock);
  else
    {
      /* Punt our current ctty, and update the dtable accordingly.  We hold
	 the dtable lock from before the proc_setsid call through clearing
	 the cttyid port and processing the dtable, so that we can be sure
	 that it's all done by the time the signal thread processes the
	 pgrp change notification.
	 NOTE(review): _hurd_locked_install_cttyid is presumed to release
	 the dtable lock -- there is no unlock on this path otherwise;
	 confirm against its definition.  */
      _hurd_locked_install_cttyid (MACH_PORT_NULL);

      /* Synchronize with the signal thread to make sure we have received
	 and processed proc_newids before returning to the user.  This is
	 necessary to ensure that _hurd_pgrp (and thus the value returned
	 by `getpgrp ()' in other threads) has been updated before we
	 return.  */
      while (_hurd_pids_changed_stamp == stamp)
	{
#ifdef noteven
	  /* XXX we have no need for a mutex, but cthreads demands one.  */
	  __condition_wait (&_hurd_pids_changed_sync, NULL);
#else
	  __swtch_pri (0);
#endif
	}
    }

  HURD_CRITICAL_END;

  return err ? __hurd_fail (err) : _hurd_pgrp;
}
/* Put the soft and hard limits for RESOURCE in *RLIMITS.
   Returns 0 if successful, -1 if not (and sets errno).  */
int
__getrlimit (enum __rlimit_resource resource, struct rlimit *rlimits)
{
  struct rlimit snapshot;

  /* Validate arguments before touching the lock.  */
  if (rlimits == NULL || (unsigned int) resource >= RLIMIT_NLIMITS)
    {
      errno = EINVAL;
      return -1;
    }

  /* Copy under the lock; store afterwards, since *RLIMITS may fault.  */
  __mutex_lock (&_hurd_rlimit_lock);
  snapshot = _hurd_rlimits[resource];
  __mutex_unlock (&_hurd_rlimit_lock);

  *rlimits = snapshot;
  return 0;
}