/* Return the real user ID of the calling process, or -1 with errno
   set on failure.  */
uid_t
DEFUN_VOID(__getuid)
{
  uid_t result = -1;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    errno = err;
  else if (_hurd_id.aux.nuids >= 1)
    /* The first auxiliary ID is the real uid.  */
    result = _hurd_id.aux.uids[0];
  else
    /* We do not even have a real uid.  */
    errno = EGRATUITOUS;

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  return result;
}
/* Fetch the real user ID, effective user ID, and saved-set user ID,
   of the calling process.  Returns 0 on success, or -1 with errno set
   on failure.  */
int
__getresuid (uid_t *ruid, uid_t *euid, uid_t *suid)
{
  error_t err;
  uid_t real, eff, saved;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (!err)
    {
      if (_hurd_id.aux.nuids < 1)
	/* We do not even have a real UID.  */
	err = EGRATUITOUS;
      else
	{
	  /* Snapshot the IDs into locals while the lock is held.  The
	     caller's pointers are dereferenced only after the lock is
	     released, so a fault on a bad user pointer cannot leave
	     _hurd_id.lock held inside the critical section (this is
	     the same discipline __getresgid uses).  */
	  real = _hurd_id.aux.uids[0];
	  /* Fall back to the real ID when the set is too short.  */
	  eff = _hurd_id.gen.nuids < 1 ? real : _hurd_id.gen.uids[0];
	  saved = _hurd_id.aux.nuids < 2 ? real : _hurd_id.aux.uids[1];
	}
    }

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  if (err)
    return __hurd_fail (err);

  *ruid = real;
  *euid = eff;
  *suid = saved;
  return 0;
}
/* Get the effective group ID of the calling process. */ gid_t DEFUN_VOID(__getegid) { error_t err; gid_t egid; HURD_CRITICAL_BEGIN; __mutex_lock (&_hurd_id.lock); if (err = _hurd_check_ids ()) { errno = err; egid = -1; } else if (_hurd_id.gen.ngids >= 1) egid = _hurd_id.gen.gids[0]; else if (_hurd_id.aux.ngids >= 1) /* We have no effective gids. Return the real gid. */ egid = _hurd_id.aux.gids[0]; else { /* We do not even have a real gid. */ errno = EGRATUITOUS; egid = -1; } __mutex_unlock (&_hurd_id.lock); HURD_CRITICAL_END; return egid; }
/* Fetch the real group ID, effective group ID, and saved-set group
   ID, of the calling process.  Returns 0 on success, or -1 with errno
   set on failure.  */
int
__getresgid (gid_t *rgid, gid_t *egid, gid_t *sgid)
{
  gid_t real, eff, saved;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err == 0 && _hurd_id.aux.ngids < 1)
    /* We do not even have a real GID.  */
    err = EGRATUITOUS;
  if (err == 0)
    {
      /* Snapshot the IDs while the lock is held; the caller's
	 pointers (which might fault) are written only after
	 unlocking.  */
      real = _hurd_id.aux.gids[0];
      eff = _hurd_id.gen.ngids >= 1 ? _hurd_id.gen.gids[0] : real;
      saved = _hurd_id.aux.ngids >= 2 ? _hurd_id.aux.gids[1] : real;
    }

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  if (err)
    return __hurd_fail (err);

  *rgid = real;
  *egid = eff;
  *sgid = saved;
  return 0;
}
/* Return the real group ID of the calling process, or -1 with errno
   set on failure.  */
gid_t
__getgid ()
{
  gid_t result = -1;
  error_t err;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    errno = err;
  else if (_hurd_id.aux.ngids >= 1)
    /* The first auxiliary ID is the real gid.  */
    result = _hurd_id.aux.gids[0];
  else
    /* We do not even have a real gid.  */
    errno = EGRATUITOUS;

  __mutex_unlock (&_hurd_id.lock);
  HURD_CRITICAL_END;

  return result;
}
/* Open a directory stream on NAME.  Returns a malloc'd DIR on
   success, or NULL with errno set on failure.  */
DIR *
__opendir (const char *name)
{
  DIR *dirp;
  int fd;
  struct hurd_fd *d;

  if (name[0] == '\0')
    {
      /* POSIX.1-1990 says an empty name gets ENOENT;
	 but `open' might like it fine.  */
      __set_errno (ENOENT);
      return NULL;
    }

  {
    /* Append trailing slash to directory name to force ENOTDIR
       if it's not a directory.  */
    size_t len = strlen (name);
    if (name[len - 1] == '/')
      fd = __open (name, O_RDONLY);
    else
      {
	/* Build "NAME/" in a stack VLA.  NOTE(review): LEN is the
	   caller's string length, so a very long NAME grows the stack
	   by that much here.  */
	char n[len + 2];
	memcpy (n, name, len);
	n[len] = '/';
	n[len + 1] = '\0';
	fd = __open (n, O_RDONLY);
      }
  }
  if (fd < 0)
    return NULL;

  dirp = (DIR *) malloc (sizeof (DIR));
  if (dirp == NULL)
    {
      /* Don't leak the descriptor on allocation failure.  */
      __close (fd);
      return NULL;
    }

  /* Extract the pointer to the descriptor structure.  */
  __mutex_lock (&_hurd_dtable_lock);
  d = dirp->__fd = _hurd_dtable[fd];
  __mutex_unlock (&_hurd_dtable_lock);

  /* Set the descriptor to close on exec.  */
  __spin_lock (&d->port.lock);
  d->flags |= FD_CLOEXEC;
  __spin_unlock (&d->port.lock);

  /* No directory data has been read yet.  */
  dirp->__data = dirp->__ptr = NULL;
  dirp->__entry_data = dirp->__entry_ptr = 0;
  dirp->__allocation = 0;
  dirp->__size = 0;

  __libc_lock_init (dirp->__lock);

  return dirp;
}
/* Install PREEMPTOR at the head of the global preemptor chain and
   record its signals in the preempted set.  */
void
hurd_preempt_signals (struct hurd_signal_preemptor *preemptor)
{
  __mutex_lock (&_hurd_siglock);

  /* Push onto the front of the chain.  */
  preemptor->next = _hurdsig_preemptors;
  _hurdsig_preemptors = preemptor;

  /* These signals now have at least one preemptor.  */
  _hurdsig_preempted_set |= preemptor->signals;

  __mutex_unlock (&_hurd_siglock);
}
/* Release the reference on the referenced socket.  */
void
clean_socketport (void *arg)
{
  struct sock_user *const u = arg;

  /* sock_release must run under the global pfinet lock.  */
  __mutex_lock (&global_lock);
  sock_release (u->sock);
  __mutex_unlock (&global_lock);
}
/* Mach exception-port server routine: translate an exception raised
   on THREAD into a POSIX signal and post it to that thread's
   sigstate.  Returns KERN_SUCCESS, or EPERM for forged messages.  */
kern_return_t
_S_catch_exception_raise (mach_port_t port,
			  thread_t thread,
			  task_t task,
			  int exception,
			  int code,
			  int subcode)
{
  int signo, sigcode, error;
  struct hurd_sigstate *ss;

  if (task != __mach_task_self ())
    /* The sender wasn't the kernel.  */
    return EPERM;

  /* Call the machine-dependent function to translate the Mach
     exception codes into a signal number and subcode.  */
  _hurd_exception2signal (exception, code, subcode,
			  &signo, &sigcode, &error);

  /* Find the sigstate structure for the faulting thread.  */
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;
  __mutex_unlock (&_hurd_siglock);
  if (ss == NULL)
    ss = _hurd_thread_sigstate (thread); /* Allocate a fresh one.  */

  if (__spin_lock_locked (&ss->lock.held))
    /* Oops.  The thread faulted with its sigstate lock held.
       Bad scene.  What to do?  */
    ;				/* XXX */
  else
    __mutex_lock (&ss->lock);

  /* Post the signal.  NOTE(review): on the XXX path above the signal
     is posted without SS->lock having been acquired here -- confirm
     _hurd_internal_post_signal's locking expectations.  */
  _hurd_internal_post_signal (ss, signo, sigcode, error,
			      MACH_PORT_NULL, MACH_MSG_TYPE_PORT_SEND);

  return KERN_SUCCESS;
}
/* Return the sigstate structure for THREAD, creating one if it does
   not yet exist.  Called with THREAD == MACH_PORT_NULL this creates
   the process-global sigstate, which is NOT linked into the
   _hurd_sigstates list.  Aborts via __libc_fatal on out-of-memory.  */
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;

  __mutex_lock (&_hurd_siglock);

  /* Reuse an existing state for this thread if there is one.  */
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;

  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      if (thread == MACH_PORT_NULL)
	{
	  /* Process-wide sigstate, use the system defaults.  */
	  default_sigaction (ss->actions);

	  /* The global sigstate is not added to the _hurd_sigstates
	     list.  It is created with _hurd_thread_sigstate (MACH_PORT_NULL)
	     but should be accessed through _hurd_global_sigstate.  */
	}
      else
	{
	  /* Use the global actions as a default for new threads.  */
	  struct hurd_sigstate *s = _hurd_global_sigstate;
	  if (s)
	    {
	      /* Copy the global actions under their lock.  */
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);

	  /* Prepend the new state to the per-thread list.  */
	  ss->next = _hurd_sigstates;
	  _hurd_sigstates = ss;
	}
    }

  __mutex_unlock (&_hurd_siglock);

  return ss;
}
/* Return the sigstate structure for THREAD, creating one if it does
   not yet exist.  New states for ordinary threads inherit the actions
   of the designated signal-receiving thread (_hurd_sigthread), whose
   own state gets the system defaults.  Aborts via __libc_fatal on
   out-of-memory.  */
struct hurd_sigstate *
_hurd_thread_sigstate (thread_t thread)
{
  struct hurd_sigstate *ss;

  __mutex_lock (&_hurd_siglock);

  /* Reuse an existing state for this thread if there is one.  */
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;

  if (ss == NULL)
    {
      ss = malloc (sizeof (*ss));
      if (ss == NULL)
	__libc_fatal ("hurd: Can't allocate thread sigstate\n");
      ss->thread = thread;
      __spin_lock_init (&ss->lock);

      /* Initialize default state.  */
      __sigemptyset (&ss->blocked);
      __sigemptyset (&ss->pending);
      memset (&ss->sigaltstack, 0, sizeof (ss->sigaltstack));
      /* The alternate signal stack starts out disabled.  */
      ss->sigaltstack.ss_flags |= SS_DISABLE;
      ss->preemptors = NULL;
      ss->suspended = MACH_PORT_NULL;
      ss->intr_port = MACH_PORT_NULL;
      ss->context = NULL;

      /* Initialize the sigaction vector from the default signal receiving
	 thread's state, and its from the system defaults.  */
      if (thread == _hurd_sigthread)
	default_sigaction (ss->actions);
      else
	{
	  /* Find the signal thread's existing state, if any.  */
	  struct hurd_sigstate *s;
	  for (s = _hurd_sigstates; s != NULL; s = s->next)
	    if (s->thread == _hurd_sigthread)
	      break;
	  if (s)
	    {
	      /* Copy its actions under its lock.  */
	      __spin_lock (&s->lock);
	      memcpy (ss->actions, s->actions, sizeof (s->actions));
	      __spin_unlock (&s->lock);
	    }
	  else
	    default_sigaction (ss->actions);
	}

      /* Prepend the new state to the global list.  */
      ss->next = _hurd_sigstates;
      _hurd_sigstates = ss;
    }

  __mutex_unlock (&_hurd_siglock);

  return ss;
}
/* Return the list of devices in the format provided by SIOCGIFCONF
   in IFR, but don't return more than AMOUNT bytes.  If AMOUNT is
   negative (i.e. -1 cast to vm_size_t), there is no limit.  Returns a
   positive error_t, or 0 on success.  */
error_t
S_pfinet_siocgifconf (io_t port,
		      vm_size_t amount,
		      char **ifr,
		      mach_msg_type_number_t *len)
{
  error_t err = 0;
  struct ifconf ifc;

  __mutex_lock (&global_lock);

  /* vm_size_t is unsigned, so `amount < 0' can never be true; test
     the sign bit through a signed type so a caller-passed -1 ("no
     limit") is actually recognized.  */
  if ((long) amount < 0)
    {
      /* Get the needed buffer length.  */
      ifc.ifc_buf = NULL;
      ifc.ifc_len = 0;
      err = dev_ifconf ((char *) &ifc);
      if (err)
	{
	  __mutex_unlock (&global_lock);
	  /* dev_ifconf returns a Linux-style negative errno; negate
	     it into a positive error_t.  */
	  return -err;
	}
      amount = ifc.ifc_len;
    }
  else
    ifc.ifc_len = amount;

  if (amount > 0)
    {
      /* Possibly allocate a new buffer.  */
      if (*len < amount)
	{
	  ifc.ifc_buf = (char *) mmap (0, amount, PROT_READ|PROT_WRITE,
				       MAP_ANON, 0, 0);
	  if (ifc.ifc_buf == MAP_FAILED)
	    {
	      /* Don't hand dev_ifconf (or munmap below) a bogus
		 buffer.  */
	      __mutex_unlock (&global_lock);
	      *len = 0;
	      return ENOMEM;
	    }
	}
      else
	ifc.ifc_buf = *ifr;

      /* Negate the Linux-style return so ERR is a positive error_t,
	 consistent with the early-return path above.  */
      err = -dev_ifconf ((char *) &ifc);
    }

  if (err)
    {
      *len = 0;
      /* Release the buffer if we mapped one ourselves.  */
      if (ifc.ifc_buf != *ifr)
	munmap (ifc.ifc_buf, amount);
    }
  else
    {
      *len = ifc.ifc_len;
      *ifr = ifc.ifc_buf;
    }

  __mutex_unlock (&global_lock);
  return err;
}
/* Extend the process's data space by INCREMENT bytes (shrink it when
   INCREMENT is negative).  Return the address where the data space
   previously ended, or (void *) -1 on error.  */
void *
__sbrk (intptr_t increment)
{
  void *old_break;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_brk_lock);

  /* The previous break is the success return value.  */
  old_break = (void *) _hurd_brk;
  if (increment != 0 && _hurd_set_brk (_hurd_brk + increment) < 0)
    old_break = (void *) -1;

  __mutex_unlock (&_hurd_brk_lock);
  HURD_CRITICAL_END;

  return old_break;
}
/* Return the maximum number of file descriptors the current process
   could possibly have (until it raises the resource limit).  */
int
__getdtablesize (void)
{
  rlim_t lim;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_rlimit_lock);
  lim = _hurd_rlimits[RLIMIT_NOFILE].rlim_cur;
  __mutex_unlock (&_hurd_rlimit_lock);
  HURD_CRITICAL_END;

  /* RLIM_INFINITY is not meaningful to our caller.  -1 is a good
     choice because `sysconf (_SC_OPEN_MAX)' calls us, and -1 from
     sysconf means "no determinable limit".  */
  if (lim == RLIM_INFINITY)
    return -1;
  return (int) lim;
}
/* Truncate NAME to IFNAMSIZ-1 bytes, take the global lock and find
   the device with this name.  Returns the device, or NULL if none
   matches.  NOTE: returns with GLOBAL_LOCK held in either case; the
   caller is responsible for releasing it.  */
struct device *get_dev (char *name)
{
  char ifname[16];
  struct device *dev;

  /* Copy a fixed IFNAMSIZ-1 bytes and force NUL termination.
     NOTE(review): this reads IFNAMSIZ-1 bytes regardless of NAME's
     string length -- assumes NAME points at a buffer at least that
     large (true for MiG ifname_t arguments); confirm for any other
     caller.  */
  memcpy (ifname, name, IFNAMSIZ-1);
  ifname[IFNAMSIZ-1] = 0;

  __mutex_lock (&global_lock);

  /* Linear scan of the global interface list.  */
  for (dev = dev_base; dev; dev = dev->next)
    if (strcmp (dev->name, ifname) == 0)
      break;

  return dev;
}
/* Adds a segment attachment.  Returns 0 on success, or the errno
   from a failed allocation.  */
error_t
__sysvshm_add (void *addr, size_t size)
{
  struct sysvshm_attach *rec;

  rec = malloc (sizeof (*rec));
  if (rec == NULL)
    return errno;

  /* The record is private until it is linked in.  */
  rec->addr = addr;
  rec->size = size;

  /* Push it onto the attachment list.  */
  __mutex_lock (&sysvshm_lock);
  rec->next = attach_list;
  attach_list = rec;
  __mutex_unlock (&sysvshm_lock);

  return 0;
}
/* Destroy a sigstate structure.  Called by libpthread just before the
 * corresponding thread is terminated (the kernel thread port must remain
 * valid until this function is called.) */
void
_hurd_sigstate_delete (thread_t thread)
{
  struct hurd_sigstate **link;
  struct hurd_sigstate *found = NULL;

  /* Unlink the matching sigstate, if any, under the list lock.  */
  __mutex_lock (&_hurd_siglock);
  for (link = &_hurd_sigstates; *link != NULL; link = &(*link)->next)
    if ((*link)->thread == thread)
      {
	found = *link;
	*link = found->next;
	break;
      }
  __mutex_unlock (&_hurd_siglock);

  /* Free outside the lock; free (NULL) is a no-op.  */
  free (found);
}
/* Create a new session with the calling process as its leader.
   The process group IDs of the session and the calling process
   are set to the process ID of the calling process, which is returned.  */
pid_t
__setsid (void)
{
  error_t err;
  unsigned int stamp;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_dtable_lock);

  /* Remember the current pgrp-change generation so we can tell below
     when the signal thread has processed the resulting notification.  */
  stamp = _hurd_pids_changed_stamp; /* Atomic fetch.  */

  /* Tell the proc server we want to start a new session.  */
  err = __USEPORT (PROC, __proc_setsid (port));
  if (err)
    __mutex_unlock (&_hurd_dtable_lock);
  else
    {
      /* Punt our current ctty, and update the dtable accordingly.  We
	 hold the dtable lock from before the proc_setsid call through
	 clearing the cttyid port and processing the dtable, so that we
	 can be sure that it's all done by the time the signal thread
	 processes the pgrp change notification.
	 NOTE(review): the dtable lock is not released on this path
	 here, so presumably _hurd_locked_install_cttyid drops it --
	 confirm against its definition.  */
      _hurd_locked_install_cttyid (MACH_PORT_NULL);

      /* Synchronize with the signal thread to make sure we have
	 received and processed proc_newids before returning to the
	 user.  This is necessary to ensure that _hurd_pgrp (and thus
	 the value returned by `getpgrp ()' in other threads) has been
	 updated before we return.  */
      while (_hurd_pids_changed_stamp == stamp)
	{
#ifdef noteven
	  /* XXX we have no need for a mutex, but cthreads demands one.  */
	  __condition_wait (&_hurd_pids_changed_sync, NULL);
#else
	  /* Spin, yielding the processor on each iteration.  */
	  __swtch_pri (0);
#endif
	}
    }
  HURD_CRITICAL_END;

  return err ? __hurd_fail (err) : _hurd_pgrp;
}
/* Store up to N supplementary group IDs in GIDSET and return the
   number of groups; with N == 0 just return the count.  Fails with
   EINVAL if N is negative or too small for the whole set.  */
int
__getgroups (int n, gid_t *gidset)
{
  error_t err;
  int count;
  void *crit;

  if (n < 0)
    return __hurd_fail (EINVAL);

  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  count = _hurd_id.gen.ngids;
  if (n == 0)
    {
      /* Only the count was requested.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
    }
  else
    {
      /* Copy the gids onto stack storage and then release the
	 idlock.  */
      gid_t gids[count];
      memcpy (gids, _hurd_id.gen.gids, sizeof gids);
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);

      /* Now that the lock is released, we can safely copy the group
	 set into the user's array, which might fault.  */
      if (count > n)
	return __hurd_fail (EINVAL);
      memcpy (gidset, gids, count * sizeof (gid_t));
    }

  return count;
}
/* Put the soft and hard limits for RESOURCE in *RLIMITS.
   Returns 0 if successful, -1 if not (and sets errno).  */
int
__getrlimit (enum __rlimit_resource resource, struct rlimit *rlimits)
{
  struct rlimit snapshot;

  /* Reject a null destination or an out-of-range resource.  */
  if (rlimits == NULL || (unsigned int) resource >= RLIMIT_NLIMITS)
    {
      errno = EINVAL;
      return -1;
    }

  /* Snapshot the limit under the lock, then copy it out.  */
  __mutex_lock (&_hurd_rlimit_lock);
  snapshot = _hurd_rlimits[resource];
  __mutex_unlock (&_hurd_rlimit_lock);

  *rlimits = snapshot;
  return 0;
}
/* Return the descriptor number underlying DIRP, or -1 with errno set
   to EINVAL if DIRP's cell is not in the descriptor table.  */
int
dirfd (DIR *dirp)
{
  int result = -1;
  int i;

  HURD_CRITICAL_BEGIN;
  __mutex_lock (&_hurd_dtable_lock);

  /* Scan the descriptor table for the cell this stream refers to.  */
  for (i = 0; i < _hurd_dtablesize; ++i)
    if (_hurd_dtable[i] == dirp->__fd)
      {
	result = i;
	break;
      }
  if (result < 0)
    errno = EINVAL;

  __mutex_unlock (&_hurd_dtable_lock);
  HURD_CRITICAL_END;

  return result;
}
/* Store up to N effective user IDs in UIDSET and return how many were
   stored; with N == 0 just return the number of effective uids.  */
int
geteuids (int n, uid_t *uidset)
{
  error_t err;
  int count;
  void *crit;

  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  count = _hurd_id.gen.nuids;
  if (n == 0)
    {
      /* Only the count was requested.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
    }
  else
    {
      /* Copy the uids onto stack storage and then release the
	 idlock.  */
      uid_t uids[count];
      memcpy (uids, _hurd_id.gen.uids, sizeof uids);
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);

      /* Now that the lock is released, we can safely copy the uid set
	 into the user's array, which might fault.  Truncate to the
	 caller's buffer size.  */
      if (count > n)
	count = n;
      memcpy (uidset, uids, count * sizeof (uid_t));
    }

  return count;
}
/* 101 SIOCGIFNAME -- Get name of a network interface from index number.  */
error_t
S_iioctl_siocgifname (io_t port,
		      ifname_t ifnam,
		      int *index)
{
  error_t err = 0;
  struct device *dev;

  __mutex_lock (&global_lock);

  dev = dev_get_by_index (*index);
  if (dev == NULL)
    err = ENODEV;
  else
    {
      /* Copy the name out, guaranteeing NUL termination.  */
      strncpy (ifnam, dev->name, IFNAMSIZ);
      ifnam[IFNAMSIZ - 1] = '\0';
    }

  __mutex_unlock (&global_lock);

  return err;
}
/* Acquire MUTEX recursively; always returns 0.

   The atomic increment of MUTEX->counter both announces this thread's
   claim and reports whether it was first: the thread that brings the
   counter up to 0 owns the lock without touching the semaphore at
   all.  NOTE(review): assumes the counter is -1 when the mutex is
   free -- confirm against the matching init/unlock code.  */
int
__gthr_win32_recursive_mutex_lock (__gthread_recursive_mutex_t *mutex)
{
  int me = tls_get(0);
  if ( __sync_add_and_fetch(&mutex->counter, 1) == 0)
    {
      /* Uncontended: we were the first claimant.  */
      mutex->depth = 1;
      mutex->owner = me;
      /* Mark the semaphore as taken so a later contended unlock/lock
	 pair balances.  */
      mutex->sema = 1;
    }
  else if (mutex->owner == me)
    {
      /* Recursive acquisition by the current owner: retract our claim
	 on the counter and just bump the nesting depth.  */
      __sync_sub_and_fetch(&mutex->counter, 1);
      ++(mutex->depth);
    }
  else
    {
      /* Contended: block on the semaphore until the owner releases,
	 then take ownership.  */
      __mutex_lock(&mutex->sema);
      mutex->depth = 1;
      mutex->owner = me;
    }
  return 0;
}
/* Open a directory stream on NAME. */ DIR * __opendir (const char *name) { DIR *dirp; int fd; struct hurd_fd *d; fd = __open (name, O_RDONLY); if (fd < 0) return NULL; dirp = (DIR *) malloc (sizeof (DIR)); if (dirp == NULL) { __close (fd); return NULL; } /* Extract the pointer to the descriptor structure. */ __mutex_lock (&_hurd_dtable_lock); d = dirp->__fd = _hurd_dtable[fd]; __mutex_unlock (&_hurd_dtable_lock); /* Set the descriptor to close on exec. */ __spin_lock (&d->port.lock); d->flags |= FD_CLOEXEC; __spin_unlock (&d->port.lock); dirp->__data = dirp->__ptr = NULL; dirp->__entry_data = dirp->__entry_ptr = 0; dirp->__allocation = 0; dirp->__size = 0; __libc_lock_init (dirp->__lock); return dirp; }
/* Removes a segment attachment and stores its size in *SIZE.
   Returns 0 if found, or EINVAL otherwise.  */
error_t
__sysvshm_remove (void *addr, size_t *size)
{
  struct sysvshm_attach **prev;

  __mutex_lock (&sysvshm_lock);

  /* Walk the list through the link pointers so the matching record
     can be unlinked in place.  */
  for (prev = &attach_list; *prev != NULL; prev = &(*prev)->next)
    {
      struct sysvshm_attach *cur = *prev;
      if (cur->addr == addr)
	{
	  *prev = cur->next;
	  *size = cur->size;
	  __mutex_unlock (&sysvshm_lock);
	  /* Free the record allocated by __sysvshm_add; the original
	     code leaked it on every detach.  */
	  free (cur);
	  return 0;
	}
    }

  __mutex_unlock (&sysvshm_lock);
  return EINVAL;
}
/* Remove PREEMPTOR from the global preemptor chain and recompute the
   set of preempted signals if needed.  Aborts if PREEMPTOR is not on
   the chain.  */
void
hurd_unpreempt_signals (struct hurd_signal_preemptor *preemptor)
{
  struct hurd_signal_preemptor **p;
  /* Accumulates the signals claimed by preemptors ahead of the one
     being removed.  */
  sigset_t preempted = 0;

  __mutex_lock (&_hurd_siglock);

  p = &_hurdsig_preemptors;
  while (*p)
    if (*p == preemptor)
      {
	/* Found it; take it off the chain.  */
	*p = (*p)->next;
	/* If every signal PREEMPTOR claimed is also claimed by an
	   earlier preemptor, the preempted set is unchanged and no
	   rescan is needed.  */
	if ((preemptor->signals & preempted) != preemptor->signals)
	  {
	    /* This might have been the only preemptor for some
	       of those signals, so we must collect the full mask
	       from the others.  */
	    struct hurd_signal_preemptor *pp;
	    for (pp = *p; pp; pp = pp->next)
	      preempted |= pp->signals;
	    _hurdsig_preempted_set = preempted;
	  }
	__mutex_unlock (&_hurd_siglock);
	return;
      }
    else
      {
	/* Not it; remember this one's signals and keep walking.  */
	preempted |= (*p)->signals;
	p = &(*p)->next;
      }

  __mutex_unlock (&_hurd_siglock);	/* Avoid deadlock during death rattle.  */
  assert (! "removing absent preemptor");
}
/* Mach exception-port server routine: translate an exception raised
   on THREAD into a POSIX signal and post it to that thread's
   sigstate.  Returns KERN_SUCCESS, or EPERM for forged messages.
   Compiled against either the new (EXC_MASK_ALL) or vanilla Mach 3.0
   exception interface.  */
kern_return_t
_S_catch_exception_raise (mach_port_t port,
			  thread_t thread,
			  task_t task,
#ifdef EXC_MASK_ALL		/* New interface flavor.  */
			  exception_type_t exception,
			  exception_data_t code,
			  mach_msg_type_number_t codeCnt
#else				/* Vanilla Mach 3.0 interface.  */
			  integer_t exception,
			  integer_t code, integer_t subcode
#endif
			  )
{
  struct hurd_sigstate *ss;
  int signo;
  struct hurd_signal_detail d;

  if (task != __mach_task_self ())
    /* The sender wasn't the kernel.  */
    return EPERM;

  /* Pack the exception data into the signal-detail structure.  */
  d.exc = exception;
#ifdef EXC_MASK_ALL
  assert (codeCnt >= 2);
  d.exc_code = code[0];
  d.exc_subcode = code[1];
#else
  d.exc_code = code;
  d.exc_subcode = subcode;
#endif

  /* Call the machine-dependent function to translate the Mach
     exception codes into a signal number and subcode.  */
  _hurd_exception2signal (&d, &signo);

  /* Find the sigstate structure for the faulting thread.  */
  __mutex_lock (&_hurd_siglock);
  for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
    if (ss->thread == thread)
      break;
  __mutex_unlock (&_hurd_siglock);
  if (ss == NULL)
    ss = _hurd_thread_sigstate (thread);	/* Allocate a fresh one.  */

  if (__spin_lock_locked (&ss->lock))
    {
      /* Loser.  The thread faulted with its sigstate lock held.  Its
	 sigstate data is now suspect.  So we reset the parts of it which
	 could cause trouble for the signal thread.  Anything else
	 clobbered therein will just hose this user thread, but it's
	 faulting already.

	 This is almost certainly a library bug: unless random memory
	 clobberation caused the sigstate lock to gratuitously appear held,
	 no code should do anything that can fault while holding the
	 sigstate lock.  */

      __spin_unlock (&ss->critical_section_lock);
      ss->context = NULL;
      __spin_unlock (&ss->lock);
    }

  /* Post the signal.  */
  _hurd_internal_post_signal (ss, signo, &d,
			      MACH_PORT_NULL, MACH_MSG_TYPE_PORT_SEND,
			      0);

  return KERN_SUCCESS;
}
/* Check the first NFDS descriptors either in POLLFDS (if nonnnull) or in each of READFDS, WRITEFDS, EXCEPTFDS that is nonnull. If TIMEOUT is not NULL, time out after waiting the interval specified therein. Returns the number of ready descriptors, or -1 for errors. */ int _hurd_select (int nfds, struct pollfd *pollfds, fd_set *readfds, fd_set *writefds, fd_set *exceptfds, const struct timespec *timeout, const sigset_t *sigmask) { int i; mach_port_t portset; int got; error_t err; fd_set rfds, wfds, xfds; int firstfd, lastfd; mach_msg_timeout_t to = 0; struct { struct hurd_userlink ulink; struct hurd_fd *cell; mach_port_t io_port; int type; mach_port_t reply_port; } d[nfds]; sigset_t oset; union typeword /* Use this to avoid unkosher casts. */ { mach_msg_type_t type; uint32_t word; }; assert (sizeof (union typeword) == sizeof (mach_msg_type_t)); assert (sizeof (uint32_t) == sizeof (mach_msg_type_t)); if (nfds < 0 || (pollfds == NULL && nfds > FD_SETSIZE)) { errno = EINVAL; return -1; } if (timeout != NULL) { if (timeout->tv_sec < 0 || timeout->tv_nsec < 0) { errno = EINVAL; return -1; } to = (timeout->tv_sec * 1000 + (timeout->tv_nsec + 999999) / 1000000); } if (sigmask && __sigprocmask (SIG_SETMASK, sigmask, &oset)) return -1; if (pollfds) { /* Collect interesting descriptors from the user's `pollfd' array. We do a first pass that reads the user's array before taking any locks. The second pass then only touches our own stack, and gets the port references. 
*/ for (i = 0; i < nfds; ++i) if (pollfds[i].fd >= 0) { int type = 0; if (pollfds[i].events & POLLIN) type |= SELECT_READ; if (pollfds[i].events & POLLOUT) type |= SELECT_WRITE; if (pollfds[i].events & POLLPRI) type |= SELECT_URG; d[i].io_port = pollfds[i].fd; d[i].type = type; } else d[i].type = 0; HURD_CRITICAL_BEGIN; __mutex_lock (&_hurd_dtable_lock); for (i = 0; i < nfds; ++i) if (d[i].type != 0) { const int fd = (int) d[i].io_port; if (fd < _hurd_dtablesize) { d[i].cell = _hurd_dtable[fd]; d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink); if (d[i].io_port != MACH_PORT_NULL) continue; } /* If one descriptor is bogus, we fail completely. */ while (i-- > 0) if (d[i].type != 0) _hurd_port_free (&d[i].cell->port, &d[i].ulink, d[i].io_port); break; } __mutex_unlock (&_hurd_dtable_lock); HURD_CRITICAL_END; if (i < nfds) { if (sigmask) __sigprocmask (SIG_SETMASK, &oset, NULL); errno = EBADF; return -1; } lastfd = i - 1; firstfd = i == 0 ? lastfd : 0; } else { /* Collect interested descriptors from the user's fd_set arguments. Use local copies so we can't crash from user bogosity. */ if (readfds == NULL) FD_ZERO (&rfds); else rfds = *readfds; if (writefds == NULL) FD_ZERO (&wfds); else wfds = *writefds; if (exceptfds == NULL) FD_ZERO (&xfds); else xfds = *exceptfds; HURD_CRITICAL_BEGIN; __mutex_lock (&_hurd_dtable_lock); if (nfds > _hurd_dtablesize) nfds = _hurd_dtablesize; /* Collect the ports for interesting FDs. */ firstfd = lastfd = -1; for (i = 0; i < nfds; ++i) { int type = 0; if (readfds != NULL && FD_ISSET (i, &rfds)) type |= SELECT_READ; if (writefds != NULL && FD_ISSET (i, &wfds)) type |= SELECT_WRITE; if (exceptfds != NULL && FD_ISSET (i, &xfds)) type |= SELECT_URG; d[i].type = type; if (type) { d[i].cell = _hurd_dtable[i]; d[i].io_port = _hurd_port_get (&d[i].cell->port, &d[i].ulink); if (d[i].io_port == MACH_PORT_NULL) { /* If one descriptor is bogus, we fail completely. 
*/ while (i-- > 0) if (d[i].type != 0) _hurd_port_free (&d[i].cell->port, &d[i].ulink, d[i].io_port); break; } lastfd = i; if (firstfd == -1) firstfd = i; } } __mutex_unlock (&_hurd_dtable_lock); HURD_CRITICAL_END; if (i < nfds) { if (sigmask) __sigprocmask (SIG_SETMASK, &oset, NULL); errno = EBADF; return -1; } } err = 0; got = 0; /* Send them all io_select request messages. */ if (firstfd == -1) /* But not if there were no ports to deal with at all. We are just a pure timeout. */ portset = __mach_reply_port (); else { portset = MACH_PORT_NULL; for (i = firstfd; i <= lastfd; ++i) if (d[i].type) { int type = d[i].type; d[i].reply_port = __mach_reply_port (); err = __io_select (d[i].io_port, d[i].reply_port, /* Poll only if there's a single descriptor. */ (firstfd == lastfd) ? to : 0, &type); switch (err) { case MACH_RCV_TIMED_OUT: /* No immediate response. This is normal. */ err = 0; if (firstfd == lastfd) /* When there's a single descriptor, we don't need a portset, so just pretend we have one, but really use the single reply port. */ portset = d[i].reply_port; else if (got == 0) /* We've got multiple reply ports, so we need a port set to multiplex them. */ { /* We will wait again for a reply later. */ if (portset == MACH_PORT_NULL) /* Create the portset to receive all the replies on. */ err = __mach_port_allocate (__mach_task_self (), MACH_PORT_RIGHT_PORT_SET, &portset); if (! err) /* Put this reply port in the port set. */ __mach_port_move_member (__mach_task_self (), d[i].reply_port, portset); } break; default: /* No other error should happen. Callers of select don't expect to see errors, so we simulate readiness of the erring object and the next call hopefully will get the error again. */ type = SELECT_ALL; /* FALLTHROUGH */ case 0: /* We got an answer. */ if ((type & SELECT_ALL) == 0) /* Bogus answer; treat like an error, as a fake positive. */ type = SELECT_ALL; /* This port is already ready already. 
*/ d[i].type &= type; d[i].type |= SELECT_RETURNED; ++got; break; } _hurd_port_free (&d[i].cell->port, &d[i].ulink, d[i].io_port); } } /* Now wait for reply messages. */ if (!err && got == 0) { /* Now wait for io_select_reply messages on PORT, timing out as appropriate. */ union { mach_msg_header_t head; #ifdef MACH_MSG_TRAILER_MINIMUM_SIZE struct { mach_msg_header_t head; NDR_record_t ndr; error_t err; } error; struct { mach_msg_header_t head; NDR_record_t ndr; error_t err; int result; mach_msg_trailer_t trailer; } success; #else struct { mach_msg_header_t head; union typeword err_type; error_t err; } error; struct { mach_msg_header_t head; union typeword err_type; error_t err; union typeword result_type; int result; } success; #endif } msg; mach_msg_option_t options = (timeout == NULL ? 0 : MACH_RCV_TIMEOUT); error_t msgerr; while ((msgerr = __mach_msg (&msg.head, MACH_RCV_MSG | MACH_RCV_INTERRUPT | options, 0, sizeof msg, portset, to, MACH_PORT_NULL)) == MACH_MSG_SUCCESS) { /* We got a message. Decode it. */ #define IO_SELECT_REPLY_MSGID (21012 + 100) /* XXX */ #ifdef MACH_MSG_TYPE_BIT const union typeword inttype = { type: { MACH_MSG_TYPE_INTEGER_T, sizeof (integer_t) * 8, 1, 1, 0, 0 } }; #endif if (msg.head.msgh_id == IO_SELECT_REPLY_MSGID && msg.head.msgh_size >= sizeof msg.error && !(msg.head.msgh_bits & MACH_MSGH_BITS_COMPLEX) && #ifdef MACH_MSG_TYPE_BIT msg.error.err_type.word == inttype.word #endif ) { /* This is a properly formatted message so far. See if it is a success or a failure. */ if (msg.error.err == EINTR && msg.head.msgh_size == sizeof msg.error) { /* EINTR response; poll for further responses and then return quickly. */ err = EINTR; goto poll; } if (msg.error.err || msg.head.msgh_size != sizeof msg.success || #ifdef MACH_MSG_TYPE_BIT msg.success.result_type.word != inttype.word || #endif (msg.success.result & SELECT_ALL) == 0) { /* Error or bogus reply. Simulate readiness. 
*/ __mach_msg_destroy (&msg.head); msg.success.result = SELECT_ALL; } /* Look up the respondent's reply port and record its readiness. */ { int had = got; if (firstfd != -1) for (i = firstfd; i <= lastfd; ++i) if (d[i].type && d[i].reply_port == msg.head.msgh_local_port) { d[i].type &= msg.success.result; d[i].type |= SELECT_RETURNED; ++got; } assert (got > had); } } if (msg.head.msgh_remote_port != MACH_PORT_NULL) __mach_port_deallocate (__mach_task_self (), msg.head.msgh_remote_port); if (got) poll: { /* Poll for another message. */ to = 0; options |= MACH_RCV_TIMEOUT; } } if (msgerr == MACH_RCV_INTERRUPTED) /* Interruption on our side (e.g. signal reception). */ err = EINTR; if (got) /* At least one descriptor is known to be ready now, so we will return success. */ err = 0; }
/* Msgport RPC: describe what THREAD is currently blocked on.  Fills
   DESCRIPTION with a human-readable string and *MSGID with the msgh_id
   of the RPC the thread is waiting in (0 when none or unknown).  */
kern_return_t
_S_msg_report_wait (mach_port_t msgport, thread_t thread,
		    string_t description, mach_msg_id_t *msgid)
{
  *msgid = 0;

  if (thread == _hurd_msgport_thread)
    /* Cute.  */
    strcpy (description, "msgport");
  else if (&_hurd_itimer_thread && thread == _hurd_itimer_thread)
    /* The `&' check tests that the weak symbol _hurd_itimer_thread is
       defined at all before comparing against it.  */
    strcpy (description, "itimer");
  else
    {
      /* Make sure this is really one of our threads.  */
      struct hurd_sigstate *ss;

      __mutex_lock (&_hurd_siglock);
      for (ss = _hurd_sigstates; ss != NULL; ss = ss->next)
	if (ss->thread == thread)
	  break;
      __mutex_unlock (&_hurd_siglock);
      if (ss == NULL)
	/* To hell with you.  */
	return EINVAL;

      if (ss->suspended != MACH_PORT_NULL)
	strcpy (description, "sigsuspend");
      else
	{
	  /* Examine the thread's state to see if it is blocked in an RPC.  */
	  struct machine_thread_state state;
	  mach_msg_type_number_t count = MACHINE_THREAD_STATE_COUNT;
	  error_t err;

	  err = __thread_get_state (thread, MACHINE_THREAD_STATE_FLAVOR,
				    (integer_t *) &state, &count);
	  if (err)
	    return err;
	  assert (count == MACHINE_THREAD_STATE_COUNT);
	  if (SYSCALL_EXAMINE (&state, msgid))
	    {
	      mach_port_t send_port, rcv_port;
	      mach_msg_option_t option;
	      mach_msg_timeout_t timeout;

	      /* Blocked in a system call.  */
	      if (*msgid == -25
		  /* mach_msg system call.  Examine its parameters.  */
		  && MSG_EXAMINE (&state, msgid, &send_port, &rcv_port,
				  &option, &timeout) == 0)
		{
		  char *p;
		  if (send_port != MACH_PORT_NULL && *msgid != 0)
		    {
		      /* For the normal case of RPCs, we consider the
			 destination port to be the interesting thing
			 whether we are in fact sending or receiving at the
			 moment.  That tells us who we are waiting for the
			 reply from.  */
		      if (send_port == ss->intr_port)
			{
			  /* This is a Hurd interruptible RPC.
			     Mark it by surrounding the port description
			     string with [...] brackets.  */
			  description[0] = '[';
			  p = describe_port (description + 1, send_port);
			  *p++ = ']';
			  *p = '\0';
			}
		      else
			(void) describe_port (description, send_port);
		    }
		  else if (rcv_port != MACH_PORT_NULL)
		    {
		      /* This system call had no send port, but had a
			 receive port.  The msgid we extracted is then just
			 some garbage or perhaps the msgid of the last
			 message this thread received, but it's not a
			 helpful thing to return.  */
		      strcpy (describe_port (description, rcv_port), ":rcv");
		      *msgid = 0;
		    }
		  else if ((option & (MACH_RCV_MSG|MACH_RCV_TIMEOUT))
			   == (MACH_RCV_MSG|MACH_RCV_TIMEOUT))
		    {
		      /* A receive with no valid port can be used for a
			 pure timeout.  Report the timeout value (counted
			 in milliseconds); note this is the original total
			 time, not the time remaining.  */
		      strcpy (describe_number (description, 0, timeout), "ms");
		      *msgid = 0;
		    }
		  else
		    {
		      strcpy (description, "mach_msg");
		      *msgid = 0;
		    }
		}
	      else
		/* Some other system call.  */
		{
		  (void) describe_number (description, "syscall#", *msgid);
		  *msgid = 0;
		}
	    }
	  else
	    description[0] = '\0';
	}
    }

  /* Consume the thread reference the RPC handed us.  */
  __mach_port_deallocate (__mach_task_self (), thread);
  return 0;
}