/* Return the number of effective group IDs of the calling process.
   If N is nonzero, store at most N of them into GIDSET; per POSIX,
   fail with EINVAL when N is positive but smaller than the group count.  */
int
__getgroups (int n, gid_t *gidset)
{
  error_t err;
  int ngids;
  void *crit;

  if (n < 0)
    return __hurd_fail (EINVAL);

  /* Take the ID lock inside a critical section so a signal handler
     cannot interrupt us while we hold it.  */
  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  ngids = _hurd_id.gen.ngids;

  if (n == 0)
    {
      /* The caller only wants the count.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return ngids;
    }

  {
    /* Snapshot the gids onto the stack so we can drop the lock before
       touching the user's buffer, which might fault.  */
    gid_t gids[ngids];
    memcpy (gids, _hurd_id.gen.gids, sizeof (gids));
    __mutex_unlock (&_hurd_id.lock);
    _hurd_critical_section_unlock (crit);

    if (ngids > n)
      return __hurd_fail (EINVAL);
    memcpy (gidset, gids, ngids * sizeof (gid_t));
  }

  return ngids;
}
/* Return the number of effective UIDs of the calling process.
   If N is nonzero, store at most N of them into UIDSET, silently
   truncating to N (unlike getgroups, this call never fails on a
   short buffer).  */
int
geteuids (int n, uid_t *uidset)
{
  error_t err;
  int nuids;
  void *crit;

  /* Take the ID lock inside a critical section so a signal handler
     cannot interrupt us while we hold it.  */
  crit = _hurd_critical_section_lock ();
  __mutex_lock (&_hurd_id.lock);

  err = _hurd_check_ids ();
  if (err)
    {
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return __hurd_fail (err);
    }

  nuids = _hurd_id.gen.nuids;

  if (n == 0)
    {
      /* The caller only wants the count.  */
      __mutex_unlock (&_hurd_id.lock);
      _hurd_critical_section_unlock (crit);
      return nuids;
    }

  {
    /* Snapshot the uids onto the stack so we can drop the lock before
       touching the user's buffer, which might fault.  */
    uid_t uids[nuids];
    memcpy (uids, _hurd_id.gen.uids, sizeof (uids));
    __mutex_unlock (&_hurd_id.lock);
    _hurd_critical_section_unlock (crit);

    if (nuids > n)
      nuids = n;
    memcpy (uidset, uids, nuids * sizeof (uid_t));
  }

  return nuids;
}
/* Request cancellation of THREAD's current interruptible operation.
   Returns 0 on success, EINVAL if THREAD has no sigstate, or an error
   from thread_suspend/thread_set_state.  The suspend -> abort-RPC ->
   restore-state -> resume ordering below is deliberate; do not reorder.  */
error_t
hurd_thread_cancel (thread_t thread)
{
  struct hurd_sigstate *ss = _hurd_thread_sigstate (thread);
  struct machine_thread_all_state state;
  int state_change;
  error_t err;

  if (! ss)
    return EINVAL;

  if (ss == _hurd_self_sigstate ())
    {
      /* We are cancelling ourselves, so it is easy to succeed
	 quickly.  Since this function is not a cancellation point, we
	 just leave the flag set pending the next cancellation point
	 (hurd_check_cancel or RPC) and return success.  */
      ss->cancel = 1;
      return 0;
    }

  /* Take both the critical-section lock (so the target cannot enter a
     critical section out from under us) and the sigstate lock while
     suspending the target thread.  */
  assert (! __spin_lock_locked (&ss->critical_section_lock));
  __spin_lock (&ss->critical_section_lock);
  __spin_lock (&ss->lock);
  err = __thread_suspend (thread);
  __spin_unlock (&ss->lock);

  if (! err)
    {
      /* Set the flag telling the thread its operation is being cancelled.  */
      ss->cancel = 1;

      /* Interrupt any interruptible RPC now in progress.  */
      state.set = 0;
      _hurdsig_abort_rpcs (ss, 0, 0, &state, &state_change, NULL, 0, 0);
      /* If aborting the RPC modified the thread's register state, install
	 the updated state before letting the thread run again.  */
      if (state_change)
	err = __thread_set_state (thread, MACHINE_THREAD_STATE_FLAVOR,
				  (natural_t *) &state.basic,
				  MACHINE_THREAD_STATE_COUNT);

      if (ss->cancel_hook)
	/* The code being cancelled has a special wakeup function.
	   Calling this should make the thread wake up and check the
	   cancellation flag.  */
	(*ss->cancel_hook) ();

      __thread_resume (thread);
    }

  /* Releases SS->critical_section_lock taken above.  */
  _hurd_critical_section_unlock (ss);
  return err;
}
/* Called by longjmp before jumping to ENV with value VAL: run cleanups
   for resources whose userlinks live in stack frames that the jump is
   about to unwind, and drop signal preemptors registered in those
   frames.  _JMPBUF_UNWINDS tells whether a given stack address lies in
   the region being unwound.  */
void
_longjmp_unwind (jmp_buf env, int val)
{
  struct hurd_sigstate *ss = _hurd_self_sigstate ();
  struct hurd_userlink *link;

  /* All access to SS->active_resources must take place inside a
     critical section where signal handlers cannot run.  */
  __spin_lock (&ss->lock);
  assert (! __spin_lock_locked (&ss->critical_section_lock));
  __spin_lock (&ss->critical_section_lock);

  /* Remove local signal preemptors being unwound past.  The preemptor
     records live on the stack, so those in unwound frames are popped
     from the head of the list.  */
  while (ss->preemptors
	 && _JMPBUF_UNWINDS (env[0].__jmpbuf, ss->preemptors, demangle_ptr))
    ss->preemptors = ss->preemptors->next;

  /* Drop SS->lock before running cleanups; the critical-section flag
     taken above still keeps signal handlers out.  */
  __spin_unlock (&ss->lock);

  /* Iterate over the current thread's list of active resources.
     Process the head portion of the list whose links reside in stack
     frames being unwound by this jump.  */
  for (link = ss->active_resources;
       link && _JMPBUF_UNWINDS (env[0].__jmpbuf, link, demangle_ptr);
       link = link->thread.next)
    /* Remove this link from the resource's users list, since the
       frame using the resource is being unwound.  This call returns
       nonzero if that was the last user.  */
    if (_hurd_userlink_unlink (link))
      /* One of the frames being unwound by the longjmp was the last
	 user of its resource.  Call the cleanup function to
	 deallocate it.  */
      (*link->cleanup) (link->cleanup_data, env, val);

  /* Releases SS->critical_section_lock taken above.  */
  _hurd_critical_section_unlock (ss);
}
/* Overlay TASK, executing FILE with arguments ARGV and environment ENVP.
   If TASK == mach_task_self (), some ports are dealloc'd by the exec server.
   ARGV and ENVP are terminated by NULL pointers.

   Fixes applied in review:
   - The error cleanup after __proc_task2proc used `while (--i > 0)', which
     never freed the port reference acquired for slot 0; changed to
     `while (i-- > 0)' so all previously-acquired slots are released.
   - The exit labels were ordered `outargs' then `outenv', so the
     `goto outargs' taken before ENV is created fell through into
     free (env) on an indeterminate pointer, while `goto outenv' leaked
     ARGS.  The labels are now ordered `outenv' then `outargs'.  */
error_t
_hurd_exec (task_t task, file_t file,
	    char *const argv[], char *const envp[])
{
  error_t err;
  char *args, *env;
  size_t argslen, envlen;
  int ints[INIT_INT_MAX];
  mach_port_t ports[_hurd_nports];
  struct hurd_userlink ulink_ports[_hurd_nports];
  file_t *dtable;
  unsigned int dtablesize, i;
  struct hurd_port **dtable_cells;
  struct hurd_userlink *ulink_dtable;
  struct hurd_sigstate *ss;
  mach_port_t *please_dealloc, *pdp;

  /* Flatten ARGV and ENVP into argz vectors for the exec server.  */
  /* XXX needs to be hurdmalloc XXX */
  if (err = __argz_create (argv, &args, &argslen))
    return err;
  if (err = __argz_create (envp, &env, &envlen))
    goto outargs;

  /* Load up the ports to give to the new program.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      {
	/* This is another task, so we need to ask the proc server
	   for the right proc server port for it.  */
	if (err = __USEPORT (PROC, __proc_task2proc (port, task, &ports[i])))
	  {
	    /* Release every reference acquired so far, including slot 0
	       (the old `--i > 0' condition leaked slot 0's reference).  */
	    while (i-- > 0)
	      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);
	    goto outenv;
	  }
      }
    else
      ports[i] = _hurd_port_get (&_hurd_ports[i], &ulink_ports[i]);

  /* Load up the ints to give the new program.  */
  for (i = 0; i < INIT_INT_MAX; ++i)
    switch (i)
      {
      case INIT_UMASK:
	ints[i] = _hurd_umask;
	break;

      case INIT_SIGMASK:
      case INIT_SIGIGN:
      case INIT_SIGPENDING:
	/* We will set these all below.  */
	break;

      case INIT_TRACEMASK:
	ints[i] = _hurdsig_traced;
	break;

      default:
	ints[i] = 0;
      }

  ss = _hurd_self_sigstate ();

  /* Enter a critical section so nothing we call below tries to take the
     sigstate lock while we pack the signal state.  */
  assert (! __spin_lock_locked (&ss->critical_section_lock));
  __spin_lock (&ss->critical_section_lock);

  __spin_lock (&ss->lock);
  ints[INIT_SIGMASK] = ss->blocked;
  ints[INIT_SIGPENDING] = ss->pending;
  ints[INIT_SIGIGN] = 0;
  for (i = 1; i < NSIG; ++i)
    if (ss->actions[i].sa_handler == SIG_IGN)
      ints[INIT_SIGIGN] |= __sigmask (i);

  /* We hold the sigstate lock until the exec has failed so that no
     signal can arrive between when we pack the blocked and ignored
     signals, and when the exec actually happens.  A signal handler
     could change what signals are blocked and ignored.  Either the
     change will be reflected in the exec, or the signal will never be
     delivered.  Setting the critical section flag avoids anything we
     call trying to acquire the sigstate lock.  */
  __spin_unlock (&ss->lock);

  /* Pack up the descriptor table to give the new program.  */
  __mutex_lock (&_hurd_dtable_lock);

  dtablesize = _hurd_dtable ? _hurd_dtablesize : _hurd_init_dtablesize;

  if (task == __mach_task_self ())
    /* Request the exec server to deallocate some ports from us if the
       exec succeeds.  The init ports and descriptor ports will arrive
       in the new program's exec_startup message.  If we failed to
       deallocate them, the new program would have duplicate user
       references for them.  But we cannot deallocate them ourselves,
       because we must still have them after a failed exec call.  */
    please_dealloc = __alloca ((_hurd_nports + (2 * dtablesize))
			       * sizeof (mach_port_t));
  else
    please_dealloc = NULL;
  pdp = please_dealloc;

  if (_hurd_dtable != NULL)
    {
      dtable = __alloca (dtablesize * sizeof (dtable[0]));
      ulink_dtable = __alloca (dtablesize * sizeof (ulink_dtable[0]));
      dtable_cells = __alloca (dtablesize * sizeof (dtable_cells[0]));
      for (i = 0; i < dtablesize; ++i)
	{
	  struct hurd_fd *const d = _hurd_dtable[i];
	  if (d == NULL)
	    {
	      dtable[i] = MACH_PORT_NULL;
	      continue;
	    }
	  /* Lock the cell while we decide what to pass along for it.  */
	  __spin_lock (&d->port.lock);
	  if (d->flags & FD_CLOEXEC)
	    {
	      /* This descriptor is marked to be closed on exec.
		 So don't pass it to the new program.  */
	      dtable[i] = MACH_PORT_NULL;
	      if (pdp && d->port.port != MACH_PORT_NULL)
		{
		  /* We still need to deallocate the ports.  */
		  *pdp++ = d->port.port;
		  if (d->ctty.port != MACH_PORT_NULL)
		    *pdp++ = d->ctty.port;
		}
	      __spin_unlock (&d->port.lock);
	    }
	  else
	    {
	      if (pdp && d->ctty.port != MACH_PORT_NULL)
		/* All the elements of DTABLE are added to PLEASE_DEALLOC
		   below, so we needn't add the port itself.  But we
		   must deallocate the ctty port as well as the normal
		   port that got installed in DTABLE[I].  */
		*pdp++ = d->ctty.port;
	      /* Takes a user reference and unlocks D->port.lock.  */
	      dtable[i] = _hurd_port_locked_get (&d->port, &ulink_dtable[i]);
	      dtable_cells[i] = &d->port;
	    }
	}
    }
  else
    {
      dtable = _hurd_init_dtable;
      ulink_dtable = NULL;
      dtable_cells = NULL;
    }

  /* The information is all set up now.  Try to exec the file.  */
  {
    if (pdp)
      {
	/* Request the exec server to deallocate some ports from us if
	   the exec succeeds.  The init ports and descriptor ports will
	   arrive in the new program's exec_startup message.  If we
	   failed to deallocate them, the new program would have
	   duplicate user references for them.  But we cannot
	   deallocate them ourselves, because we must still have them
	   after a failed exec call.  */
	for (i = 0; i < _hurd_nports; ++i)
	  *pdp++ = ports[i];
	for (i = 0; i < dtablesize; ++i)
	  *pdp++ = dtable[i];
      }

    err = __file_exec (file, task, 0,
		       args, argslen, env, envlen,
		       dtable, MACH_MSG_TYPE_COPY_SEND, dtablesize,
		       ports, MACH_MSG_TYPE_COPY_SEND, _hurd_nports,
		       ints, INIT_INT_MAX,
		       please_dealloc, pdp - please_dealloc,
		       &_hurd_msgport,
		       task == __mach_task_self () ? 1 : 0);
  }

  /* Release references to the standard ports.  */
  for (i = 0; i < _hurd_nports; ++i)
    if (i == INIT_PORT_PROC && task != __mach_task_self ())
      /* This send right came from proc_task2proc, not a userlink.  */
      __mach_port_deallocate (__mach_task_self (), ports[i]);
    else
      _hurd_port_free (&_hurd_ports[i], &ulink_ports[i], ports[i]);

  if (ulink_dtable != NULL)
    /* Release references to the file descriptor ports.  */
    for (i = 0; i < dtablesize; ++i)
      if (dtable[i] != MACH_PORT_NULL)
	_hurd_port_free (dtable_cells[i], &ulink_dtable[i], dtable[i]);

  /* Release lock on the file descriptor table.  */
  __mutex_unlock (&_hurd_dtable_lock);

  /* Safe to let signals happen now.  */
  _hurd_critical_section_unlock (ss);

  /* Label order matters: the normal path and `goto outenv' free both
     ENV and ARGS; `goto outargs' (ENV not yet created) frees only ARGS.  */
 outenv:
  free (env);
 outargs:
  free (args);
  return err;
}