/*
 * Attach an ACPI smart battery.  Resolves the battery's SMBus base address
 * from the _EC method, locates the embedded-controller device it speaks
 * through, and registers with the generic battery layer.
 */
static int
acpi_smbat_attach(device_t dev)
{
	struct acpi_smbat_softc *softc = device_get_softc(dev);
	devclass_t ec_class;
	uint32_t ec_base;

	/* _EC carries the SMBus base address in bits 8-15 of its result. */
	if (ACPI_FAILURE(acpi_GetInteger(acpi_get_handle(dev), "_EC",
	    &ec_base))) {
		device_printf(dev, "cannot get EC base address\n");
		return (ENXIO);
	}
	softc->sb_base_addr = (ec_base >> 8) & 0xff;

	/* XXX Only works with one EC, but nearly all systems only have one. */
	ec_class = devclass_find("acpi_ec");
	softc->ec_dev = devclass_get_device(ec_class, 0);
	if (softc->ec_dev == NULL) {
		device_printf(dev, "cannot find EC device\n");
		return (ENXIO);
	}

	/* Mark the cached info/status stale so the first query hits hardware. */
	timespecclear(&softc->bst_lastupdated);
	timespecclear(&softc->bif_lastupdated);

	if (acpi_battery_register(dev) != 0) {
		device_printf(dev, "cannot register battery\n");
		return (ENXIO);
	}
	return (0);
}
/*
 * Poll the battery until both _BST (status) and _BIF (info) report valid
 * data, or give up after ACPI_CMBAT_RETRY_MAX attempts.  Runs from the
 * deferred task queued by acpi_cmbat_attach(), not from a notify handler.
 */
static void
acpi_cmbat_init_battery(void *arg)
{
	int retry;
	device_t dev = (device_t)arg;
	struct acpi_cmbat_softc *sc = device_get_softc(dev);
#define ACPI_CMBAT_RETRY_MAX	6

	/* Bail out if another initialization pass is already in flight. */
	if (sc->initializing) {
		return;
	}
	sc->initializing = 1;
	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
	    "battery initialization start\n");

	/*
	 * The sleep lives in the third for-clause so it also runs on the
	 * `continue' paths below — the EC/firmware may simply not be ready
	 * to answer yet shortly after boot.
	 */
	for (retry = 0; retry < ACPI_CMBAT_RETRY_MAX;
	    retry++, AcpiOsSleep(10, 0)) {
		sc->present = acpi_BatteryIsPresent(dev);
		if (!sc->present) {
			continue;
		}
		/* Invalidate both caches to force fresh hardware reads. */
		timespecclear(&sc->bst_lastupdated);
		timespecclear(&sc->bif_lastupdated);
		acpi_cmbat_get_bst(dev);
		if (!acpi_cmbat_is_bst_valid(&sc->bst)) {
			continue;
		}
		acpi_cmbat_get_bif(dev);
		if (!acpi_cmbat_is_bif_valid(&sc->bif)) {
			continue;
		}
		/* Both status and info are valid — done. */
		break;
	}

	if (retry == ACPI_CMBAT_RETRY_MAX)
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization failed, giving up\n");
	else
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization done, tried %d times\n", retry+1);

	sc->initializing = 0;
}
/*
 * ACPI notify callback for a control-method battery.  Invalidates the
 * cached status on change notifications, schedules a battery-info refresh
 * from task context where needed, and forwards every notify to userland.
 */
static void
acpi_cmbat_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
	device_t battdev = (device_t)context;
	struct acpi_cmbat_softc *softc = device_get_softc(battdev);

	if (notify == ACPI_NOTIFY_DEVICE_CHECK ||
	    notify == ACPI_BATTERY_BST_CHANGE) {
		/*
		 * Clear the last updated time.  The next call to retrieve
		 * the battery status will get the new value for us.
		 */
		timespecclear(&softc->bst_lastupdated);
	} else if (notify == ACPI_NOTIFY_BUS_CHECK ||
	    notify == ACPI_BATTERY_BIF_CHANGE) {
		/*
		 * Queue a callback to get the current battery info from
		 * thread context.  It's not safe to block in a notify
		 * handler.
		 */
		AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cmbat_get_bif_task,
		    battdev);
	}

	acpi_UserNotify("CMBAT", h, notify);
}
/*
 * Attach a control-method battery: register with the generic battery
 * layer, install the notify handler, and queue the initial data poll.
 */
static int
acpi_cmbat_attach(device_t dev)
{
	struct acpi_cmbat_softc *softc = device_get_softc(dev);
	ACPI_HANDLE hdl = acpi_get_handle(dev);
	int rv;

	softc->dev = dev;
	/* Mark the cached _BST data stale so the first query hits hardware. */
	timespecclear(&softc->bst_lastupdated);

	if ((rv = acpi_battery_register(dev)) != 0) {
		device_printf(dev, "registering battery failed\n");
		return (rv);
	}

	/*
	 * Install a system notify handler in addition to the device notify.
	 * Toshiba notebook uses this alternate notify for its battery.
	 */
	AcpiInstallNotifyHandler(hdl, ACPI_ALL_NOTIFY,
	    acpi_cmbat_notify_handler, dev);

	/* Queue the first battery poll to run from thread context. */
	AcpiOsExecute(OSL_NOTIFY_HANDLER, acpi_cmbat_init_battery, dev);
	return (0);
}
/*
 * Attach a control-method battery (older, ioctl-based variant): install
 * the device notify handler, register the shared ioctls with the first
 * unit, register the battery, and queue the initial data collection.
 */
static int
acpi_cmbat_attach(device_t dev)
{
	int error;
	ACPI_HANDLE handle;
	struct acpi_cmbat_softc *sc;

	if ((sc = device_get_softc(dev)) == NULL) {
		return (ENXIO);
	}

	handle = acpi_get_handle(dev);
	AcpiInstallNotifyHandler(handle, ACPI_DEVICE_NOTIFY,
	    acpi_cmbat_notify_handler, dev);

	sc->bif_updating = sc->bst_updating = 0;
	sc->dev = dev;

	/* Mark cached info/status stale so the first query hits hardware. */
	timespecclear(&sc->bif_lastupdated);
	timespecclear(&sc->bst_lastupdated);

	/* The ioctls are shared by all units; register with the first one. */
	if (acpi_cmbat_units == 0) {
		if ((error = acpi_register_ioctl(ACPIIO_CMBAT_GET_BIF,
		    acpi_cmbat_ioctl, NULL)) != 0) {
			/*
			 * NOTE(review): the notify handler installed above is
			 * not removed on this or the following error paths —
			 * confirm detach (or failed-attach teardown) handles
			 * the cleanup.
			 */
			return (error);
		}
		if ((error = acpi_register_ioctl(ACPIIO_CMBAT_GET_BST,
		    acpi_cmbat_ioctl, NULL)) != 0) {
			return (error);
		}
	}

	if ((error = acpi_battery_register(ACPI_BATT_TYPE_CMBAT,
	    acpi_cmbat_units)) != 0) {
		return (error);
	}

	acpi_cmbat_units++;
	timespecclear(&acpi_cmbat_info_lastupdated);
	sc->initializing = 0;

	/* Queue the initial battery poll for execution in thread context. */
	AcpiOsQueueForExecution(OSD_PRIORITY_LO,
	    acpi_cmbat_init_battery, dev);

	return (0);
}
/*
 * Deferred initialization task: poll the battery until both _BST (status)
 * and _BIF (info) are valid, sleeping between attempts, or give up after
 * ACPI_CMBAT_RETRY_MAX tries.
 */
static void
acpi_cmbat_init_battery(void *arg)
{
	struct acpi_cmbat_softc *sc;
	int retry, valid;
	device_t dev;

	dev = (device_t)arg;
	sc = device_get_softc(dev);
	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
	    "battery initialization start\n");

	/*
	 * Try repeatedly to get valid data from the battery.  Since the
	 * embedded controller isn't always ready just after boot, we may have
	 * to wait a while.  The sleep sits in the third for-clause so it
	 * also runs on the `continue' paths.
	 */
	for (retry = 0; retry < ACPI_CMBAT_RETRY_MAX; retry++,
	    AcpiOsSleep(10000)) {
		/* batteries on DOCK can be ejected w/ DOCK during retrying */
		if (!device_is_attached(dev))
			return;

		if (!acpi_BatteryIsPresent(dev))
			continue;

		/*
		 * Only query the battery if this is the first try or the
		 * specific type of info is still invalid.
		 */
		ACPI_SERIAL_BEGIN(cmbat);
		if (retry == 0 || !acpi_battery_bst_valid(&sc->bst)) {
			timespecclear(&sc->bst_lastupdated);
			acpi_cmbat_get_bst(dev);
		}
		if (retry == 0 || !acpi_battery_bif_valid(&sc->bif))
			acpi_cmbat_get_bif(dev);
		/* Evaluate validity while still holding the serial lock. */
		valid = acpi_battery_bst_valid(&sc->bst) &&
		    acpi_battery_bif_valid(&sc->bif);
		ACPI_SERIAL_END(cmbat);

		if (valid)
			break;
	}

	if (retry == ACPI_CMBAT_RETRY_MAX) {
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization failed, giving up\n");
	} else {
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization done, tried %d times\n",
		    retry + 1);
	}
}
/*
 * Common backend for nanosleep(2)/clock_nanosleep(2): sleep for the
 * interval (or until the absolute time, with TIMER_ABSTIME) in *rqt on
 * clock clock_id.  If rmt is non-NULL, the unslept remainder is stored
 * there.  Returns 0 on completion, EINTR if interrupted by a signal.
 */
int
nanosleep1(struct lwp *l, clockid_t clock_id, int flags, struct timespec *rqt,
    struct timespec *rmt)
{
	struct timespec rmtstart;
	int error, timo;

	/*
	 * Convert the request to a tick count; rmtstart receives the start
	 * time (it is used below as the origin for the elapsed interval).
	 */
	if ((error = ts2timo(clock_id, flags, rqt, &timo, &rmtstart)) != 0) {
		if (error == ETIMEDOUT) {
			/* Deadline already in the past: succeed, zero left. */
			error = 0;
			if (rmt != NULL)
				rmt->tv_sec = rmt->tv_nsec = 0;
		}
		return error;
	}

	/*
	 * Avoid inadvertently sleeping forever
	 */
	if (timo == 0)
		timo = 1;
again:
	error = kpause("nanoslp", true, timo, NULL);
	if (rmt != NULL || error == 0) {
		struct timespec rmtend;
		struct timespec t0;
		struct timespec *t;

		(void)clock_gettime1(clock_id, &rmtend);
		/* Compute the remainder into rmt, or a scratch ts. */
		t = (rmt != NULL) ? rmt : &t0;
		if (flags & TIMER_ABSTIME) {
			timespecsub(rqt, &rmtend, t);
		} else {
			/* remainder = requested - (now - start) */
			timespecsub(&rmtend, &rmtstart, t);
			timespecsub(rqt, t, t);
		}
		if (t->tv_sec < 0)
			timespecclear(t);
		if (error == 0) {
			/* Woke early without a signal: sleep the rest. */
			timo = tstohz(t);
			if (timo > 0)
				goto again;
		}
	}

	if (error == ERESTART)
		error = EINTR;
	if (error == EWOULDBLOCK)
		error = 0;

	return error;
}
/*
 * ACPI notify callback for a control-method battery (older variant).
 * Invalidates the cached status/info timestamps on change notifications
 * and, for info changes, schedules a refresh from task context.
 */
static void
acpi_cmbat_notify_handler(ACPI_HANDLE h, UINT32 notify, void *context)
{
	device_t battdev = (device_t)context;
	struct acpi_cmbat_softc *softc = device_get_softc(battdev);

	if (softc == NULL)
		return;

	if (notify == ACPI_BATTERY_BST_CHANGE) {
		/* Invalidate cached status; next read refetches _BST. */
		timespecclear(&softc->bst_lastupdated);
	} else if (notify == ACPI_BATTERY_BIF_CHANGE) {
		/* Invalidate cached info and refresh it from task context. */
		timespecclear(&softc->bif_lastupdated);
		AcpiOsQueueForExecution(OSD_PRIORITY_LO, acpi_cmbat_get_bif,
		    battdev);
	}
}
/*
 * execve(2): replace the calling process's image with a new program.
 * Copies in argv/envp, checks and maps the executable via the exec
 * package, rebuilds the address space and stack, handles set[ug]id
 * credential changes, and finally sets up registers for the new image.
 * Error paths before the commit point unwind to `bad'/`freehdr'; once
 * the old address space is destroyed, failure aborts the process
 * (`exec_abort'/`free_pack_abort').
 */
/* ARGSUSED */
int
sys_execve(struct proc *p, void *v, register_t *retval)
{
	struct sys_execve_args /* {
		syscallarg(const char *) path;
		syscallarg(char *const *) argp;
		syscallarg(char *const *) envp;
	} */ *uap = v;
	int error;
	struct exec_package pack;
	struct nameidata nid;
	struct vattr attr;
	struct ucred *cred = p->p_ucred;
	char *argp;
	char * const *cpp, *dp, *sp;
#ifdef KTRACE
	char *env_start;
#endif
	struct process *pr = p->p_p;
	long argc, envc;
	size_t len, sgap;
#ifdef MACHINE_STACK_GROWS_UP
	size_t slen;
#endif
	char *stack;
	struct ps_strings arginfo;
	struct vmspace *vm = pr->ps_vmspace;
	char **tmpfap;
	extern struct emul emul_native;
#if NSYSTRACE > 0
	int wassugid = ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC);
	size_t pathbuflen;
#endif
	char *pathbuf = NULL;
	struct vnode *otvp;

	/* get other threads to stop */
	if ((error = single_thread_set(p, SINGLE_UNWIND, 1)))
		return (error);

	/*
	 * Cheap solution to complicated problems.
	 * Mark this process as "leave me alone, I'm execing".
	 */
	atomic_setbits_int(&pr->ps_flags, PS_INEXEC);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE)) {
		systrace_execve0(p);
		pathbuf = pool_get(&namei_pool, PR_WAITOK);
		error = copyinstr(SCARG(uap, path), pathbuf, MAXPATHLEN,
		    &pathbuflen);
		if (error != 0)
			goto clrflag;
	}
#endif
	/* Systrace may have copied the path in already (pathbuf != NULL). */
	if (pathbuf != NULL) {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_SYSSPACE, pathbuf, p);
	} else {
		NDINIT(&nid, LOOKUP, NOFOLLOW, UIO_USERSPACE,
		    SCARG(uap, path), p);
	}

	/*
	 * initialize the fields of the exec package.
	 */
	if (pathbuf != NULL)
		pack.ep_name = pathbuf;
	else
		pack.ep_name = (char *)SCARG(uap, path);
	pack.ep_hdr = malloc(exec_maxhdrsz, M_EXEC, M_WAITOK);
	pack.ep_hdrlen = exec_maxhdrsz;
	pack.ep_hdrvalid = 0;
	pack.ep_ndp = &nid;
	pack.ep_interp = NULL;
	pack.ep_emul_arg = NULL;
	VMCMDSET_INIT(&pack.ep_vmcmds);
	pack.ep_vap = &attr;
	pack.ep_emul = &emul_native;
	pack.ep_flags = 0;

	/* see if we can run it. */
	if ((error = check_exec(p, &pack)) != 0) {
		goto freehdr;
	}

	/* XXX -- THE FOLLOWING SECTION NEEDS MAJOR CLEANUP */

	/* allocate an argument buffer */
	argp = km_alloc(NCARGS, &kv_exec, &kp_pageable, &kd_waitok);
#ifdef DIAGNOSTIC
	if (argp == NULL)
		panic("execve: argp == NULL");
#endif
	dp = argp;
	argc = 0;

	/* copy the fake args list, if there's one, freeing it as we go */
	if (pack.ep_flags & EXEC_HASARGL) {
		tmpfap = pack.ep_fa;
		while (*tmpfap != NULL) {
			char *cp;

			cp = *tmpfap;
			while (*cp)
				*dp++ = *cp++;
			*dp++ = '\0';

			free(*tmpfap, M_EXEC, 0);
			tmpfap++;
			argc++;
		}
		free(pack.ep_fa, M_EXEC, 0);
		pack.ep_flags &= ~EXEC_HASARGL;
	}

	/* Now get argv & environment */
	if (!(cpp = SCARG(uap, argp))) {
		error = EFAULT;
		goto bad;
	}
	if (pack.ep_flags & EXEC_SKIPARG)
		cpp++;

	/* Copy in each argv string, packed back-to-back into argp. */
	while (1) {
		len = argp + ARG_MAX - dp;
		if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
			goto bad;
		if (!sp)
			break;
		if ((error = copyinstr(sp, dp, len, &len)) != 0) {
			if (error == ENAMETOOLONG)
				error = E2BIG;
			goto bad;
		}
		dp += len;
		cpp++;
		argc++;
	}

	/* must have at least one argument */
	if (argc == 0) {
		error = EINVAL;
		goto bad;
	}

#ifdef KTRACE
	if (KTRPOINT(p, KTR_EXECARGS))
		ktrexec(p, KTR_EXECARGS, argp, dp - argp);
#endif

	envc = 0;
	/* environment does not need to be there */
	if ((cpp = SCARG(uap, envp)) != NULL ) {
#ifdef KTRACE
		env_start = dp;
#endif
		while (1) {
			len = argp + ARG_MAX - dp;
			if ((error = copyin(cpp, &sp, sizeof(sp))) != 0)
				goto bad;
			if (!sp)
				break;
			if ((error = copyinstr(sp, dp, len, &len)) != 0) {
				if (error == ENAMETOOLONG)
					error = E2BIG;
				goto bad;
			}
			dp += len;
			cpp++;
			envc++;
		}

#ifdef KTRACE
		if (KTRPOINT(p, KTR_EXECENV))
			ktrexec(p, KTR_EXECENV, env_start, dp - env_start);
#endif
	}

	/* Round the string area up to stack alignment. */
	dp = (char *)(((long)dp + _STACKALIGNBYTES) & ~_STACKALIGNBYTES);

	sgap = STACKGAPLEN;

	/*
	 * If we have enabled random stackgap, the stack itself has already
	 * been moved from a random location, but is still aligned to a page
	 * boundary.  Provide the lower bits of random placement now.
	 */
	if (stackgap_random != 0) {
		sgap += arc4random() & PAGE_MASK;
		sgap = (sgap + _STACKALIGNBYTES) & ~_STACKALIGNBYTES;
	}

	/* Now check if args & environ fit into new stack */
	len = ((argc + envc + 2 + pack.ep_emul->e_arglen) * sizeof(char *) +
	    sizeof(long) + dp + sgap + sizeof(struct ps_strings)) - argp;

	len = (len + _STACKALIGNBYTES) &~ _STACKALIGNBYTES;

	if (len > pack.ep_ssize) { /* in effect, compare to initial limit */
		error = ENOMEM;
		goto bad;
	}

	/* adjust "active stack depth" for process VSZ */
	pack.ep_ssize = len;	/* maybe should go elsewhere, but... */

	/*
	 * we're committed: any further errors will kill the process, so
	 * kill the other threads now.
	 */
	single_thread_set(p, SINGLE_EXIT, 0);

	/*
	 * Prepare vmspace for remapping. Note that uvmspace_exec can replace
	 * pr_vmspace!
	 */
	uvmspace_exec(p, VM_MIN_ADDRESS, VM_MAXUSER_ADDRESS);

	vm = pr->ps_vmspace;
	/* Now map address space */
	vm->vm_taddr = (char *)trunc_page(pack.ep_taddr);
	vm->vm_tsize = atop(round_page(pack.ep_taddr + pack.ep_tsize) -
	    trunc_page(pack.ep_taddr));
	vm->vm_daddr = (char *)trunc_page(pack.ep_daddr);
	vm->vm_dsize = atop(round_page(pack.ep_daddr + pack.ep_dsize) -
	    trunc_page(pack.ep_daddr));
	vm->vm_dused = 0;
	vm->vm_ssize = atop(round_page(pack.ep_ssize));
	vm->vm_maxsaddr = (char *)pack.ep_maxsaddr;
	vm->vm_minsaddr = (char *)pack.ep_minsaddr;

	/* create the new process's VM space by running the vmcmds */
#ifdef DIAGNOSTIC
	if (pack.ep_vmcmds.evs_used == 0)
		panic("execve: no vmcmds");
#endif
	error = exec_process_vmcmds(p, &pack);

	/* if an error happened, deallocate and punt */
	if (error)
		goto exec_abort;

	/* old "stackgap" is gone now */
	pr->ps_stackgap = 0;

#ifdef MACHINE_STACK_GROWS_UP
	pr->ps_strings = (vaddr_t)vm->vm_maxsaddr + sgap;
	if (uvm_map_protect(&vm->vm_map, (vaddr_t)vm->vm_maxsaddr,
	    trunc_page(pr->ps_strings), PROT_NONE, TRUE))
		goto exec_abort;
#else
	pr->ps_strings = (vaddr_t)vm->vm_minsaddr - sizeof(arginfo) - sgap;
	if (uvm_map_protect(&vm->vm_map,
	    round_page(pr->ps_strings + sizeof(arginfo)),
	    (vaddr_t)vm->vm_minsaddr, PROT_NONE, TRUE))
		goto exec_abort;
#endif

	/* remember information about the process */
	arginfo.ps_nargvstr = argc;
	arginfo.ps_nenvstr = envc;

#ifdef MACHINE_STACK_GROWS_UP
	stack = (char *)vm->vm_maxsaddr + sizeof(arginfo) + sgap;
	slen = len - sizeof(arginfo) - sgap;
#else
	stack = (char *)(vm->vm_minsaddr - len);
#endif
	/* Now copy argc, args & environ to new stack */
	if (!(*pack.ep_emul->e_copyargs)(&pack, &arginfo, stack, argp))
		goto exec_abort;

	/* copy out the process's ps_strings structure */
	if (copyout(&arginfo, (char *)pr->ps_strings, sizeof(arginfo)))
		goto exec_abort;

	stopprofclock(pr);	/* stop profiling */
	fdcloseexec(p);		/* handle close on exec */
	execsigs(p);		/* reset caught signals */
	TCB_SET(p, NULL);	/* reset the TCB address */
	pr->ps_kbind_addr = 0;	/* reset the kbind bits */
	pr->ps_kbind_cookie = 0;

	/* set command name & other accounting info */
	memset(p->p_comm, 0, sizeof(p->p_comm));
	len = min(nid.ni_cnd.cn_namelen, MAXCOMLEN);
	memcpy(p->p_comm, nid.ni_cnd.cn_nameptr, len);
	pr->ps_acflag &= ~AFORK;

	/* record proc's vnode, for use by sysctl */
	otvp = pr->ps_textvp;
	vref(pack.ep_vp);
	pr->ps_textvp = pack.ep_vp;
	if (otvp)
		vrele(otvp);

	atomic_setbits_int(&pr->ps_flags, PS_EXEC);
	if (pr->ps_flags & PS_PPWAIT) {
		/* Release a vfork(2) parent that was waiting on us. */
		atomic_clearbits_int(&pr->ps_flags, PS_PPWAIT);
		atomic_clearbits_int(&pr->ps_pptr->ps_flags, PS_ISPWAIT);
		wakeup(pr->ps_pptr);
	}

	/*
	 * If process does execve() while it has a mismatched real,
	 * effective, or saved uid/gid, we set PS_SUGIDEXEC.
	 */
	if (cred->cr_uid != cred->cr_ruid ||
	    cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_rgid ||
	    cred->cr_gid != cred->cr_svgid)
		atomic_setbits_int(&pr->ps_flags, PS_SUGIDEXEC);
	else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGIDEXEC);

	atomic_clearbits_int(&pr->ps_flags, PS_TAMED);
	tame_dropwpaths(pr);

	/*
	 * deal with set[ug]id.
	 * MNT_NOEXEC has already been used to disable s[ug]id.
	 */
	if ((attr.va_mode & (VSUID | VSGID)) && proc_cansugid(p)) {
		int i;

		atomic_setbits_int(&pr->ps_flags, PS_SUGID|PS_SUGIDEXEC);

#ifdef KTRACE
		/*
		 * If process is being ktraced, turn off - unless
		 * root set it.
		 */
		if (pr->ps_tracevp && !(pr->ps_traceflag & KTRFAC_ROOT))
			ktrcleartrace(pr);
#endif
		p->p_ucred = cred = crcopy(cred);
		if (attr.va_mode & VSUID)
			cred->cr_uid = attr.va_uid;
		if (attr.va_mode & VSGID)
			cred->cr_gid = attr.va_gid;

		/*
		 * For set[ug]id processes, a few caveats apply to
		 * stdin, stdout, and stderr.
		 */
		error = 0;
		fdplock(p->p_fd);
		for (i = 0; i < 3; i++) {
			struct file *fp = NULL;

			/*
			 * NOTE - This will never return NULL because of
			 * immature fds. The file descriptor table is not
			 * shared because we're suid.
			 */
			fp = fd_getfile(p->p_fd, i);

			/*
			 * Ensure that stdin, stdout, and stderr are already
			 * allocated.  We do not want userland to accidentally
			 * allocate descriptors in this range which has implied
			 * meaning to libc.
			 */
			if (fp == NULL) {
				/* Plug the hole with the null device. */
				short flags = FREAD | (i == 0 ? 0 : FWRITE);
				struct vnode *vp;
				int indx;

				if ((error = falloc(p, &fp, &indx)) != 0)
					break;
#ifdef DIAGNOSTIC
				if (indx != i)
					panic("sys_execve: falloc indx != i");
#endif
				if ((error = cdevvp(getnulldev(), &vp)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					break;
				}
				if ((error = VOP_OPEN(vp, flags, cred, p)) != 0) {
					fdremove(p->p_fd, indx);
					closef(fp, p);
					vrele(vp);
					break;
				}
				if (flags & FWRITE)
					vp->v_writecount++;
				fp->f_flag = flags;
				fp->f_type = DTYPE_VNODE;
				fp->f_ops = &vnops;
				fp->f_data = (caddr_t)vp;
				FILE_SET_MATURE(fp, p);
			}
		}
		fdpunlock(p->p_fd);
		if (error)
			goto exec_abort;
	} else
		atomic_clearbits_int(&pr->ps_flags, PS_SUGID);

	/*
	 * Reset the saved ugids and update the process's copy of the
	 * creds if the creds have been changed
	 */
	if (cred->cr_uid != cred->cr_svuid ||
	    cred->cr_gid != cred->cr_svgid) {
		/* make sure we have unshared ucreds */
		p->p_ucred = cred = crcopy(cred);
		cred->cr_svuid = cred->cr_uid;
		cred->cr_svgid = cred->cr_gid;
	}

	/* Propagate the (possibly new) thread creds to the process. */
	if (pr->ps_ucred != cred) {
		struct ucred *ocred;

		ocred = pr->ps_ucred;
		crhold(cred);
		pr->ps_ucred = cred;
		crfree(ocred);
	}

	/* For suid execs, clear any pending interval timers. */
	if (pr->ps_flags & PS_SUGIDEXEC) {
		int i, s = splclock();

		timeout_del(&pr->ps_realit_to);
		for (i = 0; i < nitems(pr->ps_timer); i++) {
			timerclear(&pr->ps_timer[i].it_interval);
			timerclear(&pr->ps_timer[i].it_value);
		}
		splx(s);
	}

	/* reset CPU time usage for the thread, but not the process */
	timespecclear(&p->p_tu.tu_runtime);
	p->p_tu.tu_uticks = p->p_tu.tu_sticks = p->p_tu.tu_iticks = 0;

	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);

	/*
	 * notify others that we exec'd
	 */
	KNOTE(&pr->ps_klist, NOTE_EXEC);

	/* setup new registers and do misc. setup. */
	if (pack.ep_emul->e_fixup != NULL) {
		if ((*pack.ep_emul->e_fixup)(p, &pack) != 0)
			goto free_pack_abort;
	}
#ifdef MACHINE_STACK_GROWS_UP
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack + slen, retval);
#else
	(*pack.ep_emul->e_setregs)(p, &pack, (u_long)stack, retval);
#endif

	/* map the process's signal trampoline code */
	if (exec_sigcode_map(pr, pack.ep_emul))
		goto free_pack_abort;

#ifdef __HAVE_EXEC_MD_MAP
	/* perform md specific mappings that process might need */
	if (exec_md_map(p, &pack))
		goto free_pack_abort;
#endif

	if (pr->ps_flags & PS_TRACED)
		psignal(p, SIGTRAP);

	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);

	/*
	 * Call emulation specific exec hook. This can setup per-process
	 * p->p_emuldata or do any other per-process stuff an emulation needs.
	 *
	 * If we are executing process of different emulation than the
	 * original forked process, call e_proc_exit() of the old emulation
	 * first, then e_proc_exec() of new emulation. If the emulation is
	 * same, the exec hook code should deallocate any old emulation
	 * resources held previously by this process.
	 */
	if (pr->ps_emul && pr->ps_emul->e_proc_exit &&
	    pr->ps_emul != pack.ep_emul)
		(*pr->ps_emul->e_proc_exit)(p);

	p->p_descfd = 255;
	if ((pack.ep_flags & EXEC_HASFD) && pack.ep_fd < 255)
		p->p_descfd = pack.ep_fd;

	/*
	 * Call exec hook. Emulation code may NOT store reference to anything
	 * from &pack.
	 */
	if (pack.ep_emul->e_proc_exec)
		(*pack.ep_emul->e_proc_exec)(p, &pack);

#if defined(KTRACE) && defined(COMPAT_LINUX)
	/* update ps_emul, but don't ktrace it if native-execing-native */
	if (pr->ps_emul != pack.ep_emul || pack.ep_emul != &emul_native) {
		pr->ps_emul = pack.ep_emul;

		if (KTRPOINT(p, KTR_EMUL))
			ktremul(p);
	}
#else
	/* update ps_emul, the old value is no longer needed */
	pr->ps_emul = pack.ep_emul;
#endif

	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

#if NSYSTRACE > 0
	if (ISSET(p->p_flag, P_SYSTRACE) &&
	    wassugid && !ISSET(pr->ps_flags, PS_SUGID | PS_SUGIDEXEC))
		systrace_execve1(pathbuf, p);
#endif

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (0);

bad:
	/* free the vmspace-creation commands, and release their references */
	kill_vmcmds(&pack.ep_vmcmds);
	/* kill any opened file descriptor, if necessary */
	if (pack.ep_flags & EXEC_HASFD) {
		pack.ep_flags &= ~EXEC_HASFD;
		fdplock(p->p_fd);
		(void) fdrelease(p, pack.ep_fd);
		fdpunlock(p->p_fd);
	}
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	/* close and put the exec'd file */
	vn_close(pack.ep_vp, FREAD, cred, p);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

freehdr:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
#if NSYSTRACE > 0
clrflag:
#endif
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	single_thread_clear(p, P_SUSPSIG);

	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);

	return (error);

exec_abort:
	/*
	 * the old process doesn't exist anymore.  exit gracefully.
	 * get rid of the (new) address space we have created, if any, get rid
	 * of our namei data and vnode, and exit noting failure
	 */
	uvm_deallocate(&vm->vm_map, VM_MIN_ADDRESS,
	    VM_MAXUSER_ADDRESS - VM_MIN_ADDRESS);
	if (pack.ep_interp != NULL)
		pool_put(&namei_pool, pack.ep_interp);
	if (pack.ep_emul_arg != NULL)
		free(pack.ep_emul_arg, M_TEMP, pack.ep_emul_argsize);
	pool_put(&namei_pool, nid.ni_cnd.cn_pnbuf);
	vn_close(pack.ep_vp, FREAD, cred, p);
	km_free(argp, NCARGS, &kv_exec, &kp_pageable);

free_pack_abort:
	free(pack.ep_hdr, M_EXEC, pack.ep_hdrlen);
	if (pathbuf != NULL)
		pool_put(&namei_pool, pathbuf);
	exit1(p, W_EXITCODE(0, SIGABRT), EXIT_NORMAL);

	/* NOTREACHED */
	atomic_clearbits_int(&pr->ps_flags, PS_INEXEC);
	return (0);
}
/*
 * Common backend for timer_create(2): allocate a POSIX interval timer on
 * clock `id' for the calling process and copy the new timer id out through
 * `tid'.  `fetch_event' is the copyin routine for the optional sigevent,
 * letting compat code supply its own layout.
 */
int
timer_create1(timer_t *tid, clockid_t id, struct sigevent *evp,
    copyin_t fetch_event, struct lwp *l)
{
	int error;
	timer_t timerid;
	struct ptimers *pts;
	struct ptimer *pt;
	struct proc *p;

	p = l->l_proc;

	if ((u_int)id > CLOCK_MONOTONIC)
		return (EINVAL);

	if ((pts = p->p_timers) == NULL)
		pts = timers_alloc(p);

	/* Allocate the timer before taking the spin lock (may sleep). */
	pt = pool_get(&ptimer_pool, PR_WAITOK);
	if (evp != NULL) {
		/* Fetch and validate the caller-supplied sigevent. */
		if (((error =
		    (*fetch_event)(evp, &pt->pt_ev, sizeof(pt->pt_ev))) != 0) ||
		    ((pt->pt_ev.sigev_notify < SIGEV_NONE) ||
			(pt->pt_ev.sigev_notify > SIGEV_SA)) ||
			(pt->pt_ev.sigev_notify == SIGEV_SIGNAL &&
			 (pt->pt_ev.sigev_signo <= 0 ||
			  pt->pt_ev.sigev_signo >= NSIG))) {
			pool_put(&ptimer_pool, pt);
			return (error ? error : EINVAL);
		}
	}

	/* Find a free timer slot, skipping those reserved for setitimer(). */
	mutex_spin_enter(&timer_lock);
	for (timerid = TIMER_MIN; timerid < TIMER_MAX; timerid++)
		if (pts->pts_timers[timerid] == NULL)
			break;
	if (timerid == TIMER_MAX) {
		mutex_spin_exit(&timer_lock);
		pool_put(&ptimer_pool, pt);
		return EAGAIN;
	}
	if (evp == NULL) {
		/* No sigevent supplied: default to the clock's usual signal. */
		pt->pt_ev.sigev_notify = SIGEV_SIGNAL;
		switch (id) {
		case CLOCK_REALTIME:
		case CLOCK_MONOTONIC:
			pt->pt_ev.sigev_signo = SIGALRM;
			break;
		case CLOCK_VIRTUAL:
			pt->pt_ev.sigev_signo = SIGVTALRM;
			break;
		case CLOCK_PROF:
			pt->pt_ev.sigev_signo = SIGPROF;
			break;
		}
		pt->pt_ev.sigev_value.sival_int = timerid;
	}
	pt->pt_info.ksi_signo = pt->pt_ev.sigev_signo;
	pt->pt_info.ksi_errno = 0;
	pt->pt_info.ksi_code = 0;
	pt->pt_info.ksi_pid = p->p_pid;
	pt->pt_info.ksi_uid = kauth_cred_getuid(l->l_cred);
	pt->pt_info.ksi_value = pt->pt_ev.sigev_value;
	pt->pt_type = id;
	pt->pt_proc = p;
	pt->pt_overruns = 0;
	pt->pt_poverruns = 0;
	pt->pt_entry = timerid;
	pt->pt_queued = false;
	timespecclear(&pt->pt_time.it_value);
	/* Real-time clocks use a callout; virtual clocks tick with the proc. */
	if (!CLOCK_VIRTUAL_P(id))
		callout_init(&pt->pt_ch, CALLOUT_MPSAFE);
	else
		pt->pt_active = 0;

	pts->pts_timers[timerid] = pt;
	mutex_spin_exit(&timer_lock);

	return copyout(&timerid, tid, sizeof(timerid));
}
/*
 * rcmd(3) backend with explicit address family: connect from a reserved
 * port to the rshd-style service on *ahost, optionally set up the
 * auxiliary stderr channel (*fd2p), send the user names and command, and
 * return the connected socket (or -1 on failure).  SIGURG is blocked for
 * the duration of the setup.  Falls back to rcmdsh(3) when $RSH is set
 * or the caller is not root.
 */
int
rcmd_af(char **ahost, int porta, const char *locuser, const char *remuser,
    const char *cmd, int *fd2p, int af)
{
	static char hbuf[HOST_NAME_MAX+1];
	char pbuf[NI_MAXSERV];
	struct addrinfo hints, *res, *r;
	int error;
	struct sockaddr_storage from;
	sigset_t oldmask, mask;
	pid_t pid;
	int s, lport;
	struct timespec timo;
	char c, *p;
	int refused;
	in_port_t rport = porta;
	int numread;

	/* call rcmdsh() with specified remote shell if appropriate. */
	if (!issetugid() && (p = getenv("RSH")) && *p) {
		struct servent *sp = getservbyname("shell", "tcp");

		if (sp && sp->s_port == rport)
			return (rcmdsh(ahost, rport, locuser, remuser,
			    cmd, p));
	}

	/* use rsh(1) if non-root and remote port is shell. */
	if (geteuid()) {
		struct servent *sp = getservbyname("shell", "tcp");

		if (sp && sp->s_port == rport)
			return (rcmdsh(ahost, rport, locuser, remuser,
			    cmd, NULL));
	}

	pid = getpid();
	snprintf(pbuf, sizeof(pbuf), "%u", ntohs(rport));
	memset(&hints, 0, sizeof(hints));
	hints.ai_family = af;
	hints.ai_socktype = SOCK_STREAM;
	hints.ai_flags = AI_CANONNAME;
	error = getaddrinfo(*ahost, pbuf, &hints, &res);
	if (error) {
		(void)fprintf(stderr, "rcmd: %s: %s\n", *ahost,
		    gai_strerror(error));
		return (-1);
	}
	/* Hand the canonical host name back to the caller via *ahost. */
	if (res->ai_canonname) {
		strlcpy(hbuf, res->ai_canonname, sizeof(hbuf));
		*ahost = hbuf;
	} else
		; /*XXX*/

	r = res;
	refused = 0;
	timespecclear(&timo);
	sigemptyset(&mask);
	sigaddset(&mask, SIGURG);
	sigprocmask(SIG_BLOCK, &mask, &oldmask);
	/*
	 * Connect loop: try each resolved address from a reserved port.
	 * On ECONNREFUSED across all addresses, back off (timo doubles,
	 * up to 16s) and start over from the first address.
	 */
	for (timo.tv_sec = 1, lport = IPPORT_RESERVED - 1;;) {
		s = rresvport_af(&lport, r->ai_family);
		if (s < 0) {
			if (errno == EAGAIN)
				(void)fprintf(stderr,
				    "rcmd: socket: All ports in use\n");
			else
				(void)fprintf(stderr, "rcmd: socket: %s\n",
				    strerror(errno));
			if (r->ai_next) {
				r = r->ai_next;
				continue;
			} else {
				sigprocmask(SIG_SETMASK, &oldmask, NULL);
				freeaddrinfo(res);
				return (-1);
			}
		}
		fcntl(s, F_SETOWN, pid);
		if (connect(s, r->ai_addr, r->ai_addrlen) >= 0)
			break;
		(void)close(s);
		if (errno == EADDRINUSE) {
			/* Reserved port taken: try the next one down. */
			lport--;
			continue;
		}
		if (errno == ECONNREFUSED)
			refused++;
		if (r->ai_next) {
			int oerrno = errno;
			char hbuf[NI_MAXHOST];
			const int niflags = NI_NUMERICHOST;

			hbuf[0] = '\0';
			if (getnameinfo(r->ai_addr, r->ai_addrlen,
			    hbuf, sizeof(hbuf), NULL, 0, niflags) != 0)
				strlcpy(hbuf, "(invalid)", sizeof hbuf);
			(void)fprintf(stderr, "connect to address %s: ", hbuf);
			errno = oerrno;
			perror(0);
			r = r->ai_next;
			hbuf[0] = '\0';
			if (getnameinfo(r->ai_addr, r->ai_addrlen,
			    hbuf, sizeof(hbuf), NULL, 0, niflags) != 0)
				strlcpy(hbuf, "(invalid)", sizeof hbuf);
			(void)fprintf(stderr, "Trying %s...\n", hbuf);
			continue;
		}
		if (refused && timo.tv_sec <= 16) {
			/* All addresses refused: sleep, then retry them all. */
			(void)nanosleep(&timo, NULL);
			timo.tv_sec *= 2;
			r = res;
			refused = 0;
			continue;
		}
		(void)fprintf(stderr, "%s: %s\n", res->ai_canonname,
		    strerror(errno));
		sigprocmask(SIG_SETMASK, &oldmask, NULL);
		freeaddrinfo(res);
		return (-1);
	}
	/* given "af" can be PF_UNSPEC, we need the real af for "s" */
	af = r->ai_family;
	freeaddrinfo(res);
	if (fd2p == 0) {
		/* No stderr channel wanted: send an empty port string. */
		write(s, "", 1);
		lport = 0;
	} else {
		/*
		 * Set up the auxiliary stderr channel: listen on a second
		 * reserved port, tell the server its number, and wait for
		 * the server to connect back.
		 */
		struct pollfd pfd[2];
		char num[8];
		int s2 = rresvport_af(&lport, af), s3;
		socklen_t len = sizeof(from);

		if (s2 < 0)
			goto bad;
		listen(s2, 1);
		(void)snprintf(num, sizeof(num), "%d", lport);
		if (write(s, num, strlen(num)+1) != strlen(num)+1) {
			(void)fprintf(stderr,
			    "rcmd: write (setting up stderr): %s\n",
			    strerror(errno));
			(void)close(s2);
			goto bad;
		}
again:
		pfd[0].fd = s;
		pfd[0].events = POLLIN;
		pfd[1].fd = s2;
		pfd[1].events = POLLIN;
		errno = 0;
		/* Readable s (without s2) means the server gave up on us. */
		if (poll(pfd, 2, INFTIM) < 1 ||
		    (pfd[1].revents & (POLLIN|POLLHUP)) == 0) {
			if (errno != 0)
				(void)fprintf(stderr,
				    "rcmd: poll (setting up stderr): %s\n",
				    strerror(errno));
			else
				(void)fprintf(stderr,
				    "poll: protocol failure in circuit setup\n");
			(void)close(s2);
			goto bad;
		}
		s3 = accept(s2, (struct sockaddr *)&from, &len);
		if (s3 < 0) {
			(void)fprintf(stderr,
			    "rcmd: accept: %s\n", strerror(errno));
			lport = 0;
			close(s2);
			goto bad;
		}
		/*
		 * XXX careful for ftp bounce attacks. If discovered, shut them
		 * down and check for the real auxiliary channel to connect.
		 */
		switch (from.ss_family) {
		case AF_INET:
		case AF_INET6:
			/* Source port 20 (ftp-data) is a likely bounce. */
			if (getnameinfo((struct sockaddr *)&from, len,
			    NULL, 0, num, sizeof(num), NI_NUMERICSERV) == 0 &&
			    atoi(num) != 20) {
				break;
			}
			close(s3);
			goto again;
		default:
			break;
		}
		(void)close(s2);

		*fd2p = s3;
		switch (from.ss_family) {
		case AF_INET:
		case AF_INET6:
			/* The server must connect back from a reserved port. */
			if (getnameinfo((struct sockaddr *)&from, len,
			    NULL, 0, num, sizeof(num), NI_NUMERICSERV) != 0 ||
			    (atoi(num) >= IPPORT_RESERVED ||
			     atoi(num) < IPPORT_RESERVED / 2)) {
				(void)fprintf(stderr,
				    "socket: protocol failure in circuit setup.\n");
				goto bad2;
			}
			break;
		default:
			break;
		}
	}
	/* Send NUL-terminated local user, remote user, and command. */
	(void)write(s, locuser, strlen(locuser)+1);
	(void)write(s, remuser, strlen(remuser)+1);
	(void)write(s, cmd, strlen(cmd)+1);
	/* A single zero byte from the server means success. */
	if ((numread = read(s, &c, 1)) != 1) {
		(void)fprintf(stderr, "rcmd: %s: %s\n", *ahost,
		    numread == -1 ? strerror(errno) : "Short read");
		goto bad2;
	}
	if (c != 0) {
		/* Nonzero: the server sent an error line — relay it. */
		while (read(s, &c, 1) == 1) {
			(void)write(STDERR_FILENO, &c, 1);
			if (c == '\n')
				break;
		}
		goto bad2;
	}
	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	return (s);
bad2:
	if (lport)
		(void)close(*fd2p);
bad:
	(void)close(s);
	sigprocmask(SIG_SETMASK, &oldmask, NULL);
	return (-1);
}