static int mmioctl(struct dev_ioctl_args *ap) { cdev_t dev = ap->a_head.a_dev; int error; get_mplock(); switch (minor(dev)) { case 0: error = mem_ioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag, ap->a_cred); break; case 3: case 4: error = random_ioctl(dev, ap->a_cmd, ap->a_data, ap->a_fflag, ap->a_cred); break; default: error = ENODEV; break; } rel_mplock(); return (error); }
/*
 * Process Interrupt Queue
 *
 * Processes entries on the ATM interrupt queue.  This queue is used by
 * device interface drivers in order to schedule events from the driver's
 * lower (interrupt) half to the driver's stack services.
 *
 * The interrupt routines must store the stack processing function to call
 * and a token (typically a driver/stack control block) at the front of the
 * queued buffer.  We assume that the function pointer and token values are
 * both contained (and properly aligned) in the first buffer of the chain.
 *
 * Arguments:
 *	msg	netmsg embedded in the queued mbuf (not replied to)
 *
 * Returns:
 *	none
 *
 */
static void
atm_intr(netmsg_t msg)
{
	struct mbuf *m = msg->packet.nm_packet;
	caddr_t cp;
	atm_intr_func_t func;
	void *token;

	/*
	 * Get function to call and token value from the front of the
	 * first buffer, then strip them off the data area.
	 */
	get_mplock();
	KB_DATASTART(m, cp, caddr_t);
	func = *(atm_intr_func_t *)cp;
	cp += sizeof(func);
	token = *(void **)cp;
	/* remove the function/token header from the buffer's length */
	KB_HEADADJ(m, -(sizeof(func) + sizeof(token)));
	if (KB_LEN(m) == 0) {
		/*
		 * First buffer held only the header -- drop it and pass
		 * the remainder of the chain (may be NULL) to the handler.
		 */
		KBuffer *m1;
		KB_UNLINKHEAD(m, m1);
		m = m1;
	}

	/*
	 * Call processing function
	 */
	(*func)(token, m);

	/*
	 * Drain any deferred calls
	 */
	STACK_DRAIN();
	rel_mplock();
	/* msg was embedded in the mbuf, do not reply! */
}
/*
 * Acquire a SIM lock.  The distinguished &sim_mplock pseudo-lock maps to
 * the Giant MP lock; any other lock is taken exclusively via lockmgr.
 */
void
cam_sim_lock(sim_lock *lock)
{
    if (lock != &sim_mplock) {
        lockmgr(lock, LK_EXCLUSIVE);
        return;
    }
    get_mplock();
}
/*
 * Netisr entry point for inbound MPLS packets.  Hands the packet to
 * mpls_input() under the MP lock.  The netmsg is embedded in the mbuf
 * itself, so no reply is ever sent.
 */
static void
mpls_input_handler(netmsg_t msg)
{
    struct mbuf *pkt = msg->packet.nm_packet;

    get_mplock();
    mpls_input(pkt);
    rel_mplock();
    /* do not reply, msg embedded in mbuf */
}
/* * MPALMOSTSAFE */ int sys_linux_sysctl(struct linux_sysctl_args *args) { struct l___sysctl_args la; l_int *mib; int error, i; error = copyin((caddr_t)args->args, &la, sizeof(la)); if (error) return (error); if (la.nlen <= 0 || la.nlen > LINUX_CTL_MAXNAME) return (ENOTDIR); mib = kmalloc(la.nlen * sizeof(l_int), M_TEMP, M_WAITOK); error = copyin(la.name, mib, la.nlen * sizeof(l_int)); if (error) { kfree(mib, M_TEMP); return (error); } get_mplock(); switch (mib[0]) { case LINUX_CTL_KERN: if (la.nlen < 2) { error = ENOTDIR; break; } switch (mib[1]) { case LINUX_KERN_VERSION: error = handle_string(&la, version); break; default: error = ENOTDIR; break; } break; default: error = ENOTDIR; break; } rel_mplock(); if (error == ENOTDIR && mib) { kprintf("linux: sysctl: unhandled name="); for (i = 0; i < la.nlen; i++) kprintf("%c%d", (i) ? ',' : '{', mib[i]); kprintf("}\n"); } if (mib) kfree(mib, M_TEMP); return (error); }
/*
 * Invoke a filesystem's vfs_uninit method for the given vfsconf.
 *
 * NOTE(review): the original tag said "MPSAFE", but the body explicitly
 * brackets the method call with get_mplock()/rel_mplock(), which is the
 * MPALMOSTSAFE pattern -- confirm which tag is intended.
 *
 * MPSAFE
 */
int
vfs_uninit(struct vfsconf *vfc, struct vfsconf *vfsp)
{
	int error;

	get_mplock();
	error = (vfc->vfc_vfsops->vfs_uninit)(vfsp);
	rel_mplock();
	return (error);
}
/*
 * Old-style ftruncate(2): truncate an open file to the given length.
 * Thin MP-locked wrapper around kern_ftruncate().
 *
 * MPALMOSTSAFE
 */
int
sys_oftruncate(struct oftruncate_args *uap)
{
    int rc;

    get_mplock();
    rc = kern_ftruncate(uap->fd, uap->length);
    rel_mplock();

    return (rc);
}
/*
 * Conditionally acquire a SIM lock.  Returns 1 if this call took the lock
 * (caller must later release it), 0 if the current thread already held it
 * exclusively.  The &sim_mplock pseudo-lock maps to the Giant MP lock and
 * is always acquired.
 */
int
cam_sim_cond_lock(sim_lock *lock)
{
    if (lock == &sim_mplock) {
        get_mplock();
        return (1);
    }
    if (lockstatus(lock, curthread) == LK_EXCLUSIVE)
        return (0);     /* already ours; nothing to undo */
    lockmgr(lock, LK_EXCLUSIVE);
    return (1);
}
/*
 * AppleTalk phase 2 input netisr.  Feeds the packet to ddp_input() under
 * the MP lock.  The netmsg lives inside the mbuf, so it is never replied.
 *
 * Could probably merge these two code segments a little better...
 */
void
at2intr(netmsg_t msg)
{
    struct mbuf *pkt = msg->packet.nm_packet;

    get_mplock();
    /* Phase 2 packet handling */
    ddp_input(pkt, pkt->m_pkthdr.rcvif, NULL, 2);
    rel_mplock();
    /* msg was embedded in the mbuf, do not reply! */
}
/* * MPALMOSTSAFE */ int sys_getdomainname(struct getdomainname_args *uap) { int domainnamelen; int error; get_mplock(); domainnamelen = strlen(domainname) + 1; if ((u_int)uap->len > domainnamelen + 1) uap->len = domainnamelen + 1; error = copyout(domainname, uap->domainname, uap->len); rel_mplock(); return (error); }
/*
 * The system call that results in a reboot.
 *
 * Requires PRIV_REBOOT; on success boot() performs the requested action
 * and, depending on uap->opt, may never return.
 *
 * MPALMOSTSAFE
 */
int
sys_reboot(struct reboot_args *uap)
{
    struct thread *td = curthread;
    int rc;

    rc = priv_check(td, PRIV_REBOOT);
    if (rc != 0)
        return (rc);

    get_mplock();
    boot(uap->opt);
    rel_mplock();
    return (0);
}
/*
 * Old-style truncate(2): look up the user-supplied path (following
 * symlinks) and truncate the file to the given length.
 *
 * MPALMOSTSAFE
 */
int
sys_otruncate(struct otruncate_args *uap)
{
    struct nlookupdata nd;
    int rc;

    get_mplock();
    rc = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
    if (rc == 0)
        rc = kern_truncate(&nd, uap->length);
    nlookup_done(&nd);      /* safe even if init failed */
    rel_mplock();

    return (rc);
}
/* * MPALMOSTSAFE */ int sys_clock_settime(struct clock_settime_args *uap) { struct timespec ats; int error; if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0) return (error); get_mplock(); error = kern_clock_settime(uap->clock_id, &ats); rel_mplock(); return (error); }
/*
 * Linux execve(2) emulation: translate the Linux path, copy in the
 * argument/environment vectors and execute the image via kern_execve().
 *
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	/* Translate the Linux-visible path; allocates 'path'. */
	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	/* exec_args must be zeroed so exec_free_args() below is always safe */
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return(error);
}
/*
 * Old-style creat(2): equivalent to open(path, O_WRONLY|O_CREAT|O_TRUNC,
 * mode).  The resulting descriptor is returned via sysmsg_iresult.
 *
 * MPALMOSTSAFE
 */
int
sys_ocreat(struct ocreat_args *uap)
{
    struct nlookupdata nd;
    int rc;

    get_mplock();
    rc = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
    if (rc == 0) {
        rc = kern_open(&nd, O_WRONLY | O_CREAT | O_TRUNC, uap->mode,
                       &uap->sysmsg_iresult);
    }
    rel_mplock();
    return (rc);
}
/*
 * The ACPI helper thread processes OSD execution callback messages.
 *
 * Loops forever on the thread's message port: each message is an
 * acpi_task whose function/context pair is executed under the MP lock,
 * after which the message is replied.
 *
 * Fix: the old rel_mplock() after the infinite loop was unreachable
 * dead code and has been removed.
 */
static void
acpi_task_thread(void *arg)
{
    ACPI_OSD_EXEC_CALLBACK func;
    struct acpi_task *at;

    get_mplock();
    for (;;) {
        at = (void *)lwkt_waitport(&curthread->td_msgport, 0);
        func = at->at_function;
        func(at->at_context);
        lwkt_replymsg(&at->at_msg, 0);
    }
    /* NOTREACHED */
}
static void ata_boot_attach(void) { struct ata_channel *ch; int ctlr; get_mplock(); /* kick of probe and attach on all channels */ for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { if ((ch = devclass_get_softc(ata_devclass, ctlr))) { ata_identify(ch->dev); } } rel_mplock(); }
/* * dfbsd12_fstat_args(int fd, struct dfbsd12_stat *sb) * * MPALMOSTSAFE */ int sys_dfbsd12_fstat(struct dfbsd12_fstat_args *uap) { struct dfbsd12_stat ost; struct stat st; int error; get_mplock(); error = kern_fstat(uap->fd, &st); rel_mplock(); if (error == 0) { cvtstat(&ost, &st); error = copyout(&ost, uap->sb, sizeof(ost)); } return (error); }
/*
 * Invoke the device's d_clone method.  The MP lock is taken only when
 * the ops table is not flagged MPSAFE (dev_needmplock()).
 */
int
dev_dclone(cdev_t dev)
{
    struct dev_clone_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_clone_desc;
    args.a_head.a_dev = dev;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_clone(&args);
    if (locked)
        rel_mplock();

    return (rc);
}
/* * MPALMOSTSAFE */ int sys_setdomainname(struct setdomainname_args *uap) { struct thread *td = curthread; int error, domainnamelen; if ((error = priv_check(td, PRIV_SETDOMAINNAME))) return (error); if ((u_int)uap->len > sizeof(domainname) - 1) return EINVAL; get_mplock(); domainnamelen = uap->len; error = copyin(uap->domainname, domainname, uap->len); domainname[domainnamelen] = 0; rel_mplock(); return (error); }
/*
 * Push a bio further down a device chain via the device's d_strategy
 * method.  Unlike dev_dstrategy(), the caller must have already set up
 * bio_track, and the bio must not yet be marked done.  The MP lock is
 * taken only for non-MPSAFE drivers.
 */
void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
    struct dev_strategy_args args;
    int locked = dev_needmplock(dev);

    args.a_head.a_desc = &dev_strategy_desc;
    args.a_head.a_dev = dev;
    args.a_bio = bio;

    KKASSERT(bio->bio_track != NULL);
    KKASSERT((bio->bio_flags & BIO_DONE) == 0);

    if (locked)
        get_mplock();
    (void)dev->si_ops->d_strategy(&args);
    if (locked)
        rel_mplock();
}
/* * MPALMOSTSAFE */ int sys_olstat(struct olstat_args *uap) { struct nlookupdata nd; struct stat st; int error; get_mplock(); error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0); if (error == 0) { error = kern_stat(&nd, &st); if (error == 0) error = compat_43_copyout_stat(&st, uap->ub); nlookup_done(&nd); } rel_mplock(); return (error); }
/*
 * Invoke the device's d_close method, passing the file flags and device
 * type through.  The MP lock is taken only for non-MPSAFE drivers.
 */
int
dev_dclose(cdev_t dev, int fflag, int devtype)
{
    struct dev_close_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_close_desc;
    args.a_head.a_dev = dev;
    args.a_fflag = fflag;
    args.a_devtype = devtype;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_close(&args);
    if (locked)
        rel_mplock();

    return (rc);
}
/************************************************************************
 *			GENERAL DEVICE API FUNCTIONS			*
 ************************************************************************
 *
 * The MPSAFEness of these depends on dev->si_ops->head.flags
 */

/*
 * Invoke the device's d_open method with the supplied open flags, device
 * type and credentials.  The MP lock is taken only for non-MPSAFE drivers.
 */
int
dev_dopen(cdev_t dev, int oflags, int devtype, struct ucred *cred)
{
    struct dev_open_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_open_desc;
    args.a_head.a_dev = dev;
    args.a_oflags = oflags;
    args.a_devtype = devtype;
    args.a_cred = cred;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_open(&args);
    if (locked)
        rel_mplock();

    return (rc);
}
/* * stat_args(char *path, struct dfbsd12_stat *ub) * * Get file status; this version follows links. * * MPALMOSTSAFE */ int sys_dfbsd12_stat(struct dfbsd12_stat_args *uap) { struct nlookupdata nd; struct dfbsd12_stat ost; struct stat st; int error; get_mplock(); error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW); if (error == 0) { error = kern_stat(&nd, &st); if (error == 0) { cvtstat(&ost, &st); error = copyout(&ost, uap->ub, sizeof(ost)); } } nlookup_done(&nd); rel_mplock(); return (error); }
/*
 * fhstat_args(struct fhandle *u_fhp, struct dfbsd12_stat *sb)
 *
 * Stat a file identified by an NFS-style file handle (legacy dfbsd12
 * stat layout).  The handle is resolved to a vnode via its filesystem's
 * FHTOVP method; ESTALE is returned when the filesystem is gone.
 *
 * MPALMOSTSAFE
 */
int
sys_dfbsd12_fhstat(struct dfbsd12_fhstat_args *uap)
{
	struct thread *td = curthread;
	struct dfbsd12_stat osb;
	struct stat sb;
	fhandle_t fh;
	struct mount *mp;
	struct vnode *vp;
	int error;

	/*
	 * Must be super user
	 */
	error = priv_check(td, PRIV_ROOT);
	if (error)
		return (error);

	error = copyin(uap->u_fhp, &fh, sizeof(fhandle_t));
	if (error)
		return (error);

	get_mplock();
	/* Resolve filesystem from the handle's fsid. */
	if ((mp = vfs_getvfs(&fh.fh_fsid)) == NULL) {
		error = ESTALE;
		goto done;
	}
	if ((error = VFS_FHTOVP(mp, NULL, &fh.fh_fid, &vp)))
		goto done;
	error = vn_stat(vp, &sb, td->td_ucred);
	vput(vp);	/* unlock + release the vnode from FHTOVP */
	if (error)
		goto done;
	cvtstat(&osb, &sb);
	error = copyout(&osb, uap->sb, sizeof(osb));
done:
	rel_mplock();
	return (error);
}
/*
 * Invoke the device's d_mmap method.  On success the method's a_result
 * (a page index) is returned; on any error -1 is returned -- the
 * specific error code is not propagated by this interface.  The MP lock
 * is taken only for non-MPSAFE drivers.
 */
int
dev_dmmap(cdev_t dev, vm_offset_t offset, int nprot)
{
    struct dev_mmap_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_mmap_desc;
    args.a_head.a_dev = dev;
    args.a_offset = offset;
    args.a_nprot = nprot;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_mmap(&args);
    if (locked)
        rel_mplock();

    if (rc != 0)
        return(-1);
    return(args.a_result);
}
/*
 * Invoke the device's d_mmap_single method, which maps a whole VM object
 * in one call (offset may be adjusted in place by the driver).  The MP
 * lock is taken only for non-MPSAFE drivers.
 */
int
dev_dmmap_single(cdev_t dev, vm_ooffset_t *offset, vm_size_t size,
                 struct vm_object **object, int nprot)
{
    struct dev_mmap_single_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_mmap_single_desc;
    args.a_head.a_dev = dev;
    args.a_offset = offset;
    args.a_size = size;
    args.a_object = object;
    args.a_nprot = nprot;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_mmap_single(&args);
    if (locked)
        rel_mplock();

    return(rc);
}
/*
 * Invoke the device's d_ioctl method, passing the command, argument
 * buffer, file flags, credentials and originating sysmsg through.  The
 * MP lock is taken only for non-MPSAFE drivers.
 */
int
dev_dioctl(cdev_t dev, u_long cmd, caddr_t data, int fflag,
           struct ucred *cred, struct sysmsg *msg)
{
    struct dev_ioctl_args args;
    int locked = dev_needmplock(dev);
    int rc;

    args.a_head.a_desc = &dev_ioctl_desc;
    args.a_head.a_dev = dev;
    args.a_cmd = cmd;
    args.a_data = data;
    args.a_fflag = fflag;
    args.a_cred = cred;
    args.a_sysmsg = msg;

    if (locked)
        get_mplock();
    rc = dev->si_ops->d_ioctl(&args);
    if (locked)
        rel_mplock();

    return (rc);
}
/*
 * Software interrupt routine, called at spl[soft]net.
 *
 * For each PPP unit: restart transmission if idle work is pending, then
 * drain the raw input queue.
 *
 * Fix: the default ifaltq subqueue was previously fetched once from
 * ppp_softc[0] before the loop but tested for every unit as sc advanced,
 * so with NPPP > 1 all units examined unit 0's send queue.  It is now
 * fetched per unit inside the loop.
 */
static void
pppintr(netmsg_t msg)
{
    struct mbuf *m;
    struct ppp_softc *sc;
    struct ifaltq_subque *ifsq;
    int i;

    /*
     * Packets are never sent to this netisr so the message must always
     * be replied.  Interlock processing and notification by replying
     * the message first.
     */
    lwkt_replymsg(&msg->lmsg, 0);

    get_mplock();
    sc = ppp_softc;
    for (i = 0; i < NPPP; ++i, ++sc) {
        ifsq = ifq_get_subq_default(&sc->sc_if.if_snd);
        ifnet_serialize_all(&sc->sc_if);
        if (!(sc->sc_flags & SC_TBUSY) &&
            (!ifsq_is_empty(ifsq) || !IF_QEMPTY(&sc->sc_fastq))) {
            sc->sc_flags |= SC_TBUSY;
            (*sc->sc_start)(sc);
        }
        /* drain raw input queue for this unit */
        for (;;) {
            IF_DEQUEUE(&sc->sc_rawq, m);
            if (m == NULL)
                break;
            ppp_inproc(sc, m);
        }
        ifnet_deserialize_all(&sc->sc_if);
    }
    rel_mplock();
}