/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask;
	CPUMASK_NANDBIT(mycpu->gd_other_cpus, mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (CPUMASK_CMPMASKNEQ(smp_active_mask, smp_startup_mask)) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose) {
		kprintf("Active CPU Mask: %08lx\n",
			(long)CPUMASK_LOWMASK(smp_active_mask));
	}
}
void
at1intr(netmsg_t msg)
{
	struct mbuf *m = msg->packet.nm_packet;
	struct elaphdr *elhp, elh;

	get_mplock();

	/*
	 * Phase 1 packet handling
	 */
	if (m->m_len < SZ_ELAPHDR &&
	    ((m = m_pullup(m, SZ_ELAPHDR)) == 0)) {
		ddpstat.ddps_tooshort++;
		goto out;
	}

	/*
	 * This seems a little dubious, but I don't know phase 1 so leave it.
	 */
	elhp = mtod(m, struct elaphdr *);
	m_adj(m, SZ_ELAPHDR);
	if (elhp->el_type == ELAP_DDPEXTEND) {
		ddp_input(m, m->m_pkthdr.rcvif, NULL, 1);
	} else {
		bcopy((caddr_t)elhp, (caddr_t)&elh, SZ_ELAPHDR);
		ddp_input(m, m->m_pkthdr.rcvif, &elh, 1);
	}
out:
	rel_mplock();
	/* msg was embedded in the mbuf, do not reply! */
}
static int
mmioctl(struct dev_ioctl_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	int error;

	get_mplock();

	switch (minor(dev)) {
	case 0:
		error = mem_ioctl(dev, ap->a_cmd, ap->a_data,
				  ap->a_fflag, ap->a_cred);
		break;
	case 3:
	case 4:
		error = random_ioctl(dev, ap->a_cmd, ap->a_data,
				     ap->a_fflag, ap->a_cred);
		break;
	default:
		error = ENODEV;
		break;
	}

	rel_mplock();
	return (error);
}
/*
 * Software interrupt routine, called at spl[soft]net.
 */
static void
pppintr(netmsg_t msg)
{
	struct mbuf *m;
	struct ppp_softc *sc;
	int i;

	/*
	 * Packets are never sent to this netisr so the message must always
	 * be replied.  Interlock processing and notification by replying
	 * the message first.
	 */
	lwkt_replymsg(&msg->lmsg, 0);

	get_mplock();

	sc = ppp_softc;
	for (i = 0; i < NPPP; ++i, ++sc) {
		ifnet_serialize_all(&sc->sc_if);
		if (!(sc->sc_flags & SC_TBUSY) &&
		    (!ifq_is_empty(&sc->sc_if.if_snd) ||
		     !IF_QEMPTY(&sc->sc_fastq))) {
			sc->sc_flags |= SC_TBUSY;
			(*sc->sc_start)(sc);
		}
		for (;;) {
			IF_DEQUEUE(&sc->sc_rawq, m);
			if (m == NULL)
				break;
			ppp_inproc(sc, m);
		}
		ifnet_deserialize_all(&sc->sc_if);
	}

	rel_mplock();
}
/*
 * Get SMP fully working before we start initializing devices.
 */
static void
ap_finish(void)
{
	int i;
	cpumask_t ncpus_mask = 0;

	for (i = 1; i <= ncpus; i++)
		ncpus_mask |= CPUMASK(i);

	mp_finish = 1;
	if (bootverbose)
		kprintf("Finish MP startup\n");

	/* build our map of 'other' CPUs */
	mycpu->gd_other_cpus = smp_startup_mask & ~CPUMASK(mycpu->gd_cpuid);

	/*
	 * Let the other cpus finish initializing and build their map
	 * of 'other' CPUs.
	 */
	rel_mplock();
	while (smp_active_mask != smp_startup_mask) {
		DELAY(100000);
		cpu_lfence();
	}

	while (try_mplock() == 0)
		DELAY(100000);
	if (bootverbose)
		kprintf("Active CPU Mask: %08x\n", smp_active_mask);
}
int
dev_dread(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_read_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_read_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock) {
		get_mplock();
		++mplock_reads;
	} else {
		++mpsafe_reads;
	}
	error = dev->si_ops->d_read(&ap);
	if (needmplock)
		rel_mplock();

	if (error == 0)
		dev->si_lastread = time_uptime;
	return (error);
}
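/*
 * (Sketch, not part of the original source.)  The dev_d*() wrappers here
 * key off dev_needmplock() to decide whether the driver call must be
 * bracketed by the Giant MP lock.  A minimal version of such a predicate
 * might look like the following; the D_MPSAFE flag name and the
 * si_ops->head.flags layout are assumptions for illustration, not a
 * statement of the actual implementation.
 */
static __inline int
dev_needmplock(cdev_t dev)
{
	/* The MP lock is needed unless the driver declared itself MP safe */
	return ((dev->si_ops->head.flags & D_MPSAFE) == 0);
}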
int
dev_dwrite(cdev_t dev, struct uio *uio, int ioflag, struct file *fp)
{
	struct dev_write_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	dev->si_lastwrite = time_uptime;

	ap.a_head.a_desc = &dev_write_desc;
	ap.a_head.a_dev = dev;
	ap.a_uio = uio;
	ap.a_ioflag = ioflag;
	ap.a_fp = fp;

	if (needmplock) {
		get_mplock();
		++mplock_writes;
	} else {
		++mpsafe_writes;
	}
	error = dev->si_ops->d_write(&ap);
	if (needmplock)
		rel_mplock();

	return (error);
}
/*
 * Core device strategy call, used to issue I/O on a device.  There are
 * two versions, a non-chained version and a chained version.  The chained
 * version reuses a BIO set up by vn_strategy().  The only difference is
 * that, for now, we do not push a new tracking structure when chaining
 * from vn_strategy.  XXX this will ultimately have to change.
 */
void
dev_dstrategy(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	struct bio_track *track;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track == NULL);
	KKASSERT(bio->bio_buf->b_cmd != BUF_CMD_DONE);
	if (bio->bio_buf->b_cmd == BUF_CMD_READ)
		track = &dev->si_track_read;
	else
		track = &dev->si_track_write;
	bio_track_ref(track);
	bio->bio_track = track;

	if (dsched_is_clear_buf_priv(bio->bio_buf))
		dsched_new_buf(bio->bio_buf);

	KKASSERT((bio->bio_flags & BIO_DONE) == 0);

	if (needmplock) {
		get_mplock();
		++mplock_strategies;
	} else {
		++mpsafe_strategies;
	}
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
/*
 * Process Interrupt Queue
 *
 * Processes entries on the ATM interrupt queue.  This queue is used by
 * device interface drivers in order to schedule events from the driver's
 * lower (interrupt) half to the driver's stack services.
 *
 * The interrupt routines must store the stack processing function to call
 * and a token (typically a driver/stack control block) at the front of the
 * queued buffer.  We assume that the function pointer and token values are
 * both contained (and properly aligned) in the first buffer of the chain.
 *
 * Arguments:
 *	none
 *
 * Returns:
 *	none
 *
 */
static void
atm_intr(netmsg_t msg)
{
	struct mbuf *m = msg->packet.nm_packet;
	caddr_t cp;
	atm_intr_func_t func;
	void *token;

	/*
	 * Get function to call and token value
	 */
	get_mplock();
	KB_DATASTART(m, cp, caddr_t);
	func = *(atm_intr_func_t *)cp;
	cp += sizeof(func);
	token = *(void **)cp;
	KB_HEADADJ(m, -(sizeof(func) + sizeof(token)));
	if (KB_LEN(m) == 0) {
		KBuffer *m1;

		KB_UNLINKHEAD(m, m1);
		m = m1;
	}

	/*
	 * Call processing function
	 */
	(*func)(token, m);

	/*
	 * Drain any deferred calls
	 */
	STACK_DRAIN();
	rel_mplock();
	/* msg was embedded in the mbuf, do not reply! */
}
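/*
 * (Sketch, not part of the original source.)  atm_intr() above expects the
 * driver's interrupt half to have stored the stack processing function and
 * its token at the front of the queued buffer.  The producer side could
 * look roughly like this; example_driver_sched() and atm_dispatch() are
 * hypothetical names, and the sketch assumes the buffer already has enough
 * leading space to prepend the two values.
 */
static void
example_driver_sched(atm_intr_func_t func, void *token, KBuffer *m)
{
	caddr_t cp;

	/* prepend room for the function pointer and token */
	KB_HEADADJ(m, sizeof(func) + sizeof(token));
	KB_DATASTART(m, cp, caddr_t);
	*(atm_intr_func_t *)cp = func;
	cp += sizeof(func);
	*(void **)cp = token;

	atm_dispatch(m);	/* hypothetical queueing routine */
}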
void
cam_sim_unlock(sim_lock *lock)
{
	if (lock == &sim_mplock)
		rel_mplock();
	else
		lockmgr(lock, LK_RELEASE);
}
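/*
 * (Sketch, not part of the original source.)  The acquire side of this
 * helper would presumably mirror the unlock path: take the Giant MP lock
 * when the SIM uses the shared sim_mplock, otherwise take the private
 * lockmgr lock exclusively.  Written here under that assumption.
 */
void
cam_sim_lock(sim_lock *lock)
{
	if (lock == &sim_mplock)
		get_mplock();
	else
		lockmgr(lock, LK_EXCLUSIVE);
}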
static void
mpls_input_handler(netmsg_t msg)
{
	struct mbuf *m = msg->packet.nm_packet;

	get_mplock();
	mpls_input(m);
	rel_mplock();
	/* do not reply, msg embedded in mbuf */
}
void
cam_sim_cond_unlock(sim_lock *lock, int doun)
{
	if (doun) {
		if (lock == &sim_mplock)
			rel_mplock();
		else
			lockmgr(lock, LK_RELEASE);
	}
}
/*
 * MPALMOSTSAFE
 */
int
sys_linux_sysctl(struct linux_sysctl_args *args)
{
	struct l___sysctl_args la;
	l_int *mib;
	int error, i;

	error = copyin((caddr_t)args->args, &la, sizeof(la));
	if (error)
		return (error);

	if (la.nlen <= 0 || la.nlen > LINUX_CTL_MAXNAME)
		return (ENOTDIR);

	mib = kmalloc(la.nlen * sizeof(l_int), M_TEMP, M_WAITOK);
	error = copyin(la.name, mib, la.nlen * sizeof(l_int));
	if (error) {
		kfree(mib, M_TEMP);
		return (error);
	}

	get_mplock();

	switch (mib[0]) {
	case LINUX_CTL_KERN:
		if (la.nlen < 2) {
			error = ENOTDIR;
			break;
		}
		switch (mib[1]) {
		case LINUX_KERN_VERSION:
			error = handle_string(&la, version);
			break;
		default:
			error = ENOTDIR;
			break;
		}
		break;
	default:
		error = ENOTDIR;
		break;
	}
	rel_mplock();

	if (error == ENOTDIR && mib) {
		kprintf("linux: sysctl: unhandled name=");
		for (i = 0; i < la.nlen; i++)
			kprintf("%c%d", (i) ? ',' : '{', mib[i]);
		kprintf("}\n");
	}

	if (mib)
		kfree(mib, M_TEMP);
	return (error);
}
/*
 * MPSAFE
 */
int
vfs_uninit(struct vfsconf *vfc, struct vfsconf *vfsp)
{
	int error;

	get_mplock();
	error = (vfc->vfc_vfsops->vfs_uninit)(vfsp);
	rel_mplock();

	return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_oftruncate(struct oftruncate_args *uap)
{
	int error;

	get_mplock();
	error = kern_ftruncate(uap->fd, uap->length);
	rel_mplock();

	return (error);
}
/*
 * Could probably merge these two code segments a little better...
 */
void
at2intr(netmsg_t msg)
{
	struct mbuf *m = msg->packet.nm_packet;

	/*
	 * Phase 2 packet handling
	 */
	get_mplock();
	ddp_input(m, m->m_pkthdr.rcvif, NULL, 2);
	rel_mplock();
	/* msg was embedded in the mbuf, do not reply! */
}
/*
 * MPALMOSTSAFE
 */
int
sys_clock_settime(struct clock_settime_args *uap)
{
	struct timespec ats;
	int error;

	if ((error = copyin(uap->tp, &ats, sizeof(ats))) != 0)
		return (error);

	get_mplock();
	error = kern_clock_settime(uap->clock_id, &ats);
	rel_mplock();

	return (error);
}
/*
 * The system call that results in a reboot
 *
 * MPALMOSTSAFE
 */
int
sys_reboot(struct reboot_args *uap)
{
	struct thread *td = curthread;
	int error;

	if ((error = priv_check(td, PRIV_REBOOT)))
		return (error);

	get_mplock();
	boot(uap->opt);
	rel_mplock();

	return (0);
}
/*
 * MPALMOSTSAFE
 */
int
sys_getdomainname(struct getdomainname_args *uap)
{
	int domainnamelen;
	int error;

	get_mplock();
	domainnamelen = strlen(domainname) + 1;
	if ((u_int)uap->len > domainnamelen + 1)
		uap->len = domainnamelen + 1;
	error = copyout(domainname, uap->domainname, uap->len);
	rel_mplock();

	return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_otruncate(struct otruncate_args *uap)
{
	struct nlookupdata nd;
	int error;

	get_mplock();
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0)
		error = kern_truncate(&nd, uap->length);
	nlookup_done(&nd);
	rel_mplock();

	return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_linux_execve(struct linux_execve_args *args)
{
	struct nlookupdata nd;
	struct image_args exec_args;
	char *path;
	int error;

	error = linux_copyin_path(args->path, &path, LINUX_PATH_EXISTS);
	if (error)
		return (error);
#ifdef DEBUG
	if (ldebug(execve))
		kprintf(ARGS(execve, "%s"), path);
#endif
	get_mplock();
	error = nlookup_init(&nd, path, UIO_SYSSPACE, NLC_FOLLOW);
	bzero(&exec_args, sizeof(exec_args));
	if (error == 0) {
		error = exec_copyin_args(&exec_args, path, PATH_SYSSPACE,
					 args->argp, args->envp);
	}
	if (error == 0)
		error = kern_execve(&nd, &exec_args);
	nlookup_done(&nd);

	/*
	 * The syscall result is returned in registers to the new program.
	 * Linux will register %edx as an atexit function and we must be
	 * sure to set it to 0.  XXX
	 */
	if (error == 0) {
		args->sysmsg_result64 = 0;
		if (curproc->p_sysent == &elf_linux_sysvec)
			error = emuldata_init(curproc, NULL, 0);
	}

	exec_free_args(&exec_args);
	linux_free_path(&path);

	if (error < 0) {
		/* We hit a lethal error condition.  Let's die now. */
		exit1(W_EXITCODE(0, SIGABRT));
		/* NOTREACHED */
	}
	rel_mplock();

	return (error);
}
/*
 * The ACPI helper thread processes OSD execution callback messages.
 */
static void
acpi_task_thread(void *arg)
{
	ACPI_OSD_EXEC_CALLBACK func;
	struct acpi_task *at;

	get_mplock();
	for (;;) {
		at = (void *)lwkt_waitport(&curthread->td_msgport, 0);
		func = at->at_function;
		func(at->at_context);
		lwkt_replymsg(&at->at_msg, 0);
	}
	rel_mplock();
}
/*
 * MPALMOSTSAFE
 */
int
sys_ocreat(struct ocreat_args *uap)
{
	struct nlookupdata nd;
	int error;

	get_mplock();
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, NLC_FOLLOW);
	if (error == 0) {
		error = kern_open(&nd, O_WRONLY | O_CREAT | O_TRUNC,
				  uap->mode, &uap->sysmsg_iresult);
	}
	rel_mplock();

	return (error);
}
int
dev_dclone(cdev_t dev)
{
	struct dev_clone_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_clone_desc;
	ap.a_head.a_dev = dev;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_clone(&ap);
	if (needmplock)
		rel_mplock();

	return (error);
}
/*
 * MPALMOSTSAFE
 */
int
sys_setdomainname(struct setdomainname_args *uap)
{
	struct thread *td = curthread;
	int error, domainnamelen;

	if ((error = priv_check(td, PRIV_SETDOMAINNAME)))
		return (error);
	if ((u_int)uap->len > sizeof(domainname) - 1)
		return EINVAL;

	get_mplock();
	domainnamelen = uap->len;
	error = copyin(uap->domainname, domainname, uap->len);
	domainname[domainnamelen] = 0;
	rel_mplock();

	return (error);
}
/*
 * dfbsd12_fstat_args(int fd, struct dfbsd12_stat *sb)
 *
 * MPALMOSTSAFE
 */
int
sys_dfbsd12_fstat(struct dfbsd12_fstat_args *uap)
{
	struct dfbsd12_stat ost;
	struct stat st;
	int error;

	get_mplock();
	error = kern_fstat(uap->fd, &st);
	rel_mplock();

	if (error == 0) {
		cvtstat(&ost, &st);
		error = copyout(&ost, uap->sb, sizeof(ost));
	}
	return (error);
}
static void
ata_boot_attach(void)
{
	struct ata_channel *ch;
	int ctlr;

	get_mplock();

	/* kick off probe and attach on all channels */
	for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
		if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
			ata_identify(ch->dev);
		}
	}

	rel_mplock();
}
/*
 * MPALMOSTSAFE
 */
int
sys_olstat(struct olstat_args *uap)
{
	struct nlookupdata nd;
	struct stat st;
	int error;

	get_mplock();
	error = nlookup_init(&nd, uap->path, UIO_USERSPACE, 0);
	if (error == 0) {
		error = kern_stat(&nd, &st);
		if (error == 0)
			error = compat_43_copyout_stat(&st, uap->ub);
		nlookup_done(&nd);
	}
	rel_mplock();

	return (error);
}
void
dev_dstrategy_chain(cdev_t dev, struct bio *bio)
{
	struct dev_strategy_args ap;
	int needmplock = dev_needmplock(dev);

	ap.a_head.a_desc = &dev_strategy_desc;
	ap.a_head.a_dev = dev;
	ap.a_bio = bio;

	KKASSERT(bio->bio_track != NULL);
	KKASSERT((bio->bio_flags & BIO_DONE) == 0);

	if (needmplock)
		get_mplock();
	(void)dev->si_ops->d_strategy(&ap);
	if (needmplock)
		rel_mplock();
}
int
dev_dclose(cdev_t dev, int fflag, int devtype)
{
	struct dev_close_args ap;
	int needmplock = dev_needmplock(dev);
	int error;

	ap.a_head.a_desc = &dev_close_desc;
	ap.a_head.a_dev = dev;
	ap.a_fflag = fflag;
	ap.a_devtype = devtype;

	if (needmplock)
		get_mplock();
	error = dev->si_ops->d_close(&ap);
	if (needmplock)
		rel_mplock();

	return (error);
}