/*
 * cpu_intr:
 *	MIPS hardware interrupt dispatcher.  Entered from the trap stub at
 *	IPL_HIGH with interrupts enabled in the CP0 status register.
 *	Repeatedly lowers the IPL to that of the highest pending interrupt,
 *	dispatches it, and raises back to IPL_HIGH, until nothing above the
 *	previous priority level 'ppl' remains pending.
 *
 *	ppl	 IPL in effect when the interrupt was taken
 *	pc	 interrupted program counter (for the clockframe)
 *	status	 saved CP0 status register (for the clockframe)
 */
void
cpu_intr(int ppl, vaddr_t pc, uint32_t status)
{
	struct cpu_info * const ci = curcpu();
	struct clockframe cf;	/* fix: 'cf' was used below but never declared */
	uint32_t pending;
	int ipl;
#ifdef DIAGNOSTIC
	/* Snapshot lock counters to detect leakage by interrupt handlers. */
	const int mtx_count = ci->ci_mtx_count;
	const u_int biglock_count = ci->ci_biglock_count;
	const u_int blcnt = curlwp->l_blcnt;
#endif
	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);

	ci->ci_data.cpu_nintr++;

	while (ppl < (ipl = splintr(&pending))) {
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		splx(ipl);	/* lower to interrupt level */
		KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
		KASSERTMSG(ci->ci_cpl == ipl,
		    "%s: cpl (%d) != ipl (%d)", __func__, ci->ci_cpl, ipl);
		KASSERT(pending != 0);

		cf.pc = pc;
		cf.sr = status;
		cf.intr = (ci->ci_idepth > 1);

#ifdef MIPS3_ENABLE_CLOCK_INTR
		if (pending & MIPS_INT_MASK_5) {
			KASSERTMSG(ipl == IPL_SCHED,
			    "%s: ipl (%d) != IPL_SCHED (%d)",
			    __func__, ipl, IPL_SCHED);
			/* call the common MIPS3 clock interrupt handler */
			mips3_clockintr(&cf);
			pending ^= MIPS_INT_MASK_5;
		}
#endif

		if (pending != 0) {
			/* Process I/O and error interrupts. */
			evbmips_iointr(ipl, pc, pending);
		}
		KASSERT(biglock_count == ci->ci_biglock_count);
		KASSERT(blcnt == curlwp->l_blcnt);
		KASSERT(mtx_count == ci->ci_mtx_count);

		/*
		 * If even our spl is higher now (due to interrupting while
		 * spin-lock is held and higher IPL spin-lock is locked, it
		 * can no longer be locked so it's safe to lower IPL back
		 * to ppl.
		 */
		(void) splhigh();	/* disable interrupts */
	}

	KASSERT(ci->ci_cpl == IPL_HIGH);
	KDASSERT(mips_cp0_status_read() & MIPS_SR_INT_IE);
}
struct evcnt * sysfpga_intr_evcnt(int group, int inum) { struct evcnt *ev = NULL; KDASSERT(group < SYSFPGA_NGROUPS); KDASSERT(sysfpga_sc->sc_ih[group] != NULL); switch (group) { case SYSFPGA_IGROUP_IRL1: KDASSERT(inum >= 0 && inum < SYSFPGA_IRL1_NINTR); ev = &sysfpga_irl1_intr_events[inum]; break; case SYSFPGA_IGROUP_IRL2: ev = &sysfpga_irl2_intr_events; break; case SYSFPGA_IGROUP_IRL3: ev = &sysfpga_irl3_intr_events; break; } return (ev); }
int v7fs_readlink(void *v) { struct vop_readlink_args /* { struct vnode *a_vp; struct uio *a_uio; kauth_cred_t a_cred; } */ *a = v; struct uio *uio = a->a_uio; struct vnode *vp = a->a_vp; struct v7fs_node *v7node = vp->v_data; struct v7fs_inode *inode = &v7node->inode; struct v7fs_self *fs = v7node->v7fsmount->core; int error = 0; KDASSERT(vp->v_type == VLNK); KDASSERT(uio->uio_offset >= 0); KDASSERT(v7fs_inode_islnk(inode)); v7fs_daddr_t blk = inode->addr[0]; void *buf; if (!(buf = scratch_read(fs, blk))) { error = EIO; goto error_exit; } if ((error = uiomove(buf, strlen(buf), uio))) { DPRINTF("uiomove failed.\n"); } scratch_free(fs, buf); error_exit: return error; }
int sysvbfs_readdir(void *v) { struct vop_readdir_args /* { struct vnode *a_vp; struct uio *a_uio; kauth_cred_t a_cred; int *a_eofflag; off_t **a_cookies; int *a_ncookies; } */ *ap = v; struct uio *uio = ap->a_uio; struct vnode *vp = ap->a_vp; struct sysvbfs_node *bnode = vp->v_data; struct bfs *bfs = bnode->bmp->bfs; struct dirent *dp; struct bfs_dirent *file; int i, n, error; DPRINTF("%s: offset=%" PRId64 " residue=%zu\n", __func__, uio->uio_offset, uio->uio_resid); KDASSERT(vp->v_type == VDIR); KDASSERT(uio->uio_offset >= 0); dp = malloc(sizeof(struct dirent), M_BFS, M_WAITOK | M_ZERO); i = uio->uio_offset / sizeof(struct dirent); n = uio->uio_resid / sizeof(struct dirent); if ((i + n) > bfs->n_dirent) n = bfs->n_dirent - i; for (file = &bfs->dirent[i]; i < n; file++) { if (file->inode == 0) continue; if (i == bfs->max_dirent) { DPRINTF("%s: file system inconsistent.\n", __func__); break; } i++; memset(dp, 0, sizeof(struct dirent)); dp->d_fileno = file->inode; dp->d_type = file->inode == BFS_ROOT_INODE ? DT_DIR : DT_REG; dp->d_namlen = strlen(file->name); strncpy(dp->d_name, file->name, BFS_FILENAME_MAXLEN); dp->d_reclen = sizeof(struct dirent); if ((error = uiomove(dp, dp->d_reclen, uio)) != 0) { DPRINTF("%s: uiomove failed.\n", __func__); free(dp, M_BFS); return error; } } DPRINTF("%s: %d %d %d\n", __func__, i, n, bfs->n_dirent); *ap->a_eofflag = (i == bfs->n_dirent); free(dp, M_BFS); return 0; }
/*
 * slugled_defer:
 *	Deferred attach for the NSLU2 ("slug") LED driver.  Configures the
 *	LED GPIO pins as outputs, turns the LEDs to their initial state,
 *	registers a shutdown hook, initializes the flash callouts, and
 *	establishes the USB and timer interrupts that drive LED activity.
 */
static void
slugled_defer(device_t self)
{
	struct slugled_softc *sc = device_private(self);
	struct ixp425_softc *ixsc = ixp425_softc;
	uint32_t reg;
	int s;

	/* GPIO registers are touched with interrupts blocked. */
	s = splhigh();

	/* Configure LED GPIO pins as output */
	reg = GPIO_CONF_READ_4(ixsc, IXP425_GPIO_GPOER);
	reg &= ~(LEDBITS_USB0 | LEDBITS_USB1);
	reg &= ~(LEDBITS_READY | LEDBITS_STATUS);
	GPIO_CONF_WRITE_4(ixsc, IXP425_GPIO_GPOER, reg);

	/* All LEDs off */
	reg = GPIO_CONF_READ_4(ixsc, IXP425_GPIO_GPOUTR);
	reg |= LEDBITS_USB0 | LEDBITS_USB1;
	reg &= ~(LEDBITS_STATUS | LEDBITS_READY);
	GPIO_CONF_WRITE_4(ixsc, IXP425_GPIO_GPOUTR, reg);

	splx(s);

	if (shutdownhook_establish(slugled_shutdown, sc) == NULL)
		aprint_error_dev(self,
		    "WARNING - Failed to register shutdown hook\n");

	/* One callout per LED bit set; the LED bits ride in the argument. */
	callout_init(&sc->sc_usb0, 0);
	callout_setfunc(&sc->sc_usb0, slugled_callout,
	    (void *)(uintptr_t)LEDBITS_USB0);

	callout_init(&sc->sc_usb1, 0);
	callout_setfunc(&sc->sc_usb1, slugled_callout,
	    (void *)(uintptr_t)LEDBITS_USB1);

	callout_init(&sc->sc_usb2, 0);
	callout_setfunc(&sc->sc_usb2, slugled_callout,
	    (void *)(uintptr_t)(LEDBITS_USB0 | LEDBITS_USB1));

	/* USB activity interrupts (PCI INTA/INTB/INTC) and timer tick. */
	sc->sc_usb0_ih = ixp425_intr_establish(PCI_INT_A, IPL_USB,
	    slugled_intr0, sc);
	KDASSERT(sc->sc_usb0_ih != NULL);
	sc->sc_usb1_ih = ixp425_intr_establish(PCI_INT_B, IPL_USB,
	    slugled_intr1, sc);
	KDASSERT(sc->sc_usb1_ih != NULL);
	sc->sc_usb2_ih = ixp425_intr_establish(PCI_INT_C, IPL_USB,
	    slugled_intr2, sc);
	KDASSERT(sc->sc_usb2_ih != NULL);

	sc->sc_tmr_ih = ixp425_intr_establish(IXP425_INT_TMR0, IPL_CLOCK,
	    slugled_tmr, NULL);
	KDASSERT(sc->sc_tmr_ih != NULL);
}
int v7fs_readdir(void *v) { struct vop_readdir_args /* { struct vnode *a_vp; struct uio *a_uio; kauth_cred_t a_cred; int *a_eofflag; off_t **a_cookies; int *a_ncookies; } */ *a = v; struct uio *uio = a->a_uio; struct vnode *vp = a->a_vp; struct v7fs_node *v7node = vp->v_data; struct v7fs_inode *inode = &v7node->inode; struct v7fs_self *fs = v7node->v7fsmount->core; struct dirent *dp; int error; DPRINTF("offset=%zu residue=%zu\n", uio->uio_offset, uio->uio_resid); KDASSERT(vp->v_type == VDIR); KDASSERT(uio->uio_offset >= 0); KDASSERT(v7fs_inode_isdir(inode)); struct v7fs_readdir_arg arg; arg.start = uio->uio_offset / sizeof(*dp); arg.end = arg.start + uio->uio_resid / sizeof(*dp); if (arg.start == arg.end) {/* user buffer has not enuf space. */ DPRINTF("uio buffer too small\n"); return ENOMEM; } dp = kmem_zalloc(sizeof(*dp), KM_SLEEP); arg.cnt = 0; arg.dp = dp; arg.uio = uio; *a->a_eofflag = false; error = v7fs_datablock_foreach(fs, inode, readdir_subr, &arg); if (error == V7FS_ITERATOR_END) { *a->a_eofflag = true; } if (error < 0) error = 0; kmem_free(dp, sizeof(*dp)); return error; }
/*
 * This function is invoked after a log is replayed to
 * disk to perform logical cleanup actions as described by
 * the log
 */
void
ffs_wapbl_replay_finish(struct mount *mp)
{
	struct wapbl_replay *wr = mp->mnt_wapbl_replay;
	int i;
	int error;

	/* Nothing to do when no replay state is attached to the mount. */
	if (!wr)
		return;

	/* Cleanup writes to disk, so the fs must be writable. */
	KDASSERT((mp->mnt_flag & MNT_RDONLY) == 0);

	for (i = 0; i < wr->wr_inodescnt; i++) {
		struct vnode *vp;
		struct inode *ip;

		error = VFS_VGET(mp, wr->wr_inodes[i].wr_inumber, &vp);
		if (error) {
			/* Best effort: report and move on to the next one. */
			printf("ffs_wapbl_replay_finish: "
			    "unable to cleanup inode %u\n",
			    wr->wr_inodes[i].wr_inumber);
			continue;
		}
		ip = VTOI(vp);
		KDASSERT(wr->wr_inodes[i].wr_inumber == ip->i_number);
#ifdef WAPBL_DEBUG
		printf("ffs_wapbl_replay_finish: "
		    "cleaning inode %llu size=%llu mode=%o nlink=%d\n",
		    ip->i_number, DIP(ip, size), DIP(ip, mode),
		    DIP(ip, nlink));
#endif
		KASSERT(DIP(ip, nlink) == 0);

		/*
		 * The journal may have left partially allocated inodes
		 * in mode zero.  This may occur if a crash occurs between
		 * the inode allocation in ffs_nodealloccg and when the
		 * inode is properly initialized in ufs_makeinode.  If so,
		 * just deallocate them.
		 */
		if (DIP(ip, mode) == 0) {
			UFS_WAPBL_BEGIN(mp);
			ffs_inode_free(ip, ip->i_number,
			    wr->wr_inodes[i].wr_imode);
			UFS_WAPBL_END(mp);
		}
		vput(vp);
	}

	/* Replay is done; tear down and detach the replay state. */
	wapbl_replay_stop(wr);
	wapbl_replay_free(wr);
	mp->mnt_wapbl_replay = NULL;
}
int sysvbfs_strategy(void *arg) { struct vop_strategy_args /* { struct vnode *a_vp; struct buf *a_bp; } */ *a = arg; struct buf *b = a->a_bp; struct vnode *v = a->a_vp; struct sysvbfs_node *bnode = v->v_data; struct sysvbfs_mount *bmp = bnode->bmp; int error; DPRINTF("%s:\n", __func__); KDASSERT(v->v_type == VREG); if (b->b_blkno == b->b_lblkno) { error = VOP_BMAP(v, b->b_lblkno, NULL, &b->b_blkno, NULL); if (error) { b->b_error = error; biodone(b); return error; } if ((long)b->b_blkno == -1) clrbuf(b); } if ((long)b->b_blkno == -1) { biodone(b); return 0; } return VOP_STRATEGY(bmp->devvp, b); }
/*
 * sysvbfs_open: VOP_OPEN for sysvbfs.  Schedules an atime update and
 * caches the file size and starting sector in the in-core node.
 */
int
sysvbfs_open(void *arg)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int a_mode;
		kauth_cred_t a_cred;
	} */ *a = arg;
	struct vnode *v = a->a_vp;
	struct sysvbfs_node *bnode = v->v_data;
	struct bfs_inode *inode = bnode->inode;

	DPRINTF("%s:\n", __func__);
	KDASSERT(v->v_type == VREG || v->v_type == VDIR);

	bnode->update_atime = true;
	/*
	 * NOTE(review): opening for write without O_APPEND resets the
	 * cached size to 0, i.e. behaves like an implicit truncate.
	 * One would expect this to be keyed on O_TRUNC instead — confirm
	 * against BFS write semantics before changing.
	 */
	if ((a->a_mode & FWRITE) && !(a->a_mode & O_APPEND)) {
		bnode->size = 0;
	} else {
		bnode->size = bfs_file_size(inode);
	}
	bnode->data_block = inode->start_sector;

	return 0;
}
/* * void sh4_switch_setup(struct proc *p): * prepare kernel stack PTE table. sh4_switch_resume wired this PTE. */ void sh4_switch_setup(struct proc *p) { pt_entry_t *pte; struct md_upte *md_upte = p->p_md.md_upte; uint32_t vpn; int i, e; vpn = (uint32_t)p->p_addr; vpn &= ~PGOFSET; e = SH4_UTLB_ENTRY - UPAGES; for (i = 0; i < UPAGES; i++, e++, vpn += PAGE_SIZE) { pte = __pmap_kpte_lookup(vpn); KDASSERT(pte && *pte != 0); /* Address array */ md_upte->addr = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT); md_upte->data = vpn | SH4_UTLB_AA_D | SH4_UTLB_AA_V; md_upte++; /* Data array */ md_upte->addr = SH4_UTLB_DA1 | (e << SH4_UTLB_E_SHIFT); md_upte->data = (*pte & PG_HW_BITS) | SH4_UTLB_DA1_D | SH4_UTLB_DA1_V; md_upte++; } }
/*
 * Make up a "fake" cleaner buffer, copy the data from userland into it.
 */
struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
{
	struct buf *nbp;

	/* The ifile is never cleaned through this path. */
	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);

	nbp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
	if (copyin(uaddr, nbp->b_data, size) != 0) {
		/* Userland copy failed: release the buffer and bail. */
		lfs_freebuf(fs, nbp);
		return NULL;
	}
	KDASSERT(nbp->b_iodone == lfs_callback);

#if 0
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	mutex_exit(&lfs_lock);
#endif

	nbp->b_bufsize = size;
	nbp->b_bcount = size;
	return (nbp);
}
int v7fs_update(struct vnode *vp, const struct timespec *acc, const struct timespec *mod, int flags) { struct v7fs_node *v7node = vp->v_data; struct v7fs_inode *inode = &v7node->inode; struct v7fs_self *fs = v7node->v7fsmount->core; bool update = false; DPRINTF("%p %zu %d\n", vp, vp->v_size, v7fs_inode_filesize(inode)); KDASSERT(vp->v_size == v7fs_inode_filesize(inode)); if (v7node->update_atime) { inode->atime = acc ? acc->tv_sec : time_second; v7node->update_atime = false; update = true; } if (v7node->update_ctime) { inode->ctime = time_second; v7node->update_ctime = false; update = true; } if (v7node->update_mtime) { inode->mtime = mod ? mod->tv_sec : time_second; v7node->update_mtime = false; update = true; } if (update) v7fs_inode_writeback(fs, inode); return 0; }
int v7fs_strategy(void *v) { struct vop_strategy_args /* { struct vnode *a_vp; struct buf *a_bp; } */ *a = v; struct buf *b = a->a_bp; struct vnode *vp = a->a_vp; struct v7fs_node *v7node = vp->v_data; struct v7fs_mount *v7fsmount = v7node->v7fsmount; int error; DPRINTF("%p\n", vp); KDASSERT(vp->v_type == VREG); if (b->b_blkno == b->b_lblkno) { error = VOP_BMAP(vp, b->b_lblkno, NULL, &b->b_blkno, NULL); if (error) { b->b_error = error; biodone(b); return error; } if ((long)b->b_blkno == -1) clrbuf(b); } if ((long)b->b_blkno == -1) { biodone(b); return 0; } return VOP_STRATEGY(v7fsmount->devvp, b); }
/* * Prepare kernel stack PTE table. sh4_switch_resume wires these PTEs. */ void sh4_switch_setup(struct lwp *l) { struct md_upte *md_upte; uint32_t vpn; pt_entry_t *pte; int i, e; md_upte = l->l_md.md_upte; vpn = sh3_trunc_page(uvm_lwp_getuarea(l)); e = SH4_UTLB_ENTRY - UPAGES; for (i = 0; i < UPAGES; ++i) { pte = __pmap_kpte_lookup(vpn); KDASSERT(pte && *pte != 0); /* Address array */ md_upte->addr = SH4_UTLB_AA | (e << SH4_UTLB_E_SHIFT); md_upte->data = vpn | SH4_UTLB_AA_D | SH4_UTLB_AA_V; ++md_upte; /* Data array */ md_upte->addr = SH4_UTLB_DA1 | (e << SH4_UTLB_E_SHIFT); md_upte->data = (*pte & PG_HW_BITS) | SH4_UTLB_DA1_D | SH4_UTLB_DA1_V; ++md_upte; vpn += PAGE_SIZE; ++e; } }
/*
 * ulz_put: pager "put" method for the loan-zero object.  Called with
 * uobj->vmobjlock held; the lock is released before returning.  The
 * single zero page owned by this object is never freed — it is only
 * reactivated or dequeued.
 */
static int
ulz_put(struct uvm_object *uobj, voff_t start, voff_t stop, int flags)
{
	struct vm_page *pg;

	KDASSERT(uobj == &uvm_loanzero_object);

	/*
	 * Don't need to do any work here if we're not freeing pages.
	 */
	if ((flags & PGO_FREE) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}

	/*
	 * we don't actually want to ever free the uvm_loanzero_page, so
	 * just reactivate or dequeue it.
	 */
	pg = TAILQ_FIRST(&uobj->memq);
	KASSERT(pg != NULL);
	/* The object holds exactly one page. */
	KASSERT(TAILQ_NEXT(pg, listq.queue) == NULL);

	mutex_enter(&uvm_pageqlock);
	if (pg->uanon)
		uvm_pageactivate(pg);
	else
		uvm_pagedequeue(pg);
	mutex_exit(&uvm_pageqlock);

	mutex_exit(&uobj->vmobjlock);
	return 0;
}
/*
 * intc_intr:
 *	SH4 interrupt dispatcher.  Looks up the handler registered for
 *	the event code in INTEVT and invokes it; the TMU0 event is the
 *	hardclock and gets a clockframe built from the saved context.
 *
 *	ssr/spc/ssp	saved status register, program counter and stack
 *			pointer at the time of the interrupt
 */
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	int evtcode;

	evtcode = _reg_read_4(SH4_INTEVT);
	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);
	/*
	 * On entry, all interrupts are disabled, and exception is enabled.
	 * Enable higher level interrupt here.
	 */
	/* Fix: result was stored in a local 's' that was never used. */
	(void)_cpu_intr_resume(ih->ih_level);

	if (evtcode == SH_INTEVT_TMU0_TUNI0) {	/* hardclock */
		struct clockframe cf;
		cf.spc = spc;
		cf.ssr = ssr;
		cf.ssp = ssp;
		(*ih->ih_func)(&cf);
	} else {
		(*ih->ih_func)(ih->ih_arg);
	}
}
/* create default bus_space_tag */
/*
 * bus_space_create:
 *	Initialize (or allocate, when 't' is NULL) a playstation2 bus
 *	space tag covering [addr, addr+size).  A zero size means a bare
 *	base address with no extent management.
 */
bus_space_tag_t
bus_space_create(bus_space_tag_t t, const char *name, bus_addr_t addr,
    bus_size_t size)
{
	struct playstation2_bus_space *pbs = (void *)t; /* discard const */

	if (pbs == NULL) {
		pbs = malloc(sizeof(*pbs), M_DEVBUF, M_NOWAIT);
		/*
		 * Fix: M_NOWAIT can return NULL, and the old KDASSERT
		 * vanished in non-DIAGNOSTIC kernels, leaving a NULL
		 * dereference below.  Fail loudly instead.
		 */
		if (pbs == NULL)
			panic("%s: can't allocate bus_space tag",
			    __FUNCTION__);
	}

	memset(pbs, 0, sizeof(*pbs));

	/* set default method */
	*pbs = _default_bus_space;
	pbs->pbs_cookie = pbs;

	/* set access region */
	if (size == 0) {
		pbs->pbs_base_addr = addr; /* no extent */
	} else {
		pbs->pbs_extent = extent_create(name, addr,
		    addr + size - 1, M_DEVBUF, 0, 0, EX_NOWAIT);
		if (pbs->pbs_extent == NULL) {
			panic("%s:: unable to create bus_space for "
			    "0x%08lx-%#lx\n", __FUNCTION__, addr, size);
		}
	}

	return (pbs);
}
/*
 * intc_intr:
 *	Interrupt dispatcher for SH3/SH4 systems with an HD6446x
 *	companion chip.  Dispatches the handler registered for the
 *	INTEVT event code; the companion-chip interrupt line is demuxed
 *	further via the NIRR status register.
 */
void
intc_intr(int ssr, int spc, int ssp)
{
	struct intc_intrhand *ih;
	int evtcode;
	u_int16_t r;

	/* The INTEVT register location differs between SH3 and SH4. */
	evtcode = _reg_read_4(CPU_IS_SH3 ? SH7709_INTEVT2 : SH4_INTEVT);
	ih = EVTCODE_IH(evtcode);
	KDASSERT(ih->ih_func);
	/*
	 * On entry, all interrupts are disabled,
	 * and exception is enabled for P3 access. (kernel stack is P3,
	 * SH3 may or may not cause TLB miss when access stack.)
	 * Enable higher level interrupt here.
	 */
	/* Latch companion-chip interrupt status before re-enabling. */
	r = _reg_read_2(HD6446X_NIRR);
	splx(ih->ih_level);

	if (evtcode == SH_INTEVT_TMU0_TUNI0) {	/* hardclock */
		struct clockframe cf;
		cf.spc = spc;
		cf.ssr = ssr;
		cf.ssp = ssp;
		(*ih->ih_func)(&cf);
		__dbg_heart_beat(HEART_BEAT_RED);
	} else if (evtcode ==
	    (CPU_IS_SH3 ? SH7709_INTEVT2_IRQ4 : SH_INTEVT_IRL11)) {
		/* HD6446x interrupt: find the highest enabled cause bit. */
		int cause = r & hd6446x_ienable;
		struct hd6446x_intrhand *hh =
		    &hd6446x_intrhand[ffs(cause) - 1];
		if (cause == 0) {
			/* Spurious/masked: acknowledge and bail. */
			printf("masked HD6446x interrupt.0x%04x\n", r);
			_reg_write_2(HD6446X_NIRR, 0x0000);
			return;
		}
		/* Enable higher level interrupt*/
		hd6446x_intr_resume(hh->hh_ipl);
		KDASSERT(hh->hh_func != NULL);
		(*hh->hh_func)(hh->hh_arg);
		__dbg_heart_beat(HEART_BEAT_GREEN);
	} else {
		(*ih->ih_func)(ih->ih_arg);
		__dbg_heart_beat(HEART_BEAT_BLUE);
	}
}
/*
 * v7fs_create: VOP_CREATE (v3) for v7fs.  Allocates a new regular file
 * (or socket) on disk, registers it in the parent directory, and
 * returns its vnode in *a_vpp (unlocked on success).
 */
int
v7fs_create(void *v)
{
	struct vop_create_v3_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *a = v;
	struct v7fs_node *parent_node = a->a_dvp->v_data;
	struct v7fs_mount *v7fsmount = parent_node->v7fsmount;
	struct v7fs_self *fs = v7fsmount->core;
	struct mount *mp = v7fsmount->mountp;
	struct v7fs_fileattr attr;
	struct vattr *va = a->a_vap;
	kauth_cred_t cr = a->a_cnp->cn_cred;
	v7fs_ino_t ino;
	int error = 0;

	DPRINTF("%s parent#%d\n", a->a_cnp->cn_nameptr,
	    parent_node->inode.inode_number);
	KDASSERT((va->va_type == VREG) || (va->va_type == VSOCK));

	/* Build on-disk attributes from the caller's credentials/vattr. */
	memset(&attr, 0, sizeof(attr));
	attr.uid = kauth_cred_geteuid(cr);
	attr.gid = kauth_cred_getegid(cr);
	attr.mode = va->va_mode | vtype_to_v7fs_mode (va->va_type);
	attr.device = 0;

	/* Allocate disk entry. and register its entry to parent directory. */
	if ((error = v7fs_file_allocate(fs, &parent_node->inode,
	    a->a_cnp->cn_nameptr, &attr, &ino))) {
		DPRINTF("v7fs_file_allocate failed.\n");
		return error;
	}
	/* Sync dirent size change. */
	uvm_vnp_setsize(a->a_dvp, v7fs_inode_filesize(&parent_node->inode));

	/* Get myself vnode. */
	*a->a_vpp = 0;
	if ((error = v7fs_vget(mp, ino, a->a_vpp))) {
		DPRINTF("v7fs_vget failed.\n");
		return error;
	}

	/* Scheduling update time. real update by v7fs_update */
	struct v7fs_node *newnode = (*a->a_vpp)->v_data;
	newnode->update_ctime = true;
	newnode->update_mtime = true;
	newnode->update_atime = true;
	DPRINTF("allocated %s->#%d\n", a->a_cnp->cn_nameptr, ino);

	/* error is 0 here; vget returned the vnode locked — unlock it. */
	if (error == 0)
		VOP_UNLOCK(*a->a_vpp);

	return error;
}
/*
 * timer_one_shot:
 *	Arm hardware timer 'timer' as a one-shot: zero its counter, set
 *	the compare value to 1, and start it counting with the
 *	compare-match interrupt enabled, so it fires almost immediately.
 */
void
timer_one_shot(int timer)
{
	/* Timer 0 is explicitly excluded here (asserted below). */
	KDASSERT(LEGAL_TIMER(timer) && timer != 0);

	_reg_write_4(T_COUNT_REG(timer), 0);
	_reg_write_4(T_COMP_REG(timer), 1);
	_reg_write_4(T_MODE_REG(timer), T_MODE_CUE | T_MODE_CMPE);
}
/*
 * micphy_writexreg:
 *	Write 'wval' to the PHY's extended register 'reg' through the
 *	XREG indirection window, then read it back and (in DIAGNOSTIC
 *	kernels) assert the written value took effect.
 */
static void
micphy_writexreg(struct mii_softc *sc, uint32_t reg, uint32_t wval)
{
	int rval __diagused;

	/* Select the target register for write, then supply the data. */
	PHY_WRITE(sc, XREG_CONTROL, XREG_CTL_SEL_WRITE | reg);
	PHY_WRITE(sc, XREG_WRITE, wval);
	/* Re-select for read and verify. */
	PHY_WRITE(sc, XREG_CONTROL, XREG_CTL_SEL_READ | reg);
	rval = PHY_READ(sc, XREG_READ);
	KDASSERT(wval == rval);
}
/* helper functions, makes the code below more readable */
/* Feed each character of 'str' to the console tty's line discipline. */
inline static void
ite_sendstr(const char *str)
{
	struct tty *kbd_tty;

	kbd_tty = kbd_ite->tp;
	KDASSERT(kbd_tty);
	for (; *str != '\0'; str++)
		kbd_tty->t_linesw->l_rint(*str, kbd_tty);
}
/* Poll entry point: delegate to the tty's line discipline. */
int
itepoll(dev_t dev, int events, struct lwp *l)
{
	struct tty *tp = getitesp(dev)->tp;

	KDASSERT(tp);
	return (*tp->t_linesw->l_poll)(tp, events, l);
}
/* Write entry point: delegate to the tty's line discipline. */
int
itewrite(dev_t dev, struct uio *uio, int flag)
{
	struct tty *tp = getitesp(dev)->tp;

	KDASSERT(tp);
	return (*tp->t_linesw->l_write)(tp, uio, flag);
}
/*
 * obio_iomem_add_mapping:
 *	Map the PCMCIA region [bpa, bpa+size) into kernel virtual space
 *	and return a handle through *bshp.  The PTEA "assistant" bits in
 *	each PTE select the bus width (8/16-bit) and space type (I/O,
 *	memory, or attribute).  Returns 0 on success, ENOMEM if no KVA.
 */
int
obio_iomem_add_mapping(bus_addr_t bpa, bus_size_t size, int type,
    bus_space_handle_t *bshp)
{
	u_long pa, endpa;
	vaddr_t va;
	pt_entry_t *pte;
	unsigned int m = 0;
	/* Strip the width flag; the remainder selects the space type. */
	int io_type = type & ~OBIO_IOMEM_PCMCIA_8BIT;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("obio_iomem_add_mapping: overflow");
#endif

	va = uvm_km_valloc(kernel_map, endpa - pa);
	if (va == 0)
		return (ENOMEM);

	/* Preserve the sub-page offset in the returned handle. */
	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

/* Pick the 8- or 16-bit PTEA mode constant for space type 's'. */
#define MODE(t, s)							\
	((t) & OBIO_IOMEM_PCMCIA_8BIT) ?				\
		_PG_PCMCIA_ ## s ## 8 :					\
		_PG_PCMCIA_ ## s ## 16
	switch (io_type) {
	default:
		panic("unknown pcmcia space.");
		/* NOTREACHED */
	case OBIO_IOMEM_PCMCIA_IO:
		m = MODE(type, IO);
		break;
	case OBIO_IOMEM_PCMCIA_MEM:
		m = MODE(type, MEM);
		break;
	case OBIO_IOMEM_PCMCIA_ATT:
		m = MODE(type, ATTR);
		break;
	}
#undef MODE

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
		pte = __pmap_kpte_lookup(va);
		KDASSERT(pte);
		*pte |= m;	/* PTEA PCMCIA assistant bit */
		sh_tlb_update(0, va, *pte);
	}

	return (0);
}
/* Close entry point: run the line discipline close, then shut the ite. */
int
iteclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct tty *tp = getitesp(dev)->tp;

	KDASSERT(tp);
	(*tp->t_linesw->l_close)(tp, flag);
	ttyclose(tp);
	ite_off(dev, 0);
	return 0;
}
/*
 * This version of sbappend() should only be used when the caller
 * absolutely knows that there will never be more than one record
 * in the socket buffer, that is, a stream protocol (such as TCP).
 */
void
sbappendstream(struct sockbuf *sb, struct mbuf *m)
{

	/* The chain handed in must be bare data — no packet chain. */
	KDASSERT(m->m_nextpkt == NULL);
	/* Stream invariant: the single record is also the last record. */
	KASSERT(sb->sb_mb == sb->sb_lastrecord);

	SBLASTMBUFCHK(sb, __func__);

	/* Append, coalescing with the tail mbuf where possible. */
	sbcompress(sb, m, sb->sb_mbtail);

	sb->sb_lastrecord = sb->sb_mb;
	SBLASTRECORDCHK(sb, __func__);
}
/*
 * sysvbfs_create: VOP_CREATE for sysvbfs.  Allocates a new regular file
 * in the BFS, looks up its fresh dirent, and returns the new vnode in
 * *a_vpp.  The (locked) parent directory vnode is released on all paths.
 */
int
sysvbfs_create(void *arg)
{
	struct vop_create_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *a = arg;
	struct sysvbfs_node *bnode = a->a_dvp->v_data;
	struct sysvbfs_mount *bmp = bnode->bmp;
	struct bfs *bfs = bmp->bfs;
	struct mount *mp = bmp->mountp;
	struct bfs_dirent *dirent;
	struct bfs_fileattr attr;
	struct vattr *va = a->a_vap;
	kauth_cred_t cr = a->a_cnp->cn_cred;
	int err = 0;

	DPRINTF("%s: %s\n", __func__, a->a_cnp->cn_nameptr);
	KDASSERT(a->a_vap->va_type == VREG);
	/* On-disk attributes come from the caller's credentials/vattr. */
	attr.uid = kauth_cred_geteuid(cr);
	attr.gid = kauth_cred_getegid(cr);
	attr.mode = va->va_mode;

	if ((err = bfs_file_create(bfs, a->a_cnp->cn_nameptr, 0, 0, &attr))
	    != 0) {
		DPRINTF("%s: bfs_file_create failed.\n", __func__);
		goto unlock_exit;
	}

	/* The dirent must exist right after a successful create. */
	if (!bfs_dirent_lookup_by_name(bfs, a->a_cnp->cn_nameptr, &dirent))
		panic("no dirent for created file.");

	if ((err = sysvbfs_vget(mp, dirent->inode, a->a_vpp)) != 0) {
		DPRINTF("%s: sysvbfs_vget failed.\n", __func__);
		goto unlock_exit;
	}
	/* Schedule timestamp updates; flushed later by the update path. */
	bnode = (*a->a_vpp)->v_data;
	bnode->update_ctime = true;
	bnode->update_mtime = true;
	bnode->update_atime = true;

 unlock_exit:
	/* unlock parent directory */
	vput(a->a_dvp);	/* locked at sysvbfs_lookup(); */

	return err;
}
/*
 * cgdiodone:
 *	Completion callback for I/O that cgd issued to the underlying
 *	device.  Propagates errors to the original buffer, decrypts read
 *	data in place, releases the bounce buffer (if one was allocated),
 *	completes the original request and restarts the queue.
 */
static void
cgdiodone(struct buf *nbp)
{
	struct buf *obp = nbp->b_private;	/* the original request */
	struct cgd_softc *cs = getcgd_softc(obp->b_dev);
	struct dk_softc *dksc = &cs->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t bn;

	KDASSERT(cs);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n", obp,
	    obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno,
	    nbp->b_data, nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 * we used to encrypt the blocks.
	 */
	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;
		cgd_cipher(cs, obp->b_data, obp->b_data, obp->b_bcount,
		    bn, dg->dg_secsize, CGD_CIPHER_DECRYPT);
	}

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(dksc, nbp->b_data);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
/*
 * Schedule a software interrupt at level 'ipl' by kicking the
 * one-shot hardware timer dedicated to that soft IPL.
 */
void
setsoft(int ipl)
{
	/* Soft IPL -> hardware timer number (SOFTNET/SOFTSERIAL share). */
	static const int timer_map[] = {
		[IPL_SOFT]	 = 1,
		[IPL_SOFTCLOCK]	 = 2,
		[IPL_SOFTNET]	 = 3,
		[IPL_SOFTSERIAL] = 3,
	};

	KDASSERT(ipl >= IPL_SOFT && ipl <= IPL_SOFTSERIAL);

	/* kick one shot timer */
	timer_one_shot(timer_map[ipl]);
}