/* * Device ioctl operation. */ int spec_ioctl(struct vnop_ioctl_args *ap) { proc_t p = vfs_context_proc(ap->a_context); dev_t dev = ap->a_vp->v_rdev; int retval = 0; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_START, (unsigned int)dev, (unsigned int)ap->a_command, (unsigned int)ap->a_fflag, (unsigned int)ap->a_vp->v_type, 0); switch (ap->a_vp->v_type) { case VCHR: retval = (*cdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p); break; case VBLK: retval = (*bdevsw[major(dev)].d_ioctl)(dev, ap->a_command, ap->a_data, ap->a_fflag, p); break; default: panic("spec_ioctl"); /* NOTREACHED */ } KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_IOCTL, 0) | DBG_FUNC_END, (unsigned int)dev, (unsigned int)ap->a_command, (unsigned int)ap->a_fflag, retval, 0); return (retval); }
/*
 * Call for enabling global system override.
 * This should be called only with the sys_override_lock held.
 *
 * The per-flag assertion counters make the overrides reference
 * counted: only the transition from zero actually disables the
 * corresponding throttling subsystem.
 */
static void
enable_system_override(uint64_t flags)
{
	if (flags & SYS_OVERRIDE_IO_THROTTLE) {
		/* First assertion disables system-wide I/O throttling. */
		if (io_throttle_assert_cnt++ == 0) {
			printf("Process %s [%d] disabling system-wide I/O Throttling\n",
			    current_proc()->p_comm, current_proc()->p_pid);
			KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_START,
			    current_proc()->p_pid, 0, 0, 0, 0);
			sys_override_io_throttle(THROTTLE_IO_DISABLE);
		}
	}

	if (flags & SYS_OVERRIDE_CPU_THROTTLE) {
		/* First assertion disables system-wide CPU throttling. */
		if (cpu_throttle_assert_cnt++ == 0) {
			printf("Process %s [%d] disabling system-wide CPU Throttling\n",
			    current_proc()->p_comm, current_proc()->p_pid);
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_START,
			    current_proc()->p_pid, 0, 0, 0, 0);
			sys_override_cpu_throttle(CPU_THROTTLE_DISABLE);
		}
	}
}
/*
 * Called when a thread leaves its low-priority I/O window.
 *
 * If the thread holds throttle info and a window is open, optionally
 * sleep (in LOWPRI_SLEEP_INTERVAL steps, bounded by max_try_num) while
 * the I/O would still be throttled, then atomically drop this thread
 * from the throttling count and release the throttle info reference.
 *
 * Fix: 'i' is read by the DBG_FUNC_END tracepoint (i*5) even when
 * ok_to_sleep == FALSE, in which case the original code never
 * initialized it — reading an uninitialized automatic variable is
 * undefined behavior.  Initialize it to 0 so the trace argument is
 * well-defined on the no-sleep path.
 */
void
throttle_lowpri_io(boolean_t ok_to_sleep)
{
	int i = 0;	/* number of sleep intervals taken; traced below */
	int max_try_num;
	struct uthread *ut;
	struct _throttle_io_info_t *info;

	ut = get_bsdthread_info(current_thread());

	/* Nothing to do unless a window is open and info is attached. */
	if ((ut->uu_lowpri_window == 0) || (ut->uu_throttle_info == NULL))
		goto done;

	info = ut->uu_throttle_info;
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_START,
	    ut->uu_lowpri_window, ok_to_sleep, 0, 0, 0);

	if (ok_to_sleep == TRUE) {
		/* Scale the retry budget by how many threads are throttling. */
		max_try_num = lowpri_max_waiting_msecs / LOWPRI_SLEEP_INTERVAL * MAX(1, info->numthreads_throttling);

		for (i = 0; i < max_try_num; i++) {
			if (throttle_io_will_be_throttled_internal(ut->uu_lowpri_window, info)) {
				IOSleep(LOWPRI_SLEEP_INTERVAL);
				DEBUG_ALLOC_THROTTLE_INFO("sleeping because of info = %p\n", info, info );
			} else {
				break;
			}
		}
	}
	KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 97)) | DBG_FUNC_END,
	    ut->uu_lowpri_window, i*5, 0, 0, 0);

	SInt32 oldValue;
	oldValue = OSDecrementAtomic(&info->numthreads_throttling);

	if (oldValue <= 0) {
		panic("%s: numthreads negative", __func__);
	}
done:
	ut->uu_lowpri_window = 0;
	if (ut->uu_throttle_info)
		throttle_info_rel(ut->uu_throttle_info);
	ut->uu_throttle_info = NULL;
}
/*
 * Emit kdebug trace records describing the pathname component(s) being
 * looked up under directory vnode 'dp'.  The path text is packed into
 * dbg_parms and emitted as one or more trace events: the first event
 * carries dp plus the first 12 bytes of path, subsequent events carry
 * 16 bytes each (4 args per event; NOTE(review): this 12/16 accounting
 * presumes 4-byte 'long' trace arguments — confirm on LP64 builds).
 */
static void
kdebug_lookup(struct vnode *dp, struct componentname *cnp)
{
	unsigned int i;
	int code;
	int dbg_namelen;
	char *dbg_nameptr;
	long dbg_parms[NUMPARMS];

	/* Collect the pathname for tracing */
	/*
	 * Length of the path from the start of the pathname buffer through
	 * the end of the current component; dbg_nameptr points just past
	 * the current component.
	 */
	dbg_namelen = (cnp->cn_nameptr - cnp->cn_pnbuf) + cnp->cn_namelen;
	dbg_nameptr = cnp->cn_nameptr + cnp->cn_namelen;

	/*
	 * If the path is longer than the trace buffer, keep only the tail
	 * (the bytes ending at the current component).
	 */
	if (dbg_namelen > (int)sizeof(dbg_parms))
		dbg_namelen = sizeof(dbg_parms);
	dbg_nameptr -= dbg_namelen;

	/* Copy the (possibly truncated) path itself */
	memcpy(dbg_parms, dbg_nameptr, dbg_namelen);

	/* Pad with '\0' or '>' */
	/*
	 * '>' padding signals that more path text follows the current
	 * component (the byte after the component is non-NUL); otherwise
	 * pad with NULs.
	 */
	if (dbg_namelen < (int)sizeof(dbg_parms)) {
		memset((char *)dbg_parms + dbg_namelen, *(cnp->cn_nameptr + cnp->cn_namelen) ? '>' : 0, sizeof(dbg_parms) - dbg_namelen);
	}

	/*
	 * In the event that we collect multiple, consecutive pathname
	 * entries, we must mark the start of the path's string and the end.
	 */
	code = (FSDBG_CODE(DBG_FSRW,36)) | DBG_FUNC_START;

	/* Everything fits in the first event: mark it as the end too. */
	if (dbg_namelen <= 12)
		code |= DBG_FUNC_END;

	/* First event: directory vnode + first 12 bytes of path. */
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, dp, dbg_parms[0], dbg_parms[1], dbg_parms[2], 0);

	code &= ~DBG_FUNC_START;

	/* Continuation events: 16 bytes (4 args) of path each. */
	for (i=3, dbg_namelen -= 12; dbg_namelen > 0; i+=4, dbg_namelen -= 16) {
		if (dbg_namelen <= 16)
			code |= DBG_FUNC_END;

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, code, dbg_parms[i], dbg_parms[i+1], dbg_parms[i+2], dbg_parms[i+3], 0);
	}
}
/*
 * Call for disabling global system override.
 * This should be called only with the sys_override_lock held.
 *
 * Counterpart of enable_system_override(): each flag's assertion
 * counter is dropped, and only the transition back to zero actually
 * re-enables the corresponding throttling subsystem.
 */
static void
disable_system_override(uint64_t flags)
{
	if (flags & SYS_OVERRIDE_IO_THROTTLE) {
		assert(io_throttle_assert_cnt > 0);
		/* Last release re-enables system-wide I/O throttling. */
		if (--io_throttle_assert_cnt == 0) {
			KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_THROTTLE, IO_THROTTLE_DISABLE) | DBG_FUNC_END,
			    current_proc()->p_pid, 0, 0, 0, 0);
			sys_override_io_throttle(THROTTLE_IO_ENABLE);
		}
	}

	if (flags & SYS_OVERRIDE_CPU_THROTTLE) {
		assert(cpu_throttle_assert_cnt > 0);
		/* Last release re-enables system-wide CPU throttling. */
		if (--cpu_throttle_assert_cnt == 0) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_SCHED, MACH_CPU_THROTTLE_DISABLE) | DBG_FUNC_END,
			    current_proc()->p_pid, 0, 0, 0, 0);
			sys_override_cpu_throttle(CPU_THROTTLE_ENABLE);
		}
	}
}
int spec_strategy(struct vnop_strategy_args *ap) { buf_t bp; int bflags; int policy; dev_t bdev; uthread_t ut; size_t devbsdunit; mount_t mp; bp = ap->a_bp; bdev = buf_device(bp); bflags = buf_flags(bp); mp = buf_vnode(bp)->v_mount; if (kdebug_enable) { int code = 0; if (bflags & B_READ) code |= DKIO_READ; if (bflags & B_ASYNC) code |= DKIO_ASYNC; if (bflags & B_META) code |= DKIO_META; else if (bflags & B_PAGEIO) code |= DKIO_PAGING; KERNEL_DEBUG_CONSTANT(FSDBG_CODE(DBG_DKRW, code) | DBG_FUNC_NONE, bp, bdev, (int)buf_blkno(bp), buf_count(bp), 0); } if (((bflags & (B_IOSTREAMING | B_PAGEIO | B_READ)) == (B_PAGEIO | B_READ)) && mp && (mp->mnt_kern_flag & MNTK_ROOTDEV)) hard_throttle_on_root = 1; if (mp != NULL) devbsdunit = mp->mnt_devbsdunit; else devbsdunit = LOWPRI_MAX_NUM_DEV - 1; throttle_info_update(&_throttle_io_info[devbsdunit], bflags); if ((policy = throttle_get_io_policy(&ut)) == IOPOL_THROTTLE) { bp->b_flags |= B_THROTTLED_IO; } if ((bflags & B_READ) == 0) { microuptime(&_throttle_io_info[devbsdunit].last_IO_timestamp); if (mp) { INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_write_size); } } else if (mp) { INCR_PENDING_IO(buf_count(bp), mp->mnt_pending_read_size); } (*bdevsw[major(bdev)].d_strategy)(bp); return (0); }
// LP64todo - fix this! 'n' should be int64_t?
/*
 * uiomove64: move up to 'n' bytes between the address 'c_cp' and the
 * memory described by 'uio', iovec by iovec, in the direction given by
 * uio->uio_rw (UIO_READ: from c_cp into the uio; UIO_WRITE: from the
 * uio into c_cp).  The uio's segment flag selects the copy primitive:
 * copyin/copyout for user virtual addresses, copywithin for kernel
 * virtual, and copypv for physical-address variants.  Advances the
 * iovec base/len, uio_resid, and uio_offset as it goes.  Returns 0 on
 * success or the first copy error (copypv failures are mapped to
 * EFAULT).  NOTE(review): the UIO_SYSSPACE/copywithin case does not
 * return early on error — presumably copywithin cannot fail; confirm.
 */
int
uiomove64(const addr64_t c_cp, int n, struct uio *uio)
{
	addr64_t cp = c_cp;
#if LP64KERN
	uint64_t acnt;	/* bytes to move this iteration (iovec len clamped to n) */
#else
	u_int acnt;
#endif
	int error = 0;

#if DIAGNOSTIC
	if (uio->uio_rw != UIO_READ && uio->uio_rw != UIO_WRITE)
		panic("uiomove: mode");
#endif

#if LP64_DEBUG
	if (IS_VALID_UIO_SEGFLG(uio->uio_segflg) == 0) {
		panic("%s :%d - invalid uio_segflg\n", __FILE__, __LINE__);
	}
#endif /* LP64_DEBUG */

	while (n > 0 && uio_resid(uio)) {
		acnt = uio_iov_len(uio);
		if (acnt == 0) {
			/* Current iovec exhausted: step to the next one. */
			uio_next_iov(uio);
			uio->uio_iovcnt--;
			continue;
		}
		/* Never move more than the caller asked for. */
		if (n > 0 && acnt > (uint64_t)n)
			acnt = n;

		switch (uio->uio_segflg) {

		case UIO_USERSPACE64:
		case UIO_USERISPACE64:
			/* 64-bit user virtual addresses: copyin/copyout. */
			// LP64 - 3rd argument in debug code is 64 bit, expected to be 32 bit
			if (uio->uio_rw == UIO_READ)
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), uio->uio_iovs.iov64p->iov_base, acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 0,0);
			  }
			else
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(uio->uio_iovs.iov64p->iov_base, CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_USERSPACE32:
		case UIO_USERISPACE32:
		case UIO_USERSPACE:
		case UIO_USERISPACE:
			/* 32-bit (or legacy) user virtual addresses. */
			if (uio->uio_rw == UIO_READ)
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);

				error = copyout( CAST_DOWN(caddr_t, cp), CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), acnt );

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 0,0);
			  }
			else
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);

				error = copyin(CAST_USER_ADDR_T(uio->uio_iovs.iov32p->iov_base), CAST_DOWN(caddr_t, cp), acnt);

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 0,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_SYSSPACE32:
		case UIO_SYSSPACE:
			/* Kernel virtual to kernel virtual: plain in-kernel copy. */
			if (uio->uio_rw == UIO_READ)
				error = copywithin(CAST_DOWN(caddr_t, cp), (caddr_t)uio->uio_iovs.iov32p->iov_base,
						   acnt);
			else
				error = copywithin((caddr_t)uio->uio_iovs.iov32p->iov_base, CAST_DOWN(caddr_t, cp),
						   acnt);
			break;

		case UIO_PHYS_USERSPACE64:
			/* Physical source/sink with 64-bit user virtual peer: copypv. */
			if (uio->uio_rw == UIO_READ)
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov64p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (int)uio->uio_iovs.iov64p->iov_base, acnt, 1,0);
			  }
			else
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);

				error = copypv(uio->uio_iovs.iov64p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (int)uio->uio_iovs.iov64p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_USERSPACE32:
		case UIO_PHYS_USERSPACE:
			/* Physical source/sink with 32-bit user virtual peer. */
			if (uio->uio_rw == UIO_READ)
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);

				error = copypv((addr64_t)cp, (addr64_t)uio->uio_iovs.iov32p->iov_base, acnt, cppvPsrc | cppvNoRefSrc);
				if (error) /* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 1,0);
			  }
			else
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);

				error = copypv((addr64_t)uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 1,0);
			  }
			if (error)
				return (error);
			break;

		case UIO_PHYS_SYSSPACE32:
		case UIO_PHYS_SYSSPACE:
			/* Physical peer in kernel space: copypv with kernel mapping. */
			if (uio->uio_rw == UIO_READ)
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_START,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);

				error = copypv((addr64_t)cp, uio->uio_iovs.iov32p->iov_base, acnt, cppvKmap | cppvPsrc | cppvNoRefSrc);
				if (error) 	/* Copy physical to virtual */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYOUT)) | DBG_FUNC_END,
				    (int)cp, (int)uio->uio_iovs.iov32p->iov_base, acnt, 2,0);
			  }
			else
			  {
				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_START,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);

				error = copypv(uio->uio_iovs.iov32p->iov_base, (addr64_t)cp, acnt, cppvKmap | cppvPsnk | cppvNoRefSrc | cppvNoModSnk);
				if (error)	/* Copy virtual to physical */
				        error = EFAULT;

				KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, DBG_UIO_COPYIN)) | DBG_FUNC_END,
				    (int)uio->uio_iovs.iov32p->iov_base, (int)cp, acnt, 2,0);
			  }
			if (error)
				return (error);
			break;

		default:
			break;
		}
		/* Advance the iovec, residual count, offset, and source cursor. */
		uio_iov_base_add(uio, acnt);
#if LP64KERN
		uio_iov_len_add(uio, -((int64_t)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int64_t)acnt)));
#else
		uio_iov_len_add(uio, -((int)acnt));
		uio_setresid(uio, (uio_resid(uio) - ((int)acnt)));
#endif
		uio->uio_offset += acnt;
		cp += acnt;
		n -= acnt;
	}
	return (error);
}