static void
schedcpu(void *arg)
{
	allproc_scan(schedcpu_stats, NULL);
	allproc_scan(schedcpu_resource, NULL);
	wakeup((caddr_t)&lbolt);
	wakeup(lbolt_syncer);
	callout_reset(&schedcpu_callout, hz, schedcpu, NULL);
}
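allproc_scan() drives each of the callbacks above once per process on the system. The schedcpu_stats and schedcpu_resource callbacks themselves are not part of this excerpt; the sketch below is a hypothetical callback showing the shape such functions take, assuming the usual int (*)(struct proc *, void *) signature and the convention (visible in procfs_readdir_root below) that a negative return value aborts the scan.

/*
 * Hypothetical allproc_scan() callback.  The scan loop is assumed to
 * hold each process (PHOLD/PRELE) around the call; returning a
 * negative value aborts the scan, >= 0 continues it.
 */
static int
example_count_callback(struct proc *p, void *data)
{
	int *count = data;

	++*count;		/* per-process work goes here */
	return (0);
}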
static int
procfs_readdir_root(struct vop_readdir_args *ap)
{
	struct procfs_readdir_root_info info;
	struct uio *uio = ap->a_uio;
	int res;

	info.error = 0;
	info.i = (int)uio->uio_offset;
	if (info.i < 0)
		return (EINVAL);
	info.pcnt = 0;
	info.uio = uio;
	info.cred = ap->a_cred;

	while (info.pcnt < 3) {
		res = procfs_readdir_root_callback(NULL, &info);
		if (res < 0)
			break;
	}
	if (res >= 0)
		allproc_scan(procfs_readdir_root_callback, &info);

	uio->uio_offset = (off_t)info.i;

	return (info.error);
}
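The root directory is produced in two phases: the callback is first invoked directly with a NULL process pointer to emit the fixed, non-process entries (info.pcnt counts them up to 3), and only then handed to allproc_scan() to emit one entry per pid. The sketch below illustrates that NULL-vs-process convention; it is a hypothetical stand-in, not the real procfs_readdir_root_callback().

/*
 * Hypothetical stand-in for procfs_readdir_root_callback().  Offset
 * skipping and the actual uiomove() of a struct dirent are elided.
 */
static int
example_readdir_callback(struct proc *p, void *data)
{
	struct procfs_readdir_root_info *info = data;

	if (p == NULL) {
		/* emit fixed entry #info->pcnt (".", "..", ...) */
		++info->pcnt;
	} else {
		/* emit a "<pid>" directory entry for p */
	}
	++info->i;			/* advance directory cookie */
	return (info->error ? -1 : 0);	/* negative return stops scan */
}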
static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;

	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up
	 *
	 * XXX we should make the schedule thread pcpu and then use a
	 * segmented allproc scan.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info, 0);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&p->p_token);
	faultin(p);
	p->p_swtime = 0;
	lwkt_reltoken(&p->p_token);
	PRELE(p);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}
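scheduler_callback() is not included in this excerpt, but the surrounding code pins down its contract: it must fill info.pp with the most eligible swapped-out process and take a hold on it, since scheduler() later dereferences info.pp and drops it with PRELE(). Below is a hedged sketch of such a reduction-style callback, using an assumed struct scheduler_info { struct proc *pp; int ppri; } layout, an assumed P_SWAPPEDOUT flag name, and a made-up priority metric.

/*
 * Hypothetical reduction-style scan callback.  The current winner is
 * PHOLD'd so it survives the scan; scheduler() releases it with PRELE().
 */
static int
example_scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	int pri;

	if ((p->p_flags & P_SWAPPEDOUT) == 0)	/* assumed flag name */
		return (0);
	pri = p->p_swtime;		/* crude: longest swapped out wins */
	if (pri > info->ppri) {
		if (info->pp != NULL)
			PRELE(info->pp);	/* drop previous winner */
		PHOLD(p);
		info->pp = p;
		info->ppri = pri;
	}
	return (0);
}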
/*
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	struct vmtotal *totalp;
	struct vm_object marker;
	vm_object_t object;
	long collisions;
	int burst;

	bzero(&total, sizeof(total));
	totalp = &total;
	bzero(&marker, sizeof(marker));
	marker.type = OBJT_MARKER;
	collisions = vmobj_token.t_collisions;

#if 0
	/*
	 * Mark all objects as inactive.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		vm_object_clear_flag(object, OBJ_ACTIVE);
	}
	lwkt_reltoken(&vmobj_token);
#endif

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, totalp);

	/*
	 * Calculate object memory usage statistics.
	 */
	lwkt_gettoken(&vmobj_token);
	TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list);
	burst = 0;

	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		/*
		 * devices, like /dev/mem, will badly skew our totals.
		 * markers aren't real objects.
		 */
		if (object->type == OBJT_MARKER)
			continue;
		if (object->type == OBJT_DEVICE)
			continue;
		if (object->size >= 0x7FFFFFFF) {
			/*
			 * Probably unbounded anonymous memory (really
			 * bounded by related vm_map_entry structures which
			 * we do not have access to in this loop).
			 */
			totalp->t_vm += object->resident_page_count;
		} else {
			/*
			 * It's questionable how useful this is but...
			 */
			totalp->t_vm += object->size;
		}
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += object->size;
			totalp->t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			totalp->t_vmshr += object->size;
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += object->size;
				totalp->t_armshr += object->resident_page_count;
			}
		}

		/*
		 * Don't waste time unnecessarily
		 */
		if (++burst < 25)
			continue;
		burst = 0;

		/*
		 * Don't hog the vmobj_token if someone else wants it.
		 */
		TAILQ_REMOVE(&vm_object_list, &marker, object_list);
		TAILQ_INSERT_AFTER(&vm_object_list, object,
				   &marker, object_list);
		object = &marker;
		if (collisions != vmobj_token.t_collisions) {
			tsleep(&vm_object_list, 0, "breath", 1);
			collisions = vmobj_token.t_collisions;
		} else {
			lwkt_yield();
		}
	}
	TAILQ_REMOVE(&vm_object_list, &marker, object_list);
	lwkt_reltoken(&vmobj_token);

	totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count;

	return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
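The loop above is an instance of the marker pattern for scanning a long shared list without monopolizing its lock: a dummy OBJT_MARKER node records the scan position, so the code can yield or sleep away vmobj_token every 25 objects and then resume from TAILQ_NEXT of the marker (note the object = &marker assignment after repositioning). Here is a generic, self-contained sketch of the same technique on a hypothetical item list; the names are illustrative, not from the kernel.

#include <sys/queue.h>
#include <string.h>

struct item {
	TAILQ_ENTRY(item) link;
	int is_marker;		/* discriminates markers from real items */
};
TAILQ_HEAD(itemlist, item);

static void
scan_with_marker(struct itemlist *list)
{
	struct item marker, *it;

	memset(&marker, 0, sizeof(marker));
	marker.is_marker = 1;

	/* lock list */
	TAILQ_INSERT_HEAD(list, &marker, link);
	for (it = TAILQ_FIRST(list); it != NULL; it = TAILQ_NEXT(it, link)) {
		if (it->is_marker)	/* skip ours and anyone else's */
			continue;
		/* ... process 'it' ... */

		/* reposition marker, point 'it' at it, let others in */
		TAILQ_REMOVE(list, &marker, link);
		TAILQ_INSERT_AFTER(list, it, &marker, link);
		it = &marker;
		/* unlock list; block or yield; relock list */
	}
	TAILQ_REMOVE(list, &marker, link);
	/* unlock list */
}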
/*
 * No requirements.
 */
void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action, 0);
}
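The third allproc_scan() argument appears here again (0, presumably selecting a plain, non-segmented scan as in scheduler() above). The callback is not shown; the sketch below assumes that action carries one of the traditional swapout policy values and is purely illustrative.

/*
 * Hypothetical callback shape; 'action' is assumed to hold the usual
 * swapout policy flags (e.g. VM_SWAP_NORMAL / VM_SWAP_IDLE).
 */
static int
example_swapout_callback(struct proc *p, void *data)
{
	int action = *(int *)data;

	if (action & VM_SWAP_NORMAL) {
		/* consider swapping this process out */
	}
	return (0);		/* keep scanning */
}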
/*
 * No requirements.
 */
static int
do_vmtotal(SYSCTL_HANDLER_ARGS)
{
	struct vmtotal total;
	struct vmtotal *totalp;
	vm_object_t object;

	bzero(&total, sizeof(total));
	totalp = &total;

	/*
	 * Mark all objects as inactive.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		if (object->type == OBJT_MARKER)
			continue;
		vm_object_clear_flag(object, OBJ_ACTIVE);
	}
	lwkt_reltoken(&vmobj_token);

	/*
	 * Calculate process statistics.
	 */
	allproc_scan(do_vmtotal_callback, totalp);

	/*
	 * Calculate object memory usage statistics.
	 */
	lwkt_gettoken(&vmobj_token);
	for (object = TAILQ_FIRST(&vm_object_list);
	     object != NULL;
	     object = TAILQ_NEXT(object, object_list)) {
		/*
		 * devices, like /dev/mem, will badly skew our totals.
		 * markers aren't real objects.
		 */
		if (object->type == OBJT_MARKER)
			continue;
		if (object->type == OBJT_DEVICE)
			continue;
		if (object->size >= 0x7FFFFFFF) {
			/*
			 * Probably unbounded anonymous memory (really
			 * bounded by related vm_map_entry structures which
			 * we do not have access to in this loop).
			 */
			totalp->t_vm += object->resident_page_count;
		} else {
			/*
			 * It's questionable how useful this is but...
			 */
			totalp->t_vm += object->size;
		}
		totalp->t_rm += object->resident_page_count;
		if (object->flags & OBJ_ACTIVE) {
			totalp->t_avm += object->size;
			totalp->t_arm += object->resident_page_count;
		}
		if (object->shadow_count > 1) {
			/* shared object */
			totalp->t_vmshr += object->size;
			totalp->t_rmshr += object->resident_page_count;
			if (object->flags & OBJ_ACTIVE) {
				totalp->t_avmshr += object->size;
				totalp->t_armshr += object->resident_page_count;
			}
		}
	}
	lwkt_reltoken(&vmobj_token);

	totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count;

	return (sysctl_handle_opaque(oidp, totalp, sizeof total, req));
}
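This earlier variant has no marker and holds vmobj_token across the whole list walk; instead it keeps the first pass that clears OBJ_ACTIVE on every object, relying on do_vmtotal_callback() (not shown) to re-mark the objects referenced by active processes before the accounting pass. Below is a hedged sketch of what that re-marking might look like, assuming the classic linked vm_map_entry layout; locking and submap handling are elided.

/*
 * Hypothetical re-marking step assumed to live in do_vmtotal_callback():
 * walk one process's vm_map and set OBJ_ACTIVE on every backing object
 * so the later list pass can attribute it to the active totals.
 */
static void
example_mark_active(vm_map_t map)
{
	vm_map_entry_t entry;

	for (entry = map->header.next; entry != &map->header;
	     entry = entry->next) {
		if (entry->object.vm_object != NULL)
			vm_object_set_flag(entry->object.vm_object,
					   OBJ_ACTIVE);
	}
}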