/* * Binary bufring states. */ static int vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS) { #define BR_STATE_RIDX 0 #define BR_STATE_WIDX 1 #define BR_STATE_IMSK 2 #define BR_STATE_RSPC 3 #define BR_STATE_WSPC 4 #define BR_STATE_MAX 5 const struct vmbus_br *br = arg1; uint32_t rindex, windex, wavail, state[BR_STATE_MAX]; rindex = br->vbr_rindex; windex = br->vbr_windex; wavail = VMBUS_BR_WAVAIL(rindex, windex, br->vbr_dsize); state[BR_STATE_RIDX] = rindex; state[BR_STATE_WIDX] = windex; state[BR_STATE_IMSK] = br->vbr_imask; state[BR_STATE_WSPC] = wavail; state[BR_STATE_RSPC] = br->vbr_dsize - wavail; return sysctl_handle_opaque(oidp, state, sizeof(state), req); }
static int sysctl_machdep_msgbuf(SYSCTL_HANDLER_ARGS) { int error; /* Unwind the buffer, so that it's linear (possibly starting with * some initial nulls). */ error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr+msgbufp->msg_bufr, msgbufp->msg_size-msgbufp->msg_bufr,req); if(error) return(error); if(msgbufp->msg_bufr>0) { error=sysctl_handle_opaque(oidp,msgbufp->msg_ptr, msgbufp->msg_bufr,req); } return(error); }
/*
 * Export a snapshot of the NTP time state as an opaque struct ntptimeval.
 */
static int
ntp_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct ntptimeval ntv;		/* temporary structure */

	ntp_gettime1(&ntv);
	return (sysctl_handle_opaque(oidp, &ntv, sizeof(ntv), req));
}
static int sysctl_kern_dumpdev(SYSCTL_HANDLER_ARGS) { int error; udev_t ndumpdev; ndumpdev = dev2udev(dumpdev); error = sysctl_handle_opaque(oidp, &ndumpdev, sizeof ndumpdev, req); if (error == 0 && req->newptr != NULL) error = setdumpdev(udev2dev(ndumpdev, 0)); return (error); }
static int sysctl_machdep_cputime_clock(SYSCTL_HANDLER_ARGS) { int clock; int error; clock = cputime_clock; error = sysctl_handle_opaque(oidp, &clock, sizeof clock, req); if (error == 0 && req->newptr != NULL) { if (clock < 0 || clock >= CPUTIME_CLOCK_I586_PMC) return (EINVAL); cputime_clock = clock; } return (error); }
/* * No requirements. */ static int do_vmmeter_pcpu(SYSCTL_HANDLER_ARGS) { int boffset = offsetof(struct vmmeter, vmmeter_uint_begin); int eoffset = offsetof(struct vmmeter, vmmeter_uint_end); struct globaldata *gd = arg1; struct vmmeter vmm; int off; bzero(&vmm, sizeof(vmm)); for (off = boffset; off <= eoffset; off += sizeof(u_int)) { *(u_int *)((char *)&vmm + off) += *(u_int *)((char *)&gd->gd_cnt + off); } vmm.v_intr += vmm.v_ipi + vmm.v_timer; return (sysctl_handle_opaque(oidp, &vmm, sizeof(vmm), req)); }
static int sysctl_machdep_cputime_clock(SYSCTL_HANDLER_ARGS) { int clock; int error; #if defined(PERFMON) && defined(I586_PMC_GUPROF) int event; struct pmc pmc; #endif clock = cputime_clock; #if defined(PERFMON) && defined(I586_PMC_GUPROF) if (clock == CPUTIME_CLOCK_I586_PMC) { pmc.pmc_val = cputime_clock_pmc_conf; clock += pmc.pmc_event; } #endif error = sysctl_handle_opaque(oidp, &clock, sizeof clock, req); if (error == 0 && req->newptr != NULL) { #if defined(PERFMON) && defined(I586_PMC_GUPROF) if (clock >= CPUTIME_CLOCK_I586_PMC) { event = clock - CPUTIME_CLOCK_I586_PMC; if (event >= 256) return (EINVAL); pmc.pmc_num = 0; pmc.pmc_event = event; pmc.pmc_unit = 0; pmc.pmc_flags = PMCF_E | PMCF_OS | PMCF_USR; pmc.pmc_mask = 0; cputime_clock_pmc_conf = pmc.pmc_val; cputime_clock = CPUTIME_CLOCK_I586_PMC; } else #endif { if (clock < 0 || clock >= CPUTIME_CLOCK_I586_PMC) return (EINVAL); cputime_clock = clock; } } return (error); }
/* * XXXRW: Confirm that sysctl -a won't dump this keying material, don't want * it appearing in debugging output unnecessarily. */ static int sysctl_rss_key(SYSCTL_HANDLER_ARGS) { uint8_t temp_rss_key[RSS_KEYSIZE]; int error; error = priv_check(req->td, PRIV_NETINET_HASHKEY); if (error) return (error); bcopy(rss_key, temp_rss_key, sizeof(temp_rss_key)); error = sysctl_handle_opaque(oidp, temp_rss_key, sizeof(temp_rss_key), req); if (error) return (error); if (req->newptr != NULL) { /* XXXRW: Not yet. */ return (EINVAL); } return (0); }
static int aibs_sysctl(SYSCTL_HANDLER_ARGS) { struct aibs_softc *sc = arg1; enum aibs_type st = arg2; int i = oidp->oid_number; ACPI_STATUS rs; ACPI_OBJECT p, *bp; ACPI_OBJECT_LIST mp; ACPI_BUFFER b; char *name; struct aibs_sensor *as; ACPI_INTEGER v, l, h; int so[3]; switch (st) { case AIBS_VOLT: name = "RVLT"; as = sc->sc_asens_volt; break; case AIBS_TEMP: name = "RTMP"; as = sc->sc_asens_temp; break; case AIBS_FAN: name = "RFAN"; as = sc->sc_asens_fan; break; default: return ENOENT; } if (as == NULL) return ENOENT; l = as[i].l; h = as[i].h; p.Type = ACPI_TYPE_INTEGER; p.Integer.Value = as[i].i; mp.Count = 1; mp.Pointer = &p; b.Length = ACPI_ALLOCATE_BUFFER; ACPI_SERIAL_BEGIN(aibs); rs = AcpiEvaluateObjectTyped(sc->sc_ah, name, &mp, &b, ACPI_TYPE_INTEGER); if (ACPI_FAILURE(rs)) { ddevice_printf(sc->sc_dev, "%s: %i: evaluation failed\n", name, i); ACPI_SERIAL_END(aibs); return EIO; } bp = b.Pointer; v = bp->Integer.Value; AcpiOsFree(b.Pointer); ACPI_SERIAL_END(aibs); switch (st) { case AIBS_VOLT: break; case AIBS_TEMP: v += 2731; l += 2731; h += 2731; break; case AIBS_FAN: break; } so[0] = v; so[1] = l; so[2] = h; return sysctl_handle_opaque(oidp, &so, sizeof(so), req); }
/*
 * Export the flattened device tree blob verbatim; its length comes from
 * the FDT header itself.
 */
static int
sysctl_handle_dtb(SYSCTL_HANDLER_ARGS)
{
	return (sysctl_handle_opaque(oidp, fdtp, fdt_totalsize(fdtp), req));
}
/* * No requirements. */ static int do_vmtotal(SYSCTL_HANDLER_ARGS) { struct vmtotal total; struct vmtotal *totalp; struct vm_object marker; vm_object_t object; long collisions; int burst; bzero(&total, sizeof(total)); totalp = &total; bzero(&marker, sizeof(marker)); marker.type = OBJT_MARKER; collisions = vmobj_token.t_collisions; #if 0 /* * Mark all objects as inactive. */ lwkt_gettoken(&vmobj_token); for (object = TAILQ_FIRST(&vm_object_list); object != NULL; object = TAILQ_NEXT(object,object_list)) { if (object->type == OBJT_MARKER) continue; vm_object_clear_flag(object, OBJ_ACTIVE); } lwkt_reltoken(&vmobj_token); #endif /* * Calculate process statistics. */ allproc_scan(do_vmtotal_callback, totalp); /* * Calculate object memory usage statistics. */ lwkt_gettoken(&vmobj_token); TAILQ_INSERT_HEAD(&vm_object_list, &marker, object_list); burst = 0; for (object = TAILQ_FIRST(&vm_object_list); object != NULL; object = TAILQ_NEXT(object, object_list)) { /* * devices, like /dev/mem, will badly skew our totals. * markers aren't real objects. */ if (object->type == OBJT_MARKER) continue; if (object->type == OBJT_DEVICE) continue; if (object->size >= 0x7FFFFFFF) { /* * Probably unbounded anonymous memory (really * bounded by related vm_map_entry structures which * we do not have access to in this loop). */ totalp->t_vm += object->resident_page_count; } else { /* * It's questionable how useful this is but... 
*/ totalp->t_vm += object->size; } totalp->t_rm += object->resident_page_count; if (object->flags & OBJ_ACTIVE) { totalp->t_avm += object->size; totalp->t_arm += object->resident_page_count; } if (object->shadow_count > 1) { /* shared object */ totalp->t_vmshr += object->size; totalp->t_rmshr += object->resident_page_count; if (object->flags & OBJ_ACTIVE) { totalp->t_avmshr += object->size; totalp->t_armshr += object->resident_page_count; } } /* * Don't waste time unnecessarily */ if (++burst < 25) continue; burst = 0; /* * Don't hog the vmobj_token if someone else wants it. */ TAILQ_REMOVE(&vm_object_list, &marker, object_list); TAILQ_INSERT_AFTER(&vm_object_list, object, &marker, object_list); object = ▮ if (collisions != vmobj_token.t_collisions) { tsleep(&vm_object_list, 0, "breath", 1); collisions = vmobj_token.t_collisions; } else { lwkt_yield(); } } TAILQ_REMOVE(&vm_object_list, &marker, object_list); lwkt_reltoken(&vmobj_token); totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count; return (sysctl_handle_opaque(oidp, totalp, sizeof total, req)); }
/*
 * No requirements.
 *
 * Export a self-consistent snapshot of the global vmstats structure.
 */
static int
do_vmstats(SYSCTL_HANDLER_ARGS)
{
	struct vmstats vms = vmstats;

	return (sysctl_handle_opaque(oidp, &vms, sizeof(vms), req));
}
/* * No requirements. */ static int do_vmtotal(SYSCTL_HANDLER_ARGS) { struct vmtotal total; struct vmtotal *totalp; vm_object_t object; bzero(&total, sizeof(total)); totalp = &total; /* * Mark all objects as inactive. */ lwkt_gettoken(&vmobj_token); for (object = TAILQ_FIRST(&vm_object_list); object != NULL; object = TAILQ_NEXT(object,object_list)) { if (object->type == OBJT_MARKER) continue; vm_object_clear_flag(object, OBJ_ACTIVE); } lwkt_reltoken(&vmobj_token); /* * Calculate process statistics. */ allproc_scan(do_vmtotal_callback, totalp); /* * Calculate object memory usage statistics. */ lwkt_gettoken(&vmobj_token); for (object = TAILQ_FIRST(&vm_object_list); object != NULL; object = TAILQ_NEXT(object, object_list)) { /* * devices, like /dev/mem, will badly skew our totals. * markers aren't real objects. */ if (object->type == OBJT_MARKER) continue; if (object->type == OBJT_DEVICE) continue; if (object->size >= 0x7FFFFFFF) { /* * Probably unbounded anonymous memory (really * bounded by related vm_map_entry structures which * we do not have access to in this loop). */ totalp->t_vm += object->resident_page_count; } else { /* * It's questionable how useful this is but... */ totalp->t_vm += object->size; } totalp->t_rm += object->resident_page_count; if (object->flags & OBJ_ACTIVE) { totalp->t_avm += object->size; totalp->t_arm += object->resident_page_count; } if (object->shadow_count > 1) { /* shared object */ totalp->t_vmshr += object->size; totalp->t_rmshr += object->resident_page_count; if (object->flags & OBJ_ACTIVE) { totalp->t_avmshr += object->size; totalp->t_armshr += object->resident_page_count; } } } lwkt_reltoken(&vmobj_token); totalp->t_free = vmstats.v_free_count + vmstats.v_cache_count; return (sysctl_handle_opaque(oidp, totalp, sizeof total, req)); }