Example #1
static int user_frac_sysctl(SYSCTL_HANDLER_ARGS)
{
	uint32_t val = user_frac;
	int error;

	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error || !req->newptr)
		return (error);
	if (val > 99)
		return (EINVAL);

	mtx_lock(&poll_mtx);
	user_frac = val;
	mtx_unlock(&poll_mtx);

	return (0);
}
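A handler like this only takes effect once it is attached to the sysctl tree. Below is a minimal registration sketch using FreeBSD's SYSCTL_PROC macro; the parent node, flags, format string, and description are assumptions for illustration, not the handler's actual registration.

/* Illustrative registration; parent node, flags and description are assumed. */
SYSCTL_PROC(_kern_polling, OID_AUTO, user_frac,
    CTLTYPE_UINT | CTLFLAG_RW, NULL, 0,
    user_frac_sysctl, "IU", "Desired user fraction of CPU time");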
Example #2
static int
sysctl_nmbufs(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbufs;

	newnmbufs = nmbufs;
	error = sysctl_handle_int(oidp, &newnmbufs, 0, req);
	if (error == 0 && req->newptr && newnmbufs != nmbufs) {
		if (newnmbufs > nmbufs) {
			nmbufs = newnmbufs;
			nmbufs = uma_zone_set_max(zone_mbuf, nmbufs);
			EVENTHANDLER_INVOKE(nmbufs_change);
		} else
			error = EINVAL;
	}
	return (error);
}
Example #3
static int
sysctl_zfs_dirty_data_max_percent(SYSCTL_HANDLER_ARGS)
{
	int val, err;

	val = zfs_dirty_data_max_percent;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val < 0 || val > 100)
		return (EINVAL);

	zfs_dirty_data_max_percent = val;

	return (0);
}
Example #4
/*
 * Timecounter frequency adjustment interface.
 */
static int
acpi_timer_sysctl_freq(SYSCTL_HANDLER_ARGS)
{
    int error;
    u_int freq;
 
    if (acpi_timer_timecounter.tc_frequency == 0)
	return (EOPNOTSUPP);
    freq = acpi_timer_frequency;
    error = sysctl_handle_int(oidp, &freq, 0, req);
    if (error == 0 && req->newptr != NULL) {
	acpi_timer_frequency = freq;
	acpi_timer_timecounter.tc_frequency = acpi_timer_frequency;
    }

    return (error);
}
Example #5
static int
sysctl_machdep_piix_freq(SYSCTL_HANDLER_ARGS)
{
	int error;
	u_int freq;

	if (piix_timecounter.tc_frequency == 0)
		return (EOPNOTSUPP);
	freq = piix_freq;
	error = sysctl_handle_int(oidp, &freq, 0, req);
	if (error == 0 && req->newptr != NULL) {
		piix_freq = freq;
		piix_timecounter.tc_frequency = piix_freq;
		update_timecounter(&piix_timecounter);
	}
	return (error);
}
Example #6
static int
sysctl_kern_quantum(SYSCTL_HANDLER_ARGS)
{
	int error, new_val, period;

	period = 1000000 / realstathz;
	new_val = period * sched_slice;
	error = sysctl_handle_int(oidp, &new_val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (new_val <= 0)
		return (EINVAL);
	sched_slice = imax(1, (new_val + period / 2) / period);
	hogticks = imax(1, (2 * hz * sched_slice + realstathz / 2) /
	    realstathz);
	return (0);
}
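As a worked illustration of the rounding above (the numbers are assumed, not from the source): with realstathz = 127, period = 1000000 / 127 = 7874 µs, so writing a quantum of 100000 µs gives sched_slice = imax(1, (100000 + 3937) / 7874) = 13 stathz ticks; reading the OID back then reports 13 * 7874 = 102362 µs rather than the exact value that was written.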
Example #7
static int
hatm_sysctl_natm_traffic(SYSCTL_HANDLER_ARGS)
{
	int error;
	int tmp;

	tmp = hatm_natm_traffic;
	error = sysctl_handle_int(oidp, &tmp, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (tmp != ATMIO_TRAFFIC_UBR && tmp != ATMIO_TRAFFIC_CBR)
		return (EINVAL);

	hatm_natm_traffic = tmp;
	return (0);
}
Example #8
static int
bluetooth_set_hci_connect_timeout_value(SYSCTL_HANDLER_ARGS)
{
	u_int32_t	value;
	int		error;

	value = bluetooth_hci_connect_timeout_value;
	error = sysctl_handle_int(oidp, &value, 0, req);
	if (error == 0 && req->newptr != NULL) {
		if (0 < value && value <= bluetooth_l2cap_rtx_timeout_value)
			bluetooth_hci_connect_timeout_value = value;
		else
			error = EINVAL;
	}

	return (error);
} /* bluetooth_set_hci_connect_timeout_value */
Example #9
static int
isci_sysctl_reset_remote_device_on_controller1(SYSCTL_HANDLER_ARGS)
{
	struct isci_softc *isci = (struct isci_softc *)arg1;
	uint32_t remote_devices_to_be_reset = 0;
	struct ISCI_CONTROLLER *controller = &isci->controllers[1];
	int error =
	    sysctl_handle_int(oidp, &remote_devices_to_be_reset, 0, req);

	if (error || remote_devices_to_be_reset == 0)
		return (error);

	isci_sysctl_reset_remote_devices(controller,
	    remote_devices_to_be_reset);

	return (0);
}
Example #10
static int
kdb_sysctl_enter(SYSCTL_HANDLER_ARGS)
{
	int error, i;

	error = sysctl_wire_old_buffer(req, sizeof(int));
	if (error == 0) {
		i = 0;
		error = sysctl_handle_int(oidp, &i, 0, req);
	}
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (kdb_active)
		return (EBUSY);
	kdb_enter(KDB_WHY_SYSCTL, "sysctl debug.kdb.enter");
	return (0);
}
Example #11
/*
 * Run-time adjustment of the capture buffer.
 */
static int
sysctl_debug_ddb_capture_bufsize(SYSCTL_HANDLER_ARGS)
{
	u_int len, size;
	char *buf;
	int error;

	size = db_capture_bufsize;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || req->newptr == NULL)
		return (error);
	size = roundup(size, TEXTDUMP_BLOCKSIZE);
	if (size > db_capture_maxbufsize)
		return (EINVAL);
	sx_xlock(&db_capture_sx);
	if (size != 0) {
		/*
		 * Potentially the buffer is quite large, so if we can't
		 * allocate it, fail rather than waiting.
		 */
		buf = malloc(size, M_DDB_CAPTURE, M_NOWAIT);
		if (buf == NULL) {
			sx_xunlock(&db_capture_sx);
			return (ENOMEM);
		}
		len = min(db_capture_bufoff, size);
	} else {
		buf = NULL;
		len = 0;
	}
	if (db_capture_buf != NULL && buf != NULL)
		bcopy(db_capture_buf, buf, len);
	if (db_capture_buf != NULL)
		free(db_capture_buf, M_DDB_CAPTURE);
	db_capture_bufoff = len;
	db_capture_buf = buf;
	db_capture_bufsize = size;
	sx_xunlock(&db_capture_sx);

	KASSERT(db_capture_bufoff <= db_capture_bufsize,
	    ("sysctl_debug_ddb_capture_bufsize: bufoff > bufsize"));
	KASSERT(db_capture_bufsize <= db_capture_maxbufsize,
	    ("sysctl_debug_ddb_capture_maxbufsize: bufsize > maxbufsize"));

	return (0);
}
Example #12
static int
ath_sysctl_rfsilent(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	u_int rfsilent;
	int error;

	(void) ath_hal_getrfsilent(sc->sc_ah, &rfsilent);
	error = sysctl_handle_int(oidp, &rfsilent, 0, req);
	if (error || !req->newptr)
		return error;
	if (!ath_hal_setrfsilent(sc->sc_ah, rfsilent))
		return EINVAL;
	sc->sc_rfsilentpin = rfsilent & 0x1c;
	sc->sc_rfsilentpol = (rfsilent & 0x2) != 0;
	return 0;
}
Example #13
static int
sysctl_nmbclusters(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbclusters;

	newnmbclusters = nmbclusters;
	error = sysctl_handle_int(oidp, &newnmbclusters, 0, req); 
	if (error == 0 && req->newptr) {
		if (newnmbclusters > nmbclusters) {
			nmbclusters = newnmbclusters;
			uma_zone_set_max(zone_clust, nmbclusters);
			EVENTHANDLER_INVOKE(nmbclusters_change);
		} else
			error = EINVAL;
	}
	return (error);
}
Example #14
static int
sysctl_zfs_delay_min_dirty_percent(SYSCTL_HANDLER_ARGS)
{
	int val, err;

	val = zfs_delay_min_dirty_percent;
	err = sysctl_handle_int(oidp, &val, 0, req);
	if (err != 0 || req->newptr == NULL)
		return (err);

	if (val < zfs_vdev_async_write_active_max_dirty_percent)
		return (EINVAL);

	zfs_delay_min_dirty_percent = val;

	return (0);
}
Example #15
static int
sysctl_nmbjumbo16(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbjumbo16;

	newnmbjumbo16 = nmbjumbo16;
	error = sysctl_handle_int(oidp, &newnmbjumbo16, 0, req);
	if (error == 0 && req->newptr && newnmbjumbo16 != nmbjumbo16) {
		if (newnmbjumbo16 > nmbjumbo16 &&
		    nmbufs >= nmbclusters + nmbjumbop + nmbjumbo9 + nmbjumbo16) {
			nmbjumbo16 = newnmbjumbo16;
			nmbjumbo16 = uma_zone_set_max(zone_jumbo16, nmbjumbo16);
		} else
			error = EINVAL;
	}
	return (error);
}
Example #16
static int
isci_sysctl_coalesce_number(SYSCTL_HANDLER_ARGS)
{
	struct isci_softc *isci = (struct isci_softc *)arg1;
	int error = sysctl_handle_int(oidp, &isci->coalesce_number, 0, req);
	int i;

	if (error)
		return (error);

	for (i = 0; i < isci->controller_count; i++)
		scif_controller_set_interrupt_coalescence(
		    isci->controllers[i].scif_controller_handle,
		    isci->coalesce_number, isci->coalesce_timeout);

	return (0);
}
Example #17
static int
sysctl_kern_securelvl(SYSCTL_HANDLER_ARGS)
{
    struct prison *pr;
    int error, level;

    pr = req->td->td_ucred->cr_prison;

    /*
     * If the process is in jail, return the maximum of the global and
     * local levels; otherwise, return the global level.  Perform a
     * lockless read since the securelevel is an integer.
     */
    if (pr != NULL)
        level = imax(securelevel, pr->pr_securelevel);
    else
        level = securelevel;
    error = sysctl_handle_int(oidp, &level, 0, req);
    if (error || !req->newptr)
        return (error);
    /*
     * Permit the update only if the new securelevel is not lower than
     * the global level, and the jail's local level if any.
     */
    if (pr != NULL) {
        mtx_lock(&pr->pr_mtx);
        if (!regression_securelevel_nonmonotonic &&
                (level < imax(securelevel, pr->pr_securelevel))) {
            mtx_unlock(&pr->pr_mtx);
            return (EPERM);
        }
        pr->pr_securelevel = level;
        mtx_unlock(&pr->pr_mtx);
    } else {
        mtx_lock(&securelevel_mtx);
        if (!regression_securelevel_nonmonotonic &&
                (level < securelevel)) {
            mtx_unlock(&securelevel_mtx);
            return (EPERM);
        }
        securelevel = level;
        mtx_unlock(&securelevel_mtx);
    }
    return (error);
}
Example #18
static int
nvme_sysctl_timeout_period(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->timeout_period;
	int error = sysctl_handle_int(oidp, &ctrlr->timeout_period, 0, req);

	if (error)
		return (error);

	if (ctrlr->timeout_period > NVME_MAX_TIMEOUT_PERIOD ||
	    ctrlr->timeout_period < NVME_MIN_TIMEOUT_PERIOD) {
		ctrlr->timeout_period = oldval;
		return (EINVAL);
	}

	return (0);
}
Example #19
/*
 * zy7_devcfg_sysctl_pl_done() returns the status of the PL_DONE signal.
 */
static int
zy7_devcfg_sysctl_pl_done(SYSCTL_HANDLER_ARGS)
{
	struct zy7_devcfg_softc *sc = zy7_devcfg_softc_p;
	int pl_done = 0;

	if (sc) {
		DEVCFG_SC_LOCK(sc);

		/* PCFG_DONE bit is sticky.  Clear it before checking it. */
		WR4(sc, ZY7_DEVCFG_INT_STATUS, ZY7_DEVCFG_INT_PCFG_DONE);
		pl_done = ((RD4(sc, ZY7_DEVCFG_INT_STATUS) &
			    ZY7_DEVCFG_INT_PCFG_DONE) != 0);

		DEVCFG_SC_UNLOCK(sc);
	}
	return (sysctl_handle_int(oidp, &pl_done, 0, req));
}
Example #20
static int
zy7_devcfg_fclk_sysctl_level_shifters(SYSCTL_HANDLER_ARGS)
{
	int error, enabled;

	enabled = zy7_pl_level_shifters_enabled();

	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);

	if (enabled)
		zy7_pl_level_shifters_enable();
	else
		zy7_pl_level_shifters_disable();

	return (0);
}
Example #21
/* XXX debug stuff */
static int
efi_time_sysctl_handler(SYSCTL_HANDLER_ARGS)
{
	struct efi_tm tm;
	int error, val;

	val = 0;
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	error = efi_get_time(&tm);
	if (error == 0) {
		uprintf("EFI reports: Year %d Month %d Day %d Hour %d Min %d "
		    "Sec %d\n", tm.tm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
		    tm.tm_min, tm.tm_sec);
	}
	return (error);
}
Example #22
static int
isci_sysctl_start_phy(SYSCTL_HANDLER_ARGS)
{
    struct isci_softc *isci = (struct isci_softc *)arg1;
    uint32_t phy_to_be_started = 0xff;
    uint32_t controller_index, phy_index;
    int error = sysctl_handle_int(oidp, &phy_to_be_started, 0, req);

    controller_index = phy_to_be_started / SCI_MAX_PHYS;
    phy_index = phy_to_be_started % SCI_MAX_PHYS;

    if (error || controller_index >= isci->controller_count)
        return error;

    isci_sysctl_start(&isci->controllers[controller_index], phy_index);

    return 0;
}
Example #23
static int
nvme_sysctl_int_coal_time(SYSCTL_HANDLER_ARGS)
{
	struct nvme_controller *ctrlr = arg1;
	uint32_t oldval = ctrlr->int_coal_time;
	int error = sysctl_handle_int(oidp, &ctrlr->int_coal_time, 0,
	    req);

	if (error)
		return (error);

	if (oldval != ctrlr->int_coal_time)
		nvme_ctrlr_cmd_set_interrupt_coalescing(ctrlr,
		    ctrlr->int_coal_time, ctrlr->int_coal_threshold, NULL,
		    NULL);

	return (0);
}
Example #24
static int
isci_sysctl_fail_on_task_timeout(SYSCTL_HANDLER_ARGS)
{
	struct isci_softc	*isci = (struct isci_softc *)arg1;
	int32_t			fail_on_timeout;
	int			error, i;

	fail_on_timeout = isci->controllers[0].fail_on_task_timeout;
	error = sysctl_handle_int(oidp, &fail_on_timeout, 0, req);

	if (error || req->newptr == NULL)
		return (error);

	for (i = 0; i < isci->controller_count; i++)
		isci->controllers[i].fail_on_task_timeout = fail_on_timeout;

	return (0);
}
Example #25
static int
ath_sysctl_rfkill(SYSCTL_HANDLER_ARGS)
{
	struct ath_softc *sc = arg1;
	struct ifnet *ifp = sc->sc_ifp;
	struct ath_hal *ah = sc->sc_ah;
	u_int rfkill = ath_hal_getrfkill(ah);
	int error;

	error = sysctl_handle_int(oidp, &rfkill, 0, req);
	if (error || !req->newptr)
		return error;
	if (rfkill == ath_hal_getrfkill(ah))	/* unchanged */
		return 0;
	if (!ath_hal_setrfkill(ah, rfkill))
		return EINVAL;
	return (ifp->if_drv_flags & IFF_DRV_RUNNING) ? ath_reset(ifp) : 0;
}
Example #26
static int
uhso_radio_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct uhso_softc *sc = arg1;
	int error, radio;

	radio = sc->sc_radio;
	error = sysctl_handle_int(oidp, &radio, 0, req);
	if (error)
		return (error);
	if (radio != sc->sc_radio) {
		radio = radio != 0 ? 1 : 0;
		error = uhso_radio_ctrl(sc, radio);
		if (error != -1)
			sc->sc_radio = radio;
	}
	return (0);
}
Example #27
/*
 * Sysctl support routines
 */
static int
sysctl_emergency_enable(SYSCTL_HANDLER_ARGS)
{
	int error, enabled, cpuid, freq;

	enabled = emergency_intr_enable;
	error = sysctl_handle_int(oidp, &enabled, 0, req);
	if (error || req->newptr == NULL)
		return error;
	emergency_intr_enable = enabled;
	if (emergency_intr_enable)
		freq = emergency_intr_freq;
	else
		freq = 1;

	for (cpuid = 0; cpuid < ncpus; ++cpuid)
		systimer_adjust_periodic(&emergency_intr_timer[cpuid], freq);
	return 0;
}
Example #28
static int
sysctl_bless_preempt( struct sysctl_oid *oidp, struct sysctl_req *req )
{
    int error = 0;
    int value = 0;

    /* get the old value and process it */
    value = blessed_preempt;

    /* copy out the old value, get the new value */
    error = sysctl_handle_int(oidp, &value, 0, req);
    if (error || !req->newptr)
	    return (error);

    /* if that worked, and we're writing... */
    blessed_preempt = value ? TRUE : FALSE;

    return 0;
}
Example #29
static int
sysctl_pet_idle_rate( struct sysctl_oid *oidp, struct sysctl_req *req )
{
    int error = 0;
    int value = 0;
    
    /* get the old value and process it */
    value = kperf_get_pet_idle_rate();

    /* copy out the old value, get the new value */
    error = sysctl_handle_int(oidp, &value, 0, req);
    if (error || !req->newptr)
	    return (error);

    /* if that worked, and we're writing... */
    kperf_set_pet_idle_rate(value);

    return error;
}
Example #30
/*
 * Maximum number of flows that can be allocated of a given type.
 *
 * The table is allocated at boot time (for the pure caching case
 * there is no reason why this could not be changed at runtime)
 * and thus (currently) needs to be set with a tunable.
 */
static int
sysctl_nmbflows(SYSCTL_HANDLER_ARGS)
{
	int error, newnmbflows;

	newnmbflows = V_flowtable_nmbflows;
	error = sysctl_handle_int(oidp, &newnmbflows, 0, req); 
	if (error == 0 && req->newptr) {
		if (newnmbflows > V_flowtable_nmbflows) {
			V_flowtable_nmbflows = newnmbflows;
			uma_zone_set_max(V_flow_ipv4_zone,
			    V_flowtable_nmbflows);
			uma_zone_set_max(V_flow_ipv6_zone,
			    V_flowtable_nmbflows);
		} else
			error = EINVAL;
	}
	return (error);
}
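Nearly every handler above follows the same pattern around sysctl_handle_int(): snapshot the current value, let sysctl_handle_int() copy it out and fetch any new value, bail out on error or on a read-only access, validate, then commit under the appropriate lock. A condensed sketch of that pattern; my_tunable, my_lock and MY_MAX are hypothetical names used only for illustration.

static int
sysctl_my_tunable(SYSCTL_HANDLER_ARGS)
{
	int error, val;

	val = my_tunable;			/* snapshot the current value */
	error = sysctl_handle_int(oidp, &val, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);			/* copy error, or no new value supplied */
	if (val < 0 || val > MY_MAX)		/* validate before committing */
		return (EINVAL);
	mtx_lock(&my_lock);			/* serialize the update */
	my_tunable = val;
	mtx_unlock(&my_lock);
	return (0);
}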