/* proc function to read/write RealTime Clock */
int proc_dolasatrtc(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos)
{
	struct timespec64 now;
	int ret;

	if (!write) {
		/* Refresh the exported value from the persistent clock before the read. */
		read_persistent_clock64(&now);
		rtctmp = now.tv_sec;
		/* never expose a negative time */
		if (rtctmp < 0)
			rtctmp = 0;
	}

	ret = proc_dointvec(table, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	if (write) {
		/*
		 * Due to the RTC hardware limitation, we can not actually
		 * use the full 64-bit range here.
		 */
		now.tv_sec = rtctmp;
		now.tv_nsec = 0;
		update_persistent_clock64(now);
	}
	return 0;
}
/*
 * nss_rpscfg_handler()
 *	Enable NSS RPS
 *
 * sysctl handler: reads are served directly by proc_dointvec; on a
 * successful write, a value of 1 enables RPS via a message to core 0,
 * any other value is rejected with a console message (runtime disable
 * is not supported).
 */
static int nss_rpscfg_handler(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct nss_top_instance *nss_top = &nss_top_main;
	struct nss_ctx_instance *nss_ctx = &nss_top->nss[0];
	int ret;

	/* Let the generic integer handler copy the value in/out first. */
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (!ret) {
		if ((write) && (nss_rps_cfg == 1)) {
			printk("Enabling NSS RPS\n");
			/* Tell core 0 to turn RPS on. */
			nss_n2h_tx(nss_ctx, 1);
			return ret;
		}
		if ((write) && (nss_rps_cfg == 0)) {
			printk("Runtime disabling of NSS RPS not supported \n");
			return ret;
		}
		if (write) {
			printk("Invalid input value.Valid values are 0 and 1 \n");
		}
	}

	return ret;
}
/*
 * Sysctl handler that pauses/unpauses the kgnilnd threads.
 * LL_PROC_PROTO expands to the standard proc-handler parameter list
 * (table, write, buffer, lenp, ppos) — see the Lustre compat headers.
 */
static int LL_PROC_PROTO(proc_toggle_thread_pause)
{
	/* Snapshot the trigger so we can tell whether the write changed it. */
	int             old_val = kgnilnd_sysctl.ksd_pause_trigger;
	int             rc = 0;
	ENTRY;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write) {
		/* read */
		RETURN(rc);
	}

	/* Pausing only makes sense once the LND is fully initialized. */
	if (kgnilnd_data.kgn_init != GNILND_INIT_ALL) {
		rc = -EINVAL;
		RETURN(rc);
	}

	if (old_val != kgnilnd_sysctl.ksd_pause_trigger) {
		/* Serialize quiesce transitions against other admin requests. */
		down(&kgnilnd_data.kgn_quiesce_sem);
		/* NOTE(review): this logs the previous value, not the new one. */
		CDEBUG(D_NET, "setting quiesce_trigger %d\n", old_val);
		kgnilnd_data.kgn_quiesce_trigger = kgnilnd_sysctl.ksd_pause_trigger;
		/* Block until all threads acknowledge the new quiesce state. */
		kgnilnd_quiesce_wait("admin sysctl");
		up(&kgnilnd_data.kgn_quiesce_sem);
	}

	RETURN(rc);
}
/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 *
 * Fix: the return value of proc_dointvec() was discarded and 0 was
 * returned unconditionally, so an invalid write (e.g. non-numeric
 * input) was reported to userspace as success.  Propagate the error
 * and only re-arm the periodic writeback timer after a successful
 * update of the interval.
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	/* Re-arm the periodic writeback timer with the (possibly new) interval. */
	bdi_arm_supers_timer();
	return 0;
}
/*
 * Sysctl handler for the default SVE vector length.
 *
 * Reads and writes go through a stack-local copy of the value so that
 * an invalid write never clobbers sve_default_vl; the global is only
 * updated after validation, clamped to a supported vector length.
 */
static int sve_proc_do_default_vl(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	/* Work on a local copy; sve_default_vl is untouched on error. */
	int vl = sve_default_vl;
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/* Round to a vector length the hardware actually supports. */
	sve_default_vl = find_supported_vector_length(vl);
	return 0;
}

/* Sysctl table entry exposing sve_default_vector_length (rw for root). */
static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};
/*
 * Sysctl handler for the RDMA queue bandwidth override.
 * The proc value is in mbytes/sec; the internal limit is kept in bytes.
 */
static int proc_toggle_rdmaq_override(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Snapshot so we only act when the write actually changed the value. */
	int old_val = kgnilnd_sysctl.ksd_rdmaq_override;
	int rc = 0;
	ENTRY;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write) {
		/* read */
		RETURN(rc);
	}

	/* Only meaningful once the LND is fully initialized. */
	if (kgnilnd_data.kgn_init != GNILND_INIT_ALL) {
		rc = -EINVAL;
		RETURN(rc);
	}

	if (old_val != kgnilnd_sysctl.ksd_rdmaq_override) {
		long new_mb = kgnilnd_sysctl.ksd_rdmaq_override * (long)(1024*1024);
		LCONSOLE_INFO("changing RDMAQ override to %d mbytes/sec\n", kgnilnd_sysctl.ksd_rdmaq_override);
		/* override proc is mbytes, but we calc in bytes */
		kgnilnd_data.kgn_rdmaq_override = new_mb;
		/* Write barrier so other CPUs observe the new limit promptly;
		 * presumably paired with a read on the consumer side — confirm. */
		smp_wmb();
	}
	RETURN(rc);
}
/*
 * Sysctl handler: any write triggers a full stack reset on device 0 and
 * blocks until the reset request has cleared.
 */
static int proc_trigger_stack_reset(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int              rc = 0;
	int              i = 1;
	kgn_device_t    *dev;
	ENTRY;

	if (!write) {
		/* read */
		rc = proc_dointvec(table, write, buffer, lenp, ppos);
		RETURN(rc);
	}

	/* only device 0 gets the handle, see kgnilnd_dev_init */
	dev = &kgnilnd_data.kgn_devices[0];
	LASSERTF(dev != NULL, "dev 0 is NULL\n");

	kgnilnd_critical_error(dev->gnd_err_handle);

	/* Wait for the reset to complete. This prevents any races in testing
	 * where we'd immediately try to send traffic again */
	while (kgnilnd_data.kgn_needs_reset != 0) {
		i++;
		/* (i & -i) == i holds only when i is a power of two, so the
		 * louder D_WARNING message fires at exponentially growing
		 * intervals; the back-off sleep grows linearly with i. */
		LCONSOLE((((i) & (-i)) == i) ? D_WARNING : D_NET, "Waiting for stack reset request to clear\n");
		cfs_pause(cfs_time_seconds(1 * i));
	}

	RETURN(rc);
}
/*
 * Sysctl handler that pauses/unpauses the kgnilnd threads (mutex-based
 * variant of the quiesce toggle).
 */
static int proc_toggle_thread_pause(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Snapshot the trigger so we can tell whether the write changed it. */
	int old_val = kgnilnd_sysctl.ksd_pause_trigger;
	int rc = 0;
	ENTRY;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write) {
		/* read */
		RETURN(rc);
	}

	/* Pausing only makes sense once the LND is fully initialized. */
	if (kgnilnd_data.kgn_init != GNILND_INIT_ALL) {
		rc = -EINVAL;
		RETURN(rc);
	}

	if (old_val != kgnilnd_sysctl.ksd_pause_trigger) {
		/* Serialize quiesce transitions against other admin requests. */
		mutex_lock(&kgnilnd_data.kgn_quiesce_mutex);
		/* NOTE(review): this logs the previous value, not the new one. */
		CDEBUG(D_NET, "setting quiesce_trigger %d\n", old_val);
		kgnilnd_data.kgn_quiesce_trigger = kgnilnd_sysctl.ksd_pause_trigger;
		/* Block until all threads acknowledge the new quiesce state. */
		kgnilnd_quiesce_wait("admin sysctl");
		mutex_unlock(&kgnilnd_data.kgn_quiesce_mutex);
	}

	RETURN(rc);
}
/*
 * Sysctl handler toggling mouse-button emulation: 1 starts emulation,
 * 0 stops it, anything else is rejected.  On any failure the sysctl
 * value is rolled back to its previous setting.
 */
static int mac_hid_toggle_emumouse(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int saved = *value;
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret == 0 && write && *value != saved) {
		switch (*value) {
		case 1:
			ret = mac_hid_start_emulation();
			break;
		case 0:
			mac_hid_stop_emulation();
			break;
		default:
			ret = -EINVAL;
			break;
		}
	}

	/* Restore the old value in case of error */
	if (ret)
		*value = saved;

	return ret;
}
/*
 * Sysctl handler: a write ends a hardware quiesce on device 0, passing
 * the configured quiesce duration to the end callback.
 */
static int proc_hw_quiesce(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int  rc = 0;
	kgn_device_t *dev;
	ENTRY;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);
	if (!write) {
		/* read */
		RETURN(rc);
	}

	/* Only meaningful once the LND is fully initialized. */
	if (kgnilnd_data.kgn_init != GNILND_INIT_ALL) {
		rc = -EINVAL;
		RETURN(rc);
	}

	/* only device 0 gets the handle, see kgnilnd_dev_init */
	dev = &kgnilnd_data.kgn_devices[0];
	LASSERTF(dev != NULL, "dev 0 is NULL\n");
	/* ksd_quiesce_secs is seconds; the callback takes milliseconds. */
	kgnilnd_quiesce_end_callback(dev->gnd_handle, kgnilnd_sysctl.ksd_quiesce_secs * MSEC_PER_SEC);

	RETURN(rc);
}
/*
 * Sysctl handler for the per-netns SCTP minimum RTO.  Writes are staged
 * in a local so net->sctp.rto_min is only updated after the new value
 * passes the [extra1, extra2] range check; out-of-range writes return
 * -EINVAL and leave the setting unchanged.
 */
static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = current->nsproxy->net_ns;
	unsigned int lower = *(unsigned int *) ctl->extra1;
	unsigned int upper = *(unsigned int *) ctl->extra2;
	int new_value;
	struct ctl_table tbl = {
		.maxlen = sizeof(unsigned int),
		/* Stage writes locally; reads come straight from the netns. */
		.data = write ? (void *)&new_value : (void *)&net->sctp.rto_min,
	};
	int ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		if (new_value < lower || new_value > upper)
			return -EINVAL;
		net->sctp.rto_min = new_value;
	}

	return ret;
}
/*
 * Read-only proc handler: writes are rejected with -EINVAL, reads are
 * delegated to the generic integer handler.  The signature (with or
 * without the struct file argument) is version-dependent.
 */
static int proc_do_knllog_intvec_readonly(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos )
/* NOTE(review): the matching #if for this #endif (selecting the
 * version-dependent prototype) precedes this chunk of the file. */
#endif
{
	if ( write )
	{
		printk(KERN_WARNING "Read-only entry!\n");
		return -EINVAL;
	}
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
	return proc_dointvec(table, write, buffer, lenp, ppos ); /* No special processing for read. */
#else
	return proc_dointvec(table, write, filp, buffer, lenp, ppos ); /* No special processing for read. */
#endif
}
/*
 * proc handler for /proc/sys/kernel/nmi
 *
 * Reflects the live NMI watchdog state into nmi_watchdog_enabled, lets
 * the generic handler read/update it, and enables or disables the
 * LAPIC/IO-APIC watchdog when the effective state actually changed.
 *
 * Fix: the return value of proc_dointvec() was discarded, so an invalid
 * write (e.g. non-numeric input) was silently reported as success.
 */
int proc_nmi_enabled(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos)
{
	int old_state;
	int rc;

	/* Derive the current flag from the live counter before the generic
	 * handler reads or overwrites it. */
	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
	old_state = nmi_watchdog_enabled;

	rc = proc_dointvec(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	/* Nothing to do if the effective (boolean) state did not change. */
	if (!!old_state == !!nmi_watchdog_enabled)
		return 0;

	if (atomic_read(&nmi_active) < 0 || !nmi_watchdog_active()) {
		printk(KERN_WARNING "NMI watchdog is permanently disabled\n");
		return -EIO;
	}

	if (nmi_watchdog == NMI_LOCAL_APIC) {
		if (nmi_watchdog_enabled)
			enable_lapic_nmi_watchdog();
		else
			disable_lapic_nmi_watchdog();
	} else if (nmi_watchdog == NMI_IO_APIC) {
		if (nmi_watchdog_enabled)
			enable_ioapic_nmi_watchdog();
		else
			disable_ioapic_nmi_watchdog();
	} else {
		printk(KERN_WARNING "NMI watchdog doesn't know what hardware to touch\n");
		return -EIO;
	}
	return 0;
}
/*
 * Sysctl handler for the per-netns SCTP auth_enable flag.  Writes are
 * staged in a local, then mirrored into both the netns setting and the
 * control socket's endpoint under the socket lock.
 */
static int proc_sctp_do_auth(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = current->nsproxy->net_ns;
	int new_value;
	struct ctl_table tbl = {
		.maxlen = sizeof(unsigned int),
		/* Stage writes locally; reads come straight from the netns. */
		.data = write ? (void *)&new_value : (void *)&net->sctp.auth_enable,
	};
	int ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);

	if (write && ret == 0) {
		struct sock *sk = net->sctp.ctl_sock;

		net->sctp.auth_enable = new_value;
		/* Update the value in the control socket */
		lock_sock(sk);
		sctp_sk(sk)->ep->auth_enable = new_value;
		release_sock(sk);
	}

	return ret;
}
/*
 * Proc handler: refreshes the exported deny percentage from the page
 * counters before delegating to the generic integer handler.
 */
static int proc_dointvec_deny(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* 100 minus the denied share of allowed pages, rounded to the
	 * nearest percent (the "+ allowed_pages / 2" term does the
	 * rounding).  Assumes allowed_pages is non-zero — TODO confirm. */
	deny_percentage = 100 - (100 * deny_pages + allowed_pages / 2) / allowed_pages;
	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
/*
 * Proc handler: refreshes the exported L2 notify threshold from the
 * page counters before delegating to the generic integer handler.
 */
static int proc_dointvec_l2_notify(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* 100 minus the notify-high share of allowed pages, rounded to the
	 * nearest percent (the "+ allowed_pages / 2" term does the
	 * rounding).  Assumes allowed_pages is non-zero — TODO confirm. */
	l2_notify = 100 - (100 * notify_high_pages + allowed_pages / 2) / allowed_pages;
	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
/*
 * Proc handler that unconditionally spawns the test kernel thread on
 * every access (read or write), then delegates to the generic integer
 * handler.
 */
static int test_start_thread( ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos )
{
	int rc;

	/* Spawn the worker first, exactly as on every prior access. */
	kernel_thread( test_thread, NULL, CLONE_KERNEL | CLONE_CUSTOM_STACK );

	rc = proc_dointvec( table, write, filp, buffer, lenp, ppos );
	return rc;
}
/*
 * Proc handler that redirects the table's data pointer at the
 * namespace-local IPC value before delegating to proc_dointvec.
 */
static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Shallow copy via struct assignment, then point at the
	 * namespace-local value instead of the global one. */
	struct ctl_table ipc_table = *table;

	ipc_table.data = get_ipc(table);

	return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos);
}
/*
 * Thin wrapper around proc_dointvec that logs the integer value before
 * and after the generic handler runs.
 */
static int t1_proc_doint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = (int *)table->data;
	int rc;

	LOG(LC_PROC, 0, "t1: gona read %s from procfs entry, old value is: %i\n", table->procname, *value);

	rc = proc_dointvec(table, write, buffer, lenp, ppos);

	LOG(LC_PROC, 0, "t1: read %s from procfs, new value is: %i\n", table->procname, *value);

	return rc;
}
/*
 * Proc handler that redirects the table's data pointer at the
 * namespace-local mqueue value before delegating to proc_dointvec.
 */
static int proc_mq_dointvec(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Shallow copy via struct assignment, then point at the
	 * namespace-local value instead of the global one. */
	struct ctl_table mq_table = *table;

	mq_table.data = get_mq(table);

	return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
}
/*
 * Sysctl handler for obd_timeout.  After the generic handler runs, the
 * LDLM timeout is clamped to stay strictly below the OBD timeout.
 * LL_PROC_PROTO expands to the standard proc-handler parameter list.
 */
int LL_PROC_PROTO(proc_set_timeout)
{
	int rc = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Keep the LDLM timeout strictly below the OBD timeout. */
	if (ldlm_timeout >= obd_timeout)
		ldlm_timeout = max(obd_timeout / 3, 1U);

	return rc;
}
/*
 * proc handler for the running debug_active sysctl
 * always allow read, allow write only if debug_stoppable is set or
 * if debug_active is already off
 */
static int s390dbf_procactive(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Refuse the write (silently, with "success") when debugging is
	 * active and marked non-stoppable. */
	if (write && !debug_stoppable && debug_active)
		return 0;

	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
/*
 * Sysctl handler for obd_timeout.  After the generic handler runs, the
 * LDLM timeout is clamped to stay strictly below the OBD timeout.
 */
static int proc_set_timeout(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Keep the LDLM timeout strictly below the OBD timeout. */
	if (ldlm_timeout >= obd_timeout)
		ldlm_timeout = max(obd_timeout / 3, 1U);

	return rc;
}
/*
 * Sysctl handler for the LRU hot-set percentage: after a write, the LRU
 * lists are rebalanced to the new split.  The return type and the #if
 * selecting this prototype precede this chunk of the file.
 */
flashcache_lru_hot_pct_sysctl(ctl_table *table, int write,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
			      struct file *file,
#endif
			      void __user *buffer, size_t *length, loff_t *ppos)
/* NOTE(review): the matching #if for this #endif is above this chunk. */
#endif
{
	/* The owning cache context is smuggled through the table's extra1. */
	struct cache_c *dmc = (struct cache_c *)table->extra1;

	/* NOTE(review): proc_dointvec's return value is discarded, so a
	 * failed write is still reported as success — confirm intent. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
	proc_dointvec(table, write, file, buffer, length, ppos);
#else
	proc_dointvec(table, write, buffer, length, ppos);
#endif
	if (write)
		flashcache_reclaim_rebalance_lru(dmc, dmc->sysctl_lru_hot_pct);
	return 0;
}
/*
 * Proc handler: refreshes the exported used-page count from the free
 * and allowed counters before delegating to the generic handler.
 */
static int proc_dointvec_used(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	/* Used = allowed - free, but only when both counters are sane;
	 * otherwise report zero. */
	used_pages = (lowmem_free_pages > 0 && allowed_pages > lowmem_free_pages)
			? allowed_pages - lowmem_free_pages
			: 0;

	return proc_dointvec(table, write, filp, buffer, lenp, ppos);
}
/*
 * Proc handler: applies the new diagnostic setting via diag_change()
 * only after a successful write.
 */
static int proc_diag(ctl_table *table, int write, struct file *filp, void *buffer, size_t *lenp)
{
	int ret = proc_dointvec(table, write, filp, buffer, lenp);

	if (write && !ret)
		diag_change();

	return ret;
}
/*
 * Sysctl handler for vm.overcommit_ratio.  A successful write clears
 * the absolute kbytes setting, since the two are mutually exclusive.
 */
int overcommit_ratio_handler(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int err = proc_dointvec(table, write, buffer, lenp, ppos);

	/* Setting the ratio invalidates any absolute kbytes limit. */
	if (!err && write)
		sysctl_overcommit_kbytes = 0;

	return err;
}
/*
 * Sysctl handler: normalizes any written non-zero value to 1, then
 * signals the NLM device.  Note the device is signalled on reads too,
 * matching the original behavior.
 */
int nlm_ctl_handler(ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *flag = (int *)(ctl->data);
	int rc = proc_dointvec(ctl, write, buffer, lenp, ppos);

	/* Collapse any truthy write to exactly 1. */
	if (write && *flag)
		*flag = 1;

	nlm_nlm_common_interrupt_device();

	return rc;
}
/*
 * Proc handler for the knllog enable flag.  Writes are rejected while a
 * dump is in progress; enabling (re)starts and clears the log.  The
 * signature (with or without struct file) is version-dependent.
 */
static int proc_do_knllog_intvec_enable(ctl_table *table, int write, struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos )
/* NOTE(review): the matching #if for this #endif (selecting the
 * version-dependent prototype) precedes this chunk of the file. */
#endif
{
	int rc = 0;

	if ( !table || !table->data )
		return -EINVAL;

	/* Don't allow changes while a dump is draining the buffer. */
	if (knllog.dumping)
	{
		printk(KERN_WARNING "Please wait for dump to complete...\n");
		return -EINVAL;
	}

	if ( write )
	{
		/* get value from buffer into knllog.enable */
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
		rc = proc_dointvec(table, write, buffer, lenp, ppos );
#else
		rc = proc_dointvec(table, write, filp, buffer, lenp, ppos );
#endif
		if (rc < 0)
			return rc;

		/* A non-zero write turns logging on and resets the buffer. */
		if (knllog.enable)
		{
			knllog_enable();
			knllog_clear();
		}
	}
	else
	{
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,31)
		rc = proc_dointvec(table, write, buffer, lenp, ppos ); /* No special processing for read. */
#else
		rc = proc_dointvec(table, write, filp, buffer, lenp, ppos ); /* No special processing for read. */
#endif
	}
	return rc;
}
/*
 * Sysctl handler for the bridge-netfilter boolean flags: any non-zero
 * written value is collapsed to exactly 1.
 */
static int brnf_sysctl_call_tables(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int rc = proc_dointvec(ctl, write, buffer, lenp, ppos);

	/* Collapse any truthy write to exactly 1. */
	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;

	return rc;
}