/* Register the RDS/IW sysctl table. Returns 0 on success, -ENOMEM on failure. */
int __init rds_iw_sysctl_init(void)
{
	rds_iw_sysctl_hdr = register_sysctl_paths(rds_iw_sysctl_path,
						  rds_iw_sysctl_table);
	if (!rds_iw_sysctl_hdr)
		return -ENOMEM;
	return 0;
}
/* Register the DCCP sysctl table. Returns 0 on success, -ENOMEM on failure. */
int __init dccp_sysctl_init(void)
{
	dccp_table_header = register_sysctl_paths(dccp_path,
						  dccp_default_table);
	if (dccp_table_header == NULL)
		return -ENOMEM;
	return 0;
}
/* Register the RDS/IB sysctl table. Returns 0 on success, -ENOMEM on failure. */
int rds_ib_sysctl_init(void)
{
	rds_ib_sysctl_hdr = register_sysctl_paths(rds_ib_sysctl_path,
						  rds_ib_sysctl_table);
	return rds_ib_sysctl_hdr ? 0 : -ENOMEM;
}
/* * This must be called after early kernel init, since then the rootdev * is available. */ static void check_pinning_enforcement(struct super_block *mnt_sb) { bool ro = false; /* * If load pinning is not enforced via a read-only block * device, allow sysctl to change modes for testing. */ if (mnt_sb->s_bdev) { char bdev[BDEVNAME_SIZE]; ro = bdev_read_only(mnt_sb->s_bdev); bdevname(mnt_sb->s_bdev, bdev); pr_info("%s (%u:%u): %s\n", bdev, MAJOR(mnt_sb->s_bdev->bd_dev), MINOR(mnt_sb->s_bdev->bd_dev), ro ? "read-only" : "writable"); } else pr_info("mnt_sb lacks block device, treating as: writable\n"); if (!ro) { if (!register_sysctl_paths(loadpin_sysctl_path, loadpin_sysctl_table)) pr_notice("sysctl registration failed!\n"); else pr_info("enforcement can be disabled.\n"); } else pr_info("load pinning engaged.\n"); }
/* Register the static IPv6 sysctl skeleton. Returns 0 or -ENOMEM. */
int ipv6_static_sysctl_register(void)
{
	ip6_base = register_sysctl_paths(net_ipv6_ctl_path,
					 ipv6_static_skeleton);
	return ip6_base ? 0 : -ENOMEM;
}
/*
 * Initialize bridge netfilter: fake dst entries, netfilter hooks and
 * (if configured) the sysctl table.  Unwinds in reverse order on failure.
 */
int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0)
		goto err_dst;

#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		ret = -ENOMEM;
		goto err_hooks;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;

#ifdef CONFIG_SYSCTL
err_hooks:
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#endif
err_dst:
	dst_entries_destroy(&fake_dst_ops);
	return ret;
}
int iso_params_init() { int i; memset(iso_params_table, 0, sizeof(iso_params_table)); for(i = 0; i < 32; i++) { struct ctl_table *entry = &iso_params_table[i]; if(iso_params[i].ptr == NULL) break; entry->procname = iso_params[i].name; entry->data = iso_params[i].ptr; entry->maxlen = sizeof(int); entry->mode = 0644; entry->proc_handler = proc_dointvec; } iso_sysctl = register_sysctl_paths(iso_params_path, iso_params_table); if(iso_sysctl == NULL) goto err; return 0; err: return -1; }
/* Register the tile unaligned-access sysctl table (tilepro only). */
static int __init proc_sys_tile_init(void)
{
#ifndef __tilegx__ /* FIXME: GX: no support for unaligned access yet */
	/* Registration result is deliberately ignored; init always succeeds. */
	register_sysctl_paths(tile_path, unaligned_table);
#endif
	return 0;
}
int __init mpls_sysctl_init(void) { MPLS_ENTER; mpls_table_header = register_sysctl_paths(mpls_path, mpls_table); if (!mpls_table_header) { MPLS_EXIT return -ENOMEM; }
/* Reserve the IPv6 sysctl path with an empty table. Returns 0 or -ENOMEM. */
int ipv6_static_sysctl_register(void)
{
	static struct ctl_table empty[1];

	ip6_base = register_sysctl_paths(net_ipv6_ctl_path, empty);
	return ip6_base ? 0 : -ENOMEM;
}
struct ctl_table_header *balong_register_sysctl_table(struct ctl_table *table) { #ifdef CONFIG_SYSCTL return register_sysctl_paths(balong_sysctl_path, table); #endif return NULL; }
/* Set up net/core sysctls: reserve the directory, then attach tables. */
static __init int sysctl_core_init(void)
{
	static struct ctl_table empty[1];

	/* Empty table just claims the net/core path; result is ignored. */
	register_sysctl_paths(net_core_path, empty);
	register_net_sysctl_rotable(net_core_path, net_core_table);
	/* Per-netns registration is the only call whose result is reported. */
	return register_pernet_subsys(&sysctl_core_ops);
}
/*
 * Function irda_sysctl_register (void)
 *
 *    Register our sysctl interface
 *
 */
int __init irda_sysctl_register(void)
{
	irda_table_header = register_sysctl_paths(irda_path, irda_table);
	return irda_table_header ? 0 : -ENOMEM;
}
/* Initialize RDS reconnect timing defaults and register the sysctl table. */
int rds_sysctl_init(void)
{
	rds_sysctl_reconnect_min = msecs_to_jiffies(1);
	rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min;

	rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path,
						     rds_sysctl_rds_table);
	return rds_sysctl_reg_table ? 0 : -ENOMEM;
}
/* Register the knamed sysctl variables. Returns 0 on success, -1 on failure. */
int knamed_sysctl_register(void)
{
	sysctl_header = register_sysctl_paths(knamed_ctl_path, knamed_vars);
	return sysctl_header ? 0 : -1;
}
/*
 * Register the LASAT sysctl table.  The header is never unregistered,
 * so it is deliberately kept in a local.
 */
static int __init lasat_register_sysctl(void)
{
	struct ctl_table_header *hdr;

	hdr = register_sysctl_paths(lasat_path, lasat_table);
	if (hdr == NULL) {
		printk(KERN_ERR "Unable to register LASAT sysctl\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * Register @table at @path once, stashing the header in *header, and
 * bump the optional reference count.  Returns 0 or -ENOMEM.
 */
static int nf_ct_register_sysctl(struct ctl_table_header **header,
				 struct ctl_path *path,
				 struct ctl_table *table,
				 unsigned int *users)
{
	/* Only register on first use; later callers share the header. */
	if (!*header) {
		*header = register_sysctl_paths(path, table);
		if (!*header)
			return -ENOMEM;
	}

	if (users)
		(*users)++;

	return 0;
}
static void check_locking_enforcement(void)
{
	/* If module locking is not being enforced, allow sysctl to change
	 * modes for testing. */
	if (rootdev_readonly()) {
		pr_info("module locking engaged.\n");
		return;
	}

	if (register_sysctl_paths(chromiumos_sysctl_path,
				  chromiumos_sysctl_table))
		pr_info("module locking can be disabled.\n");
	else
		pr_notice("sysctl registration failed!\n");
}
int rds_sysctl_init(void) { rds_sysctl_reconnect_min = msecs_to_jiffies(1); rds_sysctl_reconnect_min_jiffies = rds_sysctl_reconnect_min; #if LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0) rds_sysctl_reg_table = register_sysctl_paths(rds_sysctl_path, rds_sysctl_rds_table); #else rds_sysctl_reg_table = register_net_sysctl_table(&init_net, rds_sysctl_path, rds_sysctl_rds_table); #endif if (!rds_sysctl_reg_table) return -ENOMEM; return 0; }
/* Start the KFM control plane: generic netlink plus optional sysctl table. */
int kfm_control_start(void)
{
	int ret;

	ret = kfm_genl_register();
	if (ret) {
		KFM_ERR("cannot register Generic Netlink interface.\n");
		return ret;
	}

#ifdef CONFIG_SYSCTL
	/* NOTE(review): a NULL result here is silently ignored -- the
	 * function still returns 0 with no sysctl table registered.
	 * Consider reporting/unwinding; needs the matching genl
	 * unregister helper, which is not visible here. */
	kfm_table_header = register_sysctl_paths(kfm_ctl_path, kfm_table);
#endif
	return ret;
}
/*
 * Build and register a per-device AX.25 sysctl tree: one 0555 directory
 * entry per ax25_dev, each with its own copy of ax25_param_table whose
 * entries point at that device's values[] array.  Best-effort: returns
 * silently (registering nothing) if any allocation fails.
 */
void ax25_register_sysctl(void)
{
	ax25_dev *ax25_dev;
	int n, k;

	/* Device list is walked under the lock, so allocations below must
	 * be GFP_ATOMIC. */
	spin_lock_bh(&ax25_dev_lock);
	/* Size the top table: one slot per device plus one sentinel slot
	 * (the initial sizeof(ctl_table)). */
	for (ax25_table_size = sizeof(ctl_table), ax25_dev = ax25_dev_list;
	     ax25_dev != NULL; ax25_dev = ax25_dev->next)
		ax25_table_size += sizeof(ctl_table);

	if ((ax25_table = kzalloc(ax25_table_size, GFP_ATOMIC)) == NULL) {
		spin_unlock_bh(&ax25_dev_lock);
		return;
	}

	for (n = 0, ax25_dev = ax25_dev_list; ax25_dev != NULL;
	     ax25_dev = ax25_dev->next) {
		struct ctl_table *child = kmemdup(ax25_param_table,
						  sizeof(ax25_param_table),
						  GFP_ATOMIC);
		if (!child) {
			/* Roll back every child table built so far. */
			while (n--)
				kfree(ax25_table[n].child);
			kfree(ax25_table);
			spin_unlock_bh(&ax25_dev_lock);
			return;
		}
		ax25_table[n].child = ax25_dev->systable = child;
		ax25_table[n].ctl_name = n + 1;
		ax25_table[n].procname = ax25_dev->dev->name;
		ax25_table[n].mode = 0555;

		child[AX25_MAX_VALUES].ctl_name = 0;	/* just in case... */

		/* Point each parameter entry at this device's value slot. */
		for (k = 0; k < AX25_MAX_VALUES; k++)
			child[k].data = &ax25_dev->values[k];

		n++;
	}
	spin_unlock_bh(&ax25_dev_lock);

	ax25_table_header = register_sysctl_paths(ax25_path, ax25_table);
}
static void irq_affinity_register_sysctl_table(unsigned int irq, int cpu) { struct ctl_table *table; char *procname; struct ctl_table_header *head; table = (struct ctl_table *)kmalloc(sizeof(struct ctl_table)*2, GFP_KERNEL); if (NULL == table) { printk(KERN_ERR"%s %d failed to kmalloc \r\n", __FUNCTION__,__LINE__); return; } memset(table, 0, sizeof(struct ctl_table)*2); procname = (char *)kmalloc(IRQ_AFFINITY_PROCNAME_SIZE, GFP_KERNEL); if (NULL == procname) { kfree(table); printk(KERN_ERR"%s %d failed to kmalloc \r\n", __FUNCTION__,__LINE__); return; } snprintf(procname, IRQ_AFFINITY_PROCNAME_SIZE, "%d", irq); table[0].procname = procname; table[0].data = &irq_affinity_records[irq]; table[0].maxlen = sizeof(int); table[0].mode = 0644; table[0].proc_handler = proc_dointvec_minmax; table[0].extra1 = &min_cpu_id; table[0].extra2 = &max_cpu_id; head = register_sysctl_paths(balong_irq_affinity_path, table); if (NULL == head) printk(KERN_ERR "%s %d : failed to irq_affinity_register_sysctl_table %d\r\n", __FUNCTION__, __LINE__, irq); return; };
/* Register the Phonet sysctl table. Returns 0 on success, -ENOMEM on failure. */
int __init phonet_sysctl_init(void)
{
	phonet_table_hrd = register_sysctl_paths(phonet_ctl_path,
						 phonet_table);
	if (phonet_table_hrd == NULL)
		return -ENOMEM;
	return 0;
}
/* Register the X.25 sysctl table, saving the header in x25_table_header. */
void __init x25_register_sysctl(void)
{
	/* Return value unchecked: on failure the header is simply NULL. */
	x25_table_header = register_sysctl_paths(x25_path, x25_table);
}
/* Register the IPX sysctl table, saving the header in ipx_table_header. */
void ipx_register_sysctl(void)
{
	/* Return value unchecked: on failure the header is simply NULL. */
	ipx_table_header = register_sysctl_paths(ipx_path, ipx_table);
}
/* Register the DECnet sysctl table, saving the header in dn_table_header. */
void dn_register_sysctl(void)
{
	/* Return value unchecked: on failure the header is simply NULL. */
	dn_table_header = register_sysctl_paths(dn_path, dn_table);
}
/*
 * Initialize power interface
 */
static int __init pm_init(void)
{
	/* Registration result is deliberately ignored; init always succeeds. */
	register_sysctl_paths(pm_path, pm_table);
	return 0;
}
/* Register the ROSE sysctl table, saving the header in rose_table_header. */
void __init rose_register_sysctl(void)
{
	/* Return value unchecked: on failure the header is simply NULL. */
	rose_table_header = register_sysctl_paths(rose_path, rose_table);
}
/* Register the NET/ROM sysctl table, saving the header in nr_table_header. */
void __init nr_register_sysctl(void)
{
	/* Return value unchecked: on failure the header is simply NULL. */
	nr_table_header = register_sysctl_paths(nr_path, nr_table);
}
/* Register the LLC sysctl table. Returns 0 on success, -ENOMEM on failure. */
int __init llc_sysctl_init(void)
{
	llc_table_header = register_sysctl_paths(llc_path, llc_table);
	if (!llc_table_header)
		return -ENOMEM;
	return 0;
}