/*
 * Create the /proc entry used by the Marvell Ethernet tool and attach
 * its read/write handlers.
 *
 * Returns 0 on success, -ENOMEM if the proc entry could not be created.
 *
 * Fix: the original dereferenced the result of proc_net_create()
 * unconditionally and would oops if the entry could not be allocated.
 */
int __init start_mv_eth_tool(void)
{
	mv_eth_tool = proc_net_create(FILE_NAME, 0666, NULL);
	if (mv_eth_tool == NULL)
		return -ENOMEM;

	mv_eth_tool->read_proc = mv_eth_tool_read;
	mv_eth_tool->write_proc = mv_eth_tool_write;
	mv_eth_tool->nlink = 1;
	return 0;
}
/*
 * Module init for the IP-VS lblcr (Locality-Based Least-Connection with
 * Replication) scheduler: registers its sysctl table, optionally a debug
 * /proc entry, then the scheduler itself.
 *
 * Returns the result of register_ip_vs_scheduler() (0 on success).
 */
static int __init ip_vs_lblcr_init(void)
{
	INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list);
	/* NOTE(review): the sysctl_header returned here is stored but never
	 * checked for NULL — presumably a failed registration is tolerated;
	 * confirm against the cleanup path in the module exit routine. */
	lblcr_sysctl_table.sysctl_header =
		register_sysctl_table(lblcr_sysctl_table.root_dir, 0);
#ifdef CONFIG_IP_VS_LBLCR_DEBUG
	/* Debug-only /proc entry exposing scheduler state. */
	proc_net_create("ip_vs_lblcr", 0, ip_vs_lblcr_getinfo);
#endif
	return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
int __init start_mv_eth_tool(void) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) mv_eth_tool = proc_net_create(FILE_NAME , 0666 , NULL); #else mv_eth_tool = create_proc_entry(FILE_NAME , 0666 , init_net.proc_net); #endif mv_eth_tool->read_proc = mv_eth_tool_read; mv_eth_tool->write_proc = mv_eth_tool_write; mv_eth_tool->nlink = 1; return 0; }
/*
 * Allocate and initialize the Mobile IPv6 binding cache: the cache
 * struct itself, its expiry timer, an entry allocation pool, a hashlist
 * of @size entries, and (with procfs) a /proc/net/mip6_bcache entry.
 *
 * @size: capacity of the cache; must be at least 1.
 *
 * Returns 0 on success, -1 on any allocation failure.  Each failure
 * path unwinds exactly the resources acquired before it.
 */
int __init mipv6_initialize_bcache(__u32 size)
{
	DEBUG_FUNC();

	if (size < 1) {
		DEBUG((DBG_ERROR, "Binding cache size must be at least 1"));
		return -1;
	}

	bcache = (struct mipv6_bcache *)
		kmalloc(sizeof(struct mipv6_bcache), GFP_KERNEL);
	if (bcache == NULL) {
		DEBUG((DBG_ERROR, "Couldn't allocate memory for binding cache"));
		return -1;
	}

	/* Timer is initialized but not armed here; timer_handler fires later
	 * when entries are scheduled for expiry. */
	init_timer(&bcache->callback_timer);
	bcache->callback_timer.data = 0;
	bcache->callback_timer.function = timer_handler;
	bcache->size = size;
	bcache->lock = RW_LOCK_UNLOCKED;

	/* Pre-sized pool of entry structs, one slot per cache entry. */
	if ((bcache->entry_pool = mipv6_create_allocation_pool(
		size, sizeof(struct mipv6_bcache_entry), GFP_KERNEL)) == NULL) {
		DEBUG((DBG_ERROR, "mipv6_bcache_init(): Allocation pool creation failed"));
		kfree(bcache);
		return -1;
	}
	if ((bcache->entries = hashlist_create(size, MIPV6_BCACHE_HASHSIZE)) == NULL) {
		DEBUG((DBG_ERROR, "Failed to initialize hashlist"));
		/* Unwind the pool allocated just above, then the cache. */
		mipv6_free_allocation_pool(bcache->entry_pool);
		kfree(bcache);
		return -1;
	}

#ifdef CONFIG_PROC_FS
	/* NOTE(review): proc_net_create() result is ignored — the /proc
	 * entry is informational only, so failure is tolerated here. */
	proc_net_create("mip6_bcache", 0, bcache_proc_info);
#endif

	DEBUG((DBG_INFO, "Binding cache initialized"));
	return 0;
}
/*
 * Allocate and initialize the Mobile IPv6 Home Agents List: the list
 * struct, its expiry timer, a hashlist of @size entries (32 hash
 * buckets), and (with procfs) a /proc/net/mip6_home_agents entry.
 *
 * @size: capacity of the list; must be non-zero.
 *
 * Returns 0 on success, -1 on any allocation failure.
 *
 * Fixes: @size is an unsigned __u32, so the original "size <= 0" test
 * could only ever mean "size == 0" (and draws an unsigned-comparison
 * warning) — made explicit.  Also dropped the unnecessary cast of
 * kmalloc()'s void * return.
 */
int __init mipv6_initialize_halist(__u32 size)
{
	DEBUG_FUNC();

	if (size == 0) {
		DEBUG((DBG_ERROR, "mipv6_initialize_halist: size must be at least 1"));
		return -1;
	}

	home_agents = kmalloc(sizeof(struct mipv6_halist), GFP_KERNEL);
	if (home_agents == NULL) {
		DEBUG((DBG_ERROR, "Couldn't allocate memory for Home Agents List"));
		return -1;
	}

	/* Timer is initialized but not armed here; mipv6_halist_expire
	 * fires later when entries are scheduled for expiry. */
	init_timer(&home_agents->expire_timer);
	home_agents->expire_timer.data = 0;
	home_agents->expire_timer.function = mipv6_halist_expire;
	home_agents->lock = RW_LOCK_UNLOCKED;

	home_agents->entries = hashlist_create(size, 32);
	if (home_agents->entries == NULL) {
		DEBUG((DBG_ERROR, "Failed to initialize hashlist"));
		kfree(home_agents);
		return -1;
	}

#ifdef CONFIG_PROC_FS
	/* Informational /proc entry; creation failure is tolerated. */
	proc_net_create("mip6_home_agents", 0, halist_proc_info);
#endif /* CONFIG_PROC_FS */

	DEBUG((DBG_INFO, "Home Agents List initialized"));
	return 0;
}
static int __init kaodv_init(void) { struct net_device *dev = NULL; int i, ret = -ENOMEM; const struct kaodv_proc_file *f; kaodv_expl_init(); ret = kaodv_queue_init(); if (ret < 0) return ret; ret = kaodv_netlink_init(); if (ret < 0) goto cleanup_queue; ret = nf_register_hook(&kaodv_ops[0]); if (ret < 0) goto cleanup_netlink; ret = nf_register_hook(&kaodv_ops[1]); if (ret < 0) goto cleanup_hook0; ret = nf_register_hook(&kaodv_ops[2]); if (ret < 0) goto cleanup_hook1; /* Prefetch network device info (ip, broadcast address, ifindex). */ for (i = 0; i < MAX_INTERFACES; i++) { if (!ifname[i]) break; dev = dev_get_by_name(&init_net, ifname[i]); if (!dev) { printk("No device %s available, ignoring!\n", ifname[i]); continue; } if_info_add(dev); dev_put(dev); } #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) proc_net_create("kaodv", 0, kaodv_proc_info); #else // if (!create_proc_read_entry("kaodv", 0, init_net.proc_net, kaodv_read_proc, // NULL)) // if (!proc_create("kaodv", 0, init_net.proc_net, &kaodv_fops)) // KAODV_DEBUG("Could not create kaodv proc entry"); for (f = kaodv_proc_files; f->name[0]; f++) { if (!proc_create_data(f->name, 0, init_net.proc_net, &kaodv_proc_fops, f->show)) { KAODV_DEBUG("Could not create kaodv proc entry"); } } #endif KAODV_DEBUG("Module init OK"); return ret; cleanup_hook1: nf_unregister_hook(&kaodv_ops[1]); cleanup_hook0: nf_unregister_hook(&kaodv_ops[0]); cleanup_netlink: kaodv_netlink_fini(); cleanup_queue: kaodv_queue_fini(); return ret; }
/*
 * Module init for the kaodv (kernel AODV) netfilter module, older
 * kernel variant: set up the expl table, packet queue and netlink
 * channel, register three netfilter hooks, snapshot per-interface
 * address info into the netdevs[] array, and create the /proc entry.
 *
 * Returns 0 on success or a negative errno; every failure path unwinds
 * exactly what was set up before it via the goto chain.
 */
static int __init kaodv_init(void)
{
	struct net_device *dev = NULL;
	struct in_device *indev;
	struct in_ifaddr **ifap = NULL;
	struct in_ifaddr *ifa = NULL;
	int i, ret = -ENOMEM;
#ifndef KERNEL26
	EXPORT_NO_SYMBOLS;
#endif

	kaodv_expl_init();

	ret = kaodv_queue_init();
	if (ret < 0)
		return ret;

	ret = kaodv_netlink_init();
	if (ret < 0)
		goto cleanup_queue;

	ret = nf_register_hook(&kaodv_ops[0]);
	if (ret < 0)
		goto cleanup_netlink;

	ret = nf_register_hook(&kaodv_ops[1]);
	if (ret < 0)
		goto cleanup_hook0;

	ret = nf_register_hook(&kaodv_ops[2]);
	if (ret < 0)
		goto cleanup_hook1;

	/* Prefetch network device info (ip, broadcast address, ifindex). */
	/* NOTE(review): nif is incremented once per configured interface
	 * without a visible bound check against the netdevs[] capacity —
	 * presumably netdevs[] holds at least MAX_INTERFACES entries;
	 * confirm at its definition. */
	for (i = 0; i < MAX_INTERFACES; i++) {
		if (!ifname[i])
			break;
		dev = dev_get_by_name(ifname[i]);
		if (!dev) {
			printk("No device %s available, ignoring!\n", ifname[i]);
			continue;
		}
		netdevs[nif].ifindex = dev->ifindex;
		// indev = inetdev_by_index(dev->ifindex);
		indev = in_dev_get(dev);
		if (indev) {
			/* Find the ifaddr whose label matches the device name
			 * (skips aliases like "eth0:1"). */
			for (ifap = &indev->ifa_list; (ifa = *ifap) != NULL;
			     ifap = &ifa->ifa_next)
				if (!strcmp(dev->name, ifa->ifa_label))
					break;

			if (ifa) {
				netdevs[nif].ip_addr = ifa->ifa_address;
				netdevs[nif].bc_addr = ifa->ifa_broadcast;
				//printk("dev ip=%s bc=%s\n", print_ip(netdevs[nif].ip_addr), print_ip(netdevs[nif].bc_addr));
			}
			in_dev_put(indev);
		}
		nif++;
		dev_put(dev);
	}

	/* Informational /proc entry; creation result is ignored. */
	proc_net_create("kaodv", 0, kaodv_proc_info);

	return ret;

 cleanup_hook1:
	nf_unregister_hook(&kaodv_ops[1]);
 cleanup_hook0:
	nf_unregister_hook(&kaodv_ops[0]);
 cleanup_netlink:
	kaodv_netlink_fini();
 cleanup_queue:
	kaodv_queue_fini();
	return ret;
}
/*
 * Protocol-family init for IPv6 (NET4.0 era): registers sysctls,
 * ICMPv6/NDISC/IGMPv6, the /proc/net *6 entries, then the remaining
 * IPv6 subsystems, and finally exposes the AF_INET6 socket family.
 *
 * Returns 0 on success or a negative errno.  Failures after the proc
 * entries unwind in strict reverse order through the goto chain;
 * sock_register() is deliberately last so userspace cannot create
 * INET6 sockets before everything is initialized.
 */
static int __init inet6_init(void)
{
	struct sk_buff *dummy_skb;
	int err;

#ifdef MODULE
	if (!mod_member_present(&__this_module, can_unload))
		return -EINVAL;

	__this_module.can_unload = &ipv6_unload;
#endif

	printk(KERN_INFO "IPv6 v0.8 for NET4.0\n");

	/* inet6_skb_parm is stashed in skb->cb; refuse to start if it no
	 * longer fits (compile-environment sanity check, not a runtime
	 * condition). */
	if (sizeof(struct inet6_skb_parm) > sizeof(dummy_skb->cb)) {
		printk(KERN_CRIT "inet6_proto_init: size fault\n");
		return -EINVAL;
	}

	/*
	 * ipngwg API draft makes clear that the correct semantics
	 * for TCP and UDP is to consider one TCP and UDP instance
	 * in a host availiable by both INET and INET6 APIs and
	 * able to communicate via both network protocols.
	 */

#if defined(MODULE) && defined(CONFIG_SYSCTL)
	ipv6_sysctl_register();
#endif
	err = icmpv6_init(&inet6_family_ops);
	if (err)
		goto icmp_fail;
	err = ndisc_init(&inet6_family_ops);
	if (err)
		goto ndisc_fail;
	err = igmp6_init(&inet6_family_ops);
	if (err)
		goto igmp_fail;

	/* Create /proc/foo6 entries. */
#ifdef CONFIG_PROC_FS
	err = -ENOMEM;
	if (!proc_net_create("raw6", 0, raw6_get_info))
		goto proc_raw6_fail;
	if (!proc_net_create("tcp6", 0, tcp6_get_info))
		goto proc_tcp6_fail;
	if (!proc_net_create("udp6", 0, udp6_get_info))
		goto proc_udp6_fail;
	if (!proc_net_create("sockstat6", 0, afinet6_get_info))
		goto proc_sockstat6_fail;
	if (!proc_net_create("snmp6", 0, afinet6_get_snmp))
		goto proc_snmp6_fail;
#endif
	/* NOTE(review): initializers from here on return void — failures in
	 * these subsystems are not reported back from inet6_init(). */
	ipv6_netdev_notif_init();
	ipv6_packet_init();
	ip6_route_init();
	ip6_flowlabel_init();
	addrconf_init();
	sit_init();

	/* Init v6 transport protocols. */
	udpv6_init();
	tcpv6_init();

	/* Now the userspace is allowed to create INET6 sockets. */
	(void) sock_register(&inet6_family_ops);

	return 0;

	/* Unwind labels: each removes only what was created before the
	 * corresponding failure point, in reverse order. */
#ifdef CONFIG_PROC_FS
proc_snmp6_fail:
	proc_net_remove("sockstat6");
proc_sockstat6_fail:
	proc_net_remove("udp6");
proc_udp6_fail:
	proc_net_remove("tcp6");
proc_tcp6_fail:
	proc_net_remove("raw6");
proc_raw6_fail:
	igmp6_cleanup();
#endif
igmp_fail:
	ndisc_cleanup();
ndisc_fail:
	icmpv6_cleanup();
icmp_fail:
#if defined(MODULE) && defined(CONFIG_SYSCTL)
	ipv6_sysctl_unregister();
#endif
	return err;
}