/*
 * Module init: load the 302-response HTML body, expose the reload proc
 * entry, then register the HTTP target and match.
 *
 * Fix: the original left the proc entry (and a possibly kmalloc'd html
 * buffer) behind when either registration failed; unwind everything in
 * reverse order on error.
 */
static int __init http_mt_init(void)
{
	int ret;

	ret = html_read(RESPONSE_FILE_PATH);
	if (ret < 0) {
		/* fall back to the built-in 302 page */
		rep_html_buf = temp_302;
		printk("Read html file failed, use default\n");
	}
	proc_reload_html_init();

	ret = xt_register_target(&http_tg_reg);
	if (ret < 0) {
		printk("the ret is %d\n", ret);
		goto err_proc;
	}

	ret = xt_register_match(&http_match);
	if (ret < 0)
		goto err_target;

	return 0;

err_target:
	xt_unregister_target(&http_tg_reg);
err_proc:
	remove_proc_entry(PROC_RELOAD_ENTRY, NULL);
	if (rep_html_buf && rep_html_buf != temp_302) {
		kfree(rep_html_buf);
		rep_html_buf = NULL;
	}
	return ret;
}
/*
 * Register the NAT table and the SNAT/DNAT targets; on any failure,
 * unwind whatever was already set up, in reverse order.
 */
int __init nf_nat_rule_init(void)
{
	int ret;

	nat_table = ipt_register_table(&init_net, &__nat_table,
				       &nat_initial_table.repl);
	if (IS_ERR(nat_table))
		return PTR_ERR(nat_table);

	ret = xt_register_target(&ipt_snat_reg);
	if (ret != 0)
		goto err_table;
	ret = xt_register_target(&ipt_dnat_reg);
	if (ret != 0)
		goto err_snat;
	return 0;

err_snat:
	xt_unregister_target(&ipt_snat_reg);
err_table:
	ipt_unregister_table(nat_table);
	return ret;
}
/*
 * Module exit for ipt_ULOG.
 *
 * Fix: the original called sock_release() BEFORE deleting the per-group
 * flush timers; a timer firing after the release would transmit on a
 * freed netlink socket. Delete pending timers and free queued skbs
 * first, then release the socket (same ordering as the ebt_ulog fix).
 */
static void __exit ipt_ulog_fini(void)
{
	ulog_buff_t *ub;
	int i;

	DEBUGP("ipt_ULOG: cleanup_module\n");

	if (nflog)
		nf_log_unregister(&ipt_ulog_logger);
	xt_unregister_target(&ipt_ulog_reg);

	/* remove pending timers and free allocated skb's before the
	 * socket goes away */
	for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
		ub = &ulog_buffers[i];
		if (timer_pending(&ub->timer)) {
			DEBUGP("timer was pending, deleting\n");
			del_timer(&ub->timer);
		}
		if (ub->skb) {
			kfree_skb(ub->skb);
			ub->skb = NULL;
		}
	}

	sock_release(nflognl->sk_socket);
}
/*
 * Module exit for the HTTP match/target.
 *
 * Fix: the original freed rep_html_buf while the target was still
 * registered, so a packet in flight could read freed memory. Tear down
 * the netfilter hooks first, then the proc entry, then free the buffer.
 */
static void __exit http_mt_exit(void)
{
	xt_unregister_match(&http_match);
	xt_unregister_target(&http_tg_reg);
	remove_proc_entry(PROC_RELOAD_ENTRY, NULL);
	/* only free if it was read from the file, not the static default */
	if (rep_html_buf && rep_html_buf != temp_302)
		kfree(rep_html_buf);
}
/*
 * Module exit: drop the CHAOS target, then release the module
 * references on the helpers grabbed at init time.
 */
static void __exit chaos_tg_exit(void)
{
	xt_unregister_target(&chaos_tg_reg);
	module_put(xm_tcp->me);
	module_put(xt_reject->me);
	if (have_delude)
		module_put(xt_delude->me);
	if (have_tarpit)
		module_put(xt_tarpit->me);
}
/*
 * Register the MARK targets (ipt v0, ipt v1, ip6t v0).
 *
 * Fix: the original was missing a return after the v1 registration
 * failed — it unregistered v0 but then fell through and registered the
 * ip6t target anyway, and its cleanup path unregistered v1 even though
 * v1 had never been registered.
 */
static int __init xt_mark_init(void)
{
	int err;

	err = xt_register_target(&ipt_mark_reg_v0);
	if (err)
		return err;
	err = xt_register_target(&ipt_mark_reg_v1);
	if (err) {
		xt_unregister_target(&ipt_mark_reg_v0);
		return err;
	}
	err = xt_register_target(&ip6t_mark_reg_v0);
	if (err) {
		/* unwind in reverse registration order */
		xt_unregister_target(&ipt_mark_reg_v1);
		xt_unregister_target(&ipt_mark_reg_v0);
	}
	return err;
}
/*
 * Module exit for NAT64: tear down translation state, the xt target,
 * the character device and the netlink socket, then free the stored
 * IPv6 prefix string.
 */
void __exit nat64_exit(void)
{
	nat64_determine_incoming_tuple_destroy();
	nat64_destroy_bib_session_memory();
	xt_unregister_target(&nat64_tg_reg);
	nat64_destroy_character_device();
	if (my_nl_sock)
		netlink_kernel_release(my_nl_sock);
	kfree(ipv6_pref_addr_str);
	pr_debug("NAT64 module removed!\n\n\n");
}
/*
 * Register the IPv6 LOG target and bind it as the PF_INET6 logger.
 *
 * Fix: the original returned -EEXIST (failing module load) while
 * leaving the target registered. -EEXIST only means another logger is
 * already bound for PF_INET6 — the target is still usable, so treat it
 * as success.
 */
static int __init ip6t_log_init(void)
{
	int ret;

	ret = xt_register_target(&ip6t_log_reg);
	if (ret < 0)
		return ret;
	ret = nf_log_register(PF_INET6, &ip6t_logger);
	if (ret < 0) {
		if (ret == -EEXIST)
			/* another logger already bound; keep our target */
			ret = 0;
		else
			xt_unregister_target(&ip6t_log_reg);
	}
	return ret;
}
/*
 * Register the CONNMARK target and match; roll back the target if the
 * match registration fails.
 */
static int __init connmark_mt_init(void)
{
	int ret;

	ret = xt_register_target(&connmark_tg_reg);
	if (ret < 0)
		return ret;
	ret = xt_register_match(&connmark_mt_reg);
	if (ret < 0)
		xt_unregister_target(&connmark_tg_reg);
	return ret;
}
/*
 * Register the SECMARK targets for IPv4 and IPv6; drop the IPv4 one
 * again if the IPv6 registration fails.
 */
static int __init xt_secmark_init(void)
{
	int err;

	err = xt_register_target(&ipt_secmark_reg);
	if (err != 0)
		return err;
	err = xt_register_target(&ip6t_secmark_reg);
	if (err != 0)
		xt_unregister_target(&ipt_secmark_reg);
	return err;
}
/*
 * Register the NOTRACK target and the CT target array.
 *
 * Fix: the source was mojibake-damaged — "&no" in "&notrack_tg_reg"
 * had been collapsed into the single character "¬" (an HTML "&not;"
 * entity artifact), which does not compile. Restore the identifier.
 */
static int __init xt_ct_tg_init(void)
{
	int ret;

	ret = xt_register_target(&notrack_tg_reg);
	if (ret < 0)
		return ret;

	ret = xt_register_targets(xt_ct_tg_reg, ARRAY_SIZE(xt_ct_tg_reg));
	if (ret < 0) {
		xt_unregister_target(&notrack_tg_reg);
		return ret;
	}
	return 0;
}
/*
 * Module exit for ip_SYSLOG: unbind the logger, stop the reconnect
 * timer and any scheduled work, close the syslog socket, report any
 * log entries still queued, remove the proc stats entry and finally
 * unregister the target.
 *
 * NOTE(review): teardown order is deliberate-looking (logger unbound
 * first so no new work is queued) but the target is unregistered last,
 * after the socket is closed — confirm the target callback cannot run
 * and touch sl_socket in that window.
 */
static void __exit ipt_log_fini(void)
{
	nf_log_unregister(&ipt_log_logger);
	/* remove timer, if it is pending */
	if (timer_pending(&timer))
		del_timer(&timer);
	flush_scheduled_work();
	syslog_close(&sl_socket);
	/* anything still on the list at this point is dropped */
	if (loglist_total > 0)
		printk(KERN_WARNING "ip_SYSLOG: dropping %d log(s). Dropped: %d\n",
		       loglist_total, loglist_dropped + loglist_total);
	proc_net_remove(&init_net, STAT_PROC_FS_NAME);
	xt_unregister_target(&ipt_log_reg);
}
/*
 * Module exit for ebt_ulog: unhook the logger and target, flush every
 * per-group buffer under its lock, then release the netlink socket
 * last so no timer can fire against a freed socket.
 */
static void __exit ebt_ulog_fini(void)
{
	ebt_ulog_buff_t *ub;
	int i;

	nf_log_unregister(&ebt_ulog_logger);
	xt_unregister_target(&ebt_ulog_tg_reg);

	for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
		ub = &ulog_buffers[i];
		if (timer_pending(&ub->timer))
			del_timer(&ub->timer);
		spin_lock_bh(&ub->lock);
		if (ub->skb) {
			kfree_skb(ub->skb);
			ub->skb = NULL;
		}
		spin_unlock_bh(&ub->lock);
	}

	netlink_kernel_release(ebtulognl);
}
/*
 * Module init for ip_SYSLOG: register the target, create the proc
 * stats entry, connect to syslog (arming a reconnect timer when the
 * daemon refuses the connection) and bind the PF_INET logger.
 *
 * Fix: when create_proc_entry() failed, ret was still 0 from the
 * successful xt_register_target(), so init unregistered the target yet
 * reported success to the module loader. Set -ENOMEM on that path.
 */
static int __init ipt_log_init(void)
{
	int ret;
	struct proc_dir_entry *proc;

	ret = xt_register_target(&ipt_log_reg);
	if (ret < 0)
		return ret;

	proc = create_proc_entry(STAT_PROC_FS_NAME, 0, init_net.proc_net);
	if (proc) {
		proc->owner = THIS_MODULE;
		proc->proc_fops = &ip_syslogstat_proc_fops;
	} else {
		printk(KERN_ERR "ip_SYSLOG: failed to create proc entry\n");
		ret = -ENOMEM;	/* was left 0: failure reported as success */
		goto cleanup_target;
	}

	ret = syslog_connect(&sl_socket);
	if (ret < 0) {
		if (ret == -ECONNREFUSED) {
			/* daemon not up yet; retry from the timer */
			timer.expires = jiffies + msecs_to_jiffies(reconnect_freq);
			add_timer(&timer);
		} else
			goto cleanup_proc;
	}

	nf_log_register(PF_INET, &ipt_log_logger);
	return 0;

cleanup_proc:
	proc_net_remove(&init_net, STAT_PROC_FS_NAME);
cleanup_target:
	xt_unregister_target(&ipt_log_reg);
	return ret;
}
/*
 * Register the NAT table plus the SNAT and DNAT targets, unwinding in
 * reverse order on failure.
 */
int __init nf_nat_rule_init(void)
{
	int ret;

	ret = ipt_register_table(&nat_table, &nat_initial_table.repl);
	if (ret != 0)
		return ret;

	ret = xt_register_target(&ipt_snat_reg);
	if (ret != 0)
		goto err_table;
	ret = xt_register_target(&ipt_dnat_reg);
	if (ret != 0)
		goto err_snat;
	return 0;

err_snat:
	xt_unregister_target(&ipt_snat_reg);
err_table:
	ipt_unregister_table(&nat_table);
	return ret;
}
/*
 * Register the per-net NAT rule state and the SNAT/DNAT targets; roll
 * everything back in reverse order on failure.
 */
int __init nf_nat_rule_init(void)
{
	int ret;

	ret = register_pernet_subsys(&nf_nat_rule_net_ops);
	if (ret != 0)
		goto out;

	ret = xt_register_target(&ipt_snat_reg);
	if (ret != 0)
		goto unregister_table;
	ret = xt_register_target(&ipt_dnat_reg);
	if (ret != 0)
		goto unregister_snat;
	return 0;

unregister_snat:
	xt_unregister_target(&ipt_snat_reg);
unregister_table:
	unregister_pernet_subsys(&nf_nat_rule_net_ops);
out:
	return ret;
}
/*
 * Undo nf_nat_rule_init() in reverse order: targets first, then the
 * per-net subsystem.
 */
void nf_nat_rule_cleanup(void)
{
	xt_unregister_target(&ipt_dnat_reg);
	xt_unregister_target(&ipt_snat_reg);
	unregister_pernet_subsys(&nf_nat_rule_net_ops);
}
/* Module exit: drop the CONNMARK match, then the target. */
static void __exit connmark_mt_exit(void)
{
	xt_unregister_match(&connmark_mt_reg);
	xt_unregister_target(&connmark_tg_reg);
}
/* Module exit: unbind the ebt log backend, then drop the target. */
static void __exit ebt_log_fini(void)
{
	nf_log_unregister(&ebt_log_logger);
	xt_unregister_target(&ebt_log_tg_reg);
}
/* Module exit: unregister the SECMARK target. */
static void __exit secmark_tg_exit(void)
{
	xt_unregister_target(&secmark_tg_reg);
}
/* Module exit: unregister the TARPIT target. */
static void __exit fini(void)
{
	xt_unregister_target(&ipt_tarpit_reg);
}
/* Module exit: unbind the logger before dropping the LOG target. */
static void __exit log_tg_exit(void)
{
	nf_log_unregister(&ipt_log_logger);
	xt_unregister_target(&log_tg_reg);
}
/* Module exit: unregister the ebtables MARK target. */
static void __exit ebt_mark_fini(void)
{
	xt_unregister_target(&ebt_mark_tg_reg);
}
/*
 * Route the packet out a specific interface named by the rule
 * (--oif). Resolves the device by name, routes via its ifindex, and
 * either hands the packet to ip_direct_send() (stealing it) or tells
 * the stack to continue/drop depending on IP6T_ROUTE_CONTINUE.
 *
 * NOTE(review): when route6() fails, the function returns NF_DROP
 * without calling dev_put(dev_out) — looks like a net_device refcount
 * leak on that path; confirm and fix separately.
 */
static unsigned int route6_oif(const struct ip6t_route_target_info *route_info,
			       struct sk_buff *skb)
{
	unsigned int ifindex = 0;
	struct net_device *dev_out = NULL;

	/* The user set the interface name to use.
	 * Getting the current interface index.
	 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
	if ((dev_out = dev_get_by_name(&init_net, route_info->oif))) {
#else
	if ((dev_out = dev_get_by_name(route_info->oif))) {
#endif
		ifindex = dev_out->ifindex;
	} else {
		/* Unknown interface name : packet dropped */
		if (net_ratelimit())
			DEBUGP("ip6t_ROUTE: oif interface %s not found\n",
			       route_info->oif);

		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;
		else
			return NF_DROP;
	}

	/* Trying the standard way of routing packets */
	if (route6(skb, ifindex, route_info)) {
		dev_put(dev_out);
		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;
	} else
		return NF_DROP;
}

/*
 * Route the packet toward the rule's --gw gateway using the normal
 * routing machinery (ifindex 0 = unconstrained); steal and transmit on
 * success unless IP6T_ROUTE_CONTINUE is set.
 */
static unsigned int route6_gw(const struct ip6t_route_target_info *route_info,
			      struct sk_buff *skb)
{
	if (route6(skb, 0, route_info)) {
		if (route_info->flags & IP6T_ROUTE_CONTINUE)
			return IP6T_CONTINUE;

		ip_direct_send(skb);
		return NF_STOLEN;
	} else
		return NF_DROP;
}

/*
 * The ROUTE target entry point. The signature changed repeatedly
 * across kernel versions, hence the #if ladder; the body normalizes
 * everything down to (skb, route_info, hooknum).
 */
static unsigned int
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
target(struct sk_buff **pskb,
       unsigned int hooknum,
       const struct net_device *in,
       const struct net_device *out,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo,
       void *userinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
target(struct sk_buff **pskb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
target(struct sk_buff *skb,
       const struct net_device *in,
       const struct net_device *out,
       unsigned int hooknum,
       const struct xt_target *target,
       const void *targinfo)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
target(struct sk_buff *skb, const struct xt_target_param *par)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36) */
target(struct sk_buff *skb, const struct xt_action_param *par)
#endif
{
	/* pull targinfo/hooknum out of whichever parameter form we got */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
	const struct ip6t_route_target_info *route_info = targinfo;
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36)
	const struct ip6t_route_target_info *route_info = par->targinfo;
	unsigned int hooknum = par->hooknum;
#else
	const struct ip6t_route_target_info *route_info = par->targinfo;
	unsigned int hooknum = par->hooknum;
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
	struct sk_buff *skb = *pskb;
#endif
	struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
	unsigned int res;

	if (route_info->flags & IP6T_ROUTE_CONTINUE)
		goto do_it;

	/* If we are at PREROUTING or INPUT hook
	 * the TTL isn't decreased by the IP stack
	 */
	if (hooknum == NF_INET_PRE_ROUTING ||
	    hooknum == NF_INET_LOCAL_IN) {

		struct ipv6hdr *ipv6h = ipv6_hdr(skb);

		if (ipv6h->hop_limit <= 1) {
			/* Force OUTPUT device used as source address */
			skb->dev = skb_dst(skb)->dev;

			icmpv6_send(skb, ICMPV6_TIME_EXCEED,
				    ICMPV6_EXC_HOPLIMIT, 0);

			return NF_DROP;
		}

		ipv6h->hop_limit--;
	}

	if ((route_info->flags & IP6T_ROUTE_TEE)) {
		/*
		 * Copy the skb, and route the copy. Will later return
		 * IP6T_CONTINUE for the original skb, which should continue
		 * on its way as if nothing happened. The copy should be
		 * independently delivered to the ROUTE --gw.
		 */
		skb = skb_copy(skb, GFP_ATOMIC);
		if (!skb) {
			if (net_ratelimit())
				DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
			return IP6T_CONTINUE;
		}
	}

do_it:
	if (route_info->oif[0]) {
		res = route6_oif(route_info, skb);
	} else if (!ipv6_addr_any(gw)) {
		res = route6_gw(route_info, skb);
	} else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
		res = IP6T_CONTINUE;
	}

	/* with --tee the original packet always continues */
	if ((route_info->flags & IP6T_ROUTE_TEE))
		res = IP6T_CONTINUE;

	return res;
}

/*
 * Rule sanity check: the ROUTE target is only valid in the "mangle"
 * table, and (pre-2.6.19) the userspace targinfo size must match.
 * Signature varies by kernel version, same as target() above.
 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
static int
checkentry(const char *tablename,
	   const struct ip6t_entry *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
static int
checkentry(const char *tablename,
	   const void *e,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int targinfosize,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
static int
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28)
static bool
checkentry(const char *tablename,
	   const void *e,
	   const struct xt_target *target,
	   void *targinfo,
	   unsigned int hook_mask)
#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) */
static bool
checkentry(const struct xt_tgchk_param *par)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	const char *tablename = par->table;
#endif
	if (strcmp(tablename, "mangle") != 0) {
		printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
		return 0;
	}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
	if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
		printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
		       targinfosize,
		       IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
		return 0;
	}
#endif
	return 1;
}

/* Target registration record; xt_* API from 2.6.21, ip6t_* before. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
static struct xt_target ip6t_route_reg = {
#else
static struct ip6t_target ip6t_route_reg = {
#endif
	.name = "ROUTE",
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	.family = AF_INET6,
#endif
	.target = target,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
	.targetsize = sizeof(struct ip6t_route_target_info),
#endif
	.checkentry = checkentry,
	.me = THIS_MODULE
};

/* Module init: register the ROUTE target with the matching API. */
static int __init init(void)
{
	printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	if (xt_register_target(&ip6t_route_reg))
#else
	if (ip6t_register_target(&ip6t_route_reg))
#endif
		return -EINVAL;

	return 0;
}

/* Module exit: unregister the ROUTE target. */
static void __exit fini(void)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	xt_unregister_target(&ip6t_route_reg);
#else
	ip6t_unregister_target(&ip6t_route_reg);
#endif
}

module_init(init);
module_exit(fini);
MODULE_LICENSE("GPL");
/*
 * Module exit (pernet variant): unbind the logger and target, then
 * tear down the per-net state.
 */
static void __exit ebt_ulog_fini(void)
{
	nf_log_unregister(&ebt_ulog_logger);
	xt_unregister_target(&ebt_ulog_tg_reg);
	unregister_pernet_subsys(&ebt_ulog_net_ops);
}
/* Module exit: unregister the IPv6 REJECT target. */
static void __exit reject_tg6_exit(void)
{
	xt_unregister_target(&reject_tg6_reg);
}
/*
 * Compat shim: unhook the wrapped xt_target, then free the wrapper
 * allocated by the matching xtnu_register_target().
 */
void xtnu_unregister_target(struct xtnu_target *nt)
{
	xt_unregister_target(nt->__compat_target);
	kfree(nt->__compat_target);
}
/* Module exit: drop the SYNPROXY target, then its netfilter hooks. */
static void __exit synproxy_tg6_exit(void)
{
	xt_unregister_target(&synproxy_tg6_reg);
	nf_unregister_hooks(ipv6_synproxy_ops, ARRAY_SIZE(ipv6_synproxy_ops));
}
/* Module exit: unregister the ECN target. */
static void __exit ipt_ecn_fini(void)
{
	xt_unregister_target(&ipt_ecn_reg);
}
/*
 * Undo nf_nat_rule_init() in reverse: targets first, then the NAT
 * table itself.
 */
void nf_nat_rule_cleanup(void)
{
	xt_unregister_target(&ipt_dnat_reg);
	xt_unregister_target(&ipt_snat_reg);
	ipt_unregister_table(&nat_table);
}