/* Drop any queued packets associated with device ifindex */
static void ipq_dev_drop(ipq_queue_t *q, int ifindex)
{
	ipq_queue_element_t *e;

	while ((e = ipq_dequeue(q, dev_cmp, ifindex))) {
		e->verdict = NF_DROP;
		nf_reinject(e->skb, e->info, e->verdict);
		kfree(e);
	}
}
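/*
 * Hedged sketch, not from this source: a dev_cmp callback compatible
 * with the ipq_dequeue(q, dev_cmp, ifindex) call above would match a
 * queued element whose input or output device has the given ifindex,
 * roughly:
 */
static int dev_cmp(ipq_queue_element_t *e, unsigned long ifindex)
{
	if (e->info->indev && e->info->indev->ifindex == ifindex)
		return 1;
	if (e->info->outdev && e->info->outdev->ifindex == ifindex)
		return 1;
	return 0;
}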
static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info,
			unsigned queue_num, void *data)
{
	struct net_device *dev;
	struct net_device_stats *stats;
	struct sk_buff *skb2 = NULL;
	struct Qdisc *q;
	unsigned int index = skb->imq_flags & IMQ_F_IFMASK;
	int ret = -1;

	if (index >= numdevs)	/* valid devices are imq0..imq(numdevs-1) */
		return -1;

	dev = imq_devs + index;
	if (!(dev->flags & IFF_UP))) {
		skb->imq_flags = 0;
		nf_reinject(skb, info, NF_ACCEPT);
		return 0;
	}
	dev->last_rx = jiffies;

	/* skb already has an owner? => work on a clone */
	if (skb->destructor) {
		skb2 = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (!skb)
			return -1;
	}
	skb->nf_info = info;

	stats = (struct net_device_stats *)dev->priv;
	stats->rx_bytes += skb->len;
	stats->rx_packets++;

	spin_lock_bh(&dev->queue_lock);
	q = dev->qdisc;
	if (q->enqueue) {
		q->enqueue(skb_get(skb), q);
		/* If the qdisc kept the packet, our extra reference is
		 * still shared; hand ownership to the destructor and
		 * drop our reference. */
		if (skb_shared(skb)) {
			skb->destructor = imq_skb_destructor;
			kfree_skb(skb);
			ret = 0;
		}
	}
	if (spin_is_locked(&dev->_xmit_lock))
		netif_schedule(dev);
	else
		while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
			/* NOTHING */;
	spin_unlock_bh(&dev->queue_lock);

	/* cloned? on failure free the clone, on success the superseded
	 * original */
	if (skb2)
		kfree_skb(ret ? skb : skb2);

	return ret;
}
static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, void *data)
{
	struct net_device *dev;
	struct net_device_stats *stats;
	struct sk_buff *skb2 = NULL;
	struct Qdisc *q;
	unsigned int index = skb->imq_flags & IMQ_F_IFMASK;
	int ret = -1;

	if (index >= numdevs)	/* valid devices are imq0..imq(numdevs-1) */
		return -1;

	dev = imq_devs + index;
	if (!(dev->flags & IFF_UP)) {
		skb->imq_flags = 0;
		nf_reinject(skb, info, NF_ACCEPT);
		return 0;
	}
	dev->last_rx = jiffies;

	/* skb already has an owner? => work on a clone */
	if (skb->destructor) {
		skb2 = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (!skb)
			return -1;
	}
	skb_push(skb, IMQ_HH_LEN(info));
	skb->nf_info = info;

	stats = (struct net_device_stats *)dev->priv;
	stats->rx_bytes += skb->len;
	stats->rx_packets++;

	spin_lock_bh(&dev->queue_lock);
	q = dev->qdisc;
	if (q->enqueue) {
		q->enqueue(skb_get(skb), q);
		/* If the qdisc kept the packet, our extra reference is
		 * still shared; hand ownership to the destructor and
		 * drop our reference. */
		if (skb_shared(skb)) {
			skb->destructor = imq_skb_destructor;
			kfree_skb(skb);
			ret = 0;
		}
	}
	qdisc_run(dev);
	spin_unlock_bh(&dev->queue_lock);

	/* cloned? on failure free the clone, on success the superseded
	 * original */
	if (skb2)
		kfree_skb(ret ? skb : skb2);

	return ret;
}
static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device_stats *stats = (struct net_device_stats *)dev->priv;

	stats->tx_bytes += skb->len;
	stats->tx_packets++;

	skb->imq_flags = 0;
	skb->destructor = NULL;

	dev->trans_start = jiffies;
	nf_reinject(skb, skb->nf_info, NF_ACCEPT);
	return 0;
}
/* Flush all packets */
static void ipq_flush(ipq_queue_t *q)
{
	ipq_queue_element_t *e;

	spin_lock_bh(&q->lock);
	q->flushing = 1;
	spin_unlock_bh(&q->lock);

	while ((e = ipq_dequeue(q, NULL, 0))) {
		e->verdict = NF_DROP;
		nf_reinject(e->skb, e->info, e->verdict);
		kfree(e);
	}

	spin_lock_bh(&q->lock);
	q->flushing = 0;
	spin_unlock_bh(&q->lock);
}
/* locking not needed when called from imq_nf_queue */
static void imq_nf_reinject_lockless(struct nf_queue_entry *entry,
				     unsigned int verdict)
{
	int status;

	if (!entry->next_outfn) {
		nf_reinject(entry, verdict);
		return;
	}

	status = entry->next_outfn(entry, entry->next_queuenum);
	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		kfree_skb(entry->skb);
		kfree(entry);
	}
}
static netdev_tx_t imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct nf_queue_entry *entry = skb->nf_queue_entry;

	skb->nf_queue_entry = NULL;
	dev->trans_start = jiffies;

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	if (unlikely(entry == NULL)) {
		/* We don't know what is going on here.. packet is queued for
		 * imq device, but (probably) not by us.
		 *
		 * If this packet was not sent here by imq_nf_queue(), then
		 * skb_save_cb() was not used and skb_free() should not show:
		 *   WARNING: IMQ: kfree_skb: skb->cb_next:..
		 * and/or
		 *   WARNING: IMQ: kfree_skb: skb->nf_queue_entry...
		 *
		 * However if this message is shown, then IMQ is somehow broken
		 * and you should report this to linuximq.net.
		 */

		/* imq_dev_xmit is a black hole that eats all packets; report
		 * that we ate this packet happily and increase the dropped
		 * counters. */
		dev->stats.tx_dropped++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	skb_restore_cb(skb); /* restore skb->cb */

	skb->imq_flags = 0;
	skb->destructor = NULL;

	imq_done_check_queue_mapping(skb, dev);

	nf_reinject(entry, NF_ACCEPT);

	return NETDEV_TX_OK;
}
static int ipq_set_verdict(ipq_queue_t *q, ipq_verdict_msg_t *v,
			   unsigned int len)
{
	ipq_queue_element_t *e;

	if (v->value > NF_MAX_VERDICT)
		return -EINVAL;

	e = ipq_dequeue(q, id_cmp, v->id);
	if (e == NULL)
		return -ENOENT;

	e->verdict = v->value;
	if (v->data_len && v->data_len == len)
		if (ipq_mangle_ipv4(v, e) < 0)
			e->verdict = NF_DROP;

	nf_reinject(e->skb, e->info, e->verdict);
	kfree(e);
	return 0;
}
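/*
 * Hedged sketch, not from this source: ipq_set_verdict() looks the
 * element up by the id echoed back from userspace. In old ip_queue the
 * packet id handed to userspace was the kernel address of the queue
 * element itself, so an id_cmp callback could be as simple as:
 */
static int id_cmp(ipq_queue_element_t *e, unsigned long id)
{
	return (id == (unsigned long)e);
}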
static void imq_nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	int status;

	if (!entry->next_outfn) {
		spin_lock_bh(&imq_nf_queue_lock);
		nf_reinject(entry, verdict);
		spin_unlock_bh(&imq_nf_queue_lock);
		return;
	}

	rcu_read_lock();
	local_bh_disable();
	status = entry->next_outfn(entry, entry->next_queuenum);
	local_bh_enable();
	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		kfree_skb(entry->skb);
		kfree(entry);
	}
	rcu_read_unlock();
}
static int ipq_set_verdict(ipq_queue_t *q, ipq_verdict_msg_t *v,
			   unsigned int len)
{
	ipq_queue_element_t *e;

	if (v->value > NF_MAX_VERDICT)
		return -EINVAL;

	e = ipq_dequeue(q, id_cmp, v->id);
	if (e == NULL)
		return -ENOENT;

	e->verdict = v->value;
	if (v->data_len && v->data_len == len)
		if (ipq_mangle_ipv4(v, e) < 0)
			e->verdict = NF_DROP;

	/* APE: Always re-route packets... UGLY HACK!!! */
	route_me_harder(e->skb);
	/* END APE */

	nf_reinject(e->skb, e->info, e->verdict);
	kfree(e);
	return 0;
}
void clean_tcp_TrackInfos(tcp_Info_Manager *manager)
{
	unsigned long flags = 0;
	tcp_TrackInfo *node = NULL;

	if (manager != NULL) {
		SEC_spin_lock_irqsave(&manager->tcp_info_lock, flags);
		while (manager->head != NULL) {
			node = manager->head;
			manager->head = manager->head->next;
			if (node->q_entry != NULL) {
				nf_reinject((struct nf_queue_entry *)node->q_entry,
					    NF_DROP);
				node->q_entry = NULL;
			}
			free_tcp_TrackInfo(node);
		}
		manager->tail = NULL;
		manager->count = 0;
		SEC_spin_unlock_irqrestore(&manager->tcp_info_lock, flags);
	}
}
// sec_filter driver write function
ssize_t sec_url_filter_write(struct file *filp, const char *buf, size_t count,
			     loff_t *f_pos)
{
	int result = -EIO;

	if ((buf != NULL) && (count > 4)) {
		short version = *(short *)buf;
		int cmd = *(int *)(buf + sizeof(short));
		char *data = (char *)(buf + sizeof(short) + sizeof(int));

		if (version == 0) {
			switch (cmd) {
			case SET_FILTER_MODE:	// Turn filtering on and off.
			{
				filterMode = *(int *)data;
				result = count;
				if (filterMode == FILTER_MODE_OFF) {
					wake_up_interruptible(&user_noti_Q);
					clean_tcp_TrackInfos(getting_TrackInfo);
					clean_tcp_TrackInfos(rejected_TrackInfo);
					clean_tcp_TrackInfos(notifying_TrackInfo);
					clean_tcp_TrackInfos(notified_TrackInfo);
					SEC_FREE(exceptionURL);
					SEC_FREE(errorMsg);
				}
				printk(KERN_INFO "SEC URL Filter Mode : %d\n",
				       filterMode);
			}
			break;
			case SET_USER_SELECT:	// version 2, id 4, choice 2
			{
				tcp_TrackInfo *selectInfo = NULL;
				int id = *((int *)(data));
				int choice = *((int *)(data + sizeof(unsigned int)));
				unsigned int verdict = NF_DROP;
				struct nf_queue_entry *entry = NULL;

				selectInfo = find_tcp_TrackInfo_withID(notified_TrackInfo,
								       id, 1);
				if (selectInfo != NULL) {
					result = count;
					entry = (struct nf_queue_entry *)selectInfo->q_entry;
					selectInfo->q_entry = NULL;
					selectInfo->status = choice;
					if (choice == ALLOW ||
					    ((filterMode == FILTER_MODE_ON_RESPONSE) ||
					     (filterMode == FILTER_MODE_ON_RESPONSE_REFER))) {
						verdict = NF_ACCEPT; // Response case should send packet
					}
					if (choice == BLOCK) {
						// Add this node to the rejected list.
						add_tcp_TrackInfo(rejected_TrackInfo,
								  selectInfo);
					} else {
						free_tcp_TrackInfo(selectInfo);
					}
					nf_reinject(entry, verdict); // Reinject packet with the verdict
				} else {
					printk("SEC_FILTER_URL : NO SUCH ID\n");
				}
			}
			break;
			case SET_EXCEPTION_URL:
			{
				int urlLen = *((int *)(data));

				SEC_FREE(exceptionURL);
				exceptionURL = SEC_MALLOC(urlLen + 1);
				if (exceptionURL != NULL) {
					memcpy(exceptionURL, (data + sizeof(int)),
					       urlLen);
					result = count;
				}
			}
			break;
			case SET_ERROR_MSG:
			{
				int msgLen = *((int *)(data));

				SEC_FREE(errorMsg);
				errMsgSize = 0;
				errorMsg = SEC_MALLOC(msgLen + 1);
				if (errorMsg != NULL) {
					memcpy(errorMsg, (data + sizeof(int)),
					       msgLen);
					errMsgSize = msgLen;
					result = count;
				}
			}
			break;
			}
		}
	}
	return result;
}
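/*
 * Hypothetical userspace sketch (not part of the driver): the write
 * handler above expects a 2-byte version, a 4-byte command and a
 * payload, and requires count > 4. The device node name is an
 * assumption; the command value must come from the driver's headers,
 * so it is passed in here rather than invented.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

#define SEC_FILTER_DEV	"/dev/sec_filter"	/* assumed node name */

static int set_filter_mode(int cmd_set_filter_mode, int mode)
{
	char msg[sizeof(short) + sizeof(int) + sizeof(int)];
	short version = 0;	/* handler only accepts version 0 */
	int fd;
	ssize_t n;

	memcpy(msg, &version, sizeof(short));
	memcpy(msg + sizeof(short), &cmd_set_filter_mode, sizeof(int));
	memcpy(msg + sizeof(short) + sizeof(int), &mode, sizeof(int));

	fd = open(SEC_FILTER_DEV, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, msg, sizeof(msg));
	close(fd);
	return n == (ssize_t)sizeof(msg) ? 0 : -1;
}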
int sec_url_filter_slow(struct nf_queue_entry *entry, unsigned int queuenum)
{
	if (entry != NULL) {
		if (filterMode) {
			struct sk_buff *skb = entry->skb;
			char *request = NULL;
			tcp_TrackInfo *gettingNode = NULL;
			tcp_TrackInfo *notifyNode = NULL;

			if (skb != NULL) {
				struct iphdr *iph = (struct iphdr *)ip_hdr(skb);

				if (iph != NULL && iph->protocol == 6) { /* TCP */
					struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);

					if (tcph != NULL) {
						/* If this was already notified to user, drop it. */
						notifyNode = find_tcp_TrackInfo(notifying_TrackInfo, skb, 0);
						if (notifyNode != NULL) {
							unsigned int verdict = NF_DROP;

							nf_reinject(entry, verdict);
							return 0;
						}
						/* Find previous TCP track info and remove it from the list */
						gettingNode = find_tcp_TrackInfo(getting_TrackInfo, skb, 1);
						if (gettingNode == NULL) { /* No previous info */
							/* If this is a URL request, make TCP track info */
							gettingNode = isURL(skb);
						}
						if (gettingNode != NULL) {
							request = getPacketData(skb, gettingNode); /* Get packet */
							if (request != NULL) { /* If there is packet data */
								getURL(request, gettingNode); /* Get URL and update status */
								kfree(request);
								request = NULL;
								/* If we got the full URL, make notify info */
								if (gettingNode->status == GOT_FULL_URL) {
									makeNotifyInfo(gettingNode);
									if ((exceptionURL != NULL) && (gettingNode->url != NULL)) {
										/* This is an exception URL */
										if (strstr(&gettingNode->url[sizeof(URL_Info)], exceptionURL) != NULL) {
											free_tcp_TrackInfo(gettingNode);
											nf_reinject(entry, NF_ACCEPT);
											return 0;
										}
									}
									gettingNode->q_entry = entry;
									add_tcp_TrackInfo(notifying_TrackInfo, gettingNode);
									wake_up_interruptible(&user_noti_Q); /* Wake up read function */
									return 0;
								}
							}
							add_tcp_TrackInfo_ToHead(getting_TrackInfo, gettingNode);
						}
					}
				}
			}
		}
		nf_reinject(entry, NF_ACCEPT);
	}
	return 0;
}
static int imq_nf_queue(struct nf_queue_entry *entry, unsigned queue_num)
{
	struct net_device *dev;
	struct sk_buff *skb_orig, *skb, *skb_shared;
	struct Qdisc *q;
	struct netdev_queue *txq;
	spinlock_t *root_lock;
	int users, index;
	int retval = -EINVAL;
	unsigned int orig_queue_index;

	index = entry->skb->imq_flags & IMQ_F_IFMASK;
	if (unlikely(index > numdevs - 1)) {
		if (net_ratelimit())
			printk(KERN_WARNING
			       "IMQ: invalid device specified, highest is %u\n",
			       numdevs - 1);
		retval = -EINVAL;
		goto out;
	}

	/* check for imq device by index from cache */
	dev = imq_devs_cache[index];
	if (unlikely(!dev)) {
		char buf[8];

		/* get device by name and cache result */
		snprintf(buf, sizeof(buf), "imq%d", index);
		dev = dev_get_by_name(&init_net, buf);
		if (unlikely(!dev)) {
			/* not found ?! */
			BUG();
			retval = -ENODEV;
			goto out;
		}

		imq_devs_cache[index] = dev;
		dev_put(dev);
	}

	if (unlikely(!(dev->flags & IFF_UP))) {
		entry->skb->imq_flags = 0;
		nf_reinject(entry, NF_ACCEPT);
		retval = 0;
		goto out;
	}
	dev->last_rx = jiffies;

	skb = entry->skb;
	skb_orig = NULL;

	/* skb has owner? => make clone */
	if (unlikely(skb->destructor)) {
		skb_orig = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			retval = -ENOMEM;
			goto out;
		}
		entry->skb = skb;
	}

	skb->nf_queue_entry = entry;

	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (!skb->dev) {
		/* skb->dev == NULL causes problems, try to find the cause. */
		if (net_ratelimit()) {
			dev_warn(&dev->dev,
				 "received packet with skb->dev == NULL\n");
			dump_stack();
		}
		skb->dev = dev;
	}

	/* Disables softirqs for lock below */
	rcu_read_lock_bh();

	/* Multi-queue selection */
	orig_queue_index = skb_get_queue_mapping(skb);
	txq = imq_select_queue(dev, skb);

	q = rcu_dereference(txq->qdisc);
	if (unlikely(!q->enqueue))
		goto packet_not_eaten_by_imq_dev;

	root_lock = qdisc_lock(q);
	spin_lock(root_lock);

	users = atomic_read(&skb->users);

	skb_shared = skb_get(skb);	/* increase reference count by one */
	skb_save_cb(skb_shared);	/* backup skb->cb, as qdisc layer will
					 * overwrite it */
	qdisc_enqueue_root(skb_shared, q); /* might kfree_skb */

	if (likely(atomic_read(&skb_shared->users) == users + 1)) {
		kfree_skb(skb_shared);	/* decrease reference count by one */

		skb->destructor = &imq_skb_destructor;

		/* cloned? */
		if (unlikely(skb_orig))
			kfree_skb(skb_orig);	/* free original */

		spin_unlock(root_lock);
		rcu_read_unlock_bh();

		/* schedule qdisc dequeue */
		__netif_schedule(q);

		retval = 0;
		goto out;
	} else {
		skb_restore_cb(skb_shared);	/* restore skb->cb */
		skb->nf_queue_entry = NULL;
		/* qdisc dropped the packet and already decreased the skb
		 * reference count, so we must not free it again; that would
		 * actually destroy the skb. */
		spin_unlock(root_lock);
		goto packet_not_eaten_by_imq_dev;
	}

packet_not_eaten_by_imq_dev:
	skb_set_queue_mapping(skb, orig_queue_index);
	rcu_read_unlock_bh();

	/* cloned? restore original */
	if (unlikely(skb_orig)) {
		kfree_skb(skb);
		entry->skb = skb_orig;
	}
	retval = -1;
out:
	return retval;
}
/* Reinject a packet in the protocol stack after the delay */
static void f_delaydeliver(struct f_delayparms *parms)
{
	nf_reinject(parms->entry, NF_ACCEPT);
	kfree(parms->timer);
	kfree(parms);
}
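/*
 * Hedged sketch, not from this source: the enqueueing side that pairs
 * with f_delaydeliver() above would allocate the parms and the timer,
 * then fire after the configured delay. Everything beyond the fields
 * used above (struct layout, function name, use of the old timer API)
 * is an assumption.
 */
static int f_delayenqueue(struct nf_queue_entry *entry, unsigned long delay)
{
	struct f_delayparms *parms;

	parms = kmalloc(sizeof(*parms), GFP_ATOMIC);
	if (!parms)
		return -ENOMEM;

	parms->timer = kmalloc(sizeof(*parms->timer), GFP_ATOMIC);
	if (!parms->timer) {
		kfree(parms);
		return -ENOMEM;
	}
	parms->entry = entry;

	init_timer(parms->timer);
	parms->timer->function = (void (*)(unsigned long))f_delaydeliver;
	parms->timer->data = (unsigned long)parms;
	parms->timer->expires = jiffies + delay;
	add_timer(parms->timer);

	return 0;
}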