Example #1
File: skbuff.c Project: Lyude/linux
/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(select_skb_count(op));
	trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
	skb_get(skb);
}
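Example #1 traces the reference count directly via refcount_read(&skb->users). As a quick illustration of what skb_get() and kfree_skb() do to that counter, here is a minimal sketch that is not taken from any of the projects listed here (on older kernels skb->users is an atomic_t and would be read with atomic_read() instead):

#include <linux/skbuff.h>

static void skb_refcount_demo(void)		/* hypothetical helper */
{
	struct sk_buff *skb = alloc_skb(128, GFP_KERNEL);

	if (!skb)
		return;
	/* alloc_skb() hands back the buffer with users == 1 */
	skb_get(skb);					/* users: 1 -> 2 */
	WARN_ON(refcount_read(&skb->users) != 2);
	kfree_skb(skb);					/* users: 2 -> 1, not freed yet */
	kfree_skb(skb);					/* users: 1 -> 0, skb freed now */
}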
Example #2
File: skbuff.c Project: foxwolf/yjd
static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	skb_walk_frags(skb, list)
		skb_get(list);
} 
Example #3
/**
 *	llc_sap_state_process - sends event to SAP state machine
 *	@sap: sap to use
 *	@skb: pointer to occurred event
 *
 *	After executing actions of the event, the upper layer will be
 *	indicated if needed (on receiving a UI frame). sk can be NULL for the
 *	datalink_proto case.
 */
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);

	/*
	 * We have to hold the skb, because llc_sap_next_state
	 * will kfree it in the sending path and we need to
	 * look at the skb->cb, where we encode llc_sap_state_ev.
	 */
	skb_get(skb);
	ev->ind_cfm_flag = 0;
	llc_sap_next_state(sap, skb);
	if (ev->ind_cfm_flag == LLC_IND) {
		if (skb->sk->sk_state == TCP_LISTEN)
			kfree_skb(skb);
		else {
			llc_save_primitive(skb->sk, skb, ev->prim);

			/* queue skb to the user. */
			if (sock_queue_rcv_skb(skb->sk, skb))
				kfree_skb(skb);
		}
	}
	kfree_skb(skb);
}
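The comment in Example #3 captures the core idiom: take a reference before calling something that may free the skb, so state stored in skb->cb[] can still be examined afterwards. A minimal sketch of that idiom, with the consuming callback left as a parameter (nothing below is from the LLC code itself):

#include <linux/skbuff.h>

static void run_then_inspect_cb(struct sk_buff *skb,
				void (*consume)(struct sk_buff *))
{
	skb_get(skb);		/* keep the skb alive across the call */
	consume(skb);		/* may internally kfree_skb() the skb  */
	/* skb->cb[] can still be read here thanks to our reference */
	kfree_skb(skb);		/* balance the skb_get() above */
}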
Example #4
/*
 * Function ircomm_lmp_connect_response (self, skb)
 *
 *    
 *
 */
static int ircomm_lmp_connect_response(struct ircomm_cb *self,
				       struct sk_buff *userdata)
{
	struct sk_buff *tx_skb;
	int ret;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );
	
	/* Any userdata supplied? */
	if (userdata == NULL) {
		tx_skb = dev_alloc_skb(64);
		if (!tx_skb)
			return -ENOMEM;

		/* Reserve space for MUX and LAP header */
		skb_reserve(tx_skb, LMP_MAX_HEADER);
	} else {
		/*  
		 *  Check that the client has reserved enough space for 
		 *  headers
		 */
		ASSERT(skb_headroom(userdata) >= LMP_MAX_HEADER, return -1;);

		/* Don't forget to refcount it - should be NULL anyway */
		skb_get(userdata);
		tx_skb = userdata;
	}
Example #5
static void recv_handler(struct sk_buff *__skb)
{
	struct sk_buff *skb = NULL;		//socket buffer
	struct nlmsghdr *nlhdr = NULL;		//netlink message header
	struct cn_msg *cmsg = NULL;		//netlink connector message
	struct w1_netlink_msg *w1msg = NULL;	//w1 netlink message
	struct w1_netlink_cmd *w1cmd = NULL;	//w1 netlink command

	int minSize = sizeof(struct nlmsghdr) + sizeof(struct cn_msg)
			+ sizeof(struct w1_netlink_msg) + sizeof(struct w1_netlink_cmd);

	int len;

	//must be done here:
	skb = skb_get(__skb);

	nlhdr = (struct nlmsghdr *)skb->data;
	if (nlhdr->nlmsg_len < minSize)
	{
		printk("Corrupt w1 netlink message.\n");
		return;
	}

	len = nlhdr->nlmsg_len - NLMSG_LENGTH(0);
	printk("Got a netlink msg, its length is %d.\n", len);

	cmsg = NLMSG_DATA(nlhdr);
	on_w1_msg_received(cmsg);
}
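Examples 5, 10, 11, 19 and several further down share the same netlink-input shape: the netlink core owns __skb and frees it once the callback returns, so the handler takes its own reference with skb_get() and must drop it again with kfree_skb() on every path. A hedged, self-contained sketch of that shape (the helper name and message handling are made up):

#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/netlink.h>

static void nl_input_sketch(struct sk_buff *__skb)
{
	struct sk_buff *skb = skb_get(__skb);
	struct nlmsghdr *nlh;

	if (skb->len >= nlmsg_total_size(0)) {
		nlh = nlmsg_hdr(skb);
		pr_info("netlink payload: %.*s\n",
			nlmsg_len(nlh), (char *)nlmsg_data(nlh));
	}
	kfree_skb(skb);		/* balances skb_get(); do this on every path */
}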
Example #6
static void skb_clone_fraglist(struct sk_buff *skb)
{
	struct sk_buff *list;

	for (list = skb_shinfo(skb)->frag_list; list; list = list->next)
		skb_get(list);
}
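Examples 2 and 6 are two generations of the same helper: they walk skb_shinfo(skb)->frag_list and take one reference per fragment before the list is shared with a clone. The inverse operation drops one reference per fragment; the kernel's own skb_drop_fraglist() does essentially the following (simplified sketch):

#include <linux/skbuff.h>

static void drop_fraglist_refs(struct sk_buff *skb)
{
	struct sk_buff *list = skb_shinfo(skb)->frag_list;

	skb_shinfo(skb)->frag_list = NULL;
	while (list) {
		struct sk_buff *next = list->next;

		kfree_skb(list);	/* undo the per-fragment skb_get() */
		list = next;
	}
}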
Example #7
int connection_process(struct connection *conn, struct sk_buff *skb)
{
	int ret = 0;
	do {
		if (mutex_lock_interruptible(&(conn->data_lock))) {
			MCDRV_DBG_ERROR("Interrupted getting data semaphore!");
			ret = -1;
			break;
		}

		kfree_skb(conn->skb);

		/* Get a reference to the incoming skb */
		conn->skb = skb_get(skb);
		if (conn->skb) {
			conn->data_msg = nlmsg_hdr(conn->skb);
			conn->data_len = NLMSG_PAYLOAD(conn->data_msg, 0);
			conn->data_start = NLMSG_DATA(conn->data_msg);
			up(&(conn->data_available_sem));
		}
		mutex_unlock(&(conn->data_lock));
		ret = 0;
	} while (0);
	return ret;
}
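Example #7 shows the "stash a reference" variant: a pointer to the skb is kept in a longer-lived object (conn->skb), which is only safe if that object owns a reference of its own, and the previous occupant has to be released first. Reduced to its essentials (the holder struct below is hypothetical):

#include <linux/skbuff.h>

struct skb_holder {
	struct sk_buff *skb;	/* owns exactly one reference, or NULL */
};

static void holder_replace_skb(struct skb_holder *h, struct sk_buff *skb)
{
	kfree_skb(h->skb);	/* kfree_skb(NULL) is a no-op */
	h->skb = skb_get(skb);	/* take our own reference on the new skb */
}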
Example #8
int genl_exec(genl_exec_func_t func, void *data)
{
	int ret;

	mutex_lock(&genl_exec_lock);

	init_completion(&done);
	skb_get(genlmsg_skb);
	genlmsg_put(genlmsg_skb, 0, 0, &genl_exec_family,
		    NLM_F_REQUEST, GENL_EXEC_RUN);

	genl_exec_function = func;
	genl_exec_data = data;

	/* There is no need to send msg to current namespace. */
	ret = genlmsg_unicast(&init_net, genlmsg_skb, 0);

	if (!ret) {
		wait_for_completion(&done);
		ret = genl_exec_function_ret;
	} else {
		pr_err("genl_exec send error %d\n", ret);
	}

	/* Wait for genetlink to kfree skb. */
	while (skb_shared(genlmsg_skb))
		cpu_relax();

	genlmsg_skb->data = genlmsg_skb->head;
	skb_reset_tail_pointer(genlmsg_skb);

	mutex_unlock(&genl_exec_lock);

	return ret;
}
Example #9
int
hhnet_broadcast(
    void *data,
    unsigned int len,
    hhnet_iodone_fn iodone,
    struct sk_buff *skb
)
{
    int num;

    Debug((PRINT_PREFIX "hhnet_broadcast\n"));
    assert(localmem != NULL);

    for (num = 0; num < HHNET_MAX_DEVICES; num++) {
	if (connections[num].connect_in)
	    (void) hhnet_xmit(num, data, len, iodone, (void *)skb_get(skb));
    }

#ifdef HHNET_USE_DMA
    if (!list_empty(&head)) {
	(void) hhnet_dma_queue_list(&head, dma_ch);
	INIT_LIST_HEAD(&head);
    }
#endif

    dev_kfree_skb_any(skb);
    return 0;
}
Example #10
void nl_data_ready(struct sk_buff *__skb)  
{  
    struct sk_buff *skb;  
    struct nlmsghdr *nlh;  
    char str[100];  
 //   struct completion cmpl;  
    int i=10;  
    skb = skb_get (__skb);  
    if(skb->len >= NLMSG_SPACE(0)){  
        nlh = nlmsg_hdr(skb);  
  
        memcpy(str, NLMSG_DATA(nlh), sizeof(str));  
        printk("Message received:%s\n",str) ;  
        pid = nlh->nlmsg_pid;  
        while(i--){  
            //init_completion(&cmpl);  
            //wait_for_completion_timeout(&cmpl,3 * HZ);  
            sendnlmsg();  
            break;  
        }  
        flag = 1;  
        kfree_skb(skb);  
    }  
  
 }  
Example #11
void kernel_receive(struct sk_buff *__skb) // the kernel receives data from user space
{
    struct sk_buff *skb;
    struct nlmsghdr *nlh = NULL;

    char *data = "This is eric's test message from kernel";

    printk( "[kernel recv] begin kernel_receive\n");
    skb = skb_get(__skb);

    if(skb->len >= sizeof(struct nlmsghdr)){
        nlh = (struct nlmsghdr *)skb->data;
        if((nlh->nlmsg_len >= sizeof(struct nlmsghdr))
            && (__skb->len >= nlh->nlmsg_len)){
            user_process.pid = nlh->nlmsg_pid;
            printk( "[kernel recv] data receive from user are:%s\n", (char *)NLMSG_DATA(nlh));
            printk( "[kernel recv] user_pid:%d\n", user_process.pid);
//            send_to_user(data);
        }
    }else{
        printk( "[kernel recv] data receive from user are:%s\n",(char *)NLMSG_DATA(nlmsg_hdr(__skb)));
//        send_to_user(data);
    }

    kfree_skb(skb);
}
Example #12
/* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero
 * value if 'skb' is freed.
 */
static int handle_fragments(struct net *net, struct sw_flow_key *key,
			    u16 zone, struct sk_buff *skb)
{
	struct ovs_gso_cb ovs_cb = *OVS_GSO_CB(skb);

	if (!skb->dev) {
		OVS_NLERR(true, "%s: skb has no dev; dropping", __func__);
		return -EINVAL;
	}

	if (key->eth.type == htons(ETH_P_IP)) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;
		int err;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		err = ip_defrag(skb, user);
		if (err)
			return err;

		ovs_cb.dp_cb.mru = IPCB(skb)->frag_max_size;
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
	} else if (key->eth.type == htons(ETH_P_IPV6)) {
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;
		struct sk_buff *reasm;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		reasm = nf_ct_frag6_gather(skb, user);
		if (!reasm)
			return -EINPROGRESS;

		if (skb == reasm) {
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Don't free 'skb' even though it is one of the original
		 * fragments, as we're going to morph it into the head.
		 */
		skb_get(skb);
		nf_ct_frag6_consume_orig(reasm);

		key->ip.proto = ipv6_hdr(reasm)->nexthdr;
		skb_morph(skb, reasm);
		skb->next = reasm->next;
		consume_skb(reasm);
		ovs_cb.dp_cb.mru = IP6CB(skb)->frag_max_size;
#endif /* IP frag support */
	} else {
		kfree_skb(skb);
		return -EPFNOSUPPORT;
	}

	key->ip.frag = OVS_FRAG_TYPE_NONE;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	*OVS_GSO_CB(skb) = ovs_cb;

	return 0;
}
Example #13
/*---------Tx/Rx descriptor------------*/
static int wil_txdesc_debugfs_show(struct seq_file *s, void *data)
{
	struct wil6210_priv *wil = s->private;
	struct vring *vring;
	bool tx = (dbg_vring_index < WIL6210_MAX_TX_RINGS);
	if (tx)
		vring = &(wil->vring_tx[dbg_vring_index]);
	else
		vring = &wil->vring_rx;

	if (!vring->va) {
		if (tx)
			seq_printf(s, "No Tx[%2d] VRING\n", dbg_vring_index);
		else
			seq_puts(s, "No Rx VRING\n");
		return 0;
	}

	if (dbg_txdesc_index < vring->size) {
		/* use struct vring_tx_desc for Rx as well,
		 * only field used, .dma.length, is the same
		 */
		volatile struct vring_tx_desc *d =
				&(vring->va[dbg_txdesc_index].tx);
		volatile u32 *u = (volatile u32 *)d;
		struct sk_buff *skb = vring->ctx[dbg_txdesc_index].skb;

		if (tx)
			seq_printf(s, "Tx[%2d][%3d] = {\n", dbg_vring_index,
				   dbg_txdesc_index);
		else
			seq_printf(s, "Rx[%3d] = {\n", dbg_txdesc_index);
		seq_printf(s, "  MAC = 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   u[0], u[1], u[2], u[3]);
		seq_printf(s, "  DMA = 0x%08x 0x%08x 0x%08x 0x%08x\n",
			   u[4], u[5], u[6], u[7]);
		seq_printf(s, "  SKB = 0x%p\n", skb);

		if (skb) {
			skb_get(skb);
			wil_seq_print_skb(s, skb);
			kfree_skb(skb);
		}
		seq_printf(s, "}\n");
	} else {
		if (tx)
			seq_printf(s, "[%2d] TxDesc index (%d) >= size (%d)\n",
				   dbg_vring_index, dbg_txdesc_index,
				   vring->size);
		else
			seq_printf(s, "RxDesc index (%d) >= size (%d)\n",
				   dbg_txdesc_index, vring->size);
	}

	return 0;
}
Example #14
static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info,
			void *data)
{
	struct net_device *dev;
	struct net_device_stats *stats;
	struct sk_buff *skb2 = NULL;
	struct Qdisc *q;
	unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
	int ret = -1;

	if (index > numdevs) 
		return -1;
	
	dev = imq_devs + index;
	if (!(dev->flags & IFF_UP)) {
		skb->imq_flags = 0;
		nf_reinject(skb, info, NF_ACCEPT);
		return 0;
	}
	dev->last_rx = jiffies;

	if (skb->destructor) {
		skb2 = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (!skb)
			return -1;
	}
	skb_push(skb, IMQ_HH_LEN(info));
	skb->nf_info = info;

	stats = (struct net_device_stats *)dev->priv;
	stats->rx_bytes+= skb->len;
	stats->rx_packets++;
	
	spin_lock_bh(&dev->queue_lock);
	
	q = dev->qdisc;
	if (q->enqueue) {
		q->enqueue(skb_get(skb), q);

		if (skb_shared(skb)) {
			skb->destructor = imq_skb_destructor;
			kfree_skb(skb);
			ret = 0;
		}
	}

	qdisc_run(dev);
	spin_unlock_bh(&dev->queue_lock);

	if (skb2)
		kfree_skb(ret ? skb : skb2);

	return ret;
}
Example #15
File: imq.c Project: cilynx/dd-wrt
static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, unsigned queue_num, void *data)
{
	struct net_device *dev;
	struct net_device_stats *stats;
	struct sk_buff *skb2 = NULL;
	struct Qdisc *q;
	unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
	int ret = -1;

	if (index > numdevs)
		return -1;

	dev = imq_devs + index;
	if (!(dev->flags & IFF_UP)) {
		skb->imq_flags = 0;
		nf_reinject(skb, info, NF_ACCEPT);
		return 0;
	}
	dev->last_rx = jiffies;

	if (skb->destructor) {
		skb2 = skb;
		skb = skb_clone(skb, GFP_ATOMIC);
		if (!skb)
			return -1;
	}
	skb->nf_info = info;

	stats = (struct net_device_stats *)dev->priv;
	stats->rx_bytes+= skb->len;
	stats->rx_packets++;

	spin_lock_bh(&dev->queue_lock);
	q = dev->qdisc;
	if (q->enqueue) {
		q->enqueue(skb_get(skb), q);
		if (skb_shared(skb)) {
			skb->destructor = imq_skb_destructor;
			kfree_skb(skb);
			ret = 0;
		}
	}
	if (spin_is_locked(&dev->_xmit_lock))
		netif_schedule(dev);
	else
		while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
			/* NOTHING */;

	spin_unlock_bh(&dev->queue_lock);

	if (skb2)
		kfree_skb(ret ? skb : skb2);

	return ret;
}
Example #16
/**
 * hns3_lp_run_test -  run loopback test
 * @ndev: net device
 * @mode: loopback type
 */
static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	u32 i, good_cnt;
	int ret_val = 0;

	skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
			GFP_KERNEL);
	if (!skb)
		return HNS3_NIC_LB_TEST_NO_MEM_ERR;

	skb->dev = ndev;
	hns3_lp_setup_skb(skb);
	skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;

	good_cnt = 0;
	for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
		netdev_tx_t tx_ret;

		skb_get(skb);
		tx_ret = hns3_nic_net_xmit(skb, ndev);
		if (tx_ret == NETDEV_TX_OK)
			good_cnt++;
		else
			netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
				   tx_ret);
	}
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
		netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
		goto out;
	}

	/* Allow 200 milliseconds for packets to go from Tx to Rx */
	msleep(200);

	good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
	if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
		ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
		netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
			   mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
	}

out:
	hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_RING_ID,
			      HNS3_NIC_LB_TEST_PKT_NUM);

	kfree_skb(skb);
	return ret_val;
}
Example #17
static int efx_begin_loopback(struct efx_tx_queue *tx_queue)
{
	struct efx_nic *efx = tx_queue->efx;
	struct efx_loopback_state *state = efx->loopback_selftest;
	struct efx_loopback_payload *payload;
	struct sk_buff *skb;
	int i;
	netdev_tx_t rc;

	/* Transmit N copies of buffer */
	for (i = 0; i < state->packet_count; i++) {
		/* Allocate an skb, holding an extra reference for
		 * transmit completion counting */
		skb = alloc_skb(sizeof(state->payload), GFP_KERNEL);
		if (!skb)
			return -ENOMEM;
		state->skbs[i] = skb;
		skb_get(skb);

		/* Copy the payload in, incrementing the source address to
		 * exercise the rss vectors */
		payload = ((struct efx_loopback_payload *)
			   skb_put(skb, sizeof(state->payload)));
		memcpy(payload, &state->payload, sizeof(state->payload));
		payload->ip.saddr = htonl(INADDR_LOOPBACK | (i << 2));

		/* Ensure everything we've written is visible to the
		 * interrupt handler. */
		smp_wmb();

		if (efx_dev_registered(efx))
			netif_tx_lock_bh(efx->net_dev);
		rc = efx_enqueue_skb(tx_queue, skb);
		if (efx_dev_registered(efx))
			netif_tx_unlock_bh(efx->net_dev);

		if (rc != NETDEV_TX_OK) {
			EFX_ERR(efx, "TX queue %d could not transmit packet %d "
				"of %d in %s loopback test\n", tx_queue->queue,
				i + 1, state->packet_count, LOOPBACK_MODE(efx));

			/* Defer cleaning up the other skbs for the caller */
			kfree_skb(skb);
			return -EPIPE;
		}
	}

	return 0;
}
Example #18
/*
 * Function ircomm_ttp_connect_response (self, skb)
 *
 *    
 *
 */
int ircomm_ttp_connect_response(struct ircomm_cb *self,
				struct sk_buff *userdata)
{
	int ret;

	IRDA_DEBUG(4, "%s()\n", __FUNCTION__ );
	
	/* Don't forget to refcount it - should be NULL anyway */
	if(userdata)
		skb_get(userdata);

	ret = irttp_connect_response(self->tsap, TTP_SAR_DISABLE, userdata);

	return ret;
}
Example #19
void nlcall(struct sk_buff *__skb) 
{
	pid_t pid;
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int data = 0;
	
	/*
	 * get true skb
	 */
	skb = skb_get(__skb);

	/*
	 * length of actual data must be bigger than
	 * NLMSG_LENGTH
	 */
	if (skb->len >= NLMSG_SPACE(0)) {
		/*
		 * skb->data
		 */
		nlh = nlmsg_hdr(skb);	
		/*
		 * nlh + NLMSG_LENGTH --> data
		 */
		memcpy((void *)&data, NLMSG_DATA(nlh), sizeof(data));
		pid = nlh->nlmsg_pid;
		/*
		 * H E is from my own user program! 
		 */
		if (data == 1) {
			switchon = 1;
			printk(KERN_ALERT "[i-keylog] process %d switch on\n", pid);
		}
		else if (data == 0) {
			switchon = 0;
			printk(KERN_ALERT "[i-keylog] process %d switch off\n", pid);
		}
		else {
			printk(KERN_ALERT "[i-keylog] process %d unknown control\n", pid);
		}
		kfree_skb(skb);

	}
	else {
		printk(KERN_ALERT "[i-keylog] unknown control\n");

	}
}
Example #20
/*
 * Function ircomm_lmp_connect_request (self, userdata)
 *
 *    
 *
 */
static int ircomm_lmp_connect_request(struct ircomm_cb *self, 
				      struct sk_buff *userdata, 
				      struct ircomm_info *info)
{
	int ret = 0;

	IRDA_DEBUG(0, "%s()\n", __FUNCTION__ );

	/* Don't forget to refcount it - should be NULL anyway */
	if(userdata)
		skb_get(userdata);

	ret = irlmp_connect_request(self->lsap, info->dlsap_sel,
				    info->saddr, info->daddr, NULL, userdata); 
	return ret;
}	
Example #21
static int ircomm_lmp_connect_request(struct ircomm_cb *self,
				      struct sk_buff *userdata,
				      struct ircomm_info *info)
{
	int ret = 0;

	IRDA_DEBUG(0, "%s()\n", __func__ );

	
	if(userdata)
		skb_get(userdata);

	ret = irlmp_connect_request(self->lsap, info->dlsap_sel,
				    info->saddr, info->daddr, NULL, userdata);
	return ret;
}
Example #22
static struct sk_buff *tls_sw_fallback(struct sock *sk, struct sk_buff *skb)
{
	int tcp_payload_offset = skb_transport_offset(skb) + tcp_hdrlen(skb);
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_tx *ctx = tls_offload_ctx_tx(tls_ctx);
	int payload_len = skb->len - tcp_payload_offset;
	struct scatterlist *sg_in, sg_out[3];
	struct sk_buff *nskb = NULL;
	int sg_in_max_elements;
	int resync_sgs = 0;
	s32 sync_size = 0;
	u64 rcd_sn;

	/* worst case is:
	 * MAX_SKB_FRAGS in tls_record_info
	 * MAX_SKB_FRAGS + 1 in SKB head and frags.
	 */
	sg_in_max_elements = 2 * MAX_SKB_FRAGS + 1;

	if (!payload_len)
		return skb;

	sg_in = kmalloc_array(sg_in_max_elements, sizeof(*sg_in), GFP_ATOMIC);
	if (!sg_in)
		goto free_orig;

	sg_init_table(sg_in, sg_in_max_elements);
	sg_init_table(sg_out, ARRAY_SIZE(sg_out));

	if (fill_sg_in(sg_in, skb, ctx, &rcd_sn, &sync_size, &resync_sgs)) {
		/* bypass packets before kernel TLS socket option was set */
		if (sync_size < 0 && payload_len <= -sync_size)
			nskb = skb_get(skb);
		goto put_sg;
	}

	nskb = tls_enc_skb(tls_ctx, sg_out, sg_in, skb, sync_size, rcd_sn);

put_sg:
	while (resync_sgs)
		put_page(sg_page(&sg_in[--resync_sgs]));
	kfree(sg_in);
free_orig:
	kfree_skb(skb);
	return nskb;
}
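skb_get() returns its argument, which is what makes the bypass branch in Example #22 so compact: the function's common exit always drops one reference, so "hand the caller the same skb back" simply means returning it with one extra reference taken. The same idiom in isolation (hypothetical helper, not TLS code):

#include <linux/skbuff.h>

static struct sk_buff *pass_through_or_drop(struct sk_buff *skb, bool bypass)
{
	struct sk_buff *out = NULL;

	if (bypass)
		out = skb_get(skb);	/* +1 ref: survives the kfree below */

	kfree_skb(skb);			/* common exit drops one reference */
	return out;			/* original skb, or NULL if dropped */
}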
Example #23
void nl_data_ready(struct sk_buff *__skb)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	u32 pid;
	int rc;
	int len = NLMSG_SPACE(1200);
	char str[100];

	printk("net_link: data is ready to read.\n");
	skb = skb_get(__skb);

	if (skb->len >= NLMSG_SPACE(0)) {
		nlh = nlmsg_hdr(skb);
		printk("net_link: recv %s.\n", (char *)NLMSG_DATA(nlh));
		memcpy(str, NLMSG_DATA(nlh), sizeof(str));
		printk("str[0] is %d\n", str[0]);
		pid = nlh->nlmsg_pid;
		printk("net_link: pid is %d\n", pid);
		kfree_skb(skb);

		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "net_link: allocate failed.\n");
			return;
		}
		nlh = nlmsg_put(skb, 0, 0, 0, 1200, 0);
		NETLINK_CB(skb).pid = 0;

		if (str[0] == 1) {
			flag = 1;
			memcpy(NLMSG_DATA(nlh), "start", 6);
		} else if (str[0] == 2) {
			flag = 0;
			memcpy(NLMSG_DATA(nlh), "end", 4);
		}
		printk("net_link: going to send.\n");
		rc = netlink_unicast(nl_sk, skb, pid, MSG_DONTWAIT);
		if (rc < 0) {
			printk(KERN_ERR "net_link: can not unicast skb (%d)\n", rc);
		}
		printk("net_link: send is ok.\n");
	}
	return;
}
Example #24
/* Iterate over the skbuff in the audit_buffer, sending their contents
 * to user space. */
static inline int audit_log_drain(struct audit_buffer *ab)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&ab->sklist))) {
		int retval = 0;

		if (audit_pid) {
			if (ab->nlh) {
				ab->nlh->nlmsg_len   = ab->total;
				ab->nlh->nlmsg_type  = ab->type;
				ab->nlh->nlmsg_flags = 0;
				ab->nlh->nlmsg_seq   = 0;
				ab->nlh->nlmsg_pid   = ab->pid;
			}
			skb_get(skb); /* because netlink_* frees */
			retval = netlink_unicast(audit_sock, skb, audit_pid,
						 MSG_DONTWAIT);
		}
		if (retval == -EAGAIN && ab->count < 5) {
			++ab->count;
			skb_queue_tail(&ab->sklist, skb);
			audit_log_end_irq(ab);
			return 1;
		}
		if (retval < 0) {
			if (retval == -ECONNREFUSED) {
				printk(KERN_ERR
				       "audit: *NO* daemon at audit_pid=%d\n",
				       audit_pid);
				audit_pid = 0;
			} else
				audit_log_lost("netlink socket too busy");
		}
		if (!audit_pid) { /* No daemon */
			int offset = ab->nlh ? NLMSG_SPACE(0) : 0;
			int len    = skb->len - offset;
			printk(KERN_ERR "%*.*s\n",
			       len, len, skb->data + offset);
		}
		kfree_skb(skb);
		ab->nlh = NULL;
	}
	return 0;
}
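The "because netlink_* frees" comment in Example #24 is the key point: netlink_unicast() consumes one skb reference whether it succeeds or fails, so a sender that may want to requeue the buffer afterwards hands netlink its own reference via skb_get() and keeps the original. A hedged sketch of that contract (the retry queue and portid handling are invented for the example):

#include <linux/netlink.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static int unicast_keep_on_eagain(struct sock *nlsk, struct sk_buff *skb,
				  u32 portid, struct sk_buff_head *retry_q)
{
	int err;

	skb_get(skb);				/* this reference is consumed by netlink */
	err = netlink_unicast(nlsk, skb, portid, MSG_DONTWAIT);
	if (err == -EAGAIN) {
		skb_queue_tail(retry_q, skb);	/* keep our reference, retry later */
		return -EAGAIN;
	}
	kfree_skb(skb);				/* done with our reference */
	return err < 0 ? err : 0;
}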
Example #25
static void nl_data_handler(struct sk_buff *__skb)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	int i;
	int len;
	char str[128];
	MSG("we got netlink message\n");
	len = NLMSG_SPACE(MAX_NL_MSG_LEN);
	skb = skb_get(__skb);	
	if(skb == NULL)
		ERR("skb_get return NULL");
	if (skb->len >= NLMSG_SPACE(0))	/* presume there is at least a 5-byte payload */
	{
		MSG("length is enough\n");
		nlh = nlmsg_hdr(skb);	//points to the data carried in the skb
		memcpy(str, NLMSG_DATA(nlh), sizeof(str));
		for(i = 0; i < 3; i++)
			MSG("str[%d = %c]",i, str[i]);
		MSG("str[0] = %d, str[1] = %d, str[2] = %d\n", str[0], str[1], str[2]);
		if(str[0] == 'B' && str[1] == 'G' && str[2] == 'W')
		{
			MSG("got native daemon init command, record it's pid\n");
			pid = nlh->nlmsg_pid;	/*record the native process PID*/
			MSG("native daemon pid is %d\n", pid);
		}
		else
		{
			ERR("this is not BGW message, ignore it\n");
			return;
		}
	}
	else
	{
		ERR("not engouth data length\n");
		return;
	}
	
	kfree_skb(skb);

	send_command_to_daemon(ACK);
	
	return;
}
Example #26
/*
 * Function ircomm_ttp_connect_request (self, userdata)
 *
 *
 *
 */
static int ircomm_ttp_connect_request(struct ircomm_cb *self,
				      struct sk_buff *userdata,
				      struct ircomm_info *info)
{
	int ret = 0;

	IRDA_DEBUG(4, "%s()\n", __func__ );

	/* Don't forget to refcount it - should be NULL anyway */
	if(userdata)
		skb_get(userdata);

	ret = irttp_connect_request(self->tsap, info->dlsap_sel,
				    info->saddr, info->daddr, NULL,
				    TTP_SAR_DISABLE, userdata);

	return ret;
}
Example #27
/* plum_stack_push() is called to enqueue plum_id|port_id pair into
 * stack of plums to be executed
 */
void plum_stack_push(struct bpf_dp_context *ctx, u32 dest, int copy)
{
	struct plum_stack *stack;
	struct plum_stack_frame *frame;

	stack = ctx->stack;

	if (stack->push_cnt > 1024)
		/* number of frames to execute is too high, ignore
		 * all further bpf_*_forward() calls
		 *
		 * this can happen if connections between plums make a loop:
		 * three bridge-plums in a loop is a valid network
		 * topology if STP is working, but kernel needs to make sure
		 * that packet doesn't loop forever
		 */
		return;

	stack->push_cnt++;

	if (!copy) {
		frame = stack->curr_frame;
		if (!frame) /* bpf_*_forward() is called 2nd time. ignore it */
			return;

		BUG_ON(&frame->ctx != ctx);
		stack->curr_frame = NULL;

		skb_get(ctx->skb);
	} else {
		frame = kmem_cache_alloc(plum_stack_cache, GFP_ATOMIC);
		if (!frame)
			return;
		frame->kmem = 1;
		if (bpf_dp_ctx_copy(&frame->ctx, ctx)) {
			kmem_cache_free(plum_stack_cache, frame);
			return;
		}
	}

	frame->dest = dest;
	list_add(&frame->link, &stack->list);
}
Example #28
/*
 * TX an skb to an idle device
 *
 * When the device is in basestation-idle mode, we need to wake it up
 * and then TX. So we queue a work_struct for doing so.
 *
 * We need to get an extra ref for the skb (so it is not dropped), as
 * well as be careful not to queue more than one request (won't help
 * at all). If more than one request comes or there are errors, we
 * just drop the packets (see i2400m_hard_start_xmit()).
 */
static
int i2400m_net_wake_tx(struct i2400m *i2400m, struct net_device *net_dev,
		       struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);
	unsigned long flags;

	d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
	if (net_ratelimit()) {
		d_printf(3, dev, "WAKE&NETTX: "
			 "skb %p sending %d bytes to radio\n",
			 skb, skb->len);
		d_dump(4, dev, skb->data, skb->len);
	}
	/* We hold a ref count for i2400m and skb, so when
	 * stopping() the device, we need to cancel that work
	 * and if pending, release those resources. */
	result = 0;
	spin_lock_irqsave(&i2400m->tx_lock, flags);
	if (!i2400m->wake_tx_skb) {
		netif_stop_queue(net_dev);
		i2400m_get(i2400m);
		i2400m->wake_tx_skb = skb_get(skb);	/* transfer ref count */
		i2400m_tx_prep_header(skb);
		result = schedule_work(&i2400m->wake_tx_ws);
		WARN_ON(result == 0);
	}
	spin_unlock_irqrestore(&i2400m->tx_lock, flags);
	if (result == 0) {
		/* Yes, this happens even if we stopped the
		 * queue -- blame the queue disciplines that
		 * queue without looking -- I guess there is a reason
		 * for that. */
		if (net_ratelimit())
			d_printf(1, dev, "NETTX: device exiting idle, "
				 "dropping skb %p, queue running %d\n",
				 skb, netif_queue_stopped(net_dev));
		result = -EBUSY;
	}
	d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
	return result;
}
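Example #28 combines skb_get() with deferred work: the hot path cannot wake the device, so it parks the skb (holding a reference that travels with the queued work item) and lets the work handler, which may sleep, transmit and release it. Stripped of the driver specifics, the shape is roughly the following sketch (all names are hypothetical, and the work handler is assumed to kfree_skb() the parked buffer when done):

#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct wake_tx_state {
	struct work_struct work;	/* handler transmits, then kfree_skb()s */
	struct sk_buff *pending;	/* owns one reference while parked      */
	spinlock_t lock;
};

static int wake_tx_park(struct wake_tx_state *st, struct sk_buff *skb)
{
	int ret = -EBUSY;

	spin_lock_bh(&st->lock);
	if (!st->pending) {			/* at most one parked skb */
		st->pending = skb_get(skb);	/* reference travels with the work */
		schedule_work(&st->work);
		ret = 0;
	}
	spin_unlock_bh(&st->lock);
	return ret;
}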
Example #29
/*
 * Accept the commands from user space to set the rules
 */
void nl_data_ready(struct sk_buff *__skb)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	char str[100];
	//struct completion cmpl;
	int i = 10;
	int interval = 0;
	char command[6] = {'\0'}, ipaddr[60] = {'\0'};
	struct in6_addr recvaddr;

	printk("nl_data_ready......in\n");
	skb = skb_get(__skb);
	if (skb->len >= NLMSG_SPACE(0)) {
		nlh = nlmsg_hdr(skb);

		memcpy(str, NLMSG_DATA(nlh), sizeof(str));
		printk("Message received:%s\n", str);
		sscanf(str, "cmd=%s ip=%s interval=%d", command, ipaddr, &interval);
		/* convert ipaddr to struct in6_addr */
		memcpy(&recvaddr, ipaddr, sizeof(recvaddr));

		/*
		 * The command format is:
		 *   ADD>x:x:x:x
		 *   DEL>x:x:x:x
		 */
		if (strcmp(command, "ADD") == 0)
			add_rule(&ipaddrs, &recvaddr);
		else if (strcmp(command, "DEL") == 0)
			del_rule(&ipaddrs, &recvaddr);

		pid = nlh->nlmsg_pid;	/* the source process id */

		sendnlmsg("command executed.");

		kfree_skb(skb);
	}
}
Example #30
void sample_input (struct sk_buff *__skb)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	unsigned int pid;
	int rc;
	int len = NLMSG_SPACE(1200);
	char data[100];
	int dlen=0;
	
	skb = skb_get(__skb);
	if (skb->len >= NLMSG_SPACE(0)) 
	{
		nlh = nlmsg_hdr(skb);
		dlen= nlh->nlmsg_len;
		pid = nlh->nlmsg_pid;	/* PID of the sending process */
		if(dlen>100)dlen=100;
		memset(data,0,100);
		memcpy(data,NLMSG_DATA(nlh),dlen); 
		printk("net_link: recv '%s' from process %d.\n",data,pid);
		kfree_skb(skb);
		
		skb = alloc_skb(len, GFP_ATOMIC);
		if (!skb)
		{
			printk("net_link: alloc_skb failed.\n");
			return;
		}
		nlh = nlmsg_put(skb, 0, 0, 0, 1200, 0);
		nlh->nlmsg_len = dlen;
		NETLINK_CB(skb).pid = 0;	/* sent from the kernel */
		memcpy(NLMSG_DATA(nlh), data, strlen(data));

		rc = netlink_unicast(nl_sk, skb, pid, MSG_DONTWAIT);
		if (rc < 0) 
		{
			printk("net_link: unicast skb error\n");
		}
		printk("net_link: send '%s' to process %d ok.\n",data,pid);
	}
	return;
}
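Several of the handlers above (Examples 23, 29 and 30) finish by building a reply skb and unicasting it back to the recorded portid. For completeness, here is a hedged sketch of that reply path using the nlmsg_* helpers; the socket pointer, message type and payload handling are placeholders, not taken from any of the listed projects:

#include <linux/string.h>
#include <net/netlink.h>
#include <net/sock.h>

static int nl_reply_sketch(struct sock *nlsk, u32 portid,
			   const void *payload, size_t len)
{
	struct sk_buff *skb = nlmsg_new(len, GFP_ATOMIC);
	struct nlmsghdr *nlh;

	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, NLMSG_DONE, len, 0);
	if (!nlh) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}
	memcpy(nlmsg_data(nlh), payload, len);

	/* netlink_unicast() takes over the reference we created here */
	return netlink_unicast(nlsk, skb, portid, MSG_DONTWAIT);
}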