Example #1
/**
 *	llc_sap_state_process - sends event to SAP state machine
 *	@sap: sap to use
 *	@skb: pointer to the event that occurred
 *
 *	After executing the event's actions, the upper layer will be
 *	indicated if needed (on receiving a UI frame). sk can be NULL
 *	for the datalink_proto case.
 */
static void llc_sap_state_process(struct llc_sap *sap, struct sk_buff *skb)
{
	struct llc_sap_state_ev *ev = llc_sap_ev(skb);

	/*
	 * We have to hold the skb, because llc_sap_next_state
	 * will kfree it in the sending path and we need to
	 * look at the skb->cb, where we encode llc_sap_state_ev.
	 */
	skb_get(skb);
	ev->ind_cfm_flag = 0;
	llc_sap_next_state(sap, skb);
	if (ev->ind_cfm_flag == LLC_IND) {
		if (skb->sk->sk_state == TCP_LISTEN)
			kfree_skb(skb);
		else {
			llc_save_primitive(skb->sk, skb, ev->prim);

			/* queue skb to the user. */
			if (sock_queue_rcv_skb(skb->sk, skb))
				kfree_skb(skb);
		}
	}
	kfree_skb(skb);
}
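The skb_get()/kfree_skb() pairing above is an ordinary reference-count hold: the extra reference keeps the buffer, and the event encoded in skb->cb, alive across llc_sap_next_state(), which may free the skb on the sending path. A minimal userspace sketch of the same pattern, with a hypothetical refcounted buffer standing in for struct sk_buff:

#include <stdio.h>
#include <stdlib.h>

/* hypothetical stand-in for struct sk_buff: just a refcount */
struct buf { int users; };

static void buf_get(struct buf *b)	/* like skb_get() */
{
	b->users++;
}

static void buf_put(struct buf *b)	/* like kfree_skb() */
{
	if (--b->users == 0) {
		printf("buffer freed\n");
		free(b);
	}
}

/* like llc_sap_next_state() on the sending path: consumes one reference */
static void consume(struct buf *b)
{
	buf_put(b);
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	b->users = 1;
	buf_get(b);	/* hold: we still need to look at b afterwards */
	consume(b);	/* may drop its reference */
	/* b is still valid here thanks to the hold */
	buf_put(b);	/* release the hold; the buffer is freed now */
	return 0;
}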
Example #2
static int nr_queue_rx_frame(struct sock *sk, struct sk_buff *skb, int more)
{
	struct sk_buff *skbo, *skbn = skb;

	skb_pull(skb, NR_NETWORK_LEN + NR_TRANSPORT_LEN);

	nr_start_idletimer(sk);

	if (more) {
		sk->protinfo.nr->fraglen += skb->len;
		skb_queue_tail(&sk->protinfo.nr->frag_queue, skb);
		return 0;
	}

	if (!more && sk->protinfo.nr->fraglen > 0) {	/* End of fragment */
		sk->protinfo.nr->fraglen += skb->len;
		skb_queue_tail(&sk->protinfo.nr->frag_queue, skb);

		if ((skbn = alloc_skb(sk->protinfo.nr->fraglen, GFP_ATOMIC)) == NULL)
			return 1;

		skbn->h.raw = skbn->data;

		while ((skbo = skb_dequeue(&sk->protinfo.nr->frag_queue)) != NULL) {
			memcpy(skb_put(skbn, skbo->len), skbo->data, skbo->len);
			kfree_skb(skbo);
		}

		sk->protinfo.nr->fraglen = 0;		
	}

	return sock_queue_rcv_skb(sk, skbn);
}
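nr_queue_rx_frame() accumulates fragments on frag_queue, tracks the running total in fraglen, and on the final fragment allocates one skb of that size and copies the queued pieces back out in arrival order. A userspace sketch of that dequeue-and-copy reassembly loop, using a hypothetical singly linked fragment list in place of the skb queue:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical fragment node standing in for a queued sk_buff */
struct frag {
	struct frag *next;
	size_t len;
	unsigned char data[];
};

/* walk the queue, copy each piece into the result, free it -- the same
 * skb_dequeue()/memcpy()/kfree_skb() loop as above */
static unsigned char *reassemble(struct frag *head, size_t total)
{
	unsigned char *out = malloc(total);
	unsigned char *p = out;

	while (head) {
		struct frag *f = head;

		memcpy(p, f->data, f->len);
		p += f->len;
		head = f->next;
		free(f);
	}
	return out;
}

int main(void)
{
	struct frag *b = malloc(sizeof(*b) + 3);
	struct frag *a = malloc(sizeof(*a) + 3);

	b->next = NULL; b->len = 3; memcpy(b->data, "ROM", 3);
	a->next = b;    a->len = 3; memcpy(a->data, "NET", 3);

	unsigned char *msg = reassemble(a, 6);
	printf("%.6s\n", (char *)msg);	/* NETROM */
	free(msg);
	return 0;
}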
Example #3
static void raw_rcv(struct sk_buff *skb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;

	/* check the received tx sock reference */
	if ((!ro->recv_own_msgs) && (skb->sk == sk))
		return;

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(skb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 *  Put the datagram to the queue so that raw_recvmsg() can
	 *  get it from there.  We need to pass the interface index to
	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
	 *  containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
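The BUILD_BUG_ON() line is a compile-time guard that struct sockaddr_can fits into the fixed 48-byte skb->cb scratch area, which raw_rcv() then uses to smuggle the receive address to raw_recvmsg(). A minimal sketch of that control-block pattern, assuming a hypothetical struct meta in place of sockaddr_can and C11's _Static_assert in place of BUILD_BUG_ON:

#include <stdio.h>
#include <string.h>

/* model of an skb with its 48-byte cb scratch area */
struct pkt {
	char cb[48];
	/* ... payload fields ... */
};

/* hypothetical per-packet metadata standing in for struct sockaddr_can */
struct meta {
	int family;
	int ifindex;
};

/* compile-time guard, like BUILD_BUG_ON(sizeof(skb->cb) < sizeof(...)) */
_Static_assert(sizeof(((struct pkt *)0)->cb) >= sizeof(struct meta),
	       "metadata must fit into the control block");

int main(void)
{
	struct pkt p;
	struct meta *m = (struct meta *)p.cb;

	/* producer side: stash the metadata when enqueuing */
	memset(m, 0, sizeof(*m));
	m->family = 29;		/* AF_CAN */
	m->ifindex = 2;

	/* consumer side: read it back, as raw_recvmsg() would */
	m = (struct meta *)p.cb;
	printf("family=%d ifindex=%d\n", m->family, m->ifindex);
	return 0;
}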
Example #4
/* Copy frame to all raw sockets on that connection */
void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;
	struct sock * sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->type != SOCK_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&l->lock);
}
Example #5
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 *  Put the datagram to the queue so that raw_recvmsg() can
	 *  get it from there.  We need to pass the interface index to
	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
	 *  containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
Example #6
static inline int l2cap_data_channel(struct l2cap_conn *conn, __u16 cid, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
	if (!sk) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* If the socket receive buffer overflows we drop data here,
	 * which is *bad* because L2CAP has to be reliable.
	 * But we don't have any other choice: L2CAP doesn't
	 * provide a flow control mechanism. */

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}
Example #7
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __u16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->state != BT_BOUND && sk->state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk) bh_unlock_sock(sk);
	return 0;
}
Example #8
static void rawsock_data_exchange_complete(void *context, struct sk_buff *skb,
								int err)
{
	struct sock *sk = (struct sock *) context;

	BUG_ON(in_irq());

	nfc_dbg("sk=%p err=%d", sk, err);

	if (err)
		goto error;

	err = rawsock_add_header(skb);
	if (err)
		goto error;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		goto error;

	spin_lock_bh(&sk->sk_write_queue.lock);
	if (!skb_queue_empty(&sk->sk_write_queue))
		schedule_work(&nfc_rawsock(sk)->tx_work);
	else
		nfc_rawsock(sk)->tx_work_scheduled = false;
	spin_unlock_bh(&sk->sk_write_queue.lock);

	sock_put(sk);
	return;

error:
	rawsock_report_error(sk, err);
	sock_put(sk);
}
Example #9
static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
{
	struct inet6_dev *idev = in6_dev_get(skb->dev);
	if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
		if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
			UDP6_INC_STATS_BH(UdpInErrors);
			IP6_INC_STATS_BH(idev,Ip6InDiscards);
			if (idev)
				in6_dev_put(idev);
			kfree_skb(skb);
			return 0;
		}
		skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (sock_queue_rcv_skb(sk,skb)<0) {
		UDP6_INC_STATS_BH(UdpInErrors);
		IP6_INC_STATS_BH(idev,Ip6InDiscards);
		if (idev)
			in6_dev_put(idev);
		kfree_skb(skb);
		return 0;
	}
  	IP6_INC_STATS_BH(idev,Ip6InDelivers);
	UDP6_INC_STATS_BH(UdpInDatagrams);

	if (idev)
		in6_dev_put(idev);
	return 0;
}
Example #10
/* Queue an skb for a sock. */
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);

	if (err < 0)
		kfree_skb(skb);
	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
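sock_queue_rcv_skb() returns 0 on success or a negative errno, while receive paths report NET_RX_SUCCESS or NET_RX_DROP, so handlers like pn_backlog_rcv() translate one convention into the other and free the skb themselves on failure. A compact sketch of that mapping, with a hypothetical queue_rcv() standing in for the real call:

#include <errno.h>
#include <stdio.h>

enum { NET_RX_SUCCESS = 0, NET_RX_DROP = 1 };

/* hypothetical queueing primitive: 0 on success, -errno on failure */
static int queue_rcv(int rcvbuf_full)
{
	return rcvbuf_full ? -ENOMEM : 0;
}

static int backlog_rcv(int rcvbuf_full)
{
	int err = queue_rcv(rcvbuf_full);

	/* on failure the handler still owns the buffer and would
	 * kfree_skb() it here before reporting the drop */
	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}

int main(void)
{
	printf("queued: %d, dropped: %d\n", backlog_rcv(0), backlog_rcv(1));
	return 0;
}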
Example #11
static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = pt->af_packet_priv;
	
	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 *
	 *	Incoming packets have ll header pulled,
	 *	push it back.
	 *
	 *	For outgoing ones skb->data == skb->mac.raw
	 *	so that this procedure is noop.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto oom;

	/* drop any routing info */
	dst_release(skb->dst);
	skb->dst = NULL;

	spkt = (struct sockaddr_pkt*)skb->cb;

	skb_push(skb, skb->data-skb->mac.raw);

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk,skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}
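The skb_push(skb, skb->data - skb->mac.raw) call restores the link-layer header that was pulled off on receive; for outgoing frames skb->data already equals skb->mac.raw, so the push moves nothing. A toy model of that pointer arithmetic, with a hypothetical buffer holding the two pointers that matter:

#include <stdio.h>
#include <string.h>

/* toy buffer: mac marks the header start, data the current read position */
struct toybuf {
	unsigned char room[64];
	unsigned char *mac;
	unsigned char *data;
};

static void push(struct toybuf *b, size_t n)	/* like skb_push() */
{
	b->data -= n;
}

int main(void)
{
	struct toybuf b;

	memcpy(b.room, "HDRpayload", 10);
	b.mac  = b.room;	/* start of the link-layer header */
	b.data = b.room + 3;	/* header already pulled by the driver */

	/* restore the header; a no-op when data == mac */
	push(&b, (size_t)(b.data - b.mac));
	printf("%.10s\n", (char *)b.data);	/* HDRpayload */
	return 0;
}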
Example #12
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 */
int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	volatile int queued = 0;
	unsigned char pid;

	if (skb == NULL) return 0;

	ax25_start_idletimer(ax25);

	pid = *skb->data;

#ifdef CONFIG_INET
	if (pid == AX25_P_IP) {
		/* working around a TCP bug to keep additional listeners
		 * happy. TCP re-uses the buffer and destroys the original
		 * content.
		 */
		struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC);
		if (skbn != NULL) {
			kfree_skb(skb);
			skb = skbn;
		}

		skb_pull(skb, 1);	/* Remove PID */
		skb->h.raw    = skb->data;
		skb->nh.raw   = skb->data;
		skb->dev      = ax25->ax25_dev->dev;
		skb->pkt_type = PACKET_HOST;
		skb->protocol = htons(ETH_P_IP);
		ip_rcv(skb, skb->dev, NULL);	/* Wrong ptype */
		return 1;
	}
#endif
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) {
		if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) ||
		    ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
Example #13
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int err = sock_queue_rcv_skb(sk, skb);
	if (err < 0) {
		kfree_skb(skb);
		if (err == -ENOMEM)
			atomic_inc(&sk->sk_drops);
	}
	return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
Example #14
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}
Example #15
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	/* Charge it to the socket. */

	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
Example #16
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	/* Charge it to the socket. */
	
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		/* FIXME: increment a raw drops counter here */
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
Example #17
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	/* Charge it to the socket. */
	if (sock_queue_rcv_skb(sk,skb)<0) {
		ipv6_statistics.Ip6InDiscards++;
		kfree_skb(skb);
		return 0;
	}

	ipv6_statistics.Ip6InDelivers++;
	return 0;
}
Example #18
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	/* Charge it to the socket. */

	ipv4_pktinfo_prepare(skb);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return NET_RX_SUCCESS;
}
Example #19
static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
	/* Charge it to the socket. */
	
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		IP_INC_STATS(IpInDiscards);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	IP_INC_STATS(IpInDelivers);
	return NET_RX_SUCCESS;
}
Example #20
/* Queue an skb for a sock. */
static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
    int err = sock_queue_rcv_skb(sk, skb);
#ifdef CONFIG_SAMSUNG_PHONE_SVNET
    /* svent RX debugging */
    if (sk->sk_receive_queue.qlen > 30)
        printk(KERN_DEBUG "svn %s, sk = %p, qlen = %d\n", __func__, sk,
               sk->sk_receive_queue.qlen);
#endif
    if (err < 0)
        kfree_skb(skb);
    return err ? NET_RX_DROP : NET_RX_SUCCESS;
}
Example #21
static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
{
	struct ec_device *edev = edev_get(skb->dev);
	struct iphdr *ip = skb->nh.iph;
	unsigned char stn = ntohl(ip->saddr) & 0xff;
	struct sock *sk;
	struct sk_buff *newskb;
	struct ec_cb *eb;
	struct sockaddr_ec *sec;

	if (edev == NULL)
		return;		/* Device not configured for AUN */
	
	if ((sk = ec_listening_socket(ah->port, stn, edev->net)) == NULL)
		goto bad;		/* Nobody wants it */

	newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15, 
			   GFP_ATOMIC);
	if (newskb == NULL)
	{
		printk(KERN_DEBUG "AUN: memory squeeze, dropping packet.\n");
		/* Send nack and hope sender tries again */
		goto bad;
	}

	eb = (struct ec_cb *)&newskb->cb;
	sec = (struct sockaddr_ec *)&eb->sec;
	memset(sec, 0, sizeof(struct sockaddr_ec));
	sec->sec_family = AF_ECONET;
	sec->type = ECTYPE_PACKET_RECEIVED;
	sec->port = ah->port;
	sec->cb = ah->cb;
	sec->addr.net = edev->net;
	sec->addr.station = stn;

	memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah+1), 
	       len - sizeof(struct aunhdr));

	if (sock_queue_rcv_skb(sk, newskb) < 0)
	{
		/* Socket is bankrupt. */
		kfree_skb(newskb);
		goto bad;
	}

	aun_send_response(ip->saddr, ah->handle, 3, 0);
	return;

bad:
	aun_send_response(ip->saddr, ah->handle, 4, 0);
}
Example #22
static void raw_rcv(struct sk_buff *oskb, void *data)
{
	struct sock *sk = (struct sock *)data;
	struct raw_sock *ro = raw_sk(sk);
	struct sockaddr_can *addr;
	struct sk_buff *skb;
	unsigned int *pflags;

	/* check the received tx sock reference */
	if (!ro->recv_own_msgs && oskb->sk == sk)
		return;

	/* do not pass frames with DLC > 8 to a legacy socket */
	if (!ro->fd_frames) {
		struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;

		if (unlikely(cfd->len > CAN_MAX_DLEN))
			return;
	}

	/* clone the given skb to be able to enqueue it into the rcv queue */
	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb)
		return;

	/*
	 *  Put the datagram to the queue so that raw_recvmsg() can
	 *  get it from there.  We need to pass the interface index to
	 *  raw_recvmsg().  We pass a whole struct sockaddr_can in skb->cb
	 *  containing the interface index.
	 */

	BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
	addr = (struct sockaddr_can *)skb->cb;
	memset(addr, 0, sizeof(*addr));
	addr->can_family  = AF_CAN;
	addr->can_ifindex = skb->dev->ifindex;

	/* add CAN specific message flags for raw_recvmsg() */
	pflags = raw_flags(skb);
	*pflags = 0;
	if (oskb->sk)
		*pflags |= MSG_DONTROUTE;
	if (oskb->sk == sk)
		*pflags |= MSG_CONFIRM;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}
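Beyond the address, this raw_rcv() variant stores per-packet receive flags in skb->cb via the raw_flags() helper: a non-NULL oskb->sk marks a locally generated frame (MSG_DONTROUTE), and oskb->sk == sk marks the socket's own echoed transmission (MSG_CONFIRM). A sketch of that derivation, with hypothetical tx_sock/rx_sock arguments standing in for oskb->sk and sk:

#include <stdio.h>

#define MSG_DONTROUTE	0x04	/* values as in <sys/socket.h> */
#define MSG_CONFIRM	0x800

/* derive receive flags from frame ownership, as raw_rcv() does above */
static unsigned int rx_flags(const void *tx_sock, const void *rx_sock)
{
	unsigned int flags = 0;

	if (tx_sock)			/* locally generated frame */
		flags |= MSG_DONTROUTE;
	if (tx_sock == rx_sock)		/* our own transmission echoed back */
		flags |= MSG_CONFIRM;
	return flags;
}

int main(void)
{
	int a, b;

	printf("remote=0x%x local=0x%x own=0x%x\n",
	       rx_flags(NULL, &a), rx_flags(&b, &a), rx_flags(&a, &a));
	return 0;
}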
Example #23
/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock * sk;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);
	for (sk = hci_sk_list.head; sk; sk = sk->next) {
		struct hci_filter *flt;
		struct sk_buff *nskb;

		if (sk->state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		/* Apply filter */
		flt = &hci_pi(sk)->filter;

		if (!hci_test_bit((skb->pkt_type & HCI_FLT_TYPE_BITS), &flt->type_mask))
			continue;

		if (skb->pkt_type == HCI_EVENT_PKT) {
			register int evt = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);
			
			if (!hci_test_bit(evt, &flt->event_mask))
				continue;

			if (flt->opcode && ((evt == EVT_CMD_COMPLETE && 
					flt->opcode != *(__u16 *)(skb->data + 3)) ||
					(evt == EVT_CMD_STATUS && 
					flt->opcode != *(__u16 *)(skb->data + 4))))
				continue;
		}

		if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
			continue;

		/* Put type byte before the data */
		memcpy(skb_push(nskb, 1), &nskb->pkt_type, 1);

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&hci_sk_list.lock);
}
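hci_send_to_sock() consults a per-socket filter before cloning: the packet type and, for events, the event code are looked up in bitmasks with hci_test_bit(), and non-matching sockets are skipped. A minimal model of such a bit-vector filter, with hypothetical helpers mirroring that test:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))

/* like hci_test_bit(): check bit nr in a multi-word mask */
static int mask_test(unsigned int nr, const unsigned long *mask)
{
	return (mask[nr / BITS_PER_LONG] >> (nr % BITS_PER_LONG)) & 1UL;
}

static void mask_set(unsigned int nr, unsigned long *mask)
{
	mask[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long event_mask[2] = { 0, 0 };	/* room for 128 event codes */

	mask_set(5, event_mask);	/* this socket wants event 5 only */
	printf("evt 5: %d, evt 6: %d\n",
	       mask_test(5, event_mask), mask_test(6, event_mask));
	return 0;
}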
Example #24
static int
mISDN_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct mISDN_sock *msk;
	int	err;

	msk = container_of(ch, struct mISDN_sock, ch);
	if (*debug & DEBUG_SOCKET)
		printk(KERN_DEBUG "%s len %d %p\n", __func__, skb->len, skb);
	if (msk->sk.sk_state == MISDN_CLOSED)
		return -EUNATCH;
	__net_timestamp(skb);
	err = sock_queue_rcv_skb(&msk->sk, skb);
	if (err)
		printk(KERN_WARNING "%s: error %d\n", __func__, err);
	return err;
}
Example #25
static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
			   unsigned char stn, unsigned char net,
			   unsigned char cb, unsigned char port)
{
	struct ec_cb *eb = (struct ec_cb *)&skb->cb;
	struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;

	memset(sec, 0, sizeof(struct sockaddr_ec));
	sec->sec_family = AF_ECONET;
	sec->type = ECTYPE_PACKET_RECEIVED;
	sec->port = port;
	sec->cb = cb;
	sec->addr.net = net;
	sec->addr.station = stn;

	return sock_queue_rcv_skb(sk, skb);
}
Example #26
/*
 *	This is where all valid I frames are sent to, to be dispatched to
 *	whichever protocol requires them.
 */
static int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb)
{
	int (*func)(struct sk_buff *, ax25_cb *);
	struct sk_buff *skbn;
	volatile int queued = 0;
	unsigned char pid;

	if (skb == NULL) return 0;

	ax25->idletimer = ax25->idle;

	pid = *skb->data;

#ifdef CONFIG_INET
	if (pid == AX25_P_IP) {
		if ((skbn = skb_copy(skb, GFP_ATOMIC)) != NULL) {
			kfree_skb(skb, FREE_READ);
			skb = skbn;
		}
		skb_pull(skb, 1);	/* Remove PID */
		skb->h.raw = skb->data;
		ip_rcv(skb, ax25->device, NULL);	/* Wrong ptype */
		return 1;
	}
#endif
	if (pid == AX25_P_SEGMENT) {
		skb_pull(skb, 1);	/* Remove PID */
		return ax25_rx_fragment(ax25, skb);
	}

	if ((func = ax25_protocol_function(pid)) != NULL) {
		skb_pull(skb, 1);	/* Remove PID */
		return (*func)(skb, ax25);
	}

	if (ax25->sk != NULL && ax25_dev_get_value(ax25->device, AX25_VALUES_CONMODE) == 2) {
		if ((!ax25->pidincl && ax25->sk->protocol == pid) || ax25->pidincl) {
			if (sock_queue_rcv_skb(ax25->sk, skb) == 0)
				queued = 1;
			else
				ax25->condition |= AX25_COND_OWN_RX_BUSY;
		}
	}

	return queued;
}
Example #27
/* corresponds to __udp4_lib_rcv */
int udp_mhost_rcv(struct sk_buff *skb)
{
    int ret;
    struct sock *sk;
    struct udphdr *uh;
    unsigned short ulen;
    int saddr, daddr;
    printk(KERN_INFO "udp_mhost_rcv called\n");

//    if (!pskb_may_pull(skb, sizeof(struct udphdr)))
//        goto drop;

    uh = (struct udphdr *) skb_transport_header(skb);
    ulen = ntohs(uh->len);
    
    // SMS: FIND A BETTER FIX FOR THIS!!!
    skb->len = ulen;
    
    // eh?
    saddr = 0;
    daddr = 0;
    
    //    INT CORRESPONDS TO dev_get_by_index OR dev->ifindex!!!
    /* first we need to lookup the socket... */
//    sk = udp4_lib_lookup(dev_net(skb->dev), saddr, uh->source, daddr, uh->dest, (skb->dev)->ifindex);

    sk = udp_table_lookup(uh->dest);
    
    /* check for multicast here */
    
    if (sk != NULL) {
        ret = sock_queue_rcv_skb(sk, skb);
        if (ret < 0) {
            kfree_skb(skb);
        }
        return ret;
    }
    
    /* no socket wants it, so drop silently */
    printk(KERN_INFO "mnet error: sk NULL!\n");
    kfree_skb(skb);
    return 0;
}
Example #28
static inline void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb)
{
	struct sock *sk = sco_chan_get(conn);

	if (!sk)
		goto drop;

	BT_DBG("sk %p len %d", sk, skb->len);

	if (sk->state != BT_CONNECTED)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		return;

drop:
	kfree_skb(skb);
	return;
}
Example #29
int packet_rcv(struct sk_buff *skb, struct device *dev,  struct packet_type *pt)
{
	struct sock *sk;
	
	/*
	 *	When we registered the protocol we saved the socket in the data
	 *	field for just this event.
	 */

	sk = (struct sock *) pt->data;	
	
	/*
	 *	Yank back the headers [hope the device set this
	 *	right or kerboom...]
	 */
	 
	skb_push(skb,skb->data-skb->mac.raw);

	/*
	 *	The SOCK_PACKET socket receives _all_ frames.
	 */
	 
	skb->dev = dev;

	/*
	 *	Charge the memory to the socket. This is done specifically
	 *	to prevent sockets using all the memory up.
	 */
	 
	if(sock_queue_rcv_skb(sk,skb)<0)
	{
		skb->sk = NULL;
		kfree_skb(skb, FREE_READ);
		return 0;
	}
	/*
	 *	Processing complete.
	 */
	 
	return(0);
}
Example #30
static void tx_result(struct sock *sk, unsigned long cookie, int result)
{
	struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
	struct ec_cb *eb;
	struct sockaddr_ec *sec;

	if (skb == NULL)
	{
		printk(KERN_DEBUG "ec: memory squeeze, transmit result dropped.\n");
		return;
	}

	eb = (struct ec_cb *)&skb->cb;
	sec = (struct sockaddr_ec *)&eb->sec;
	memset(sec, 0, sizeof(struct sockaddr_ec));
	sec->cookie = cookie;
	sec->type = ECTYPE_TRANSMIT_STATUS | result;
	sec->sec_family = AF_ECONET;

	if (sock_queue_rcv_skb(sk, skb) < 0)
		kfree_skb(skb);
}