static int ax25_rcv(struct sk_buff *skb, struct net_device *dev,
	ax25_address *dev_addr, struct packet_type *ptype)
{
	ax25_address src, dest, *next_digi = NULL;
	int type = 0, mine = 0, dama;
	struct sock *make, *sk;
	ax25_digi dp, reverse_dp;
	ax25_cb *ax25;
	ax25_dev *ax25_dev;

	/*
	 *	Process the AX.25/LAPB frame.
	 */

	skb_reset_transport_header(skb);

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto free;

	/*
	 *	Parse the address header.
	 */

	if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL)
		goto free;

	/*
	 *	Ours perhaps?
	 */
	if (dp.lastrepeat + 1 < dp.ndigi)		/* Not yet digipeated completely */
		next_digi = &dp.calls[dp.lastrepeat + 1];

	/*
	 *	Pull off the AX.25 headers, leaving the CTRL/PID bytes
	 */
	skb_pull(skb, ax25_addr_size(&dp));

	/* For one of our port addresses? */
	if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* Also match on any registered callsign from L3/4 */
	if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi)
		mine = 1;

	/* UI frame - bypass LAPB processing */
	if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) {
		skb_set_transport_header(skb, 2); /* skip control and pid */

		ax25_send_to_raw(&dest, skb, skb->data[1]);

		if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0)
			goto free;

		/* skb->data[1] now holds the PID byte */
		switch (skb->data[1]) {
		case AX25_P_IP:
			skb_pull(skb,2);		/* drop PID/CTRL */
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_IP);
			netif_rx(skb);
			break;

		case AX25_P_ARP:
			skb_pull(skb,2);
			skb_reset_transport_header(skb);
			skb_reset_network_header(skb);
			skb->dev      = dev;
			skb->pkt_type = PACKET_HOST;
			skb->protocol = htons(ETH_P_ARP);
			netif_rx(skb);
			break;
		case AX25_P_TEXT:
			/* Now find a suitable dgram socket */
			sk = ax25_get_socket(&dest, &src, SOCK_DGRAM);
			if (sk != NULL) {
				bh_lock_sock(sk);
				if (atomic_read(&sk->sk_rmem_alloc) >=
				    sk->sk_rcvbuf) {
					kfree_skb(skb);
				} else {
					/*
					 *	Remove the control and PID.
					 */
					skb_pull(skb, 2);
					if (sock_queue_rcv_skb(sk, skb) != 0)
						kfree_skb(skb);
				}
				bh_unlock_sock(sk);
				sock_put(sk);
			} else {
				kfree_skb(skb);
			}
			break;

		default:
			kfree_skb(skb);	/* raw sockets already got a copy above */
			break;
		}

		return 0;
	}

	/*
	 *	Is connected mode supported on this device?
	 *	If not, we could either DM the incoming frame (unless it is
	 *	itself a DM) or silently ignore it. For now we stay quiet.
	 */
	if (ax25_dev->values[AX25_VALUES_CONMODE] == 0)
		goto free;

	/* LAPB */

	/* AX.25 state 1-4 */

	ax25_digi_invert(&dp, &reverse_dp);

	if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) {
		/*
		 *	Process the frame. If the routine queues it up
		 *	internally it returns one; otherwise we free the skb
		 *	immediately. The routine itself wakes the user context
		 *	layers, so we do no further work here.
		 */
		if (ax25_process_rx_frame(ax25, skb, type, dama) == 0)
			kfree_skb(skb);

		ax25_cb_put(ax25);
		return 0;
	}

	/* AX.25 state 0 (disconnected) */

	/* a) received not a SABM(E) */

	if ((*skb->data & ~AX25_PF) != AX25_SABM &&
	    (*skb->data & ~AX25_PF) != AX25_SABME) {
		/*
		 *	Never reply to a DM. Also ignore any connects for
		 *	addresses that are neither one of our interfaces nor
		 *	a listening socket.
		 */
		if ((*skb->data & ~AX25_PF) != AX25_DM && mine)
			ax25_return_dm(dev, &src, &dest, &dp);

		goto free;
	}

	/* b) received SABM(E) */

	if (dp.lastrepeat + 1 == dp.ndigi)
		sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET);
	else
		sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET);

	if (sk != NULL) {
		bh_lock_sock(sk);
		if (sk_acceptq_is_full(sk) ||
		    (make = ax25_make_new(sk, ax25_dev)) == NULL) {
			if (mine)
				ax25_return_dm(dev, &src, &dest, &dp);
			kfree_skb(skb);
			bh_unlock_sock(sk);
			sock_put(sk);

			return 0;
		}

		ax25 = ax25_sk(make);
		skb_set_owner_r(skb, make);
		skb_queue_head(&sk->sk_receive_queue, skb);

		make->sk_state = TCP_ESTABLISHED;

		sk->sk_ack_backlog++;
		bh_unlock_sock(sk);
	} else {
		if (!mine)
			goto free;

		if ((ax25 = ax25_create_cb()) == NULL) {
			ax25_return_dm(dev, &src, &dest, &dp);
			goto free;
		}

		ax25_fillin_cb(ax25, ax25_dev);
	}

	ax25->source_addr = dest;
	ax25->dest_addr   = src;

	/*
	 *	Sort out any digipeated paths.
	 */
	if (dp.ndigi && !ax25->digipeat &&
	    (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		ax25_destroy_socket(ax25);
		if (sk)
			sock_put(sk);
		return 0;
	}

	if (dp.ndigi == 0) {
		kfree(ax25->digipeat);
		ax25->digipeat = NULL;
	} else {
		/* Reverse the source SABM's path */
		memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi));
	}

	if ((*skb->data & ~AX25_PF) == AX25_SABME) {
		ax25->modulus = AX25_EMODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_EWINDOW];
	} else {
		ax25->modulus = AX25_MODULUS;
		ax25->window  = ax25_dev->values[AX25_VALUES_WINDOW];
	}

	ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE);

#ifdef CONFIG_AX25_DAMA_SLAVE
	if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE)
		ax25_dama_on(ax25);
#endif

	ax25->state = AX25_STATE_3;

	ax25_cb_add(ax25);

	ax25_start_heartbeat(ax25);
	ax25_start_t3timer(ax25);
	ax25_start_idletimer(ax25);

	if (sk) {
		if (!sock_flag(sk, SOCK_DEAD))
			sk->sk_data_ready(sk, skb->len);
		sock_put(sk);
	} else {
free:
		kfree_skb(skb);
	}
	return 0;
}
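
The AX25_P_IP and AX25_P_ARP cases above follow the standard pattern for handing a freshly received link-layer payload to the network stack: trim the remaining link header, reset the header offsets, stamp pkt_type and protocol, and pass the buffer to netif_rx(). A minimal sketch of that pattern, with my_hand_up() as a hypothetical helper (not part of the AX.25 code):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

/* Sketch only: hand a received payload up to the network stack.
 * 'hdrlen' is whatever link-layer header still precedes the payload
 * (the two CTRL/PID bytes in the AX.25 case above). */
static void my_hand_up(struct sk_buff *skb, struct net_device *dev,
		       unsigned int hdrlen, __be16 proto)
{
	skb_pull(skb, hdrlen);		/* drop the link-layer header */
	skb_reset_transport_header(skb);
	skb_reset_network_header(skb);	/* data now points at the L3 header */
	skb->dev      = dev;
	skb->pkt_type = PACKET_HOST;	/* destined for this host */
	skb->protocol = proto;		/* e.g. htons(ETH_P_IP) */
	netif_rx(skb);			/* queue for softirq delivery */
}
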
Example #2
File: socket.c  Project: gizm0n/wl500g
static int accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res = -EFAULT;

	if (sock->state == SS_READY)
		return -EOPNOTSUPP;
	if (sock->state != SS_LISTENING)
		return -EINVAL;

	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & O_NONBLOCK)))
		return -EWOULDBLOCK;

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	if (wait_event_interruptible(*sock->sk->sk_sleep,
				     skb_queue_len(&sock->sk->sk_receive_queue))) {
		res = -ERESTARTSYS;
		goto exit;
	}
	buf = skb_peek(&sock->sk->sk_receive_queue);

	res = tipc_create(newsock, 0);
	if (!res) {
		struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
		struct tipc_portid id;
		struct tipc_msg *msg = buf_msg(buf);
		u32 new_ref = new_tsock->p->ref;

		id.ref = msg_origport(msg);
		id.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &id);
		newsock->state = SS_CONNECTED;

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tsock->p->conn_type = msg_nametype(msg);
			new_tsock->p->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' (a SYN carrying no data) by discarding
		 * it and returning an ACK; respond to 'SYN+' (a SYN with
		 * data) by queueing it on the new socket.
		 */

		msg_dbg(msg,"<ACC<: ");
		if (!msg_data_sz(msg)) {
			struct msghdr m = {NULL,};

			send_packet(NULL, newsock, &m, 0);
			advance_queue(tsock);
		} else {
			sock_lock(tsock);
			skb_dequeue(&sock->sk->sk_receive_queue);
			sock_unlock(tsock);
			skb_queue_head(&newsock->sk->sk_receive_queue, buf);
		}
	}
exit:
	up(&tsock->sem);
	return res;
}
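
Note the skb_queue_head() call at the end: the connect message is dequeued from the listening socket's receive queue and placed at the head of the brand-new socket's queue, so the data that arrived with the 'SYN+' is the first thing read on the accepted socket. Reduced to its essentials (a sketch only, with locking elided):

#include <net/sock.h>
#include <linux/skbuff.h>

/* Sketch only: move the head packet from the listener's receive queue
 * to the front of the newly created socket's receive queue. */
static void my_hand_over(struct sock *listener, struct sock *newsk)
{
	struct sk_buff *skb = skb_dequeue(&listener->sk_receive_queue);

	if (skb)
		skb_queue_head(&newsk->sk_receive_queue, skb);
}
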
Example #3
void a_netbuf_prequeue(A_NETBUF_QUEUE_T *q, void *pkt)
{
    skb_queue_head((struct sk_buff_head *) q, (struct sk_buff *) pkt);
}
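
This wrapper just casts its opaque handles to the underlying sk_buff types; skb_queue_head() itself puts a buffer back at the front of a queue, which is the usual way to "un-dequeue" a packet that could not be processed after all. A minimal sketch of that dequeue/requeue idiom (try_hw_send() is a hypothetical driver hook):

#include <linux/skbuff.h>

/* Sketch only: take one packet off the queue; if the hardware cannot
 * accept it, put it back at the head so ordering is preserved. */
static int my_try_send(struct sk_buff_head *txq)
{
	struct sk_buff *skb = skb_dequeue(txq);

	if (!skb)
		return 0;			/* nothing queued */

	if (try_hw_send(skb) < 0) {		/* hypothetical hook */
		skb_queue_head(txq, skb);	/* retry later, in order */
		return 0;
	}
	return 1;				/* one packet sent */
}
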
Example #4
/*
 * Try to send a packet from the board's send queue or from the channel's
 * send queue.
 *
 * 	card: the board.
 * 	channel: the channel (if NULL, the packet will be taken from the
 * 		board's send queue; if not, it will be taken from the
 * 		channel's send queue).
 *
 * Return: 0 if tpam_send_tq must try another card/channel combination
 * 	(meaning that no packet has been sent), 1 if no more packets
 * 	can be sent at that time (a packet has been sent or the card is
 * 	still busy from a previous send).
 */
static int tpam_sendpacket(tpam_card *card, tpam_channel *channel) {
        struct sk_buff *skb;
	u32 hpic;
        u32 downloadptr;
	skb_header *skbh;
	u32 waiting_too_long;

	dprintk("TurboPAM(tpam_sendpacket), card=%d, channel=%d\n", 
		card->id, channel ? channel->num : -1);

	if (channel) {
		/* dequeue a packet from the channel's send queue */
		if (!(skb = skb_dequeue(&channel->sendq))) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, no packet\n", 
				card->id, channel->num);
			return 0;
		}

		/* if the channel is not ready to receive, requeue the packet
		 * and return 0 to give a chance to another channel */
		if (!channel->readytoreceive) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, channel not ready\n",
				card->id, channel->num);
			skb_queue_head(&channel->sendq, skb);
			return 0;
		}

		/* grab the board lock */
		spin_lock_irq(&card->lock);

		/* if the board is busy, requeue the packet and return 1 since
		 * there is no need to try another channel */
		if (card->busy) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, channel=%d, card busy\n",
				card->id, channel->num);
			skb_queue_head(&channel->sendq, skb);
			spin_unlock_irq(&card->lock);
			return 1;
		}
	}
	else {
		/* dequeue a packet from the board's send queue */
		if (!(skb = skb_dequeue(&card->sendq))) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, no packet\n", card->id);
			return 0;
		}

		/* grab the board lock */
		spin_lock_irq(&card->lock);

		/* if the board is busy, requeue the packet and return 1 since
		 * there is no need to try another channel */
		if (card->busy) {
			dprintk("TurboPAM(tpam_sendpacket): "
				"card=%d, card busy\n", card->id);
			skb_queue_head(&card->sendq, skb);
			spin_unlock_irq(&card->lock);
			return 1;
		}
	}

	/* wait for the board to become ready */
	waiting_too_long = 0;
	do {
		hpic = readl(card->bar0 + TPAM_HPIC_REGISTER);
		if (waiting_too_long++ > 0xfffffff) {
			spin_unlock_irq(&card->lock);
			printk(KERN_ERR "TurboPAM(tpam_sendpacket): "
					"waiting too long...\n");
			return 1;
		}
	} while (hpic & 0x00000002);

	skbh = (skb_header *)skb->data;
	dprintk("TurboPAM(tpam_sendpacket): "
		"card=%d, card ready, sending %d/%d bytes\n", 
		card->id, skbh->size, skbh->data_size);

	/* get the board's download pointer */
	downloadptr = copy_from_pam_dword(card,
					  (void *)TPAM_DOWNLOADPTR_REGISTER);

	/* copy the packet to the board at the downloadptr location */
	copy_to_pam(card, (void *)downloadptr, skb->data + sizeof(skb_header),
		    skbh->size);
	if (skbh->data_size)
		/* if there is some data in the packet, copy it too */
		copy_to_pam(card, (void *)downloadptr + sizeof(pci_mpb) + 4096,
			    skb->data + sizeof(skb_header) + skbh->size, 
			    skbh->data_size);

	/* card will become busy right now */
	card->busy = 1;

	/* interrupt the board */
	copy_to_pam_dword(card, (void *)TPAM_ACKDOWNLOAD_REGISTER, 0);
	readl(card->bar0 + TPAM_DSPINT_REGISTER);

	/* release the lock */
	spin_unlock_irq(&card->lock);

	/* if a data ack was requested by the ISDN link layer, send it now */
	if (skbh->ack) {
		isdn_ctrl ctrl;
		ctrl.driver = card->id;
		ctrl.command = ISDN_STAT_BSENT;
		ctrl.arg = channel->num;
		ctrl.parm.length = skbh->ack_size;
		(* card->interface.statcallb)(&ctrl);
	}

	/* free the sk_buff */
	kfree_skb(skb);

	return 1;
}
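
Given the return contract documented above (0: no packet sent, try another card/channel; 1: stop for now), a caller such as tpam_send_tq only needs a simple scan. A sketch of such a loop, where next_card() and next_channel() are hypothetical iteration helpers and not part of the driver:

/* Sketch only: honour tpam_sendpacket()'s 0/1 contract by scanning
 * card/channel combinations until one send goes through or the
 * hardware reports busy. */
static void my_send_pass(void)
{
	tpam_card *card;
	tpam_channel *channel;

	for (card = next_card(NULL); card; card = next_card(card)) {
		if (tpam_sendpacket(card, NULL))	/* board queue first */
			return;
		for (channel = next_channel(card, NULL); channel;
		     channel = next_channel(card, channel))
			if (tpam_sendpacket(card, channel))
				return;
	}
}
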
Example #5
static void bluecard_write_wakeup(bluecard_info_t *info)
{
	if (!info) {
		BT_ERR("Unknown device");
		return;
	}

	if (!test_bit(XMIT_SENDING_READY, &(info->tx_state)))
		return;

	if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) {
		set_bit(XMIT_WAKEUP, &(info->tx_state));
		return;
	}

	do {
		register unsigned int iobase = info->p_dev->resource[0]->start;
		register unsigned int offset;
		register unsigned char command;
		register unsigned long ready_bit;
		register struct sk_buff *skb;
		register int len;

		clear_bit(XMIT_WAKEUP, &(info->tx_state));

		if (!pcmcia_dev_present(info->p_dev))
			return;

		if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) {
			if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state)))
				break;
			offset = 0x10;
			command = REG_COMMAND_TX_BUF_TWO;
			ready_bit = XMIT_BUF_TWO_READY;
		} else {
			if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state)))
				break;
			offset = 0x00;
			command = REG_COMMAND_TX_BUF_ONE;
			ready_bit = XMIT_BUF_ONE_READY;
		}

		if (!(skb = skb_dequeue(&(info->txq))))
			break;

		if (bt_cb(skb)->pkt_type & 0x80) {
			/* Disable RTS */
			info->ctrl_reg |= REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);
		}

		/* Activate LED */
		bluecard_enable_activity_led(info);

		/* Send frame */
		len = bluecard_write(iobase, offset, skb->data, skb->len);

		/* Tell the FPGA to send the data */
		outb_p(command, iobase + REG_COMMAND);

		/* Mark the buffer as dirty */
		clear_bit(ready_bit, &(info->tx_state));

		if (bt_cb(skb)->pkt_type & 0x80) {
			DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
			DEFINE_WAIT(wait);

			unsigned char baud_reg;

			switch (bt_cb(skb)->pkt_type) {
			case PKT_BAUD_RATE_460800:
				baud_reg = REG_CONTROL_BAUD_RATE_460800;
				break;
			case PKT_BAUD_RATE_230400:
				baud_reg = REG_CONTROL_BAUD_RATE_230400;
				break;
			case PKT_BAUD_RATE_115200:
				baud_reg = REG_CONTROL_BAUD_RATE_115200;
				break;
			case PKT_BAUD_RATE_57600:
				/* Fall through... */
			default:
				baud_reg = REG_CONTROL_BAUD_RATE_57600;
				break;
			}

			/* Wait until the command reaches the baseband */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ/10);
			finish_wait(&wq, &wait);

			/* Set baud on baseband */
			info->ctrl_reg &= ~0x03;
			info->ctrl_reg |= baud_reg;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Enable RTS */
			info->ctrl_reg &= ~REG_CONTROL_RTS;
			outb(info->ctrl_reg, iobase + REG_CONTROL);

			/* Wait before the next HCI packet can be sent */
			prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
			schedule_timeout(HZ);
			finish_wait(&wq, &wait);
		}

		if (len == skb->len) {
			kfree_skb(skb);
		} else {
			skb_pull(skb, len);
			skb_queue_head(&(info->txq), skb);
		}

		info->hdev->stat.byte_tx += len;

		/* Change buffer */
		change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state));

	} while (test_bit(XMIT_WAKEUP, &(info->tx_state)));

	clear_bit(XMIT_SENDING, &(info->tx_state));
}
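
The test_and_set_bit() handshake at the top of this function is a common lockless single-sender pattern: if another context is already inside the send loop, the caller merely sets XMIT_WAKEUP and returns, and the active sender notices that bit and loops once more before clearing XMIT_SENDING. Reduced to its essentials (MY_SENDING, MY_WAKEUP and my_drain_queue() are hypothetical):

#include <linux/bitops.h>

enum { MY_SENDING, MY_WAKEUP };		/* bit numbers within 'state' */

static void my_wakeup(unsigned long *state)
{
	if (test_and_set_bit(MY_SENDING, state)) {
		/* someone else is sending; ask for one more pass */
		set_bit(MY_WAKEUP, state);
		return;
	}
	do {
		clear_bit(MY_WAKEUP, state);
		my_drain_queue();	/* hypothetical: send what we can */
	} while (test_bit(MY_WAKEUP, state));
	clear_bit(MY_SENDING, state);
}
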
Example #6
static void xenvif_rx_action(struct xenvif *vif)
{
	s8 status;
	u16 flags;
	struct xen_netif_rx_response *resp;
	struct sk_buff_head rxq;
	struct sk_buff *skb;
	LIST_HEAD(notify);
	int ret;
	unsigned long offset;
	struct skb_cb_overlay *sco;
	bool need_to_notify = false;

	struct netrx_pending_operations npo = {
		.copy  = vif->grant_copy_op,
		.meta  = vif->meta,
	};

	skb_queue_head_init(&rxq);

	while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
		RING_IDX max_slots_needed;
		int i;

		/* We need a cheap worst-case estimate for the number of
		 * slots we'll use.
		 */

		max_slots_needed = DIV_ROUND_UP(offset_in_page(skb->data) +
						skb_headlen(skb),
						PAGE_SIZE);
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			unsigned int size;
			size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
			max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
		}
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
		    skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			max_slots_needed++;

		/* If the skb may not fit then bail out now */
		if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
			skb_queue_head(&vif->rx_queue, skb);
			need_to_notify = true;
			vif->rx_last_skb_slots = max_slots_needed;
			break;
		} else
			vif->rx_last_skb_slots = 0;

		sco = (struct skb_cb_overlay *)skb->cb;
		sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
		BUG_ON(sco->meta_slots_used > max_slots_needed);

		__skb_queue_tail(&rxq, skb);
	}

	BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));

	if (!npo.copy_prod)
		goto done;

	BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
	gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);

	while ((skb = __skb_dequeue(&rxq)) != NULL) {
		sco = (struct skb_cb_overlay *)skb->cb;

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_prefix_mask) {
			resp = RING_GET_RESPONSE(&vif->rx,
						 vif->rx.rsp_prod_pvt++);

			resp->flags = XEN_NETRXF_gso_prefix | XEN_NETRXF_more_data;

			resp->offset = vif->meta[npo.meta_cons].gso_size;
			resp->id = vif->meta[npo.meta_cons].id;
			resp->status = sco->meta_slots_used;

			npo.meta_cons++;
			sco->meta_slots_used--;
		}


		vif->dev->stats.tx_bytes += skb->len;
		vif->dev->stats.tx_packets++;

		status = xenvif_check_gop(vif, sco->meta_slots_used, &npo);

		if (sco->meta_slots_used == 1)
			flags = 0;
		else
			flags = XEN_NETRXF_more_data;

		if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
			flags |= XEN_NETRXF_csum_blank | XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			/* remote but checksummed. */
			flags |= XEN_NETRXF_data_validated;

		offset = 0;
		resp = make_rx_response(vif, vif->meta[npo.meta_cons].id,
					status, offset,
					vif->meta[npo.meta_cons].size,
					flags);

		if ((1 << vif->meta[npo.meta_cons].gso_type) &
		    vif->gso_mask) {
			struct xen_netif_extra_info *gso =
				(struct xen_netif_extra_info *)
				RING_GET_RESPONSE(&vif->rx,
						  vif->rx.rsp_prod_pvt++);

			resp->flags |= XEN_NETRXF_extra_info;

			gso->u.gso.type = vif->meta[npo.meta_cons].gso_type;
			gso->u.gso.size = vif->meta[npo.meta_cons].gso_size;
			gso->u.gso.pad = 0;
			gso->u.gso.features = 0;

			gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
			gso->flags = 0;
		}

		xenvif_add_frag_responses(vif, status,
					  vif->meta + npo.meta_cons + 1,
					  sco->meta_slots_used);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&vif->rx, ret);

		need_to_notify |= !!ret;

		npo.meta_cons += sco->meta_slots_used;
		dev_kfree_skb(skb);
	}

done:
	if (need_to_notify)
		notify_remote_via_irq(vif->rx_irq);
}

void xenvif_check_rx_xenvif(struct xenvif *vif)
{
	int more_to_do;

	RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, more_to_do);

	if (more_to_do)
		napi_schedule(&vif->napi);
}
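
The dequeue loop in xenvif_rx_action() shows one more classic use of skb_queue_head(): a packet is taken off the queue, its resource needs are estimated, and if the ring cannot hold it the packet goes straight back onto the head, so nothing is reordered while the thread waits for space. Stripped to the core (my_count_slots(), ring_room() and my_consume() are hypothetical stand-ins):

#include <linux/skbuff.h>

/* Sketch only: drain a queue while resources last, requeueing at the
 * head when the next packet will not fit. */
static void my_drain(struct sk_buff_head *rxq)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(rxq)) != NULL) {
		unsigned int needed = my_count_slots(skb);

		if (!ring_room(needed)) {		/* hypothetical check */
			skb_queue_head(rxq, skb);	/* keep ordering */
			break;				/* wait for more room */
		}
		my_consume(skb);	/* hypothetical: build responses */
	}
}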