Example #1
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	msg_incr_reroute_cnt(msg);
	if (msg_reroute_cnt(msg) > 6) {
		if (msg_errcode(msg)) {
			msg_dbg(msg, "NET>DISC>:");
			buf_discard(buf);
		} else {
			msg_dbg(msg, "NET>REJ>:");
			tipc_reject_msg(buf, msg_destport(msg) ?
					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
		}
		return;
	}

	msg_dbg(msg, "tipc_net->rout: ");

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (tipc_in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_recv_msg(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case ROUTE_DISTRIBUTOR:
			tipc_cltr_recv_routing_table(buf);
			break;
		case NAME_DISTRIBUTOR:
			tipc_named_recv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_recv_proto_msg(buf);
			break;
		default:
			msg_dbg(msg,"DROP/NET/<REC<");
			buf_discard(buf);
		}
		return;
	}

	/* Handle message for another node */
	msg_dbg(msg, "NET>SEND>: ");
	tipc_link_send(buf, dnode, msg_link_selector(msg));
}
Example #2
static int release(struct socket *sock)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sock *sk = sock->sk;
	int res = TIPC_OK;
	struct sk_buff *buf;

	dbg("sock_delete: %x\n",tsock);
	if (!tsock)
		return 0;
	down(&tsock->sem);
	if (!sock->sk) {
		up(&tsock->sem);
		return 0;
	}

	/* Reject unreceived messages, unless no longer connected */

	while (sock->state != SS_DISCONNECTING) {
		sock_lock(tsock);
		buf = skb_dequeue(&sk->sk_receive_queue);
		if (!buf)
			tsock->p->usr_handle = NULL;
		sock_unlock(tsock);
		if (!buf)
			break;
		if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
			buf_discard(buf);
		else
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}

	/* Delete TIPC port */

	res = tipc_deleteport(tsock->p->ref);
	sock->sk = NULL;

	/* Discard any remaining messages */

	while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
		buf_discard(buf);
		atomic_dec(&tipc_queue_size);
	}

	up(&tsock->sem);

	sock_put(sk);

	atomic_dec(&tipc_user_count);
	return res;
}
Example #3
void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;
	int send = 0;

	assert(is_slave(dest));
	assert(in_own_cluster(c_ptr->addr));
	buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, LOCAL_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    tipc_node_has_active_links(c_ptr->nodes[n_num])) {
				send = 1;
				msg_set_dataoctet(msg, n_num);
			}
		}
		if (send)
			tipc_link_send(buf, dest, dest);
		else
			buf_discard(buf);
	} else {
		warn("Memory squeeze: broadcast of local route failed\n");
	}
}
Example #4
static void advance_queue(struct tipc_sock *tsock)
{
	sock_lock(tsock);
	buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
	sock_unlock(tsock);
	atomic_dec(&tipc_queue_size);
}
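Note: this example (like advance_rx_queue in Example #22) passes the result of skb_dequeue() straight to buf_discard() with no NULL check, so the helper must tolerate a NULL pointer. A minimal sketch of the helper, assuming the thin wrapper around kfree_skb() that the old net/tipc/core.h provided:

static inline void buf_discard(struct sk_buff *skb)
{
	kfree_skb(skb);	/* kfree_skb() is a no-op for a NULL pointer */
}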
Example #5
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		msg_dbg(msg, "tipc_net->drop_nam:");
		buf_discard(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
	    msg_nametype(msg), msg_nameinst(msg), dport, dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	msg_dbg(msg, "tipc_net->rej:NO NAME: ");
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
Example #6
static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			 INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

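		/*
		 * The bearer send does not consume the buffer; ownership
		 * stays here, so the buffer is released below.
		 */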
		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		buf_discard(buf);

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at the same time)
		 */

		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}
Example #7
void tipc_disc_delete(struct tipc_link_req *req)
{
	k_cancel_timer(&req->timer);
	k_term_timer(&req->timer);
	buf_discard(req->buf);
	kfree(req);
}
Example #8
static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
			 u32 lower, u32 upper)
{
	struct sk_buff *buf_copy;
	struct tipc_node *n_ptr;
	u32 n_num;
	u32 tstop;

	assert(lower <= upper);
	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
	       ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
	       ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
	assert(in_own_cluster(c_ptr->addr));

	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
	if (tstop > upper)
		tstop = upper;
	for (n_num = lower; n_num <= tstop; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && tipc_node_has_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (buf_copy == NULL)
				break;
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}
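	/* Only copies were handed to the link layer; the original buffer
	 * served as a template and is released here in all cases.
	 */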
	buf_discard(buf);
}
Example #9
static void discard_rx_queue(struct sock *sk)
{
	struct sk_buff *buf;

	while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
		atomic_dec(&tipc_queue_size);
		buf_discard(buf);
	}
}
Example #10
void tipc_disc_stop_link_req(struct link_req *req)
{
	if (!req)
		return;

	k_cancel_timer(&req->timer);
	k_term_timer(&req->timer);
	buf_discard(req->buf);
	kfree(req);
}
Example #11
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != 0) {
				buf_discard(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */

		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
Example #12
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	res = tipc_link_send_buf(bcl, buf);
	if (unlikely(res == -ELINKCONG))
		buf_discard(buf);
	else
		bcl->stats.sent_info++;

	if (bcl->out_queue_size > bcl->stats.max_queue_sz)
		bcl->stats.max_queue_sz = bcl->out_queue_size;
	bcl->stats.queue_sz_counts++;
	bcl->stats.accu_queue_sz += bcl->out_queue_size;

	spin_unlock_bh(&bc_lock);
	return res;
}
Example #13
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	if (less_eq(acked, n_ptr->bclink.acked))
		return;

	spin_lock_bh(&bc_lock);

	/* Skip over packets that node has previously acknowledged */

	crs = bcl->first_out;
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;
		bcbuf_decr_acks(crs);
		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out))
		tipc_link_push_queue(bcl);
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
	spin_unlock_bh(&bc_lock);
}
Example #14
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		buf_discard(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
Example #15
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
		buf_discard(buf);
		goto exit;
	}

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res >= 0)) {
		bclink_set_last_sent();
		bcl->stats.queue_sz_counts++;
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
exit:
	spin_unlock_bh(&bc_lock);
	return res;
}
Example #16
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (tipc_in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_recv_msg(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case NAME_DISTRIBUTOR:
			tipc_named_recv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_recv_proto_msg(buf);
			break;
		default:
			buf_discard(buf);
		}
		return;
	}

	/* Handle message for another node */
	skb_trim(buf, msg_size(msg));
	tipc_link_send(buf, dnode, msg_link_selector(msg));
}
Example #17
void tipc_cltr_broadcast(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	u32 n_num;
	u32 tstart;
	u32 tstop;
	u32 node_type;

	if (tipc_mode == TIPC_NET_MODE) {
		c_ptr = tipc_cltr_find(tipc_own_addr);
		assert(in_own_cluster(c_ptr->addr));	/* For now */

		/* Send to standard nodes, then repeat loop sending to slaves */
		tstart = 1;
		tstop = c_ptr->highest_node;
		for (node_type = 1; node_type <= 2; node_type++) {
			for (n_num = tstart; n_num <= tstop; n_num++) {
				n_ptr = c_ptr->nodes[n_num];
				if (n_ptr && tipc_node_has_active_links(n_ptr)) {
					buf_copy = skb_copy(buf, GFP_ATOMIC);
					if (buf_copy == NULL)
						goto exit;
					msg_set_destnode(buf_msg(buf_copy),
							 n_ptr->addr);
					tipc_link_send(buf_copy, n_ptr->addr,
						       n_ptr->addr);
				}
			}
			tstart = LOWEST_SLAVE;
			tstop = c_ptr->highest_slave;
		}
	}
exit:
	buf_discard(buf);
}
Example #18
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
		   u32 num_sect, unsigned int total_len,
		   int max_size, int usrmem, struct sk_buff **buf)
{
	int dsz, sz, hsz, pos, res, cnt;

	dsz = total_len;
	pos = hsz = msg_hdr_sz(hdr);
	sz = hsz + dsz;
	msg_set_size(hdr, sz);
	if (unlikely(sz > max_size)) {
		*buf = NULL;
		return dsz;
	}

	*buf = tipc_buf_acquire(sz);
	if (!(*buf))
		return -ENOMEM;
	skb_copy_to_linear_data(*buf, hdr, hsz);
	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
		if (likely(usrmem))
			res = !copy_from_user((*buf)->data + pos,
					      msg_sect[cnt].iov_base,
					      msg_sect[cnt].iov_len);
		else
			skb_copy_to_linear_data_offset(*buf, pos,
						       msg_sect[cnt].iov_base,
						       msg_sect[cnt].iov_len);
		pos += msg_sect[cnt].iov_len;
	}
	if (likely(res))
		return dsz;

	buf_discard(*buf);
	*buf = NULL;
	return -EFAULT;
}
Example #19
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.supported))
		goto unlock;

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

receive:
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				buf_discard(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			tipc_node_unlock(node);
			buf_discard(buf);
		}
		buf = NULL;
		tipc_node_lock(node);
		deferred = node->bclink.deferred_head;
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		buf = NULL;
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
	}
unlock:
	tipc_node_unlock(node);
exit:
	buf_discard(buf);
}
Example #20
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
#if (TIPC_BCAST_LOSS_RATE)
	static int rx_count;
#endif
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node = tipc_node_find(msg_prevnode(msg));
	u32 next_in;
	u32 seqno;
	struct sk_buff *deferred;

	msg_dbg(msg, "<BC<<<");

	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
		     (msg_mc_netid(msg) != tipc_net_id))) {
		buf_discard(buf);
		return;
	}

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		msg_dbg(msg, "<BCNACK<<<");
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_node_lock(node);
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bcl->owner->next = node;   /* remember requestor */
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			bcl->owner->next = NULL;
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_bclink_peek_nack(msg_destnode(msg),
					      msg_bcast_tag(msg),
					      msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
		}
		buf_discard(buf);
		return;
	}

#if (TIPC_BCAST_LOSS_RATE)
	if (++rx_count == TIPC_BCAST_LOSS_RATE) {
		rx_count = 0;
		buf_discard(buf);
		return;
	}
#endif

	tipc_node_lock(node);
receive:
	deferred = node->bclink.deferred_head;
	next_in = mod(node->bclink.last_in + 1);
	seqno = msg_seqno(msg);

	if (likely(seqno == next_in)) {
		bcl->stats.recv_info++;
		node->bclink.last_in++;
		bclink_set_gap(node);
		if (unlikely(bclink_ack_allowed(seqno))) {
			bclink_send_ack(node);
			bcl->stats.sent_acks++;
		}
		if (likely(msg_isdata(msg))) {
			tipc_node_unlock(node);
			tipc_port_recv_mcast(buf, NULL);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			bcl->stats.recv_fragments++;
			if (tipc_link_recv_fragment(&node->bclink.defragm,
						    &buf, &msg))
				bcl->stats.recv_fragmented++;
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		} else {
			tipc_node_unlock(node);
			tipc_net_route_msg(buf);
		}
		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
			tipc_node_lock(node);
			buf = deferred;
			msg = buf_msg(buf);
			node->bclink.deferred_head = deferred->next;
			goto receive;
		}
		return;
	} else if (less(next_in, seqno)) {
		u32 gap_after = node->bclink.gap_after;
		u32 gap_to = node->bclink.gap_to;

		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
					&node->bclink.deferred_tail,
					buf)) {
			node->bclink.nack_sync++;
			bcl->stats.deferred_recv++;
			if (seqno == mod(gap_after + 1))
				node->bclink.gap_after = seqno;
			else if (less(gap_after, seqno) && less(seqno, gap_to))
				node->bclink.gap_to = seqno;
		}
		if (bclink_ack_allowed(node->bclink.nack_sync)) {
			if (gap_to != gap_after)
				bclink_send_nack(node);
			bclink_set_gap(node);
		}
	} else {
		bcl->stats.duplicates++;
		buf_discard(buf);
	}
	tipc_node_unlock(node);
}
Example #21
void tipc_cltr_recv_routing_table(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	unchar *node_table;
	u32 table_size;
	u32 router;
	u32 rem_node = msg_remote_node(msg);
	u32 z_num;
	u32 c_num;
	u32 n_num;

	c_ptr = tipc_cltr_find(rem_node);
	if (!c_ptr) {
		c_ptr = tipc_cltr_create(rem_node);
		if (!c_ptr) {
			buf_discard(buf);
			return;
		}
	}

	node_table = buf->data + msg_hdr_sz(msg);
	table_size = msg_size(msg) - msg_hdr_sz(msg);
	router = msg_prevnode(msg);
	z_num = tipc_zone(rem_node);
	c_num = tipc_cluster(rem_node);

	switch (msg_type(msg)) {
	case LOCAL_ROUTING_TABLE:
		assert(is_slave(tipc_own_addr));
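		/* fall through */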
	case EXT_ROUTING_TABLE:
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 addr = tipc_addr(z_num, c_num, n_num);
				n_ptr = c_ptr->nodes[n_num];
				if (!n_ptr) {
					n_ptr = tipc_node_create(addr);
				}
				if (n_ptr)
					tipc_node_add_router(n_ptr, router);
			}
		}
		break;
	case SLAVE_ROUTING_TABLE:
		assert(!is_slave(tipc_own_addr));
		assert(in_own_cluster(c_ptr->addr));
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 slave_num = n_num + LOWEST_SLAVE;
				u32 addr = tipc_addr(z_num, c_num, slave_num);
				n_ptr = c_ptr->nodes[slave_num];
				if (!n_ptr) {
					n_ptr = tipc_node_create(addr);
				}
				if (n_ptr)
					tipc_node_add_router(n_ptr, router);
			}
		}
		break;
	case ROUTE_ADDITION:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr) ||
			       is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr) &&
			       !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (!n_ptr)
			n_ptr = tipc_node_create(rem_node);
		if (n_ptr)
			tipc_node_add_router(n_ptr, router);
		break;
	case ROUTE_REMOVAL:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr) ||
			       is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr) &&
			       !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (n_ptr)
			tipc_node_remove_router(n_ptr, router);
		break;
	default:
		assert(!"Illegal routing manager message received\n");
	}
	buf_discard(buf);
}
Example #22
static void advance_rx_queue(struct sock *sk)
{
	buf_discard(__skb_dequeue(&sk->sk_receive_queue));
	atomic_dec(&tipc_queue_size);
}
Example #23
void tipc_disc_recv_msg(struct sk_buff *buf)
{
	struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
	struct link *link;
	struct tipc_media_addr media_addr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);

	msg_get_media_addr(msg, &media_addr);
	msg_dbg(msg, "RECV:");
	buf_discard(buf);

	if (net_id != tipc_net_id)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr)
		return;
	if (!in_scope(dest, tipc_own_addr))
		return;
	if (is_slave(tipc_own_addr) && is_slave(orig))
		return;
	if (is_slave(orig) && !in_own_cluster(orig))
		return;
	if (in_own_cluster(orig)) {
		/* Always accept link here */
		struct sk_buff *rbuf;
		struct tipc_media_addr *addr;
		struct node *n_ptr = tipc_node_find(orig);
		int link_up;
		dbg(" in own cluster\n");
		if (n_ptr == NULL) {
			n_ptr = tipc_node_create(orig);
		}
		if (n_ptr == NULL) {
			warn("Memory squeeze; Failed to create node\n");
			return;
		}
		spin_lock_bh(&n_ptr->lock);
		link = n_ptr->links[b_ptr->identity];
		if (!link) {
			dbg("creating link\n");
			link = tipc_link_create(b_ptr, orig, &media_addr);
			if (!link) {
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
		}
		addr = &link->media_addr;
		if (memcmp(addr, &media_addr, sizeof(*addr))) {
			char addr_string[16];

			warn("New bearer address for %s\n", 
			     addr_string_fill(addr_string, orig));
			memcpy(addr, &media_addr, sizeof(*addr));
			tipc_link_reset(link);     
		}
		link_up = tipc_link_is_up(link);
		spin_unlock_bh(&n_ptr->lock);
		if ((type == DSC_RESP_MSG) || link_up)
			return;
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
		if (rbuf != NULL) {
			msg_dbg(buf_msg(rbuf),"SEND:");
			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
			buf_discard(rbuf);
		}
	}
}
Example #24
static int shutdown(struct socket *sock, int how)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res;

	/* Could return -EINVAL for an invalid "how", but why bother? */

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	sock_lock(tsock);

	switch (sock->state) {
	case SS_CONNECTED:

		/* Send 'FIN+' or 'FIN-' message to peer */

		sock_unlock(tsock);
restart:
		if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
				buf_discard(buf);
				goto restart;
			}
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tsock->p->ref);
		}
		sock_lock(tsock);

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			buf_discard(buf);
		}
		tsock->p->conn_unacked = 0;

		/* fall through */

	case SS_CONNECTING:
		sock->state = SS_DISCONNECTING;
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	sock_unlock(tsock);

	up(&tsock->sem);
	return res;
}
Example #25
static int release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport;
	struct sk_buff *buf;
	int res;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */

	if (sk == NULL)
		return 0;

	tport = tipc_sk_port(sk);
	lock_sock(sk);

	/*
	 * Reject all unreceived messages, except on an active connection
	 * (which disconnects locally & sends a 'FIN+' to peer)
	 */

	while (sock->state != SS_DISCONNECTING) {
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf == NULL)
			break;
		atomic_dec(&tipc_queue_size);
		if (TIPC_SKB_CB(buf)->handle != 0)
			buf_discard(buf);
		else {
			if ((sock->state == SS_CONNECTING) ||
			    (sock->state == SS_CONNECTED)) {
				sock->state = SS_DISCONNECTING;
				tipc_disconnect(tport->ref);
			}
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		}
	}

	/*
	 * Delete TIPC port; this ensures no more messages are queued
	 * (also disconnects an active connection & sends a 'FIN-' to peer)
	 */

	res = tipc_deleteport(tport->ref);

	/* Discard any remaining (connection-based) messages in receive queue */

	discard_rx_queue(sk);

	/* Reject any messages that accumulated in backlog queue */

	sock->state = SS_DISCONNECTING;
	release_sock(sk);

	sock_put(sk);
	sock->sk = NULL;

	return res;
}
Example #26
void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
{
	struct link *link;
	struct tipc_media_addr media_addr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);

	msg_get_media_addr(msg, &media_addr);
	msg_dbg(msg, "RECV:");
	buf_discard(buf);

	if (net_id != tipc_net_id)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr) {
		if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
			disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
		return;
	}
	if (!in_scope(dest, tipc_own_addr))
		return;
	if (is_slave(tipc_own_addr) && is_slave(orig))
		return;
	if (is_slave(orig) && !in_own_cluster(orig))
		return;
	if (in_own_cluster(orig)) {
		/* Always accept link here */
		struct sk_buff *rbuf;
		struct tipc_media_addr *addr;
		struct tipc_node *n_ptr = tipc_node_find(orig);
		int link_fully_up;

		dbg(" in own cluster\n");
		if (n_ptr == NULL) {
			n_ptr = tipc_node_create(orig);
			if (!n_ptr)
				return;
		}
		spin_lock_bh(&n_ptr->lock);
		link = n_ptr->links[b_ptr->identity];
		if (!link) {
			dbg("creating link\n");
			link = tipc_link_create(b_ptr, orig, &media_addr);
			if (!link) {
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
		}
		addr = &link->media_addr;
		if (memcmp(addr, &media_addr, sizeof(*addr))) {
			if (tipc_link_is_up(link) || (!link->started)) {
				disc_dupl_alert(b_ptr, orig, &media_addr);
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
			warn("Resetting link <%s>, peer interface address changed\n",
			     link->name);
			memcpy(addr, &media_addr, sizeof(*addr));
			tipc_link_reset(link);
		}
		link_fully_up = (link->state == WORKING_WORKING);
		spin_unlock_bh(&n_ptr->lock);
		if ((type == DSC_RESP_MSG) || link_fully_up)
			return;
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
		if (rbuf != NULL) {
			msg_dbg(buf_msg(rbuf),"SEND:");
			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
			buf_discard(rbuf);
		}
	}
}
Example #27
void tipc_disc_recv_msg(struct sk_buff *buf, struct bearer *b_ptr)
{
	struct link *link;
	struct tipc_media_addr media_addr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);

	msg_get_media_addr(msg, &media_addr);
	buf_discard(buf);

	if (net_id != tipc_net_id)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr) {
		if (memcmp(&media_addr, &b_ptr->publ.addr, sizeof(media_addr)))
			disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
		return;
	}
	if (!tipc_in_scope(dest, tipc_own_addr))
		return;
	if (in_own_cluster(orig)) {
		/* Always accept link here */
		struct sk_buff *rbuf;
		struct tipc_media_addr *addr;
		struct tipc_node *n_ptr = tipc_node_find(orig);
		int link_fully_up;

		if (n_ptr == NULL) {
			n_ptr = tipc_node_create(orig);
			if (!n_ptr)
				return;
		}
		spin_lock_bh(&n_ptr->lock);

		/* Don't talk to neighbor during cleanup after last session */

		if (n_ptr->cleanup_required) {
			spin_unlock_bh(&n_ptr->lock);
			return;
		}

		link = n_ptr->links[b_ptr->identity];
		if (!link) {
			link = tipc_link_create(b_ptr, orig, &media_addr);
			if (!link) {
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
		}
		addr = &link->media_addr;
		if (memcmp(addr, &media_addr, sizeof(*addr))) {
			if (tipc_link_is_up(link) || (!link->started)) {
				disc_dupl_alert(b_ptr, orig, &media_addr);
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
			warn("Resetting link <%s>, peer interface address changed\n",
			     link->name);
			memcpy(addr, &media_addr, sizeof(*addr));
			tipc_link_reset(link);
		}
		link_fully_up = link_working_working(link);
		spin_unlock_bh(&n_ptr->lock);
		if ((type == DSC_RESP_MSG) || link_fully_up)
			return;
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
		if (rbuf != NULL) {
			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
			buf_discard(rbuf);
		}
	}
}
Example #28
void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
	struct tipc_node *n_ptr;
	struct tipc_link *link;
	struct tipc_media_addr media_addr, *addr;
	struct sk_buff *rbuf;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);
	int link_fully_up;

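	/*
	 * Preset the broadcast flag: msg2addr() is expected to clear it when
	 * it extracts a usable unicast address, so a request whose address
	 * cannot be converted is screened out by the check below.
	 */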
	media_addr.broadcast = 1;
	b_ptr->media->msg2addr(&media_addr, msg_media_addr(msg));
	buf_discard(buf);

	/* Validate discovery message from requesting node */
	if (net_id != tipc_net_id)
		return;
	if (media_addr.broadcast)
		return;
	if (!tipc_addr_domain_valid(dest))
		return;
	if (!tipc_addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr) {
		if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr)))
			disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr);
		return;
	}
	if (!tipc_in_scope(dest, tipc_own_addr))
		return;
	if (!tipc_in_scope(b_ptr->link_req->domain, orig))
		return;

	/* Locate structure corresponding to requesting node */
	n_ptr = tipc_node_find(orig);
	if (!n_ptr) {
		n_ptr = tipc_node_create(orig);
		if (!n_ptr)
			return;
	}
	tipc_node_lock(n_ptr);

	link = n_ptr->links[b_ptr->identity];

	/* Create a link endpoint for this bearer, if necessary */
	if (!link) {
		link = tipc_link_create(n_ptr, b_ptr, &media_addr);
		if (!link) {
			tipc_node_unlock(n_ptr);
			return;
		}
	}

	/*
	 * Ensure requesting node's media address is correct
	 *
	 * If media address doesn't match and the link is working, reject the
	 * request (must be from a duplicate node).
	 *
	 * If media address doesn't match and the link is not working, accept
	 * the new media address and reset the link to ensure it starts up
	 * cleanly.
	 */
	addr = &link->media_addr;
	if (memcmp(addr, &media_addr, sizeof(*addr))) {
		if (tipc_link_is_up(link) || (!link->started)) {
			disc_dupl_alert(b_ptr, orig, &media_addr);
			tipc_node_unlock(n_ptr);
			return;
		}
		warn("Resetting link <%s>, peer interface address changed\n",
		     link->name);
		memcpy(addr, &media_addr, sizeof(*addr));
		tipc_link_reset(link);
	}

	/* Accept discovery message & send response, if necessary */
	link_fully_up = link_working_working(link);

	if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
		if (rbuf) {
			b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
			buf_discard(rbuf);
		}
	}

	tipc_node_unlock(n_ptr);
}
Example #29
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	spin_lock_bh(&bc_lock);

	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(crs)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;

		if (crs != bcl->next_out)
			bcbuf_decr_acks(crs);
		else {
			bcbuf_set_acks(crs, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			buf_discard(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
exit:
	spin_unlock_bh(&bc_lock);
}