Example no. 1
/**
 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
 * @list: the buffer chain
 * @skb: buffer to be appended and replaced
 * @mtu: max allowable size for the bundle buffer, inclusive header
 * @dnode: destination node for message. (Not always present in header)
 * Replaces buffer if successful
 * Returns true if success, otherwise false
 */
bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
			  u32 mtu, u32 dnode)
{
	struct sk_buff *bskb;
	struct tipc_msg *bmsg;
	struct tipc_msg *msg = buf_msg(skb);
	u32 msz = msg_size(msg);
	u32 max = mtu - INT_H_SIZE;

	if (msg_user(msg) == MSG_FRAGMENTER)
		return false;
	if (msg_user(msg) == CHANGEOVER_PROTOCOL)
		return false;
	if (msg_user(msg) == BCAST_PROTOCOL)
		return false;
	if (msz > (max / 2))
		return false;

	bskb = tipc_buf_acquire(max);
	if (!bskb)
		return false;

	skb_trim(bskb, INT_H_SIZE);
	bmsg = buf_msg(bskb);
	tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
	msg_set_seqno(bmsg, msg_seqno(msg));
	msg_set_ack(bmsg, msg_ack(msg));
	msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
	TIPC_SKB_CB(bskb)->bundling = true;
	__skb_queue_tail(list, bskb);
	return tipc_msg_bundle(list, skb, mtu);
}
Example no. 2
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @list: the buffer chain of the existing buffer ("bundle")
 * @skb:  buffer to be appended
 * @mtu:  max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
{
	struct sk_buff *bskb = skb_peek_tail(list);
	struct tipc_msg *bmsg = buf_msg(bskb);
	struct tipc_msg *msg = buf_msg(skb);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(!TIPC_SKB_CB(bskb)->bundling))
		return false;
	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	kfree_skb(skb);
	return true;
}
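
The pad computed above comes from an align() helper that rounds a bundle size up to the next 4-byte boundary, so each appended message starts on a word boundary. A minimal sketch of such a helper (the name follows the call above; the exact in-tree definition is assumed, not quoted):

static u32 align(u32 i)
{
	/* Round up to the next multiple of 4; e.g. align(61) == 64,
	 * giving pad = 64 - 61 = 3 filler bytes before the next message.
	 */
	return (i + 3) & ~3u;
}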
Example no. 3
/* tipc_msg_reassemble() - clone a buffer chain of fragments and
 *                         reassemble the clones into one message
 */
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
{
	struct sk_buff *skb;
	struct sk_buff *frag = NULL;
	struct sk_buff *head = NULL;
	int hdr_sz;

	/* Copy header if single buffer */
	if (skb_queue_len(list) == 1) {
		skb = skb_peek(list);
		hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
		return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
	}

	/* Clone all fragments and reassemble */
	skb_queue_walk(list, skb) {
		frag = skb_clone(skb, GFP_ATOMIC);
		if (!frag)
			goto error;
		frag->next = NULL;
		if (tipc_buf_append(&head, &frag))
			break;
		if (!head)
			goto error;
	}
	return frag;
error:
	pr_warn("Failed to clone local mcast rcv buffer\n");
	kfree_skb(head);
	return NULL;
}
Example no. 4
/**
 * tipc_msg_eval: determine fate of message that found no destination
 * @buf: the buffer containing the message.
 * @dnode: return value: next-hop node, if message to be forwarded
 *
 * Does not consume buffer
 * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error
 * code if message to be rejected
 */
int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dport;

	if (msg_type(msg) != TIPC_NAMED_MSG)
		return -TIPC_ERR_NO_PORT;
	if (skb_linearize(buf))
		return -TIPC_ERR_NO_NAME;
	if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
		return -TIPC_ERR_NO_NAME;
	if (msg_reroute_cnt(msg) > 0)
		return -TIPC_ERR_NO_NAME;

	*dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg),
				       msg_nameinst(msg),
				       dnode);
	if (!dport)
		return -TIPC_ERR_NO_NAME;
	msg_incr_reroute_cnt(msg);
	msg_set_destnode(msg, *dnode);
	msg_set_destport(msg, dport);
	return TIPC_OK;
}
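
A sketch of how a caller might act on the return value; the forwarding and rejection helpers are borrowed from the other examples in this collection, and the exact call site is an assumption:

	u32 dnode = 0;
	int err = tipc_msg_eval(buf, &dnode);

	if (err == TIPC_OK)
		/* retry delivery toward the freshly resolved destination */
		tipc_link_xmit(buf, dnode, msg_link_selector(buf_msg(buf)));
	else
		/* bounce the message; negate to get the positive TIPC error code */
		tipc_reject_msg(buf, -err);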
Example no. 5
/**
 * tipc_msg_reverse(): swap source and destination addresses and add error code
 * @buf:  buffer containing message to be reversed
 * @dnode: return value: node where to send message after reversal
 * @err:  error code to be set in message
 * Consumes buffer on failure
 * Returns true if success, otherwise false
 */
bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
{
	struct tipc_msg *msg = buf_msg(buf);
	uint imp = msg_importance(msg);
	struct tipc_msg ohdr;
	uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);

	if (skb_linearize(buf))
		goto exit;
	if (msg_dest_droppable(msg))
		goto exit;
	if (msg_errcode(msg))
		goto exit;

	memcpy(&ohdr, msg, msg_hdr_sz(msg));
	imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
	if (msg_isdata(msg))
		msg_set_importance(msg, imp);
	msg_set_errcode(msg, err);
	msg_set_origport(msg, msg_destport(&ohdr));
	msg_set_destport(msg, msg_origport(&ohdr));
	msg_set_prevnode(msg, tipc_own_addr);
	if (!msg_short(msg)) {
		msg_set_orignode(msg, msg_destnode(&ohdr));
		msg_set_destnode(msg, msg_orignode(&ohdr));
	}
	msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
	skb_trim(buf, msg_size(msg));
	skb_orphan(buf);
	*dnode = msg_orignode(&ohdr);
	return true;
exit:
	kfree_skb(buf);
	return false;
}
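
A sketch of the typical rejection path built on this function, assuming the reversed buffer is handed straight back to the link layer as in the routing examples below:

	u32 dnode;

	if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
		tipc_link_xmit(buf, dnode, msg_link_selector(buf_msg(buf)));
	/* on failure tipc_msg_reverse() has already freed the buffer */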
Example no. 6
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		msg_dbg(msg, "tipc_net->drop_nam:");
		buf_discard(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
	    msg_nametype(msg), msg_nameinst(msg), dport, dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	msg_dbg(msg, "tipc_net->rej:NO NAME: ");
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
Example no. 7
static void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
			 u32 lower, u32 upper)
{
	struct sk_buff *buf_copy;
	struct tipc_node *n_ptr;
	u32 n_num;
	u32 tstop;

	assert(lower <= upper);
	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
	       ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
	       ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
	assert(in_own_cluster(c_ptr->addr));

	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
	if (tstop > upper)
		tstop = upper;
	for (n_num = lower; n_num <= tstop; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && tipc_node_has_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (buf_copy == NULL)
				break;
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}
	buf_discard(buf);
}
Example no. 8
void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;
	u32 highest = c_ptr->highest_node;
	u32 n_num;
	int send = 0;

	assert(is_slave(dest));
	assert(in_own_cluster(c_ptr->addr));
	buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, c_ptr->addr);
		msg_set_type(msg, LOCAL_ROUTING_TABLE);
		for (n_num = 1; n_num <= highest; n_num++) {
			if (c_ptr->nodes[n_num] &&
			    tipc_node_has_active_links(c_ptr->nodes[n_num])) {
				send = 1;
				msg_set_dataoctet(msg, n_num);
			}
		}
		if (send)
			tipc_link_send(buf, dest, dest);
		else
			buf_discard(buf);
	} else {
		warn("Memory squeeze: broadcast of local route failed\n");
	}
}
Example no. 9
/**
 * filter_rcv - validate incoming message
 * @sk: socket
 * @buf: message
 *
 * Enqueues message on receive queue if acceptable; optionally handles
 * disconnect indication for a connected socket.
 *
 * Called with socket lock already taken; port lock may also be taken.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */
static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
{
	struct socket *sock = sk->sk_socket;
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit = rcvbuf_limit(sk, buf);
	u32 res = TIPC_OK;

	/* Reject message if it is wrong sort of message for socket */
	if (msg_type(msg) > TIPC_DIRECT_MSG)
		return TIPC_ERR_NO_PORT;

	if (sock->state == SS_READY) {
		if (msg_connected(msg))
			return TIPC_ERR_NO_PORT;
	} else {
		res = filter_connect(tipc_sk(sk), &buf);
		if (res != TIPC_OK || buf == NULL)
			return res;
	}

	/* Reject message if there isn't room to queue it */
	if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
		return TIPC_ERR_OVERLOAD;

	/* Enqueue message */
	TIPC_SKB_CB(buf)->handle = 0;
	__skb_queue_tail(&sk->sk_receive_queue, buf);
	skb_set_owner_r(buf, sk);

	sk->sk_data_ready(sk, 0);
	return TIPC_OK;
}
Example no. 10
/**
 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
 * @net: the applicable net namespace
 * @skb: the packet to be sent
 * @b: the bearer through which the packet is to be sent
 * @dest: peer destination address
 */
int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
		     struct tipc_bearer *b, struct tipc_media_addr *dest)
{
	struct net_device *dev;
	int delta;
	void *tipc_ptr;

	dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
	if (!dev)
		return 0;

	/* Send RESET message even if bearer is detached from device */
	tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
	if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb))))
		goto drop;

	delta = dev->hard_header_len - skb_headroom(skb);
	if ((delta > 0) &&
	    pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
		goto drop;

	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(ETH_P_TIPC);
	dev_hard_header(skb, dev, ETH_P_TIPC, dest->value,
			dev->dev_addr, skb->len);
	dev_queue_xmit(skb);
	return 0;
drop:
	kfree_skb(skb);
	return 0;
}
Example no. 11
static void bclink_send_nack(struct tipc_node *n_ptr)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
		return;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			 INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
		msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
		msg_set_bcast_tag(msg, tipc_own_tag);

		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		buf_discard(buf);

		/*
		 * Ensure we don't send another NACK msg to the node
		 * until 16 more deferred messages arrive from it
		 * (i.e. helps prevent all nodes from NACK'ing at same time)
		 */

		n_ptr->bclink.nack_sync = tipc_own_tag;
	}
}
Example no. 12
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	msg_incr_reroute_cnt(msg);
	if (msg_reroute_cnt(msg) > 6) {
		if (msg_errcode(msg)) {
			msg_dbg(msg, "NET>DISC>:");
			buf_discard(buf);
		} else {
			msg_dbg(msg, "NET>REJ>:");
			tipc_reject_msg(buf, msg_destport(msg) ?
					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
		}
		return;
	}

	msg_dbg(msg, "tipc_net->rout: ");

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (tipc_in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_recv_msg(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case ROUTE_DISTRIBUTOR:
			tipc_cltr_recv_routing_table(buf);
			break;
		case NAME_DISTRIBUTOR:
			tipc_named_recv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_recv_proto_msg(buf);
			break;
		default:
			msg_dbg(msg,"DROP/NET/<REC<");
			buf_discard(buf);
		}
		return;
	}

	/* Handle message for another node */
	msg_dbg(msg, "NET>SEND>: ");
	tipc_link_send(buf, dnode, msg_link_selector(msg));
}
Example no. 13
/**
 * tipc_disc_rcv - handle incoming discovery message (request or response)
 * @net: the applicable net namespace
 * @skb: buffer containing message
 * @bearer: bearer that message arrived on
 */
void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
		   struct tipc_bearer *bearer)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_media_addr maddr;
	struct sk_buff *rskb;
	struct tipc_msg *hdr = buf_msg(skb);
	u32 ddom = msg_dest_domain(hdr);
	u32 onode = msg_prevnode(hdr);
	u32 net_id = msg_bc_netid(hdr);
	u32 mtyp = msg_type(hdr);
	u32 signature = msg_node_sig(hdr);
	u16 caps = msg_node_capabilities(hdr);
	bool respond = false;
	bool dupl_addr = false;
	int err;

	err = bearer->media->msg2addr(bearer, &maddr, msg_media_addr(hdr));
	kfree_skb(skb);
	if (err)
		return;

	/* Ensure message from node is valid and communication is permitted */
	if (net_id != tn->net_id)
		return;
	if (maddr.broadcast)
		return;
	if (!tipc_addr_domain_valid(ddom))
		return;
	if (!tipc_addr_node_valid(onode))
		return;

	if (in_own_node(net, onode)) {
		if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
			disc_dupl_alert(bearer, tn->own_addr, &maddr);
		return;
	}
	if (!tipc_in_scope(ddom, tn->own_addr))
		return;
	if (!tipc_in_scope(bearer->domain, onode))
		return;

	tipc_node_check_dest(net, onode, bearer, caps, signature,
			     &maddr, &respond, &dupl_addr);
	if (dupl_addr)
		disc_dupl_alert(bearer, onode, &maddr);

	/* Send response, if necessary */
	if (respond && (mtyp == DSC_REQ_MSG)) {
		rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
		if (!rskb)
			return;
		tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
		tipc_bearer_xmit_skb(net, bearer->identity, rskb, &maddr);
	}
}
Example no. 14
/**
 * rcvbuf_limit - get proper overload limit of socket receive queue
 * @sk: socket
 * @buf: message
 *
 * For all connection oriented messages, irrespective of importance,
 * the default overload limit (i.e. 67MB) applies.
 *
 * For all connectionless messages, the default queue limits are
 * as below:
 *
 * TIPC_LOW_IMPORTANCE       (5MB)
 * TIPC_MEDIUM_IMPORTANCE    (10MB)
 * TIPC_HIGH_IMPORTANCE      (20MB)
 * TIPC_CRITICAL_IMPORTANCE  (40MB)
 *
 * Returns overload limit according to corresponding message importance
 */
static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	unsigned int limit;

	if (msg_connected(msg))
		limit = CONN_OVERLOAD_LIMIT;
	else
		limit = sk->sk_rcvbuf << (msg_importance(msg) + 5);
	return limit;
}
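
The shift encodes the four documented limits: the multiplier is 1 << (importance + 5), i.e. 32x for TIPC_LOW_IMPORTANCE up to 256x for TIPC_CRITICAL_IMPORTANCE. Assuming a base sk_rcvbuf of about 160 KiB (an assumed value, chosen so the arithmetic reproduces the limits in the comment block):

/*
 * Worked example with an assumed sk_rcvbuf of 160 KiB:
 *   TIPC_LOW_IMPORTANCE      (0): 160 KiB << 5 = 160 KiB * 32  =  5 MB
 *   TIPC_MEDIUM_IMPORTANCE   (1): 160 KiB << 6 = 160 KiB * 64  = 10 MB
 *   TIPC_HIGH_IMPORTANCE     (2): 160 KiB << 7 = 160 KiB * 128 = 20 MB
 *   TIPC_CRITICAL_IMPORTANCE (3): 160 KiB << 8 = 160 KiB * 256 = 40 MB
 */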
Example no. 15
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * tipc_net_lock and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
{
	struct sk_buff *buf;

	/* Ignore "stale" link state info */

	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */

	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */

	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */

	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);

		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
				 : n_ptr->bclink.last_sent);

		spin_lock_bh(&bc_lock);
		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
		bcl->stats.sent_nacks++;
		spin_unlock_bh(&bc_lock);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}
Example no. 16
static int release(struct socket *sock)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sock *sk = sock->sk;
	int res = TIPC_OK;
	struct sk_buff *buf;

	dbg("sock_delete: %x\n",tsock);
	if (!tsock)
		return 0;
	down_interruptible(&tsock->sem);
	if (!sock->sk) {
		up(&tsock->sem);
		return 0;
	}

	/* Reject unreceived messages, unless no longer connected */

	while (sock->state != SS_DISCONNECTING) {
		sock_lock(tsock);
		buf = skb_dequeue(&sk->sk_receive_queue);
		if (!buf)
			tsock->p->usr_handle = NULL;
		sock_unlock(tsock);
		if (!buf)
			break;
		if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
			buf_discard(buf);
		else
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}

	/* Delete TIPC port */

	res = tipc_deleteport(tsock->p->ref);
	sock->sk = NULL;

	/* Discard any remaining messages */

	while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
		buf_discard(buf);
		atomic_dec(&tipc_queue_size);
	}

	up(&tsock->sem);

	sock_put(sk);

	atomic_dec(&tipc_user_count);
	return res;
}
Example no. 17
static int shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct tipc_port *tport = tipc_sk_port(sk);
	struct sk_buff *buf;
	int res;

	if (how != SHUT_RDWR)
		return -EINVAL;

	lock_sock(sk);

	switch (sock->state) {
	case SS_CONNECTING:
	case SS_CONNECTED:

		/* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
restart:
		buf = __skb_dequeue(&sk->sk_receive_queue);
		if (buf) {
			atomic_dec(&tipc_queue_size);
			if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
				buf_discard(buf);
				goto restart;
			}
			tipc_disconnect(tport->ref);
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		} else {
			tipc_shutdown(tport->ref);
		}

		sock->state = SS_DISCONNECTING;

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages; wake up sleeping tasks */

		discard_rx_queue(sk);
		if (waitqueue_active(sk_sleep(sk)))
			wake_up_interruptible(sk_sleep(sk));
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	release_sock(sk);
	return res;
}
Example no. 18
static struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
{
	u32 size = INT_H_SIZE + data_size;
	struct sk_buff *buf = buf_acquire(size);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		memset((char *)msg, 0, size);
		msg_init(msg, ROUTE_DISTRIBUTOR, 0, INT_H_SIZE, dest);
	}
	return buf;
}
Example no. 19
void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
				u32 lower, u32 upper)
{
	struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_set_remote_node(msg, dest);
		msg_set_type(msg, ROUTE_REMOVAL);
		tipc_cltr_multicast(c_ptr, buf, lower, upper);
	} else {
		warn("Memory squeeze: broadcast of lost route failed\n");
	}
}
Example no. 20
File: bearer.c Project: krzk/linux
/* tipc_bearer_xmit_skb - sends buffer to destination over bearer
 */
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
			  struct sk_buff *skb,
			  struct tipc_media_addr *dest)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = bearer_get(net, bearer_id);
	if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
		b->media->send_msg(net, skb, b, dest);
	else
		kfree_skb(skb);
	rcu_read_unlock();
}
Example no. 21
static struct sk_buff *tipc_disc_init_msg(u32 type,
					  u32 dest_domain,
					  struct tipc_bearer *b_ptr)
{
	struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
		msg_set_non_seq(msg, 1);
		msg_set_dest_domain(msg, dest_domain);
		msg_set_bc_netid(msg, tipc_net_id);
		b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
	}
	return buf;
}
Example no. 22
/**
 * tipc_disc_init_msg - initialize a link setup message
 * @net: the applicable net namespace
 * @buf: buffer containing message
 * @type: message type (request or response)
 * @b: ptr to bearer issuing message
 */
static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
			       struct tipc_bearer *b)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_msg *msg;
	u32 dest_domain = b->domain;

	msg = buf_msg(buf);
	tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
		      MAX_H_SIZE, dest_domain);
	msg_set_non_seq(msg, 1);
	msg_set_node_sig(msg, tn->random);
	msg_set_node_capabilities(msg, TIPC_NODE_CAPABILITIES);
	msg_set_dest_domain(msg, dest_domain);
	msg_set_bc_netid(msg, tn->net_id);
	b->media->addr2msg(msg_media_addr(msg), &b->addr);
}
Example no. 23
static struct sk_buff *tipc_disc_init_msg(u32 type,
					  u32 req_links,
					  u32 dest_domain,
					  struct bearer *b_ptr)
{
	struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
	struct tipc_msg *msg;

	if (buf) {
		msg = buf_msg(buf);
		msg_init(msg, LINK_CONFIG, type, DSC_H_SIZE, dest_domain);
		msg_set_non_seq(msg, 1);
		msg_set_req_links(msg, req_links);
		msg_set_dest_domain(msg, dest_domain);
		msg_set_bc_netid(msg, tipc_net_id);
		msg_set_media_addr(msg, &b_ptr->publ.addr);
	}
	return buf;
}
Example no. 24
File: bearer.c Project: krzk/linux
/* tipc_bearer_xmit() - send buffer to destination over bearer
 */
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
		      struct sk_buff_head *xmitq,
		      struct tipc_media_addr *dst)
{
	struct tipc_bearer *b;
	struct sk_buff *skb, *tmp;

	if (skb_queue_empty(xmitq))
		return;

	rcu_read_lock();
	b = bearer_get(net, bearer_id);
	if (unlikely(!b))
		__skb_queue_purge(xmitq);
	skb_queue_walk_safe(xmitq, skb, tmp) {
		__skb_dequeue(xmitq);
		if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
			b->media->send_msg(net, skb, b, dst);
		else
			kfree_skb(skb);
	}
	rcu_read_unlock();
}
Example no. 25
File: net.c Project: 7799/linux
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	if (!msg_named(msg)) {
		kfree_skb(buf);
		return;
	}

	dnode = addr_domain(msg_lookup_scope(msg));
	dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	if (dport) {
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		tipc_net_route_msg(buf);
		return;
	}
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
Example no. 26
/*
 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
 */
int tipc_bclink_send_msg(struct sk_buff *buf)
{
	int res;

	spin_lock_bh(&bc_lock);

	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
		kfree_skb(buf);
		goto exit;
	}

	res = tipc_link_send_buf(bcl, buf);
	if (likely(res >= 0)) {
		bclink_set_last_sent();
		bcl->stats.queue_sz_counts++;
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
exit:
	spin_unlock_bh(&bc_lock);
	return res;
}
Example no. 27
File: net.c Project: 7799/linux
void tipc_net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (tipc_in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				tipc_port_mcast_rcv(buf, NULL);
			else if (msg_destport(msg))
				tipc_port_rcv(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		switch (msg_user(msg)) {
		case NAME_DISTRIBUTOR:
			tipc_named_rcv(buf);
			break;
		case CONN_MANAGER:
			tipc_port_proto_rcv(buf);
			break;
		default:
			kfree_skb(buf);
		}
		return;
	}

	/* Handle message for another node */
	skb_trim(buf, msg_size(msg));
	tipc_link_xmit(buf, dnode, msg_link_selector(msg));
}
Example no. 28
void tipc_cltr_broadcast(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct cluster *c_ptr;
	struct tipc_node *n_ptr;
	u32 n_num;
	u32 tstart;
	u32 tstop;
	u32 node_type;

	if (tipc_mode == TIPC_NET_MODE) {
		c_ptr = tipc_cltr_find(tipc_own_addr);
		assert(in_own_cluster(c_ptr->addr));	/* For now */

		/* Send to standard nodes, then repeat loop sending to slaves */
		tstart = 1;
		tstop = c_ptr->highest_node;
		for (node_type = 1; node_type <= 2; node_type++) {
			for (n_num = tstart; n_num <= tstop; n_num++) {
				n_ptr = c_ptr->nodes[n_num];
				if (n_ptr && tipc_node_has_active_links(n_ptr)) {
					buf_copy = skb_copy(buf, GFP_ATOMIC);
					if (buf_copy == NULL)
						goto exit;
					msg_set_destnode(buf_msg(buf_copy),
							 n_ptr->addr);
					tipc_link_send(buf_copy, n_ptr->addr,
						       n_ptr->addr);
				}
			}
			tstart = LOWEST_SLAVE;
			tstop = c_ptr->highest_slave;
		}
	}
exit:
	buf_discard(buf);
}
Example no. 29
struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
				uint data_sz, u32 dnode, u32 onode,
				u32 dport, u32 oport, int errcode)
{
	struct tipc_msg *msg;
	struct sk_buff *buf;

	buf = tipc_buf_acquire(hdr_sz + data_sz);
	if (unlikely(!buf))
		return NULL;

	msg = buf_msg(buf);
	tipc_msg_init(msg, user, type, hdr_sz, dnode);
	msg_set_size(msg, hdr_sz + data_sz);
	msg_set_prevnode(msg, onode);
	msg_set_origport(msg, oport);
	msg_set_destport(msg, dport);
	msg_set_errcode(msg, errcode);
	if (hdr_sz > SHORT_H_SIZE) {
		msg_set_orignode(msg, onode);
		msg_set_destnode(msg, dnode);
	}
	return buf;
}
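
A hedged usage sketch: the parameter values below are illustrative only (CONN_MANAGER and CONN_PROBE are existing TIPC message user/type constants, but this exact call site is an assumption), showing a short header-only message addressed from (onode, oport) to (dnode, dport):

	struct sk_buff *skb;

	/* Hypothetical example: empty connection-manager probe message */
	skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
			      dnode, onode, dport, oport, TIPC_OK);
	if (skb)
		tipc_link_xmit(skb, dnode, msg_link_selector(buf_msg(skb)));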
Example no. 30
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		struct tipc_msg *msg;

		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg = buf_msg(buf);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tipc_net_id);
		bcl->stats.sent_info++;

		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *b = p;
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */

		if (tipc_bearer_blocked(p)) {
			if (!s || tipc_bearer_blocked(s))
				continue; /* Can't use either bearer */
			b = s;
		}

		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(b, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(b, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}

		/* Swap bearers for next packet */
		if (s) {
			bcbearer->bpairs[bp_index].primary = s;
			bcbearer->bpairs[bp_index].secondary = p;
		}

		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}