bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id)
{
	bool supp = false;
	struct tipc_bearer *b;

	rcu_read_lock();
	b = bearer_get(net, bearer_id);
	if (b)
		supp = (b->bcast_addr.broadcast == TIPC_BROADCAST_SUPPORT);
	rcu_read_unlock();
	return supp;
}
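/* Hypothetical usage sketch (not part of the original file): a caller could
 * gate the choice between one true broadcast and per-destination unicasts on
 * what tipc_bearer_bcast_support() reports. The helper name and the dest_cnt
 * parameter are illustrative assumptions only.
 */
static bool example_prefer_true_broadcast(struct net *net, u32 bearer_id,
					  int dest_cnt)
{
	/* Broadcasting only pays off with multiple destinations and a
	 * bearer that actually supports it.
	 */
	return dest_cnt > 1 && tipc_bearer_bcast_support(net, bearer_id);
}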
/* tipc_bearer_reset_all - reset all links on all bearers */
void tipc_bearer_reset_all(struct net *net)
{
	struct tipc_bearer *b;
	int i;

	/* First pass: take every bearer down so it accepts no new traffic */
	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			clear_bit_unlock(0, &b->up);
	}
	/* Second pass: reset all links running over the bearers */
	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			tipc_reset_bearer(net, b);
	}
	/* Third pass: bring the bearers back up */
	for (i = 0; i < MAX_BEARERS; i++) {
		b = bearer_get(net, i);
		if (b)
			test_and_set_bit_lock(0, &b->up);
	}
}
/* tipc_bearer_xmit_skb - sends buffer to destination over bearer */
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
			  struct sk_buff *skb,
			  struct tipc_media_addr *dest)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct tipc_bearer *b;

	rcu_read_lock();
	b = bearer_get(net, bearer_id);
	if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
		b->media->send_msg(net, skb, b, dest);
	else
		kfree_skb(skb);
	rcu_read_unlock();
}
/* tipc_bearer_xmit() - send buffer queue to destination over bearer */
void tipc_bearer_xmit(struct net *net, u32 bearer_id,
		      struct sk_buff_head *xmitq,
		      struct tipc_media_addr *dst)
{
	struct tipc_bearer *b;
	struct sk_buff *skb, *tmp;

	if (skb_queue_empty(xmitq))
		return;

	rcu_read_lock();
	b = bearer_get(net, bearer_id);
	/* If the bearer is gone, purge the queue; the walk below then
	 * finds it empty and never dereferences b.
	 */
	if (unlikely(!b))
		__skb_queue_purge(xmitq);
	skb_queue_walk_safe(xmitq, skb, tmp) {
		__skb_dequeue(xmitq);
		if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
			b->media->send_msg(net, skb, b, dst);
		else
			kfree_skb(skb);
	}
	rcu_read_unlock();
}
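/* Hypothetical usage sketch (not part of the original file): callers of
 * tipc_bearer_xmit() collect buffers into an sk_buff_head and hand over the
 * whole queue, which is always fully consumed (each buffer is either sent or
 * freed). The helper name is an illustrative assumption only.
 */
static void example_xmit_one(struct net *net, u32 bearer_id,
			     struct sk_buff *skb,
			     struct tipc_media_addr *dst)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);
	__skb_queue_tail(&xmitq, skb);
	tipc_bearer_xmit(net, bearer_id, &xmitq, dst);
}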