Example 1
/**
 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
 * @list: the buffer chain of the existing buffer ("bundle")
 * @skb:  buffer to be appended
 * @mtu:  max allowable size for the bundle buffer
 * Consumes buffer if successful
 * Returns true if bundling could be performed, otherwise false
 */
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
{
	struct sk_buff *bskb = skb_peek_tail(list);
	struct tipc_msg *bmsg = buf_msg(bskb);
	struct tipc_msg *msg = buf_msg(skb);
	unsigned int bsz = msg_size(bmsg);
	unsigned int msz = msg_size(msg);
	u32 start = align(bsz);
	u32 max = mtu - INT_H_SIZE;
	u32 pad = start - bsz;

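	/* Never bundle fragments or changeover/broadcast protocol messages,
	 * and only append to a tail buffer that is still an open bundle
	 * with room for the padded message inside the link MTU.
	 */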
	if (likely(msg_user(msg) == MSG_FRAGMENTER))
		return false;
	if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
		return false;
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
		return false;
	if (likely(msg_user(bmsg) != MSG_BUNDLER))
		return false;
	if (likely(!TIPC_SKB_CB(bskb)->bundling))
		return false;
	if (unlikely(skb_tailroom(bskb) < (pad + msz)))
		return false;
	if (unlikely(max < (start + msz)))
		return false;

	skb_put(bskb, pad + msz);
	skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
	msg_set_size(bmsg, start + msz);
	msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
	kfree_skb(skb);
	return true;
}
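Every example below funnels into the same helper. For reference, skb_copy_to_linear_data_offset() is, modulo minor version differences, a thin memcpy() wrapper in include/linux/skbuff.h; a negative offset writes into the headroom in front of skb->data:

static inline void skb_copy_to_linear_data_offset(struct sk_buff *skb,
						  const int offset,
						  const void *from,
						  const unsigned int len)
{
	/* The caller guarantees the linear area (or the headroom, for a
	 * negative offset) can hold len bytes at skb->data + offset.
	 */
	memcpy(skb->data + offset, from, len);
}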
Example 2
/*
 * When forwarding bridge frames, we save a copy of the original
 * header before processing.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
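The negative offset is the idiom to note here: skb_cow_head() guarantees header_size bytes of writable headroom, the copy lands in the bytes immediately before skb->data, and __skb_push() then exposes the encapsulation part. Only the encap length is pushed; the Ethernet header stays in the headroom, and the caller (br_dev_queue_push_xmit in this era) pushes the final ETH_HLEN itself before handing the skb to dev_queue_xmit(). The saving counterpart looks roughly like this (a sketch after br_netfilter.c of the same era):

static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	/* Stash everything in front of skb->data: the Ethernet header
	 * plus any VLAN/PPPoE encapsulation.
	 */
	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}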
Example 3
/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->mask &= ~BRNF_BRIDGED_DNAT;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;
	br_handle_frame_finish(NULL, skb);
}
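The offset arithmetic deserves spelling out. After skb_pull(skb, ETH_HLEN), skb->data points at the network header, so the Ethernet header occupies the 14 bytes just before it; only the trailing ETH_HLEN - ETH_ALEN bytes are rewritten, restoring the source MAC and EtherType while keeping the destination MAC that neigh->output resolved:

/* Layout relative to skb->data after skb_pull(skb, ETH_HLEN):
 *
 *   bytes -14..-9 : destination MAC (ETH_ALEN) - kept as set by neigh->output
 *   bytes  -8..-1 : source MAC + EtherType     - restored from
 *                   nf_bridge->neigh_header
 *   byte   0      : start of network header
 */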
Example 4
/* When forwarding bridge frames, a copy of the original header (including
 * any VLAN/PPPoE encapsulation) is saved before processing; restore it
 * here from skb->nf_bridge->data, e.g. for IP fragments rebuilt after
 * connection tracking defragmented the packet.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}
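Relative to Example 2, this variant calls nf_bridge_update_protocol() first, so that skb->protocol reflects any VLAN/PPPoE encapsulation and nf_bridge_encap_header_len() computes the right size for both the copy and the push.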
Example 5
/* This is called when br_netfilter has called into iptables/netfilter,
 * and DNAT has taken place on a bridge-forwarded packet.
 *
 * neigh->output has created a new MAC header, with local br0 MAC
 * as saddr.
 *
 * This restores the original MAC saddr of the bridged packet
 * before invoking bridge forward logic to transmit the packet.
 */
static void br_nf_pre_routing_finish_bridge_slow(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb);

	skb_pull(skb, ETH_HLEN);
	nf_bridge->bridged_dnat = 0;

	BUILD_BUG_ON(sizeof(nf_bridge->neigh_header) != (ETH_HLEN - ETH_ALEN));

	skb_copy_to_linear_data_offset(skb, -(ETH_HLEN - ETH_ALEN),
				       nf_bridge->neigh_header,
				       ETH_HLEN - ETH_ALEN);
	skb->dev = nf_bridge->physindev;

	nf_bridge->physoutdev = NULL;
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
}
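Compared with Example 3, this later revision flips the dedicated bridged_dnat bitfield instead of masking BRNF_BRIDGED_DNAT, clears physoutdev, and passes the network namespace through to br_handle_frame_finish().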
Example 6
static int br_nf_push_frag_xmit(struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	return br_dev_queue_push_xmit(sk, skb);
}
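The per-CPU brnf_frag_data is filled in by the caller just before it asks the IP layer to fragment, and every resulting fragment is emitted through this handler to get its link-layer bytes back. The saving side is roughly (a sketch after br_nf_dev_queue_xmit of the same era):

	data = this_cpu_ptr(&brnf_frag_data_storage);
	data->encap_size = nf_bridge_encap_header_len(skb);
	data->size = ETH_HLEN + data->encap_size;

	/* Save everything in front of skb->data: the Ethernet header
	 * plus any VLAN/PPPoE encapsulation.
	 */
	skb_copy_from_linear_data_offset(skb, -data->size, data->mac,
					 data->size);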
Example 7
int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
		   u32 num_sect, unsigned int total_len, int max_size,
		   int usrmem, struct sk_buff **buf)
{
	int dsz, sz, hsz, pos, res, cnt;

	dsz = total_len;
	pos = hsz = msg_hdr_sz(hdr);
	sz = hsz + dsz;
	msg_set_size(hdr, sz);
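	/* Message too large for a single buffer: hand back the data size
	 * with *buf set to NULL so the caller can fall back to
	 * fragmentation.
	 */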
	if (unlikely(sz > max_size)) {
		*buf = NULL;
		return dsz;
	}

	*buf = tipc_buf_acquire(sz);
	if (!(*buf))
		return -ENOMEM;
	skb_copy_to_linear_data(*buf, hdr, hsz);
	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
		if (likely(usrmem))
			res = !copy_from_user((*buf)->data + pos,
					      msg_sect[cnt].iov_base,
					      msg_sect[cnt].iov_len);
		else
			skb_copy_to_linear_data_offset(*buf, pos,
						       msg_sect[cnt].iov_base,
						       msg_sect[cnt].iov_len);
		pos += msg_sect[cnt].iov_len;
	}
	if (likely(res))
		return dsz;

	buf_discard(*buf);
	*buf = NULL;
	return -EFAULT;
}
Example 8
static int br_nf_push_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct brnf_frag_data *data;
	int err;

	data = this_cpu_ptr(&brnf_frag_data_storage);
	err = skb_cow_head(skb, data->size);

	if (err) {
		kfree_skb(skb);
		return 0;
	}

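	/* Re-attach the out-of-band VLAN tag that the caller saved into
	 * the per-CPU area before fragmenting.
	 */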
	if (data->vlan_tci) {
		skb->vlan_tci = data->vlan_tci;
		skb->vlan_proto = data->vlan_proto;
	}

	skb_copy_to_linear_data_offset(skb, -data->size, data->mac, data->size);
	__skb_push(skb, data->encap_size);

	nf_bridge_info_free(skb);
	return br_dev_queue_push_xmit(net, sk, skb);
}
Example 9
static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	__u16 appl_id;
	int _len, _len2;
	__u8 msghead[64];
	hycapictrl_info *cinfo = ctrl->driverdata;
	u16 retval = CAPI_NOERROR;

	appl_id = CAPIMSG_APPID(skb->data);
	switch (_hycapi_appCheck(appl_id, ctrl->cnr))
	{
	case 0:
/*			printk(KERN_INFO "Need to register\n"); */
		hycapi_register_internal(ctrl,
					 appl_id,
					 &(hycapi_applications[appl_id - 1].rp));
		break;
	case 1:
		break;
	default:
		printk(KERN_ERR "HYCAPI: Controller mixup!\n");
		retval = CAPI_ILLAPPNR;
		goto out;
	}
	switch (CAPIMSG_CMD(skb->data)) {
	case CAPI_DISCONNECT_B3_RESP:
		capilib_free_ncci(&cinfo->ncci_head, appl_id,
				  CAPIMSG_NCCI(skb->data));
		break;
	case CAPI_DATA_B3_REQ:
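		/* A DATA_B3 request begins with the fixed 22-byte CAPI
		 * header, followed by additional parameters.  Move the
		 * fixed header down so it immediately precedes the B3
		 * payload, then pull off the now-unused bytes in between.
		 */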
		_len = CAPIMSG_LEN(skb->data);
		if (_len > 22) {
			_len2 = _len - 22;
			skb_copy_from_linear_data(skb, msghead, 22);
			skb_copy_to_linear_data_offset(skb, _len2,
						       msghead, 22);
			skb_pull(skb, _len2);
			CAPIMSG_SETLEN(skb->data, 22);
			retval = capilib_data_b3_req(&cinfo->ncci_head,
						     CAPIMSG_APPID(skb->data),
						     CAPIMSG_NCCI(skb->data),
						     CAPIMSG_MSGID(skb->data));
		}
		break;
	case CAPI_LISTEN_REQ:
		if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1])
		{
			kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
			hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
		}
		if (!(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = skb_copy(skb, GFP_ATOMIC)))
		{
			printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
		}
		break;
	default:
		break;
	}
out:
	if (retval == CAPI_NOERROR)
		hycapi_sendmsg_internal(ctrl, skb);
	else
		dev_kfree_skb_any(skb);

	return retval;
}
Example 10
/**
 * tipc_msg_build - create buffer chain containing specified header and data
 * @mhdr: Message header, to be prepended to data
 * @m: User message
 * @offset: Position in iov to start copying from
 * @dsz: Total length of user data
 * @pktmax: Max packet size that can be used
 * @list: Buffer or chain of buffers to be returned to caller
 *
 * Returns message data size or errno: -ENOMEM, -EFAULT
 */
int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
		   int dsz, int pktmax, struct sk_buff_head *list)
{
	int mhsz = msg_hdr_sz(mhdr);
	int msz = mhsz + dsz;
	int pktno = 1;
	int pktsz;
	int pktrem = pktmax;
	int drem = dsz;
	struct tipc_msg pkthdr;
	struct sk_buff *skb;
	char *pktpos;
	int rc;

	msg_set_size(mhdr, msz);

	/* No fragmentation needed? */
	if (likely(msz <= pktmax)) {
		skb = tipc_buf_acquire(msz);
		if (unlikely(!skb))
			return -ENOMEM;
		__skb_queue_tail(list, skb);
		skb_copy_to_linear_data(skb, mhdr, mhsz);
		pktpos = skb->data + mhsz;
		if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
						 dsz))
			return dsz;
		rc = -EFAULT;
		goto error;
	}

	/* Prepare reusable fragment header */
	tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		      INT_H_SIZE, msg_destnode(mhdr));
	msg_set_size(&pkthdr, pktmax);
	msg_set_fragm_no(&pkthdr, pktno);

	/* Prepare first fragment */
	skb = tipc_buf_acquire(pktmax);
	if (!skb)
		return -ENOMEM;
	__skb_queue_tail(list, skb);
	pktpos = skb->data;
	skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
	pktpos += INT_H_SIZE;
	pktrem -= INT_H_SIZE;
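	/* The original message header travels as payload of the first
	 * fragment, right behind the fragment header, so the receiver can
	 * rebuild the complete message on reassembly.
	 */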
	skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
	pktpos += mhsz;
	pktrem -= mhsz;

	do {
		if (drem < pktrem)
			pktrem = drem;

		if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
			rc = -EFAULT;
			goto error;
		}
		drem -= pktrem;
		offset += pktrem;

		if (!drem)
			break;

		/* Prepare new fragment: */
		if (drem < (pktmax - INT_H_SIZE))
			pktsz = drem + INT_H_SIZE;
		else
			pktsz = pktmax;
		skb = tipc_buf_acquire(pktsz);
		if (!skb) {
			rc = -ENOMEM;
			goto error;
		}
		__skb_queue_tail(list, skb);
		msg_set_type(&pkthdr, FRAGMENT);
		msg_set_size(&pkthdr, pktsz);
		msg_set_fragm_no(&pkthdr, ++pktno);
		skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
		pktpos = skb->data + INT_H_SIZE;
		pktrem = pktsz - INT_H_SIZE;

	} while (1);
	msg_set_type(buf_msg(skb), LAST_FRAGMENT);
	return dsz;
error:
	__skb_queue_purge(list);
	__skb_queue_head_init(list);
	return rc;
}