Example #1
s32 mpls_vpws_input(struct sk_buff * skb)
{
    struct sk_buff * new_skb;

    MPLS_DEBUG_COUNTER_INC(vpws_input);

    /*
     * Recover the Ethernet header: skb_realloc_headroom() does not
     * copy data that sits before skb->data into the new skb, so the
     * header must be pushed back into the linear area first.
     */
    skb_push(skb, ETH_HLEN);

    new_skb = skb_realloc_headroom(skb, MPLS_L2VPN_HLEN);
    if (unlikely(!new_skb))
        goto drop;

    /* Transfer socket ownership to the new skb. */
    if (skb->sk)
    {
        skb_set_owner_w(new_skb, skb->sk);
    }  
    kfree_skb(skb);

    return mpls_vpws_forward(new_skb);

drop:
    kfree_skb(skb);
    MPLS_DEBUG_COUNTER_INC(vpws_input_skb_alloc);
    return MPLS_L2VPN_DROP;
}
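Nearly every example on this page repeats the same idiom: when an skb must be replaced (copied, or reallocated for headroom), the replacement has to be charged to the original owning socket via skb_set_owner_w() before the old buffer is freed, otherwise the socket's sk_wmem_alloc send-buffer accounting is silently broken. A distilled sketch of that idiom follows; replace_headroom() is an illustrative name, not a kernel API.

static struct sk_buff *replace_headroom(struct sk_buff *skb,
					unsigned int headroom)
{
	struct sk_buff *nskb = skb_realloc_headroom(skb, headroom);

	if (!nskb)
		return NULL;		/* caller still owns and frees skb */

	/* Keep send-buffer accounting attached to the original socket. */
	if (skb->sk)
		skb_set_owner_w(nskb, skb->sk);

	consume_skb(skb);		/* the old buffer is no longer needed */
	return nskb;
}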
Example #2
static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
				gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct sk_buff *skb;
	struct pnpipehdr *ph;
	struct sockaddr_pn dst;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PHONET_HEADER);
	ph = (struct pnpipehdr *)skb_put(skb, sizeof(*ph) + 4);

	ph->utid = oph->utid;
	ph->message_id = PNS_PEP_CTRL_RESP;
	ph->pipe_handle = oph->pipe_handle;
	ph->data[0] = oph->data[1]; /* CTRL id */
	ph->data[1] = oph->data[0]; /* PEP type */
	ph->data[2] = code; /* error code, at an unusual offset */
	ph->data[3] = PAD;
	ph->data[4] = PAD;

	pn_skb_get_src_sockaddr(oskb, &dst);
	return pn_skb_send(sk, skb, &dst);
}
Example #3
static int ebt_target_snat(struct sk_buff **pskb, unsigned int hooknr,
   const struct net_device *in, const struct net_device *out,
   const void *data, unsigned int datalen)
{
	struct ebt_nat_info *info = (struct ebt_nat_info *) data;

	if (skb_shared(*pskb) || skb_cloned(*pskb)) {
		struct sk_buff *nskb;

		nskb = skb_copy(*pskb, GFP_ATOMIC);
		if (!nskb)
			return NF_DROP;
		if ((*pskb)->sk)
			skb_set_owner_w(nskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = nskb;
	}
	memcpy(eth_hdr(*pskb)->h_source, info->mac, ETH_ALEN);
	if (!(info->target & NAT_ARP_BIT) &&
	    eth_hdr(*pskb)->h_proto == htons(ETH_P_ARP)) {
		struct arphdr _ah, *ap;

		ap = skb_header_pointer(*pskb, 0, sizeof(_ah), &_ah);
		if (ap == NULL)
			return EBT_DROP;
		if (ap->ar_hln != ETH_ALEN)
			goto out;
		if (skb_store_bits(*pskb, sizeof(_ah), info->mac,ETH_ALEN))
			return EBT_DROP;
	}
out:
	return info->target | ~EBT_VERDICT_BITS;
}
Example #4
void ax25_transmit_buffer(ax25_cb *ax25, struct sk_buff *skb, int type)
{
    struct sk_buff *skbn;
    unsigned char *ptr;
    int headroom;

    if (ax25->ax25_dev == NULL) {
        ax25_disconnect(ax25, ENETUNREACH);
        return;
    }

    headroom = ax25_addr_size(ax25->digipeat);

    if (skb_headroom(skb) < headroom) {
        if ((skbn = skb_realloc_headroom(skb, headroom)) == NULL) {
            printk(KERN_CRIT "AX.25: ax25_transmit_buffer - out of memory\n");
            kfree_skb(skb);
            return;
        }

        if (skb->sk != NULL)
            skb_set_owner_w(skbn, skb->sk);

        kfree_skb(skb);
        skb = skbn;
    }

    ptr = skb_push(skb, headroom);

    ax25_addr_build(ptr, &ax25->source_addr, &ax25->dest_addr, ax25->digipeat, type, ax25->modulus);

    ax25_queue_xmit(skb, ax25->ax25_dev->dev);
}
Example #5
int dccp_connect(struct sock *sk)
{
	struct sk_buff *skb;
	struct inet_connection_sock *icsk = inet_csk(sk);

	dccp_connect_init(sk);

	skb = alloc_skb(MAX_DCCP_HEADER + 15, sk->sk_allocation);
	if (unlikely(skb == NULL))
		return -ENOBUFS;

	/* Reserve space for headers. */
	skb_reserve(skb, MAX_DCCP_HEADER);

	DCCP_SKB_CB(skb)->dccpd_type = DCCP_PKT_REQUEST;
	skb->csum = 0;
	skb_set_owner_w(skb, sk);

	BUG_TRAP(sk->sk_send_head == NULL);
	sk->sk_send_head = skb;
	dccp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
	DCCP_INC_STATS(DCCP_MIB_ACTIVEOPENS);

	/* Timer for repeating the REQUEST until an answer. */
	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
				  icsk->icsk_rto, DCCP_RTO_MAX);
	return 0;
}
Example #6
static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
{
	struct pep_sock *pn = pep_sk(sk);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + 4, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER + 4);
	__skb_push(skb, sizeof(*ph) + 4);
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = 0;
	ph->message_id = PNS_PEP_STATUS_IND;
	ph->pipe_handle = pn->pipe_handle;
	ph->pep_type = PN_PEP_TYPE_COMMON;
	ph->data[1] = type;
	ph->data[2] = PAD;
	ph->data[3] = PAD;
	ph->data[4] = status;

	return pn_skb_send(sk, skb, &pipe_srv);
}
Example #7
int ip_vs_make_skb_writable(struct sk_buff **pskb, int writable_len)
{
	struct sk_buff *skb = *pskb;

	/* skb is already used, better copy skb and its payload */
	if (unlikely(skb_shared(skb) || skb->sk))
		goto copy_skb;

	/* skb data is already used, copy it */
	if (unlikely(skb_cloned(skb)))
		goto copy_data;

	return pskb_may_pull(skb, writable_len);

  copy_data:
	if (unlikely(writable_len > skb->len))
		return 0;
	return !pskb_expand_head(skb, 0, 0, GFP_ATOMIC);

  copy_skb:
	if (unlikely(writable_len > skb->len))
		return 0;
	skb = skb_copy(skb, GFP_ATOMIC);
	if (!skb)
		return 0;
	BUG_ON(skb_is_nonlinear(skb));

	/* Rest of kernel will get very unhappy if we pass it a
	   suddenly-orphaned skbuff */
	if ((*pskb)->sk)
		skb_set_owner_w(skb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = skb;
	return 1;
}
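A hypothetical call site for the helper above, ensuring the IPv4 header is writable before it is mangled in place (the NF_DROP fallback and surrounding context are illustrative):

	/* Hypothetical caller: ip_hdrlen() yields the IPv4 header length. */
	if (!ip_vs_make_skb_writable(&skb, ip_hdrlen(skb)))
		return NF_DROP;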
Example #8
static int pep_reply(struct sock *sk, struct sk_buff *oskb,
			u8 code, const void *data, int len, gfp_t priority)
{
	const struct pnpipehdr *oph = pnp_hdr(oskb);
	struct pnpipehdr *ph;
	struct sk_buff *skb;

	skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return -ENOMEM;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	__skb_push(skb, sizeof(*ph));
	skb_reset_transport_header(skb);
	ph = pnp_hdr(skb);
	ph->utid = oph->utid;
	ph->message_id = oph->message_id + 1; /* REQ -> RESP */
	ph->pipe_handle = oph->pipe_handle;
	ph->error_code = code;

	return pn_skb_send(sk, skb, &pipe_srv);
}
Example #9
static int rose_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct net_device_stats *stats = netdev_priv(dev);
	unsigned char *bp = (unsigned char *)skb->data;
	struct sk_buff *skbn;

#ifdef CONFIG_INET
	if (arp_find(bp + 7, skb)) {
		return 1;
	}

	if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
		kfree_skb(skb);
		return 1;
	}

	if (skb->sk != NULL)
		skb_set_owner_w(skbn, skb->sk);

	kfree_skb(skb);

	if (!rose_route_frame(skbn, NULL)) {
		kfree_skb(skbn);
		stats->tx_errors++;
		return 1;
	}

	stats->tx_packets++;
	stats->tx_bytes += skbn->len;
#endif
	return 1;
}
Example #10
struct sk_buff *ax25_rt_build_path(struct sk_buff *skb, ax25_address *src,
	ax25_address *dest, ax25_digi *digi)
{
	struct sk_buff *skbn;
	unsigned char *bp;
	int len;

	len = digi->ndigi * AX25_ADDR_LEN;

	if (skb_headroom(skb) < len) {
		if ((skbn = skb_realloc_headroom(skb, len)) == NULL) {
			printk(KERN_CRIT "AX.25: ax25_dg_build_path - out of memory\n");
			return NULL;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(skbn, skb->sk);

		consume_skb(skb);

		skb = skbn;
	}

	bp = skb_push(skb, len);

	ax25_addr_build(bp, src, dest, digi, AX25_COMMAND, AX25_MODULUS);

	return skb;
}
Example #11
File: llc_if.c Project: 274914765/C
/**
 *    llc_send_disc - Called by upper layer to close a connection
 *    @sk: connection to be closed
 *
 *    Upper layer calls this when it wants to close an established LLC
 *    connection with a remote machine. This function packages a proper event
 *    and sends it to the connection component state machine. Returns 0 for
 *    success, 1 otherwise.
 */
int llc_send_disc(struct sock *sk)
{
    u16 rc = 1;
    struct llc_conn_state_ev *ev;
    struct sk_buff *skb;

    sock_hold(sk);
    if (sk->sk_type != SOCK_STREAM || sk->sk_state != TCP_ESTABLISHED ||
        llc_sk(sk)->state == LLC_CONN_STATE_ADM ||
        llc_sk(sk)->state == LLC_CONN_OUT_OF_SVC)
        goto out;
    /*
     * Postpone unassigning the connection from its SAP and returning the
     * connection until all ACTIONs have been completely executed
     */
    skb = alloc_skb(0, GFP_ATOMIC);
    if (!skb)
        goto out;
    skb_set_owner_w(skb, sk);
    sk->sk_state  = TCP_CLOSING;
    ev          = llc_conn_ev(skb);
    ev->type      = LLC_CONN_EV_TYPE_PRIM;
    ev->prim      = LLC_DISC_PRIM;
    ev->prim_type = LLC_PRIM_TYPE_REQ;
    rc = llc_conn_state_process(sk, skb);
out:
    sock_put(sk);
    return rc;
}
Example #12
/* enqueue @skb on sk_send_head for retransmission, return clone to send now */
static struct sk_buff *dccp_skb_entail(struct sock *sk, struct sk_buff *skb)
{
	skb_set_owner_w(skb, sk);
	WARN_ON(sk->sk_send_head);
	sk->sk_send_head = skb;
	return skb_clone(sk->sk_send_head, gfp_any());
}
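The clone returned above goes straight to the transmit path while the original stays queued for retransmission; a call shaped like the one in dccp_connect() (Example #5) would be:

	/* Sketch: the original skb is retained on sk_send_head, the clone
	 * is transmitted; dccp_transmit_skb() tolerates a NULL clone. */
	dccp_transmit_skb(sk, dccp_skb_entail(sk, skb));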
Example #13
static struct sk_buff *ndisc_alloc_skb(struct net_device *dev,
				       int len)
{
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	struct sock *sk = dev_net(dev)->ipv6.ndisc_sk;
	struct sk_buff *skb;

	skb = alloc_skb(hlen + sizeof(struct ipv6hdr) + len + tlen, GFP_ATOMIC);
	if (!skb) {
		ND_PRINTK(0, err, "ndisc: %s failed to allocate an skb\n",
			  __func__);
		return NULL;
	}

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	skb_reserve(skb, hlen + sizeof(struct ipv6hdr));
	skb_reset_transport_header(skb);

	/* Manually assign socket ownership as we avoid calling
	 * sock_alloc_send_pskb() to bypass wmem buffer limits
	 */
	skb_set_owner_w(skb, sk);

	return skb;
}
Example #14
void x25_kick(struct sock *sk)
{
	struct sk_buff *skb, *skbn;
	unsigned short start, end;
	int modulus;
	struct x25_sock *x25 = x25_sk(sk);

	if (x25->state != X25_STATE_3)
		return;

	if (skb_peek(&x25->interrupt_out_queue) != NULL &&
		!test_and_set_bit(X25_INTERRUPT_FLAG, &x25->flags)) {

		skb = skb_dequeue(&x25->interrupt_out_queue);
		x25_transmit_link(skb, x25->neighbour);
	}

	if (x25->condition & X25_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	modulus = x25->neighbour->extended ? X25_EMODULUS : X25_SMODULUS;

	start   = skb_peek(&x25->ack_queue) ? x25->vs : x25->va;
	end     = (x25->va + x25->facilities.winsize_out) % modulus;

	if (start == end)
		return;

	x25->vs = start;


	skb = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		x25_send_iframe(sk, skbn);

		x25->vs = (x25->vs + 1) % modulus;

		skb_queue_tail(&x25->ack_queue, skb);

	} while (x25->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	x25->vl         = x25->vr;
	x25->condition &= ~X25_COND_ACK_PENDING;

	x25_stop_timer(sk);
}
Example #15
/* When forwarding a packet, we must ensure that we've got enough headroom
 * for the encapsulation packet in the skb.  This also gives us an
 * opportunity to figure out what the payload_len, dsfield, ttl, and df
 * values should be, so that we won't need to look at the old ip header
 * again
 */
static struct sk_buff *
ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
			   unsigned int max_headroom, __u8 *next_protocol,
			   __u32 *payload_len, __u8 *dsfield, __u8 *ttl,
			   __be16 *df)
{
	struct sk_buff *new_skb = NULL;
	struct iphdr *old_iph = NULL;
#ifdef CONFIG_IP_VS_IPV6
	struct ipv6hdr *old_ipv6h = NULL;
#endif

	ip_vs_drop_early_demux_sk(skb);

	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		new_skb = skb_realloc_headroom(skb, max_headroom);
		if (!new_skb)
			goto error;
		if (skb->sk)
			skb_set_owner_w(new_skb, skb->sk);
		consume_skb(skb);
		skb = new_skb;
	}

#ifdef CONFIG_IP_VS_IPV6
	if (skb_af == AF_INET6) {
		old_ipv6h = ipv6_hdr(skb);
		*next_protocol = IPPROTO_IPV6;
		if (payload_len)
			*payload_len =
				ntohs(old_ipv6h->payload_len) +
				sizeof(*old_ipv6h);
		*dsfield = ipv6_get_dsfield(old_ipv6h);
		*ttl = old_ipv6h->hop_limit;
		if (df)
			*df = 0;
	} else
#endif
	{
		old_iph = ip_hdr(skb);
		/* Copy DF, reset fragment offset and MF */
		if (df)
			*df = (old_iph->frag_off & htons(IP_DF));
		*next_protocol = IPPROTO_IPIP;

		/* fix old IP header checksum */
		ip_send_check(old_iph);
		*dsfield = ipv4_get_dsfield(old_iph);
		*ttl = old_iph->ttl;
		if (payload_len)
			*payload_len = ntohs(old_iph->tot_len);
	}

	return skb;
error:
	kfree_skb(skb);
	return ERR_PTR(-ENOMEM);
}
Example #16
void rose_kick(struct sock *sk)
{
	struct rose_sock *rose = rose_sk(sk);
	struct sk_buff *skb, *skbn;
	unsigned short start, end;

	if (rose->state != ROSE_STATE_3)
		return;

	if (rose->condition & ROSE_COND_PEER_RX_BUSY)
		return;

	if (!skb_peek(&sk->sk_write_queue))
		return;

	start = (skb_peek(&rose->ack_queue) == NULL) ? rose->va : rose->vs;
	end   = (rose->va + sysctl_rose_window_size) % ROSE_MODULUS;

	if (start == end)
		return;

	rose->vs = start;

	/*
	 * Transmit data until either we're out of data to send or
	 * the window is full.
	 */

	skb  = skb_dequeue(&sk->sk_write_queue);

	do {
		if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
			skb_queue_head(&sk->sk_write_queue, skb);
			break;
		}

		skb_set_owner_w(skbn, sk);

		/*
		 * Transmit the frame copy.
		 */
		rose_send_iframe(sk, skbn);

		rose->vs = (rose->vs + 1) % ROSE_MODULUS;

		/*
		 * Requeue the original data frame.
		 */
		skb_queue_tail(&rose->ack_queue, skb);

	} while (rose->vs != end &&
		 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL);

	rose->vl         = rose->vr;
	rose->condition &= ~ROSE_COND_ACK_PENDING;

	rose_stop_timer(sk);
}
Example #17
static unsigned int
target(struct sk_buff **pskb,
       const struct net_device *in, const struct net_device *out,
       unsigned int hooknum, const struct xt_target *target,
       const void *targinfo)
{
	const struct arpt_mangle *mangle = targinfo;
	struct arphdr *arp;
	unsigned char *arpptr;
	int pln, hln;

	if (skb_shared(*pskb) || skb_cloned(*pskb)) {
		struct sk_buff *nskb;

		nskb = skb_copy(*pskb, GFP_ATOMIC);
		if (!nskb)
			return NF_DROP;
		if ((*pskb)->sk)
			skb_set_owner_w(nskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = nskb;
	}

	arp = (*pskb)->nh.arph;
	arpptr = (*pskb)->nh.raw + sizeof(*arp);
	pln = arp->ar_pln;
	hln = arp->ar_hln;
	/* We assume that pln and hln were checked in the match */
	if (mangle->flags & ARPT_MANGLE_SDEV) {
		if (ARPT_DEV_ADDR_LEN_MAX < hln ||
		   (arpptr + hln > (**pskb).tail))
			return NF_DROP;
		memcpy(arpptr, mangle->src_devaddr, hln);
	}
	arpptr += hln;
	if (mangle->flags & ARPT_MANGLE_SIP) {
		if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
		   (arpptr + pln > (**pskb).tail))
			return NF_DROP;
		memcpy(arpptr, &mangle->u_s.src_ip, pln);
	}
	arpptr += pln;
	if (mangle->flags & ARPT_MANGLE_TDEV) {
		if (ARPT_DEV_ADDR_LEN_MAX < hln ||
		   (arpptr + hln > (**pskb).tail))
			return NF_DROP;
		memcpy(arpptr, mangle->tgt_devaddr, hln);
	}
	arpptr += hln;
	if (mangle->flags & ARPT_MANGLE_TIP) {
		if (ARPT_MANGLE_ADDR_LEN_MAX < pln ||
		   (arpptr + pln > (**pskb).tail))
			return NF_DROP;
		memcpy(arpptr, &mangle->u_t.tgt_ip, pln);
	}
	return mangle->target;
}
Example #18
static int restore_unix_rqueue(struct sock *sk, struct cpt_sock_image *si,
                               loff_t pos, struct cpt_context *ctx)
{
    loff_t endpos;

    pos = pos + si->cpt_hdrlen;
    endpos = pos + si->cpt_next;
    while (pos < endpos) {
        struct sk_buff *skb;
        struct sock *owner_sk;
        __u32 owner;

        skb = rst_skb(sk, &pos, &owner, NULL, ctx);
        if (IS_ERR(skb)) {
            if (PTR_ERR(skb) == -EINVAL) {
                int err;

                err = rst_sock_attr(&pos, sk, ctx);
                if (err)
                    return err;
            }
            return PTR_ERR(skb);
        }

        owner_sk = unix_peer(sk);
        if (owner != -1) {
            cpt_object_t *pobj;
            pobj = lookup_cpt_obj_byindex(CPT_OBJ_SOCKET, owner, ctx);
            if (pobj == NULL) {
                eprintk_ctx("orphan af_unix skb?\n");
                kfree_skb(skb);
                continue;
            }
            owner_sk = pobj->o_obj;
        }
        if (owner_sk == NULL) {
            dprintk_ctx("orphan af_unix skb 2?\n");
            kfree_skb(skb);
            continue;
        }
        skb_set_owner_w(skb, owner_sk);
        if (UNIXCB(skb).fp)
            skb->destructor = unix_destruct_fds;
        skb_queue_tail(&sk->sk_receive_queue, skb);
        if (sk->sk_state == TCP_LISTEN) {
            struct socket *sock = skb->sk->sk_socket;
            if (sock == NULL) BUG();
            if (sock->file) BUG();
            skb->sk->sk_socket = NULL;
            skb->sk->sk_sleep = NULL;
            sock->sk = NULL;
            sock_release(sock);
        }
    }
    return 0;
}
Example #19
File: sock.c Project: nhanh0/hah
/*
 * Allocate a skb from the socket's send buffer.
 */
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force, int priority)
{
	if (force || atomic_read(&sk->wmem_alloc) < sk->sndbuf) {
		struct sk_buff * skb = alloc_skb(size, priority);
		if (skb) {
			skb_set_owner_w(skb, sk);
			return skb;
		}
	}
	return NULL;
}
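A minimal caller-side sketch for sock_wmalloc(), assuming a connected socket; HDR_LEN and len are hypothetical. With force set to 0, the call fails once the socket's send buffer is exhausted.

	/* Hypothetical usage: charge the frame to sk's send buffer. */
	struct sk_buff *skb = sock_wmalloc(sk, HDR_LEN + len, 0, GFP_KERNEL);

	if (!skb)
		return -ENOBUFS;
	skb_reserve(skb, HDR_LEN);	/* leave room for protocol headers */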
Example #20
void lapb_kick(lapb_cb *lapb)
{
	struct sk_buff *skb, *skbn;
	unsigned short modulus, start, end;

	modulus = (lapb->mode & LAPB_EXTENDED) ? LAPB_EMODULUS : LAPB_SMODULUS;

	start = (skb_peek(&lapb->ack_queue) == NULL) ? lapb->va : lapb->vs;
	end   = (lapb->va + lapb->window) % modulus;

	if (!(lapb->condition & LAPB_PEER_RX_BUSY_CONDITION) &&
	    start != end                                &&
	    skb_peek(&lapb->write_queue) != NULL) {

		lapb->vs = start;

		/*
		 * Dequeue the frame and copy it.
		 */
		skb  = skb_dequeue(&lapb->write_queue);

		do {
			if ((skbn = skb_clone(skb, GFP_ATOMIC)) == NULL) {
				skb_queue_head(&lapb->write_queue, skb);
				break;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(skbn, skb->sk);

			/*
			 * Transmit the frame copy.
			 */
			lapb_send_iframe(lapb, skbn, LAPB_POLLOFF);

			lapb->vs = (lapb->vs + 1) % modulus;

			/*
			 * Requeue the original data frame.
			 */
			skb_queue_tail(&lapb->ack_queue, skb);

		} while (lapb->vs != end && (skb = skb_dequeue(&lapb->write_queue)) != NULL);

		lapb->condition &= ~LAPB_ACK_PENDING_CONDITION;

		if (!lapb_t1timer_running(lapb))
			lapb_start_t1timer(lapb);
	}
}
Example #21
static int ipq_mangle_ipv4(ipq_verdict_msg_t *v, ipq_queue_element_t *e)
{
	int diff;
	struct iphdr *user_iph = (struct iphdr *)v->payload;

	if (v->data_len < sizeof(*user_iph))
		return 0;
	diff = v->data_len - e->skb->len;
	if (diff < 0)
		skb_trim(e->skb, v->data_len);
	else if (diff > 0) {
		if (v->data_len > 0xFFFF)
			return -EINVAL;
		if (diff > skb_tailroom(e->skb)) {
			struct sk_buff *newskb;
			
			newskb = skb_copy_expand(e->skb,
			                         skb_headroom(e->skb),
			                         diff,
			                         GFP_ATOMIC);
			if (newskb == NULL) {
				printk(KERN_WARNING "ip_queue: OOM "
				      "in mangle, dropping packet\n");
				return -ENOMEM;
			}
			if (e->skb->sk)
				skb_set_owner_w(newskb, e->skb->sk);
			kfree_skb(e->skb);
			e->skb = newskb;
		}
		skb_put(e->skb, diff);
	}
	memcpy(e->skb->data, v->payload, v->data_len);
	e->skb->nfcache |= NFC_ALTERED;

	/*
	 * Extra routing may be needed on local out, as the QUEUE target never
	 * returns control to the table.
	 */
	if (e->info->hook == NF_IP_LOCAL_OUT) {
		struct iphdr *iph = e->skb->nh.iph;

		if (!(iph->tos == e->rt_info.tos
		      && iph->daddr == e->rt_info.daddr
		      && iph->saddr == e->rt_info.saddr))
			return route_me_harder(e->skb);
	}
	return 0;
}
Example #22
static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
					int len, gfp_t priority)
{
	struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
	if (!skb)
		return NULL;
	skb_set_owner_w(skb, sk);

	skb_reserve(skb, MAX_PNPIPE_HEADER);
	__skb_put(skb, len);
	skb_copy_to_linear_data(skb, payload, len);
	__skb_push(skb, sizeof(struct pnpipehdr));
	skb_reset_transport_header(skb);
	return skb;
}
Example #23
/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}
Example #24
/* Unusual, but possible case. */
static int enlarge_skb(struct sk_buff **pskb, unsigned int extra)
{
	struct sk_buff *nskb;

	if ((*pskb)->len + extra > 65535)
		return 0;

	nskb = skb_copy_expand(*pskb, skb_headroom(*pskb), extra, GFP_ATOMIC);
	if (!nskb)
		return 0;

	/* Transfer socket to new skb. */
	if ((*pskb)->sk)
		skb_set_owner_w(nskb, (*pskb)->sk);
	kfree_skb(*pskb);
	*pskb = nskb;
	return 1;
}
Example #25
struct sk_buff *dn_alloc_skb(struct sock *sk, int size, gfp_t pri)
{
	struct sk_buff *skb;
	int hdr = 64;

	if ((skb = alloc_skb(size + hdr, pri)) == NULL)
		return NULL;

	skb->protocol = htons(ETH_P_DNA_RT);
	skb->pkt_type = PACKET_OUTGOING;

	if (sk)
		skb_set_owner_w(skb, sk);

	skb_reserve(skb, hdr);

	return skb;
}
Example #26
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	unsigned seq;

	/* Be paranoid, rather than too clever. */
//	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
	if (unlikely(skb_headroom(skb) < hh_len &&   (dev->header_ops && dev->header_ops->create) )) {

		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		do {
			int hh_alen;

			seq = read_seqbegin(&hh->hh_lock);
			hh_alen = HH_DATA_ALIGN(hh->hh_len);
			memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		} while (read_seqretry(&hh->hh_lock, seq));
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example #27
static int ebt_target_dnat(struct sk_buff **pskb, unsigned int hooknr,
   const struct net_device *in, const struct net_device *out,
   const void *data, unsigned int datalen)
{
	struct ebt_nat_info *info = (struct ebt_nat_info *)data;

	if (skb_shared(*pskb) || skb_cloned(*pskb)) {
		struct sk_buff *nskb;

		nskb = skb_copy(*pskb, GFP_ATOMIC);
		if (!nskb)
			return NF_DROP;
		if ((*pskb)->sk)
			skb_set_owner_w(nskb, (*pskb)->sk);
		kfree_skb(*pskb);
		*pskb = nskb;
	}
	memcpy(eth_hdr(*pskb)->h_dest, info->mac, ETH_ALEN);
	return info->target;
}
Example #28
/**
 *	llc_alloc_frame - allocates sk_buff for frame
 *	@dev: network device this skb will be sent over
 *	@type: pdu type to allocate
 *	@data_size: data size to allocate
 *
 *	Allocates an sk_buff for a frame and initializes its fields.
 *	Returns allocated skb or %NULL when out of memory.
 */
struct sk_buff *llc_alloc_frame(struct sock *sk, struct net_device *dev,
				u8 type, u32 data_size)
{
	int hlen = type == LLC_PDU_TYPE_U ? 3 : 4;
	struct sk_buff *skb;

	hlen += llc_mac_header_len(dev->type);
	skb = alloc_skb(hlen + data_size, GFP_ATOMIC);

	if (skb) {
		skb_reset_mac_header(skb);
		skb_reserve(skb, hlen);
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		skb->protocol = htons(ETH_P_802_2);
		skb->dev      = dev;
		if (sk != NULL)
			skb_set_owner_w(skb, sk);
	}
	return skb;
}
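A hypothetical call site for the allocator above, building a U-format PDU over dev; data_len is illustrative, and sk may be NULL for frames not owned by a socket.

	/* Hypothetical caller: no ownership is assigned when sk is NULL. */
	struct sk_buff *skb = llc_alloc_frame(sk, dev, LLC_PDU_TYPE_U, data_len);

	if (!skb)
		return -ENOBUFS;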
Example #29
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len )) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		write_seqlock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		write_sequnlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			pr_debug("ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example #30
/*
 * Direct send packets to output.
 * Stolen from ip_finish_output2.
 */
static inline int bcm_fast_path_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);
	int ret = 0;

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, hh_len);
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		ret = neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		ret = dst->neighbour->output(skb);
	else {
#ifdef DEBUG
		if (net_ratelimit())
			printk(KERN_DEBUG "bcm_fast_path_output: No header cache and no neighbour!\n");
#endif
		kfree_skb(skb);
		return -EINVAL;
	}

	/* Don't return 1 */
	return (ret == 1) ? 0 : ret;
}