Code Example #1
File: act_nat.c Project: acassis/emlinux-ssd1935
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat *opt;
	struct tcf_t t;
	int s;

	s = sizeof(*opt);

	/* netlink spinlocks held above us - must use ATOMIC */
	opt = kzalloc(s, GFP_ATOMIC);
	if (unlikely(!opt))
		return -ENOBUFS;

	opt->old_addr = p->old_addr;
	opt->new_addr = p->new_addr;
	opt->mask = p->mask;
	opt->flags = p->flags;

	opt->index = p->tcf_index;
	opt->action = p->tcf_action;
	opt->refcnt = p->tcf_refcnt - ref;
	opt->bindcnt = p->tcf_bindcnt - bind;

	RTA_PUT(skb, TCA_NAT_PARMS, s, opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	RTA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	kfree(opt);

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	kfree(opt);
	return -1;
}
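This save-then-roll-back idiom recurs throughout the examples that follow: the dump callback records skb_tail_pointer() before emitting any attributes, and on failure nlmsg_trim() rewinds the message to that saved mark. A minimal sketch of the pattern in isolation, assuming a hypothetical attribute type MY_ATTR_PARMS and parameter struct (newer kernels use nla_put() where this example uses the legacy RTA_PUT() macro):

static int example_dump(struct sk_buff *skb, const struct my_parms *parms)
{
	/* Remember the current tail so a partial dump can be undone. */
	unsigned char *b = skb_tail_pointer(skb);

	/* MY_ATTR_PARMS and struct my_parms are hypothetical. */
	if (nla_put(skb, MY_ATTR_PARMS, sizeof(*parms), parms))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	/* Rewind the buffer to the saved tail mark. */
	nlmsg_trim(skb, b);
	return -1;
}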
Code Example #2
File: if_usb.c Project: nocl/linux-libre
static int __if_usb_submit_rx_urb(struct if_usb_card *cardp,
				  void (*callbackfn)(struct urb *urb))
{
	struct sk_buff *skb;
	int ret = -1;

	lbtf_deb_enter(LBTF_DEB_USB);

	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
	if (!skb) {
		pr_err("No free skb\n");
		lbtf_deb_leave(LBTF_DEB_USB);
		return -1;
	}

	cardp->rx_skb = skb;

	/* Fill the receive configuration URB and initialise the Rx call back */
	usb_fill_bulk_urb(cardp->rx_urb, cardp->udev,
			  usb_rcvbulkpipe(cardp->udev, cardp->ep_in),
			  skb_tail_pointer(skb),
			  MRVDRV_ETH_RX_PACKET_BUFFER_SIZE, callbackfn, cardp);

	cardp->rx_urb->transfer_flags |= URB_ZERO_PACKET;

	lbtf_deb_usb2(&cardp->udev->dev, "Pointer for rx_urb %p\n",
		cardp->rx_urb);
	ret = usb_submit_urb(cardp->rx_urb, GFP_ATOMIC);
	if (ret) {
		lbtf_deb_usbd(&cardp->udev->dev,
			"Submit Rx URB failed: %d\n", ret);
		kfree_skb(skb);
		cardp->rx_skb = NULL;
		lbtf_deb_leave(LBTF_DEB_USB);
		return -1;
	} else {
		lbtf_deb_usb2(&cardp->udev->dev, "Submit Rx URB success\n");
		lbtf_deb_leave(LBTF_DEB_USB);
		return 0;
	}
}
Code Example #3
struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
			     struct cfpkt *addpkt,
			     u16 expectlen)
{
	struct sk_buff *dst = pkt_to_skb(dstpkt);
	struct sk_buff *add = pkt_to_skb(addpkt);
	u16 addlen = skb_headlen(add);
	u16 neededtailspace;
	struct sk_buff *tmp;
	u16 dstlen;
	u16 createlen;
	if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
		cfpkt_destroy(addpkt);
		return dstpkt;
	}
	if (expectlen > addlen)
		neededtailspace = expectlen;
	else
		neededtailspace = addlen;

	if (dst->tail + neededtailspace > dst->end) {
		/* Create a duplicate of 'dst' with more tail space */
		struct cfpkt *tmppkt;
		dstlen = skb_headlen(dst);
		createlen = dstlen + neededtailspace;
		tmppkt = cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX);
		if (tmppkt == NULL)
			return NULL;
		tmp = pkt_to_skb(tmppkt);
		skb_set_tail_pointer(tmp, dstlen);
		tmp->len = dstlen;
		memcpy(tmp->data, dst->data, dstlen);
		cfpkt_destroy(dstpkt);
		dst = tmp;
	}
	memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
	cfpkt_destroy(addpkt);
	dst->tail += addlen;
	dst->len += addlen;
	return skb_to_pkt(dst);
}
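The manual bookkeeping at the end of cfpkt_append() (memcpy into skb_tail_pointer(dst), then bumping dst->tail and dst->len by hand) is essentially what skb_put() does in one step. A hedged equivalent, reusing the variables from the example above:

	/* skb_put() advances the tail, adjusts len, and returns a pointer
	 * to the old tail, i.e. the destination of the copy. */
	memcpy(skb_put(dst, addlen), add->data, addlen);
	cfpkt_destroy(addpkt);
	return skb_to_pkt(dst);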
Code Example #4
File: act_bpf.c Project: AlexShiLucky/linux
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = to_bpf(act);
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = refcount_read(&prog->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&prog->tcf_bindcnt) - bind,
	};
	struct tcf_t tm;
	int ret;

	spin_lock_bh(&prog->tcf_lock);
	opt.action = prog->tcf_action;
	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tcf_tm_dump(&tm, &prog->tcf_tm);
	if (nla_put_64bit(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm,
			  TCA_ACT_BPF_PAD))
		goto nla_put_failure;

	spin_unlock_bh(&prog->tcf_lock);
	return skb->len;

nla_put_failure:
	spin_unlock_bh(&prog->tcf_lock);
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
Code Example #5
File: p54pci.c Project: 274914765/C
static void p54p_refill_rx_ring(struct ieee80211_hw *dev)
{
    struct p54p_priv *priv = dev->priv;
    struct p54p_ring_control *ring_control = priv->ring_control;
    u32 limit, host_idx, idx;

    host_idx = le32_to_cpu(ring_control->host_idx[0]);
    limit = host_idx;
    limit -= le32_to_cpu(ring_control->device_idx[0]);
    limit = ARRAY_SIZE(ring_control->rx_data) - limit;

    idx = host_idx % ARRAY_SIZE(ring_control->rx_data);
    while (limit-- > 1) {
        struct p54p_desc *desc = &ring_control->rx_data[idx];

        if (!desc->host_addr) {
            struct sk_buff *skb;
            dma_addr_t mapping;
            skb = dev_alloc_skb(MAX_RX_SIZE);
            if (!skb)
                break;

            mapping = pci_map_single(priv->pdev,
                         skb_tail_pointer(skb),
                         MAX_RX_SIZE,
                         PCI_DMA_FROMDEVICE);
            desc->host_addr = cpu_to_le32(mapping);
            desc->device_addr = 0;    // FIXME: necessary?
            desc->len = cpu_to_le16(MAX_RX_SIZE);
            desc->flags = 0;
            priv->rx_buf[idx] = skb;
        }

        idx++;
        host_idx++;
        idx %= ARRAY_SIZE(ring_control->rx_data);
    }

    wmb();
    ring_control->host_idx[0] = cpu_to_le32(host_idx);
}
Code Example #6
File: sch_sfq.c Project: Mr-Aloof/wl500g
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period;
	opt.limit = q->limit;
	opt.divisor = q->hash_divisor;
	opt.flows = q->depth;
	opt.hash_kind = q->hash_kind;

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #7
File: sch_sfq.c Project: NKSG/INTER_MANET_NS3
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #8
static int tcf_ipt_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ipt *ipt = a->priv;
	struct xt_entry_target *t;
	struct tcf_t tm;
	struct tc_cnt c;

	/* for simple targets kernel size == user size
	 * user name = target name
	 * for foolproof you need to not assume this
	 */

	t = kmemdup(ipt->tcfi_t, ipt->tcfi_t->u.user.target_size, GFP_ATOMIC);
	if (unlikely(!t))
		goto nla_put_failure;

	c.bindcnt = ipt->tcf_bindcnt - bind;
	c.refcnt = ipt->tcf_refcnt - ref;
	strcpy(t->u.user.name, ipt->tcfi_t->u.kernel.target->name);

	if (nla_put(skb, TCA_IPT_TARG, ipt->tcfi_t->u.user.target_size, t) ||
	    nla_put_u32(skb, TCA_IPT_INDEX, ipt->tcf_index) ||
	    nla_put_u32(skb, TCA_IPT_HOOK, ipt->tcfi_hook) ||
	    nla_put(skb, TCA_IPT_CNT, sizeof(struct tc_cnt), &c) ||
	    nla_put_string(skb, TCA_IPT_TABLE, ipt->tcfi_tname))
		goto nla_put_failure;
	tm.install = jiffies_to_clock_t(jiffies - ipt->tcf_tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - ipt->tcf_tm.lastuse);
	tm.expires = jiffies_to_clock_t(ipt->tcf_tm.expires);
	if (nla_put(skb, TCA_IPT_TM, sizeof (tm), &tm))
		goto nla_put_failure;
	kfree(t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(t);
	return -1;
}
Code Example #9
File: act_bpf.c Project: RealJohnGalt/linux
static int tcf_bpf_dump(struct sk_buff *skb, struct tc_action *act,
			int bind, int ref)
{
	unsigned char *tp = skb_tail_pointer(skb);
	struct tcf_bpf *prog = act->priv;
	struct tc_act_bpf opt = {
		.index   = prog->tcf_index,
		.refcnt  = prog->tcf_refcnt - ref,
		.bindcnt = prog->tcf_bindcnt - bind,
		.action  = prog->tcf_action,
	};
	struct tcf_t tm;
	int ret;

	if (nla_put(skb, TCA_ACT_BPF_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	if (tcf_bpf_is_ebpf(prog))
		ret = tcf_bpf_dump_ebpf_info(prog, skb);
	else
		ret = tcf_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	tm.install = jiffies_to_clock_t(jiffies - prog->tcf_tm.install);
	tm.lastuse = jiffies_to_clock_t(jiffies - prog->tcf_tm.lastuse);
	tm.expires = jiffies_to_clock_t(prog->tcf_tm.expires);

	if (nla_put(skb, TCA_ACT_BPF_TM, sizeof(tm), &tm))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, tp);
	return -1;
}

static const struct nla_policy act_bpf_policy[TCA_ACT_BPF_MAX + 1] = {
	[TCA_ACT_BPF_PARMS]	= { .len = sizeof(struct tc_act_bpf) },
Code Example #10
File: output_core.c Project: RobinSystems/linux-3.13
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb_tail_pointer(skb) -
		skb_network_header(skb);
	int found_rhdr = 0;
	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if IS_ENABLED(CONFIG_IPV6_MIP6)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default :
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
Code Example #11
File: p54usb.c Project: Tigrouzen/k1099
static void p54u_rx_cb(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb;
	struct ieee80211_hw *dev = info->dev;
	struct p54u_priv *priv = dev->priv;

	if (unlikely(urb->status)) {
		info->urb = NULL;
		usb_free_urb(urb);
		return;
	}

	skb_unlink(skb, &priv->rx_queue);
	skb_put(skb, urb->actual_length);
	if (!priv->hw_type)
		skb_pull(skb, sizeof(struct net2280_tx_hdr));

	if (p54_rx(dev, skb)) {
		skb = dev_alloc_skb(MAX_RX_SIZE);
		if (unlikely(!skb)) {
			usb_free_urb(urb);
			/* TODO check rx queue length and refill *somewhere* */
			return;
		}

		info = (struct p54u_rx_info *) skb->cb;
		info->urb = urb;
		info->dev = dev;
		urb->transfer_buffer = skb_tail_pointer(skb);
		urb->context = skb;
		skb_queue_tail(&priv->rx_queue, skb);
	} else {
		skb_trim(skb, 0);
		skb_queue_tail(&priv->rx_queue, skb);
	}

	usb_submit_urb(urb, GFP_ATOMIC);
}
Code Example #12
File: datagram.c Project: LILA-99/linux-2.6
void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	struct ip6_mtuinfo *mtu_info;

	if (!np->rxopt.bits.rxpmtu)
		return;

	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	ipv6_addr_copy(&iph->daddr, &fl6->daddr);

	mtu_info = IP6CBMTU(skb);
	if (!mtu_info) {
		kfree_skb(skb);
		return;
	}

	mtu_info->ip6m_mtu = mtu;
	mtu_info->ip6m_addr.sin6_family = AF_INET6;
	mtu_info->ip6m_addr.sin6_port = 0;
	mtu_info->ip6m_addr.sin6_flowinfo = 0;
	mtu_info->ip6m_addr.sin6_scope_id = fl6->flowi6_oif;
	ipv6_addr_copy(&mtu_info->ip6m_addr.sin6_addr, &ipv6_hdr(skb)->daddr);

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	skb = xchg(&np->rxpmtu, skb);
	kfree_skb(skb);
}
Code Example #13
void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sock_exterr_skb *serr;
	struct ipv6hdr *iph;
	struct sk_buff *skb;

	if (!np->recverr)
		return;

	skb = alloc_skb(sizeof(struct ipv6hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb->protocol = htons(ETH_P_IPV6);

	skb_put(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);
	iph->daddr = fl6->daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = fl6->fl6_dport;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
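In this example and the previous one, __skb_pull(skb, skb_tail_pointer(skb) - skb->data) advances skb->data all the way to the current tail, so no unread payload remains before skb_reset_transport_header() is called. A short sketch of the same idiom wrapped in a helper (the helper name is hypothetical, not a kernel API):

/* Hypothetical helper: consume all remaining linear data so that
 * skb->data == skb_tail_pointer(skb), then mark the transport header. */
static inline void skb_consume_linear(struct sk_buff *skb)
{
	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);
}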
Code Example #14
File: sch_prio.c Project: AlexShiLucky/linux
static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct prio_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_prio_qopt opt;
	int err;

	opt.bands = q->bands;
	memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX + 1);

	err = prio_dump_offload(sch);
	if (err)
		goto nla_put_failure;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #15
File: gen_stats.c Project: vaibhav92/linux
/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
    if (d->tail)
        d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

    if (d->compat_tc_stats)
        if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
                            sizeof(d->tc_stats)) < 0)
            return -1;

    if (d->compat_xstats && d->xstats) {
        if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
                            d->xstats_len) < 0)
            return -1;
    }

    kfree(d->xstats);
    d->xstats = NULL;
    d->xstats_len = 0;
    spin_unlock_bh(d->lock);
    return 0;
}
Code Example #16
static int rtl8180_init_rx_ring(struct ieee80211_hw *dev)
{
	struct rtl8180_priv *priv = dev->priv;
	struct rtl8180_rx_desc *entry;
	int i;

	priv->rx_ring = pci_alloc_consistent(priv->pdev,
					     sizeof(*priv->rx_ring) * 32,
					     &priv->rx_ring_dma);

	if (!priv->rx_ring || (unsigned long)priv->rx_ring & 0xFF) {
		printk(KERN_ERR "%s: Cannot allocate RX ring\n",
		       wiphy_name(dev->wiphy));
		return -ENOMEM;
	}

	memset(priv->rx_ring, 0, sizeof(*priv->rx_ring) * 32);
	priv->rx_idx = 0;

	for (i = 0; i < 32; i++) {
		struct sk_buff *skb = dev_alloc_skb(MAX_RX_SIZE);
		dma_addr_t *mapping;
		entry = &priv->rx_ring[i];
		if (!skb)
			return 0;

		priv->rx_buf[i] = skb;
		mapping = (dma_addr_t *)skb->cb;
		*mapping = pci_map_single(priv->pdev, skb_tail_pointer(skb),
					  MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
		entry->rx_buf = cpu_to_le32(*mapping);
		entry->flags = cpu_to_le32(RTL818X_RX_DESC_FLAG_OWN |
					   MAX_RX_SIZE);
	}
	entry->flags |= cpu_to_le32(RTL818X_RX_DESC_FLAG_EOR);
	return 0;
}
Code Example #17
File: ip_sockglue.c Project: AshishNamdev/linux
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 port, u32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sock_exterr_skb *serr;
	struct iphdr *iph;
	struct sk_buff *skb;

	if (!inet->recverr)
		return;

	skb = alloc_skb(sizeof(struct iphdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->daddr = daddr;

	serr = SKB_EXT_ERR(skb);
	serr->ee.ee_errno = err;
	serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
	serr->ee.ee_type = 0;
	serr->ee.ee_code = 0;
	serr->ee.ee_pad = 0;
	serr->ee.ee_info = info;
	serr->ee.ee_data = 0;
	serr->addr_offset = (u8 *)&iph->daddr - skb_network_header(skb);
	serr->port = port;

	__skb_pull(skb, skb_tail_pointer(skb) - skb->data);
	skb_reset_transport_header(skb);

	if (sock_queue_err_skb(sk, skb))
		kfree_skb(skb);
}
Code Example #18
File: sch_sfq.c Project: AkyZero/wrapfs-latest
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #19
File: act_simple.c Project: NKSG/INTER_MANET_NS3
static inline int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
				int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_defact *d = a->priv;
	struct tc_defact opt;
	struct tcf_t t;

	opt.index = d->tcf_index;
	opt.refcnt = d->tcf_refcnt - ref;
	opt.bindcnt = d->tcf_bindcnt - bind;
	opt.action = d->tcf_action;
	NLA_PUT(skb, TCA_DEF_PARMS, sizeof(opt), &opt);
	NLA_PUT_STRING(skb, TCA_DEF_DATA, d->tcfd_defdata);
	t.install = jiffies_to_clock_t(jiffies - d->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - d->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(d->tcf_tm.expires);
	NLA_PUT(skb, TCA_DEF_TM, sizeof(t), &t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #20
File: ks8842.c Project: mikuhatsune001/linux2.6.32
static int ks8842_tx_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct ks8842_adapter *adapter = netdev_priv(netdev);
	int len = skb->len;
	u32 *ptr = (u32 *)skb->data;
	u32 ctrl;

	dev_dbg(&adapter->pdev->dev,
		"%s: len %u head %p data %p tail %p end %p\n",
		__func__, skb->len, skb->head, skb->data,
		skb_tail_pointer(skb), skb_end_pointer(skb));

	/* check FIFO buffer space, we need space for CRC and command bits */
	if (ks8842_tx_fifo_space(adapter) < len + 8)
		return NETDEV_TX_BUSY;

	/* the control word, enable IRQ, port 1 and the length */
	ctrl = 0x8000 | 0x100 | (len << 16);
	ks8842_write32(adapter, 17, ctrl, REG_QMU_DATA_LO);

	netdev->stats.tx_bytes += len;

	/* copy buffer */
	while (len > 0) {
		iowrite32(*ptr, adapter->hw_addr + REG_QMU_DATA_LO);
		len -= sizeof(u32);
		ptr++;
	}

	/* enqueue packet */
	ks8842_write16(adapter, 17, 1, REG_TXQCR);

	dev_kfree_skb(skb);

	return NETDEV_TX_OK;
}
Code Example #21
File: act_mirred.c Project: cilynx/dd-wrt
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = a->priv;
	struct tc_mirred opt;
	struct tcf_t t;

	opt.index = m->tcf_index;
	opt.action = m->tcf_action;
	opt.refcnt = m->tcf_refcnt - ref;
	opt.bindcnt = m->tcf_bindcnt - bind;
	opt.eaction = m->tcfm_eaction;
	opt.ifindex = m->tcfm_ifindex;
	RTA_PUT(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt);
	t.install = jiffies_to_clock_t(jiffies - m->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - m->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(m->tcf_tm.expires);
	RTA_PUT(skb, TCA_MIRRED_TM, sizeof(t), &t);
	return skb->len;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -1;
}
Code Example #22
File: dxe.c Project: Lyude/linux
static int wcn36xx_dxe_fill_skb(struct device *dev,
				struct wcn36xx_dxe_ctl *ctl,
				gfp_t gfp)
{
	struct wcn36xx_dxe_desc *dxe = ctl->desc;
	struct sk_buff *skb;

	skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
	if (skb == NULL)
		return -ENOMEM;

	dxe->dst_addr_l = dma_map_single(dev,
					 skb_tail_pointer(skb),
					 WCN36XX_PKT_SIZE,
					 DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dxe->dst_addr_l)) {
		dev_err(dev, "unable to map skb\n");
		kfree_skb(skb);
		return -ENOMEM;
	}
	ctl->skb = skb;

	return 0;
}
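In this example skb_tail_pointer() is the start of the tailroom of a freshly allocated skb (data and tail still coincide), and that address becomes the DMA target the device writes into. The matching completion step is not part of this excerpt; a hedged sketch of what it would typically look like (rx_len and dev here are assumptions, not taken from this driver):

	/* RX completion sketch: release the mapping and account for the
	 * bytes the device wrote into the tailroom. */
	dma_unmap_single(dev, dxe->dst_addr_l, WCN36XX_PKT_SIZE, DMA_FROM_DEVICE);
	skb_put(ctl->skb, rx_len);	/* rx_len as reported by the hardware (assumed) */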
Code Example #23
static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
{
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_rx_desc *entry = NULL;
	int i, rx_queue_idx;
	u8 tmp_one = 1;

	/*
	 *rx_queue_idx 0:RX_MPDU_QUEUE
	 *rx_queue_idx 1:RX_CMD_QUEUE
	 */
	for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
	     rx_queue_idx++) {
		rtlpci->rx_ring[rx_queue_idx].desc =
		    pci_alloc_consistent(rtlpci->pdev,
					 sizeof(*rtlpci->rx_ring[rx_queue_idx].
						desc) * rtlpci->rxringcount,
					 &rtlpci->rx_ring[rx_queue_idx].dma);

		if (!rtlpci->rx_ring[rx_queue_idx].desc ||
		    (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("Cannot allocate RX ring\n"));
			return -ENOMEM;
		}

		memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
		       sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
		       rtlpci->rxringcount);

		rtlpci->rx_ring[rx_queue_idx].idx = 0;

		for (i = 0; i < rtlpci->rxringcount; i++) {
			struct sk_buff *skb =
			    dev_alloc_skb(rtlpci->rxbuffersize);
			u32 bufferaddress;
			if (!skb)
				return 0;
			entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];

			/*skb->dev = dev; */

			rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;

			/*
			 *just set skb->cb to mapping addr
			 *for pci_unmap_single use
			 */
			*((dma_addr_t *) skb->cb) =
			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
					   rtlpci->rxbuffersize,
					   PCI_DMA_FROMDEVICE);

			bufferaddress = (u32)(*((dma_addr_t *)skb->cb));
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXBUFF_ADDR,
						    (u8 *)&bufferaddress);
			rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
						    HW_DESC_RXPKT_LEN,
						    (u8 *)&rtlpci->
						    rxbuffersize);
			rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
						    HW_DESC_RXOWN,
						    (u8 *)&tmp_one);
		}

		rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
					    HW_DESC_RXERO, (u8 *)&tmp_one);
	}
	return 0;
}
Code Example #24
File: ss_skb.c Project: nekulin/tempesta
/**
 * Fragment @skb to add some room if @len > 0 or delete data otherwise.
 */
static int
__skb_fragment(struct sk_buff *skb, struct sk_buff *pskb,
	       char *pspt, int len, TfwStr *it)
{
	int i, ret;
	long offset;
	unsigned int d_size;
	struct sk_buff *f_skb, **next_fdp;

	SS_DBG("[%d]: %s: in: len [%d] pspt [%p], skb [%p]: head [%p]"
		" data [%p] tail [%p] end [%p] len [%u] data_len [%u]"
		" truesize [%u] nr_frags [%u]\n",
		smp_processor_id(), __func__, len, pspt, skb, skb->head,
		skb->data, skb_tail_pointer(skb), skb_end_pointer(skb),
		skb->len, skb->data_len, skb->truesize,
		skb_shinfo(skb)->nr_frags);
	BUG_ON(!len);

	if (abs(len) > PAGE_SIZE) {
		SS_WARN("Attempt to add or delete too much data: %u\n", len);
		return -EINVAL;
	}

	/*
	 * Use @it to hold the return values from __split_pgfrag()
	 * and __split_linear_data(). @it->ptr, @it->skb, and
	 * @it->flags may be set to actual values. If a new SKB is
	 * allocated, then it is stored in @it->skb. @it->ptr holds
	 * the pointer either to data after the deleted data, or to
	 * the area for new data. @it->flags is set when @it->ptr
	 * points to data in @it->skb. Otherwise, @it->ptr points
	 * to data in @skb.
	 *
	 * Determine where the split begins within the SKB, then do
	 * the job using the right function.
	 */

	/* See if the split starts in the linear data. */
	d_size = skb_headlen(skb);
	offset = pspt - (char *)skb->data;

	if ((offset >= 0) && (offset < d_size)) {
		int t_size = d_size - offset;
		len = max(len, -t_size);
		ret = __split_linear_data(skb, pspt, len, it);
		goto done;
	}

	/* See if the split starts in the page fragments data. */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		d_size = skb_frag_size(frag);
		offset = pspt - (char *)skb_frag_address(frag);

		if ((offset >= 0) && (offset < d_size)) {
			int t_size = d_size - offset;
			len = max(len, -t_size);
			ret = __split_pgfrag(skb, i, offset, len, it);
			goto done;
		}
	}

	/* See if the split starts in the SKB fragments data. */
	skb_walk_frags(skb, f_skb) {
		ret = __skb_fragment(f_skb, skb, pspt, len, it);
		if (ret != -ENOENT)
			return ret;
	}
Code Example #25
static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;

	struct ieee80211_rx_status rx_status = { 0 };
	unsigned int count = rtlpci->rxringcount;
	u8 own;
	u8 tmp_one;
	u32 bufferaddress;
	bool unicast = false;

	struct rtl_stats stats = {
		.signal = 0,
		.noise = -98,
		.rate = 0,
	};

	/*RX NORMAL PKT */
	while (count--) {
		/*rx descriptor */
		struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
				rtlpci->rx_ring[rx_queue_idx].idx];
		/*rx pkt */
		struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
				rtlpci->rx_ring[rx_queue_idx].idx];

		own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
						       false, HW_DESC_OWN);

		if (own) {
			/*wait data to be filled by hardware */
			return;
		} else {
			struct ieee80211_hdr *hdr;
			__le16 fc;
			struct sk_buff *new_skb = NULL;

			rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
							 &rx_status,
							 (u8 *) pdesc, skb);

			pci_unmap_single(rtlpci->pdev,
					 *((dma_addr_t *) skb->cb),
					 rtlpci->rxbuffersize,
					 PCI_DMA_FROMDEVICE);

			skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
							 false,
							 HW_DESC_RXPKT_LEN));
			skb_reserve(skb,
				    stats.rx_drvinfo_size + stats.rx_bufshift);

			/*
			 * NOTICE: this cannot be done here for mac80211;
			 * it is done in mac80211 code. If done here,
			 * secure DHCP will fail:
			 * skb_trim(skb, skb->len - 4);
			 */

			hdr = (struct ieee80211_hdr *)(skb->data);
			fc = hdr->frame_control;

			if (!stats.crc) {
				memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
				       sizeof(rx_status));

				if (is_broadcast_ether_addr(hdr->addr1))
					;/*TODO*/
				else {
					if (is_multicast_ether_addr(hdr->addr1))
						;/*TODO*/
					else {
						unicast = true;
						rtlpriv->stats.rxbytesunicast +=
						    skb->len;
					}
				}

				rtl_is_special_data(hw, skb, false);

				if (ieee80211_is_data(fc)) {
					rtlpriv->cfg->ops->led_control(hw,
							       LED_CTL_RX);

					if (unicast)
						rtlpriv->link_info.
						    num_rx_inperiod++;
				}

				if (unlikely(!rtl_action_proc(hw, skb,
				    false))) {
					dev_kfree_skb_any(skb);
				} else {
					struct sk_buff *uskb = NULL;
					u8 *pdata;
					uskb = dev_alloc_skb(skb->len + 128);
					if (!uskb) {
						RT_TRACE(rtlpriv,
							(COMP_INTR | COMP_RECV),
							DBG_EMERG,
							("can't alloc rx skb\n"));
						goto done;
					}
					memcpy(IEEE80211_SKB_RXCB(uskb),
							&rx_status,
							sizeof(rx_status));
					pdata = (u8 *)skb_put(uskb, skb->len);
					memcpy(pdata, skb->data, skb->len);
					dev_kfree_skb_any(skb);

					ieee80211_rx_irqsafe(hw, uskb);
				}
			} else {
				dev_kfree_skb_any(skb);
			}

			if (((rtlpriv->link_info.num_rx_inperiod +
				rtlpriv->link_info.num_tx_inperiod) > 8) ||
				(rtlpriv->link_info.num_rx_inperiod > 2)) {
				rtl_lps_leave(hw);
			}

			new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
			if (unlikely(!new_skb)) {
				RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
					 DBG_EMERG,
					 ("can't alloc skb for rx\n"));
				goto done;
			}
			skb = new_skb;
			/*skb->dev = dev; */

			rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
							     rx_ring
							     [rx_queue_idx].
							     idx] = skb;
			*((dma_addr_t *) skb->cb) =
			    pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
					   rtlpci->rxbuffersize,
					   PCI_DMA_FROMDEVICE);

		}
done:
		bufferaddress = (u32)(*((dma_addr_t *) skb->cb));
		tmp_one = 1;
		rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
					    HW_DESC_RXBUFF_ADDR,
					    (u8 *)&bufferaddress);
		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
					    (u8 *)&tmp_one);
		rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
					    HW_DESC_RXPKT_LEN,
					    (u8 *)&rtlpci->rxbuffersize);

		if (rtlpci->rx_ring[rx_queue_idx].idx ==
		    rtlpci->rxringcount - 1)
			rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
						    HW_DESC_RXERO,
						    (u8 *)&tmp_one);

		rtlpci->rx_ring[rx_queue_idx].idx =
		    (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
		    rtlpci->rxringcount;
	}

}

static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *hw = dev_id;
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	unsigned long flags;
	u32 inta = 0;
	u32 intb = 0;

	if (rtlpci->irq_enabled == 0)
		return IRQ_HANDLED;

	spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);

	/*read ISR: 4/8bytes */
	rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);

	/* Shared IRQ or HW disappeared */
	if (!inta || inta == 0xffff)
		goto done;

	/*<1> beacon related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon ok interrupt!\n"));
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon err interrupt!\n"));
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("beacon interrupt!\n"));
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("prepare beacon for interrupt!\n"));
		tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
	}

	/*<3> Tx related */
	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));

	if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("Manage ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, MGNT_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("HIGH_QUEUE ok interrupt!\n"));
		_rtl_pci_tx_isr(hw, HIGH_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("BK Tx OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BK_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("BE TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, BE_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("VI TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VI_QUEUE);
	}

	if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
		rtlpriv->link_info.num_tx_inperiod++;

		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
			 ("Vo TX OK interrupt!\n"));
		_rtl_pci_tx_isr(hw, VO_QUEUE);
	}

	/*<2> Rx related */
	if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
		RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
		tasklet_schedule(&rtlpriv->works.irq_tasklet);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
			 ("rx descriptor unavailable!\n"));
		tasklet_schedule(&rtlpriv->works.irq_tasklet);
	}

	if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
		RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
		tasklet_schedule(&rtlpriv->works.irq_tasklet);
	}

	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return IRQ_HANDLED;

done:
	spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
	return IRQ_HANDLED;
}

static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
{
	_rtl_pci_rx_interrupt(hw);
}
Code Example #26
static int tcf_gact_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_gact *gact = a->priv;
	struct tc_gact opt = {
		.index   = gact->tcf_index,
		.refcnt  = gact->tcf_refcnt - ref,
		.bindcnt = gact->tcf_bindcnt - bind,
		.action  = gact->tcf_action,
	};
	struct tcf_t t;

	NLA_PUT(skb, TCA_GACT_PARMS, sizeof(opt), &opt);
#ifdef CONFIG_GACT_PROB
	if (gact->tcfg_ptype) {
		struct tc_gact_p p_opt = {
			.paction = gact->tcfg_paction,
			.pval = gact->tcfg_pval,
			.ptype = gact->tcfg_ptype,
		};
		NLA_PUT(skb, TCA_GACT_PROB, sizeof(p_opt), &p_opt);
	}
#endif
	t.install = jiffies_to_clock_t(jiffies - gact->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - gact->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(gact->tcf_tm.expires);
	NLA_PUT(skb, TCA_GACT_TM, sizeof(t), &t);
	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tc_action_ops act_gact_ops = {
	.kind		=	"gact",
	.hinfo		=	&gact_hash_info,
	.type		=	TCA_ACT_GACT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_gact,
	.dump		=	tcf_gact_dump,
	.cleanup	=	tcf_gact_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_gact_init,
	.walk		=	tcf_generic_walker
};

MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
MODULE_DESCRIPTION("Generic Classifier actions");
MODULE_LICENSE("GPL");

static int __init gact_init_module(void)
{
#ifdef CONFIG_GACT_PROB
	printk(KERN_INFO "GACT probability on\n");
#else
	printk(KERN_INFO "GACT probability NOT on\n");
#endif
	return tcf_register_action(&act_gact_ops);
}

static void __exit gact_cleanup_module(void)
{
	tcf_unregister_action(&act_gact_ops);
}

module_init(gact_init_module);
module_exit(gact_cleanup_module);
Code Example #27
File: recv_linux.c Project: neverhover/rtl8821au
int rtw_recv_indicatepkt(_adapter *padapter, union recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	_queue	*pfree_recv_queue;
	_pkt *skb;
	struct mlme_priv*pmlmepriv = &padapter->mlmepriv;
	struct rx_pkt_attrib *pattrib;
	
	if(NULL == precv_frame)
		goto _recv_indicatepkt_drop;

	DBG_COUNTER(padapter->rx_logs.os_indicate);
	pattrib = &precv_frame->u.hdr.attrib;
	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

#ifdef CONFIG_DRVEXT_MODULE
	if (drvext_rx_handler(padapter, precv_frame->u.hdr.rx_data, precv_frame->u.hdr.len) == _SUCCESS)
	{
		goto _recv_indicatepkt_drop;
	}
#endif

#ifdef CONFIG_WAPI_SUPPORT
	if (rtw_wapi_check_for_drop(padapter,precv_frame))
	{
		WAPI_TRACE(WAPI_ERR, "%s(): Rx Reorder Drop case!!\n", __FUNCTION__);
		goto _recv_indicatepkt_drop;
	}
#endif

	skb = precv_frame->u.hdr.pkt;
	if(skb == NULL)
	{
		RT_TRACE(_module_recv_osdep_c_,_drv_err_,("rtw_recv_indicatepkt():skb==NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():skb != NULL !!!\n"));		
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("rtw_recv_indicatepkt():precv_frame->u.hdr.rx_head=%p  precv_frame->hdr.rx_data=%p\n", precv_frame->u.hdr.rx_head, precv_frame->u.hdr.rx_data));
	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("precv_frame->hdr.rx_tail=%p precv_frame->u.hdr.rx_end=%p precv_frame->hdr.len=%d \n", precv_frame->u.hdr.rx_tail, precv_frame->u.hdr.rx_end, precv_frame->u.hdr.len));

	skb->data = precv_frame->u.hdr.rx_data;

	skb_set_tail_pointer(skb, precv_frame->u.hdr.len);

	skb->len = precv_frame->u.hdr.len;

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n skb->head=%p skb->data=%p skb->tail=%p skb->end=%p skb->len=%d\n", skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb), skb->len));

#ifdef CONFIG_AUTO_AP_MODE	
#if 1 //for testing
#if 1
	if (0x8899 == pattrib->eth_type)
	{
		rtw_os_ksocket_send(padapter, precv_frame);

		//goto _recv_indicatepkt_drop;
	}
#else
	if (0x8899 == pattrib->eth_type)
	{
		rtw_auto_ap_mode_rx(padapter, precv_frame);
		
		goto _recv_indicatepkt_end;
	}
#endif
#endif
#endif //CONFIG_AUTO_AP_MODE

	rtw_os_recv_indicate_pkt(padapter, skb, pattrib);

_recv_indicatepkt_end:

	precv_frame->u.hdr.pkt = NULL; // pointers to NULL before rtw_free_recvframe()

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_,_drv_info_,("\n rtw_recv_indicatepkt :after rtw_os_recv_indicate_pkt!!!!\n"));


        return _SUCCESS;

_recv_indicatepkt_drop:

	 //enqueue back to free_recv_queue
	 if(precv_frame)
		 rtw_free_recvframe(precv_frame, pfree_recv_queue);

	 DBG_COUNTER(padapter->rx_logs.os_indicate_err);

	 return _FAIL;

}
Code Example #28
void dn_nsp_send_conninit(struct sock *sk, unsigned char msgflg)
{
	struct dn_scp *scp = DN_SK(sk);
	struct nsp_conn_init_msg *msg;
	unsigned char aux;
	unsigned char menuver;
	struct dn_skb_cb *cb;
	unsigned char type = 1;
	gfp_t allocation = (msgflg == NSP_CI) ? sk->sk_allocation : GFP_ATOMIC;
	struct sk_buff *skb = dn_alloc_skb(sk, 200, allocation);

	if (!skb)
		return;

	cb  = DN_SKB_CB(skb);
	msg = (struct nsp_conn_init_msg *)skb_put(skb,sizeof(*msg));

	msg->msgflg	= msgflg;
	msg->dstaddr	= 0x0000;		/* Remote Node will assign it*/

	msg->srcaddr	= scp->addrloc;
	msg->services	= scp->services_loc;	/* Requested flow control    */
	msg->info	= scp->info_loc;	/* Version Number            */
	msg->segsize	= cpu_to_le16(scp->segsize_loc);	/* Max segment size  */

	if (scp->peer.sdn_objnum)
		type = 0;

	skb_put(skb, dn_sockaddr2username(&scp->peer,
					  skb_tail_pointer(skb), type));
	skb_put(skb, dn_sockaddr2username(&scp->addr,
					  skb_tail_pointer(skb), 2));

	menuver = DN_MENUVER_ACC | DN_MENUVER_USR;
	if (scp->peer.sdn_flags & SDF_PROXY)
		menuver |= DN_MENUVER_PRX;
	if (scp->peer.sdn_flags & SDF_UICPROXY)
		menuver |= DN_MENUVER_UIC;

	*skb_put(skb, 1) = menuver;	/* Menu Version		*/

	aux = scp->accessdata.acc_userl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
	memcpy(skb_put(skb, aux), scp->accessdata.acc_user, aux);

	aux = scp->accessdata.acc_passl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
	memcpy(skb_put(skb, aux), scp->accessdata.acc_pass, aux);

	aux = scp->accessdata.acc_accl;
	*skb_put(skb, 1) = aux;
	if (aux > 0)
	memcpy(skb_put(skb, aux), scp->accessdata.acc_acc, aux);

	aux = (__u8)le16_to_cpu(scp->conndata_out.opt_optl);
	*skb_put(skb, 1) = aux;
	if (aux > 0)
	memcpy(skb_put(skb,aux), scp->conndata_out.opt_data, aux);

	scp->persist = dn_nsp_persist(sk);
	scp->persist_fxn = dn_nsp_retrans_conninit;

	cb->rt_flags = DN_RT_F_RQR;

	dn_nsp_send(skb);
}
Code Example #29
File: ctcm_main.c Project: 12rafael/jellytimekernel
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure. (Never fails.)
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
				goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
Code Example #30
File: recv_linux.c Project: 513855417/linux
int rtw_recv_indicatepkt(struct adapter *padapter,
			 struct recv_frame *precv_frame)
{
	struct recv_priv *precvpriv;
	struct __queue *pfree_recv_queue;
	struct sk_buff *skb;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;


	precvpriv = &(padapter->recvpriv);
	pfree_recv_queue = &(precvpriv->free_recv_queue);

	skb = precv_frame->pkt;
	if (!skb) {
		RT_TRACE(_module_recv_osdep_c_, _drv_err_,
			 ("rtw_recv_indicatepkt():skb == NULL something wrong!!!!\n"));
		goto _recv_indicatepkt_drop;
	}

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt():skb != NULL !!!\n"));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("rtw_recv_indicatepkt():precv_frame->rx_head =%p  precv_frame->hdr.rx_data =%p\n",
		 precv_frame->rx_head, precv_frame->rx_data));
	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("precv_frame->hdr.rx_tail =%p precv_frame->rx_end =%p precv_frame->hdr.len =%d\n",
		 precv_frame->rx_tail, precv_frame->rx_end,
		 precv_frame->len));

	skb->data = precv_frame->rx_data;

	skb_set_tail_pointer(skb, precv_frame->len);

	skb->len = precv_frame->len;

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("skb->head =%p skb->data =%p skb->tail =%p skb->end =%p skb->len =%d\n",
		 skb->head, skb->data, skb_tail_pointer(skb),
		 skb_end_pointer(skb), skb->len));

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		struct sk_buff *pskb2 = NULL;
		struct sta_info *psta = NULL;
		struct sta_priv *pstapriv = &padapter->stapriv;
		struct rx_pkt_attrib *pattrib = &precv_frame->attrib;
		int bmcast = IS_MCAST(pattrib->dst);

		if (memcmp(pattrib->dst, myid(&padapter->eeprompriv),
			   ETH_ALEN)) {
			if (bmcast) {
				psta = rtw_get_bcmc_stainfo(padapter);
				pskb2 = skb_clone(skb, GFP_ATOMIC);
			} else {
				psta = rtw_get_stainfo(pstapriv, pattrib->dst);
			}

			if (psta) {
				struct net_device *pnetdev;

				pnetdev = (struct net_device *)padapter->pnetdev;
				skb->dev = pnetdev;
				skb_set_queue_mapping(skb, rtw_recv_select_queue(skb));

				rtw_xmit_entry(skb, pnetdev);

				if (bmcast)
					skb = pskb2;
				else
					goto _recv_indicatepkt_end;
			}
		}
	}

	rcu_read_lock();
	rcu_dereference(padapter->pnetdev->rx_handler_data);
	rcu_read_unlock();

	skb->ip_summed = CHECKSUM_NONE;
	skb->dev = padapter->pnetdev;
	skb->protocol = eth_type_trans(skb, padapter->pnetdev);

	netif_rx(skb);

_recv_indicatepkt_end:

	/*  pointers to NULL before rtw_free_recvframe() */
	precv_frame->pkt = NULL;

	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	RT_TRACE(_module_recv_osdep_c_, _drv_info_,
		 ("\n rtw_recv_indicatepkt :after netif_rx!!!!\n"));


	return _SUCCESS;

_recv_indicatepkt_drop:

	 /* enqueue back to free_recv_queue */
	rtw_free_recvframe(precv_frame, pfree_recv_queue);

	 return _FAIL;
}