Example no. 1
0
/*
 * Update the dynamic parts of a beacon frame based on the current state.
 */
int
ieee80211_beacon_update(struct ieee80211_node *ni,
	struct ieee80211_beacon_offsets *bo, struct sk_buff *skb, 
	int mcast, int *is_dtim)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int len_changed = 0;
	u_int16_t capinfo;

	IEEE80211_LOCK_IRQ(ic);

	/* Check if we need to change channel right now */
	if ((ic->ic_flags & IEEE80211_F_DOTH) &&
	    (vap->iv_flags & IEEE80211_F_CHANSWITCH)) {
		struct ieee80211_channel *c = 
			ieee80211_doth_findchan(vap, ic->ic_chanchange_chan);
		
		if (!vap->iv_chanchange_count && !c) {
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
		} else if (vap->iv_chanchange_count &&
			   ((!ic->ic_chanchange_tbtt) ||
			    (vap->iv_chanchange_count == ic->ic_chanchange_tbtt))) {
			u_int8_t *frm;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
					"%s: reinit beacon\n", __func__);

			/* NB: ic_bsschan is in the DSPARMS beacon IE, so must
			 * set this prior to the beacon re-init, below. */
			if (c == NULL) {
				/* Requested channel invalid; drop the channel
				 * switch announcement and do nothing. */
				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
						"%s: find channel failure\n", __func__);
			} else
				ic->ic_bsschan = c;

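			/* Rebuild the variable part of the beacon in place: strip the
			 * 802.11 header, regenerate the body (which refreshes the
			 * offsets cached in 'bo'), then restore the header. */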
			skb_pull(skb, sizeof(struct ieee80211_frame));
			skb_trim(skb, 0);
			frm = skb->data;
			skb_put(skb, ieee80211_beacon_init(ni, bo, frm) - frm);
			skb_push(skb, sizeof(struct ieee80211_frame));

			vap->iv_chanchange_count = 0;
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;

			/* NB: Only for the first VAP to get here, and when we
			 * have a valid channel to which to change. */
			if (c && (ic->ic_curchan != c)) {
				ic->ic_curchan = c;
				ic->ic_set_channel(ic);
			}

			len_changed = 1;
		}
	}

	/* XXX faster to recalculate entirely or just changes? */
	if (vap->iv_opmode == IEEE80211_M_IBSS)
		capinfo = IEEE80211_CAPINFO_IBSS;
	else
		capinfo = IEEE80211_CAPINFO_ESS;

	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		capinfo |= IEEE80211_CAPINFO_PRIVACY;
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
		capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
	if (ic->ic_flags & IEEE80211_F_DOTH)
		capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;

	*bo->bo_caps = htole16(capinfo);

	if (vap->iv_flags & IEEE80211_F_WME) {
		struct ieee80211_wme_state *wme = &ic->ic_wme;

		/*
		 * Check for aggressive mode change.  When there is
		 * significant high priority traffic in the BSS
		 * throttle back BE traffic by using conservative
		 * parameters.  Otherwise BE uses aggressive params
		 * to optimize performance of legacy/non-QoS traffic.
		 */
		if (wme->wme_flags & WME_F_AGGRMODE) {
			if (wme->wme_hipri_traffic >
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					"%s: traffic %u, disable aggressive mode",
					__func__, wme->wme_hipri_traffic);
				wme->wme_flags &= ~WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
			} else
				wme->wme_hipri_traffic = 0;
		} else {
			if (wme->wme_hipri_traffic <=
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					"%s: traffic %u, enable aggressive mode",
					__func__, wme->wme_hipri_traffic);
				wme->wme_flags |= WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic = 0;
			} else
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
		}
		/* XXX multi-bss */
		if (vap->iv_flags & IEEE80211_F_WMEUPDATE) {
			(void) ieee80211_add_wme_param(bo->bo_wme, wme, IEEE80211_VAP_UAPSD_ENABLED(vap));
			vap->iv_flags &= ~IEEE80211_F_WMEUPDATE;
		}
	}

	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {	/* NB: no IBSS support*/
		struct ieee80211_tim_ie *tie =
			(struct ieee80211_tim_ie *) bo->bo_tim;
		if (vap->iv_flags & IEEE80211_F_TIMUPDATE) {
			u_int timlen, timoff, i;
			/*
			 * ATIM/DTIM needs updating.  If it fits in the
			 * current space allocated then just copy in the
			 * new bits.  Otherwise we need to move any trailing
			 * data to make room.  Note that we know there is
			 * contiguous space because ieee80211_beacon_allocate
			 * ensures there is space in the mbuf to write a
			 * maximal-size virtual bitmap (based on ic_max_aid).
			 */
			/*
			 * Calculate the bitmap size and offset, copy any
			 * trailer out of the way, and then copy in the
			 * new bitmap and update the information element.
			 * Note that the tim bitmap must contain at least
			 * one byte and any offset must be even.
			 */
			if (vap->iv_ps_pending != 0) {
				timoff = 128;		/* impossibly large */
				for (i = 0; i < vap->iv_tim_len; i++)
					if (vap->iv_tim_bitmap[i]) {
						timoff = i & BITCTL_BUFD_UCAST_AID_MASK;
						break;
					}
				KASSERT(timoff != 128, ("tim bitmap empty!"));
				for (i = vap->iv_tim_len-1; i >= timoff; i--)
					if (vap->iv_tim_bitmap[i])
						break;
				timlen = 1 + (i - timoff);
			} else {
				timoff = 0;
				timlen = 1;
			}
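			/* If the bitmap length changed, slide the trailing IEs up or
			 * down, fix the cached offsets and resize the skb before
			 * updating the TIM element itself. */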
			if (timlen != bo->bo_tim_len) {
				/* copy up/down trailer */
				int trailer_adjust =
					(tie->tim_bitmap+timlen) - (bo->bo_tim_trailer);
				memmove(tie->tim_bitmap+timlen, bo->bo_tim_trailer,
					bo->bo_tim_trailerlen);
				bo->bo_tim_trailer = tie->tim_bitmap+timlen;
				bo->bo_chanswitch += trailer_adjust;
				bo->bo_wme += trailer_adjust;
				bo->bo_erp += trailer_adjust;
				bo->bo_ath_caps += trailer_adjust;
				bo->bo_xr += trailer_adjust;
				if (timlen > bo->bo_tim_len)
					skb_put(skb, timlen - bo->bo_tim_len);
				else
					skb_trim(skb, skb->len - (bo->bo_tim_len - timlen));
				bo->bo_tim_len = timlen;

				/* update information element */
				tie->tim_len = 3 + timlen;
				tie->tim_bitctl = timoff;
				len_changed = 1;
			}
			memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff,
				bo->bo_tim_len);

			vap->iv_flags &= ~IEEE80211_F_TIMUPDATE;

			IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
				"%s: TIM updated, pending %u, off %u, len %u",
				__func__, vap->iv_ps_pending, timoff, timlen);
		}
		/* count down DTIM period */
		if (tie->tim_count == 0)
			tie->tim_count = tie->tim_period - 1;
		else
			tie->tim_count--;
		/* update state for buffered multicast frames on DTIM */
		if (mcast && (tie->tim_count == 0))
			tie->tim_bitctl |= BITCTL_BUFD_MCAST;
		else
			tie->tim_bitctl &= ~BITCTL_BUFD_MCAST;
		*is_dtim = (tie->tim_count == 0);
	}

	/* Whenever we want to switch to a new channel, we need to follow the
	 * following steps:
	 *
	 * - iv_chanchange_count= number of beacon intervals elapsed (0)
	 * - ic_chanchange_tbtt = number of beacon intervals before switching
	 * - ic_chanchange_chan = IEEE channel number after switching
	 * - ic_flags |= IEEE80211_F_CHANSWITCH */

	if (IEEE80211_IS_MODE_BEACON(vap->iv_opmode)) {

		if ((ic->ic_flags & IEEE80211_F_DOTH) &&
		    (ic->ic_flags & IEEE80211_F_CHANSWITCH)) {
			struct ieee80211_ie_csa *csa_ie =
				(struct ieee80211_ie_csa *)bo->bo_chanswitch;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH, 
					"%s: Sending 802.11h chanswitch IE: "
					"%d/%d\n", __func__, 
					ic->ic_chanchange_chan, 
					ic->ic_chanchange_tbtt);
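			/* Only the first beacon of the switch opens a gap and inserts
			 * the CSA IE; later beacons merely decrement the countdown. */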
			if (!vap->iv_chanchange_count) {
				vap->iv_flags |= IEEE80211_F_CHANSWITCH;

				/* copy out trailer to open up a slot */
				memmove(bo->bo_chanswitch + sizeof(*csa_ie),
					bo->bo_chanswitch, 
					bo->bo_chanswitch_trailerlen);

				/* add ie in opened slot */
				csa_ie->csa_id = IEEE80211_ELEMID_CHANSWITCHANN;
				/* fixed length */
				csa_ie->csa_len = sizeof(*csa_ie) - 2;
				/* STA shall transmit no further frames */
				csa_ie->csa_mode = 1;
				csa_ie->csa_chan = ic->ic_chanchange_chan;
				csa_ie->csa_count = ic->ic_chanchange_tbtt;

				/* update the trailer lens */
				bo->bo_chanswitch_trailerlen += sizeof(*csa_ie);
				bo->bo_tim_trailerlen += sizeof(*csa_ie);
				bo->bo_wme += sizeof(*csa_ie);
				bo->bo_erp += sizeof(*csa_ie);
				bo->bo_ath_caps += sizeof(*csa_ie);
				bo->bo_xr += sizeof(*csa_ie);

				/* indicate new beacon length so other layers 
				 * may manage memory */
				skb_put(skb, sizeof(*csa_ie));
				len_changed = 1;
			} else if(csa_ie->csa_count)
				csa_ie->csa_count--;
			
			vap->iv_chanchange_count++;
			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
				"%s: CHANSWITCH IE, change in %d TBTT\n",
				__func__, csa_ie->csa_count);
		}
#ifdef ATH_SUPERG_XR
		if (vap->iv_flags & IEEE80211_F_XRUPDATE) {
			if (vap->iv_xrvap)
				(void)ieee80211_add_xr_param(bo->bo_xr, vap);
			vap->iv_flags &= ~IEEE80211_F_XRUPDATE;
		}
#endif
		if ((ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE) && 
				(bo->bo_erp != NULL)) {
			(void)ieee80211_add_erp(bo->bo_erp, ic);
			ic->ic_flags_ext &= ~IEEE80211_FEXT_ERPUPDATE;
		}
	}
	/* if it is a mode change beacon for dynamic turbo case */
	if (((ic->ic_ath_cap & IEEE80211_ATHC_BOOST) != 0) ^
	    IEEE80211_IS_CHAN_TURBO(ic->ic_curchan))
		ieee80211_add_athAdvCap(bo->bo_ath_caps, 
				vap->iv_bss->ni_ath_flags,
				vap->iv_bss->ni_ath_defkeyindex);
	/* add APP_IE buffer if app updated it */
	if (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE) {
		/* adjust the buffer size if the size is changed */
		if (vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length != 
				bo->bo_appie_buf_len) {
			int diff_len;
			diff_len = 
				vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].
					length - 
				bo->bo_appie_buf_len;

			if (diff_len > 0)
				skb_put(skb, diff_len);
			else
				skb_trim(skb, skb->len + diff_len);

			bo->bo_appie_buf_len = 
				vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].
					length;
			/* update the trailer lens */
			bo->bo_chanswitch_trailerlen += diff_len;
			bo->bo_tim_trailerlen += diff_len;

			len_changed = 1;
		}
		memcpy(bo->bo_appie_buf, 
			vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie,
			vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length);

		vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
	}

	IEEE80211_UNLOCK_IRQ(ic);

	return len_changed;
}
Example no. 2
0
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
    struct ccci_request *req = NULL;
    struct ccci_header *ccci_h;
    int ret;
    int skb_len = skb->len;
    static int tx_busy_retry_cnt = 0;
    int tx_queue, tx_channel;

#ifndef FEATURE_SEQ_CHECK_EN
    struct netdev_entity *nent = (struct netdev_entity *)port->private_data;
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
                 port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
    CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n", port->name, skb_headroom(skb), skb->len);
#endif

    if(unlikely(skb->len > CCMNI_MTU)) {
        CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n", CCMNI_MTU, dev->mtu, skb->len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
        CCCI_ERR_MSG(port->modem->index, NET, "not enough header room on %s, len=%d header=%d hard_header=%d\n",
                     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }
    if(unlikely(port->modem->md_state != READY)) {
        dev_kfree_skb(skb);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
    }

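    /* Allocate a CCCI request, prepend the CCCI header and hand the skb to
     * the modem TX queue; if the queue is busy the header push is undone and
     * NETDEV_TX_BUSY is returned so the stack retries later. */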
    req = ccci_alloc_req(OUT, -1, 1, 0);
    if(req) {
        if(likely(port->rx_ch != CCCI_CCMNI3_RX)) {
            if(unlikely(skb_is_ack(skb))) {
                tx_channel = port->tx_ch==CCCI_CCMNI1_TX?CCCI_CCMNI1_DL_ACK:CCCI_CCMNI2_DL_ACK;
                tx_queue = NET_ACK_TXQ_INDEX(port);
            } else {
                tx_channel = port->tx_ch;
                tx_queue = NET_DAT_TXQ_INDEX(port);
            }
        } else {
            tx_channel = port->tx_ch;
            tx_queue = NET_DAT_TXQ_INDEX(port);
        }

        req->skb = skb;
        req->policy = FREE;
        ccci_h = (struct ccci_header*)skb_push(skb, sizeof(struct ccci_header));
        ccci_h->channel = tx_channel;
        ccci_h->data[0] = 0;
        ccci_h->data[1] = skb->len; // skb->len already includes the ccci_header after the skb_push above
#ifndef FEATURE_SEQ_CHECK_EN
        ccci_h->reserved = nent->tx_seq_num++;
#else
        ccci_h->reserved = 0;
#endif
        ret = port->modem->ops->send_request(port->modem, tx_queue, req);
        if(ret) {
            skb_pull(skb, sizeof(struct ccci_header)); // undo the header push; the next retry will push it again
            req->policy = NOOP; // when returning busy, do NOT free the skb as the network stack may still use it
            ccci_free_req(req);
            goto tx_busy;
        }
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb_len;
        tx_busy_retry_cnt = 0;
    } else {
        CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
        goto tx_busy;
    }
    return NETDEV_TX_OK;

tx_busy:
    if(unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
        if((++tx_busy_retry_cnt)%20000 == 0)
            CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n", port->name, tx_busy_retry_cnt);
    } else {
        port->tx_busy_count++;
    }
    return NETDEV_TX_BUSY;
}
Example no. 3
0
/*
 * Allocate a beacon frame and fill in the appropriate bits.
 */
struct sk_buff *
ieee80211_beacon_alloc(struct ieee80211_node *ni,
	struct ieee80211_beacon_offsets *bo)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	struct ieee80211_frame *wh;
	struct sk_buff *skb;
	int pktlen;
	u_int8_t *frm;
	struct ieee80211_rateset *rs;

	/*
	 * beacon frame format
	 *	[8] time stamp
	 *	[2] beacon interval
	 *	[2] capability information
	 *	[tlv] ssid
	 *	[tlv] supported rates
	 *	[7] FH/DS parameter set
	 *	[tlv] IBSS/TIM parameter set
	 *	[tlv] country code 
	 *	[3] power constraint
	 *	[5] channel switch announcement
	 *	[3] extended rate phy (ERP)
	 *	[tlv] extended supported rates
	 *	[tlv] WME parameters
	 *	[tlv] WPA/RSN parameters
	 *	[tlv] Atheros Advanced Capabilities
	 *	[tlv] AtherosXR parameters
	 * XXX Vendor-specific OIDs (e.g. Atheros)
	 * NB: we allocate the max space required for the TIM bitmap.
	 */
	rs = &ni->ni_rates;
	pktlen = 8					/* time stamp */
		 + sizeof(u_int16_t)			/* beacon interval */
		 + sizeof(u_int16_t)			/* capability information */
		 + 2 + ni->ni_esslen			/* ssid */
		 + 2 + IEEE80211_RATE_SIZE		/* supported rates */
		 + 7 					/* FH/DS parameters max(7,3) */
		 + 2 + 4 + vap->iv_tim_len		/* IBSS/TIM parameter set*/
		 + ic->ic_country_ie.country_len + 2	/* country code */
		 + 3					/* power constraint */
		 + 5					/* channel switch announcement */
		 + 3					/* ERP */
		 + 2 + (IEEE80211_RATE_MAXSIZE - IEEE80211_RATE_SIZE) /* Ext. Supp. Rates */
		 + (vap->iv_caps & IEEE80211_C_WME ?	/* WME */
			sizeof(struct ieee80211_wme_param) : 0)
		 + (vap->iv_caps & IEEE80211_C_WPA ?	/* WPA 1+2 */
			2 * sizeof(struct ieee80211_ie_wpa) : 0)
		 + sizeof(struct ieee80211_ie_athAdvCap)
#ifdef ATH_SUPERG_XR
		 + (ic->ic_ath_cap & IEEE80211_ATHC_XR ?	/* XR */
			sizeof(struct ieee80211_xr_param) : 0)
#endif
		 ;
	skb = ieee80211_getmgtframe(&frm, pktlen);
	if (skb == NULL) {
		IEEE80211_NOTE(vap, IEEE80211_MSG_ANY, ni,
			"%s: cannot get buf; size %u", __func__, pktlen);
		vap->iv_stats.is_tx_nobuf++;
		return NULL;
	}

	SKB_NI(skb) = ieee80211_ref_node(ni);

	frm = ieee80211_beacon_init(ni, bo, frm);

	skb_trim(skb, frm - skb->data);

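	/* Prepend the 802.11 management frame header and fill in the addresses. */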
	wh = (struct ieee80211_frame *)
		skb_push(skb, sizeof(struct ieee80211_frame));
	wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
		IEEE80211_FC0_SUBTYPE_BEACON;
	wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
	wh->i_dur = 0;
	IEEE80211_ADDR_COPY(wh->i_addr1, ic->ic_dev->broadcast);
	IEEE80211_ADDR_COPY(wh->i_addr2, vap->iv_myaddr);
	IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_bssid);
	IEEE80211_DPRINTF(vap, IEEE80211_MSG_ASSOC,
			  "%s: beacon bssid:" MAC_FMT "\n",
			  __func__, MAC_ADDR(wh->i_addr3));
	*(u_int16_t *)wh->i_seq = 0;

	return skb;
}
Example no. 4
0
/* This function handles a received packet. The necessary action is taken
 * based on whether it carries a command, an event, or data.
 */
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
			    struct sk_buff *skb, u8 ep)
{
	struct device *dev = adapter->dev;
	u32 recv_type;
	__le32 tmp;
	int ret;

	if (adapter->hs_activated)
		mwifiex_process_hs_config(adapter);

	if (skb->len < INTF_HEADER_LEN) {
		dev_err(dev, "%s: invalid skb->len\n", __func__);
		return -1;
	}

	switch (ep) {
	case MWIFIEX_USB_EP_CMD_EVENT:
		dev_dbg(dev, "%s: EP_CMD_EVENT\n", __func__);
		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
		recv_type = le32_to_cpu(tmp);
		skb_pull(skb, INTF_HEADER_LEN);

		switch (recv_type) {
		case MWIFIEX_USB_TYPE_CMD:
			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
				dev_err(dev, "CMD: skb->len too large\n");
				ret = -1;
				goto exit_restore_skb;
			} else if (!adapter->curr_cmd) {
				dev_dbg(dev, "CMD: no curr_cmd\n");
				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
					mwifiex_process_sleep_confirm_resp(
							adapter, skb->data,
							skb->len);
					ret = 0;
					goto exit_restore_skb;
				}
				ret = -1;
				goto exit_restore_skb;
			}

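			/* Attach the response skb to the pending command and signal
			 * completion. */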
			adapter->curr_cmd->resp_skb = skb;
			adapter->cmd_resp_received = true;
			break;
		case MWIFIEX_USB_TYPE_EVENT:
			if (skb->len < sizeof(u32)) {
				dev_err(dev, "EVENT: skb->len too small\n");
				ret = -1;
				goto exit_restore_skb;
			}
			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
			adapter->event_cause = le32_to_cpu(tmp);
			dev_dbg(dev, "event_cause %#x\n", adapter->event_cause);

			if (skb->len > MAX_EVENT_SIZE) {
				dev_err(dev, "EVENT: event body too large\n");
				ret = -1;
				goto exit_restore_skb;
			}

			memcpy(adapter->event_body, skb->data +
			       MWIFIEX_EVENT_HEADER_LEN, skb->len);

			adapter->event_received = true;
			adapter->event_skb = skb;
			break;
		default:
			dev_err(dev, "unknown recv_type %#x\n", recv_type);
			return -1;
		}
		break;
	case MWIFIEX_USB_EP_DATA:
		dev_dbg(dev, "%s: EP_DATA\n", __func__);
		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
			dev_err(dev, "DATA: skb->len too large\n");
			return -1;
		}
		skb_queue_tail(&adapter->usb_rx_data_q, skb);
		adapter->data_received = true;
		break;
	default:
		dev_err(dev, "%s: unknown endport %#x\n", __func__, ep);
		return -1;
	}

	return -EINPROGRESS;

exit_restore_skb:
	/* The buffer will be reused for further cmds/events */
	skb_push(skb, INTF_HEADER_LEN);

	return ret;
}
Example no. 5
0
/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and 1 when the
 *	datagram has been successfully reassembled.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
			  struct net_device *dev)
{
	struct net *net = container_of(fq->q.net, struct net, ipv6.frags);
	struct sk_buff *fp, *head = fq->q.fragments;
	int    payload_len;
	unsigned int nhoff;

	fq_kill(fq);

	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);

		if (!fp)
			goto out_oom;

		fp->next = head->next;
		if (!fp->next)
			fq->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, fq->q.fragments);
		head->next = fq->q.fragments->next;

		kfree_skb(fq->q.fragments);
		fq->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG6_CB(head)->offset != 0);

	/* Unfragmented part is taken from the first segment. */
	payload_len = ((head->data - skb_network_header(head)) -
		       sizeof(struct ipv6hdr) + fq->q.len -
		       sizeof(struct frag_hdr));
	if (payload_len > IPV6_MAXPLEN)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &fq->q.net->mem);
	}

	/* We have to remove fragment header from datagram and to relocate
	 * header in order to calculate ICV correctly. */
	nhoff = fq->nhoffset;
	skb_network_header(head)[nhoff] = skb_transport_header(head)[0];
	memmove(head->head + sizeof(struct frag_hdr), head->head,
		(head->data - head->head) - sizeof(struct frag_hdr));
	head->mac_header += sizeof(struct frag_hdr);
	head->network_header += sizeof(struct frag_hdr);

	skb_shinfo(head)->frag_list = head->next;
	skb_reset_transport_header(head);
	skb_push(head, head->data - skb_network_header(head));

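	/* Fold every remaining fragment's length, checksum and truesize into
	 * the head skb. */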
	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	atomic_sub(head->truesize, &fq->q.net->mem);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = fq->q.stamp;
	ipv6_hdr(head)->payload_len = htons(payload_len);
	IP6CB(head)->nhoff = nhoff;
	IP6CB(head)->flags |= IP6SKB_FRAGMENTED;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_COMPLETE)
		head->csum = csum_partial(skb_network_header(head),
					  skb_network_header_len(head),
					  head->csum);

	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
	rcu_read_unlock();
	fq->q.fragments = NULL;
	fq->q.fragments_tail = NULL;
	return 1;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	rcu_read_lock();
	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
	rcu_read_unlock();
	return -1;
}
static int dccp_insert_option_ackvec(struct sock *sk, struct sk_buff *skb)
{
	struct dccp_sock *dp = dccp_sk(sk);
	struct dccp_ackvec *av = dp->dccps_hc_rx_ackvec;
	struct dccp_skb_cb *dcb = DCCP_SKB_CB(skb);
	const u16 buflen = dccp_ackvec_buflen(av);
	/* Figure out how many options we need to represent the ackvec */
	const u8 nr_opts = DIV_ROUND_UP(buflen, DCCP_SINGLE_OPT_MAXLEN);
	u16 len = buflen + 2 * nr_opts;
	u8 i, nonce = 0;
	const unsigned char *tail, *from;
	unsigned char *to;

	if (dcb->dccpd_opt_len + len > DCCP_MAX_OPT_LEN) {
		DCCP_WARN("Lacking space for %u bytes on %s packet\n", len,
			  dccp_packet_name(dcb->dccpd_type));
		return -1;
	}
	/*
	 * Since Ack Vectors are variable-length, we can not always predict
	 * their size. To catch exception cases where the space is running out
	 * on the skb, a separate Sync is scheduled to carry the Ack Vector.
	 */
	if (len > DCCPAV_MIN_OPTLEN &&
	    len + dcb->dccpd_opt_len + skb->len > dp->dccps_mss_cache) {
		DCCP_WARN("No space left for Ack Vector (%u) on skb (%u+%u), "
			  "MPS=%u ==> reduce payload size?\n", len, skb->len,
			  dcb->dccpd_opt_len, dp->dccps_mss_cache);
		dp->dccps_sync_scheduled = 1;
		return 0;
	}
	dcb->dccpd_opt_len += len;

	to   = skb_push(skb, len);
	len  = buflen;
	from = av->av_buf + av->av_buf_head;
	tail = av->av_buf + DCCPAV_MAX_ACKVEC_LEN;

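	/* Emit the Ack Vector as one or more options, copying out of the
	 * circular buffer and handling wrap-around at its end. */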
	for (i = 0; i < nr_opts; ++i) {
		int copylen = len;

		if (len > DCCP_SINGLE_OPT_MAXLEN)
			copylen = DCCP_SINGLE_OPT_MAXLEN;

		/*
		 * RFC 4340, 12.2: Encode the Nonce Echo for this Ack Vector via
		 * its type; ack_nonce is the sum of all individual buf_nonce's.
		 */
		nonce ^= av->av_buf_nonce[i];

		*to++ = DCCPO_ACK_VECTOR_0 + av->av_buf_nonce[i];
		*to++ = copylen + 2;

		/* Check if buf_head wraps */
		if (from + copylen > tail) {
			const u16 tailsize = tail - from;

			memcpy(to, from, tailsize);
			to	+= tailsize;
			len	-= tailsize;
			copylen	-= tailsize;
			from	= av->av_buf;
		}

		memcpy(to, from, copylen);
		from += copylen;
		to   += copylen;
		len  -= copylen;
	}
	/*
	 * Each sent Ack Vector is recorded in the list, as per A.2 of RFC 4340.
	 */
	if (dccp_ackvec_update_records(av, dcb->dccpd_seq, nonce))
		return -ENOBUFS;
	return 0;
}
Example no. 7
0
static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct bfusb_data *data = hci_get_drvdata(hdev);
	struct sk_buff *nskb;
	unsigned char buf[3];
	int sent = 0, size, count;

	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	count = skb->len;

	/* Max HCI frame size seems to be 1511 + 1 */
	nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
	if (!nskb) {
		BT_ERR("Can't allocate memory for new packet");
		return -ENOMEM;
	}

	nskb->dev = (void *) data;

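	/* Split the frame into BFUSB blocks, each prefixed with a 3-byte
	 * block header. */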
	while (count) {
		size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE);

		buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0);
		buf[1] = 0x00;
		buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;

		memcpy(skb_put(nskb, 3), buf, 3);
		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);

		sent  += size;
		count -= size;
	}

	/* Don't send a frame whose length is an exact multiple of the bulk max packet size */
	if ((nskb->len % data->bulk_pkt_size) == 0) {
		buf[0] = 0xdd;
		buf[1] = 0x00;
		memcpy(skb_put(nskb, 2), buf, 2);
	}

	read_lock(&data->lock);

	skb_queue_tail(&data->transmit_q, nskb);
	bfusb_tx_wakeup(data);

	read_unlock(&data->lock);

	kfree_skb(skb);

	return 0;
}
Example no. 8
0
/*
 *	Create the VLAN header for an arbitrary protocol layer 
 *
 *	saddr=NULL	means use device source address
 *	daddr=NULL	means leave destination address (eg unresolved arp)
 *
 *  This is called when the SKB is moving down the stack towards the
 *  physical devices.
 */
int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
                         unsigned short type, void *daddr, void *saddr,
                         unsigned len)
{
	struct vlan_hdr *vhdr;
	unsigned short veth_TCI = 0;
	int rc = 0;
	int build_vlan_header = 0;
	struct net_device *vdev = dev; /* save this for the bottom of the method */

#ifdef VLAN_DEBUG
	printk(VLAN_DBG "%s: skb: %p type: %hx len: %x vlan_id: %hx, daddr: %p\n",
		__FUNCTION__, skb, type, len, VLAN_DEV_INFO(dev)->vlan_id, daddr);
#endif

	/* build vlan header only if re_order_header flag is NOT set.  This
	 * fixes some programs that get confused when they see a VLAN device
	 * sending a frame that is VLAN encoded (the consensus is that the VLAN
	 * device should look completely like an Ethernet device when the
	 * REORDER_HEADER flag is set). The drawback to this is some extra
	 * header shuffling in the hard_start_xmit.  Users can turn off this
	 * REORDER behaviour with the vconfig tool.
	 */
	build_vlan_header = ((VLAN_DEV_INFO(dev)->flags & 1) == 0);

	if (build_vlan_header) {
		vhdr = (struct vlan_hdr *) skb_push(skb, VLAN_HLEN);

		/* build the four bytes that make this a VLAN header. */

		/* Now, construct the second two bytes. This field looks something
		 * like:
		 * usr_priority: 3 bits	 (high bits)
		 * CFI		 1 bit
		 * VLAN ID	 12 bits (low bits)
		 *
		 */
		veth_TCI = VLAN_DEV_INFO(dev)->vlan_id;
		veth_TCI |= vlan_dev_get_egress_qos_mask(dev, skb);

		vhdr->h_vlan_TCI = htons(veth_TCI);

		/*
		 *  Set the protocol type.
		 *  For a packet of type ETH_P_802_3 we put the length in here instead.
		 *  It is up to the 802.2 layer to carry protocol information.
		 */

		if (type != ETH_P_802_3) {
			vhdr->h_vlan_encapsulated_proto = htons(type);
		} else {
			vhdr->h_vlan_encapsulated_proto = htons(len);
		}
	}

	/* Before delegating work to the lower layer, enter our MAC-address */
	if (saddr == NULL)
		saddr = dev->dev_addr;

	dev = VLAN_DEV_INFO(dev)->real_dev;

	/* MPLS can send us skbuffs w/out enough space.	 This check will grow the
	 * skb if it doesn't have enough headroom.  Not a beautiful solution, so
	 * I'll tick a counter so that users can know it's happening...	 If they
	 * care...
	 */

	/* NOTE:  This may still break if the underlying device is not the final
	 * device (and thus there are more headers to add...)  It should work for
	 * good-ole-ethernet though.
	 */
	if (skb_headroom(skb) < dev->hard_header_len) {
		struct sk_buff *sk_tmp = skb;
		skb = skb_realloc_headroom(sk_tmp, dev->hard_header_len);
		kfree_skb(sk_tmp);
		if (skb == NULL) {
			struct net_device_stats *stats = vlan_dev_get_stats(vdev);
			stats->tx_dropped++;
			return -ENOMEM;
		}
		VLAN_DEV_INFO(vdev)->cnt_inc_headroom_on_tx++;
#ifdef VLAN_DEBUG
		printk(VLAN_DBG "%s: %s: had to grow skb.\n", __FUNCTION__, vdev->name);
#endif
	}

	if (build_vlan_header) {
		/* Now make the underlying real hard header */
		rc = dev->hard_header(skb, dev, ETH_P_8021Q, daddr, saddr, len + VLAN_HLEN);

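		/* Adjust the return value to include the VLAN header pushed above. */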
		if (rc > 0) {
			rc += VLAN_HLEN;
		} else if (rc < 0) {
			rc -= VLAN_HLEN;
		}
	} else {
		/* If here, then we'll just make a normal-looking Ethernet frame;
		 * the hard_start_xmit method will insert the tag (it has to be
		 * able to do this for bridged and other skbs that don't come
		 * down the protocol stack in an orderly manner).
		 */
		rc = dev->hard_header(skb, dev, type, daddr, saddr, len);
	}

	return rc;
}
static int ipip_output(struct xfrm_state *x, struct sk_buff *skb)
{
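	/* skb_network_offset() is negative when skb->data sits past the network
	 * header, so pushing by its negation moves skb->data back to the outer
	 * network header. */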
	skb_push(skb, -skb_network_offset(skb));
	return 0;
}
Example no. 10
0
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     struct ipv6_txoptions *opt)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl6->daddr;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl6->flowi6_proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int tclass = 0;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

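	/* Prepend the IPv6 header and mark it as the network header. */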
	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np) {
		tclass = np->tclass;
		hlimit = np->hop_limit;
	}
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl6->flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

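	/* The packet exceeds the path MTU and may not be fragmented here:
	 * report a Packet Too Big error back to ourselves. */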
	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}
Example no. 11
0
int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct ipv6_pinfo * np = sk ? &sk->net_pinfo.af_inet6 : NULL;
	struct in6_addr *first_hop = fl->nl_u.ip6_u.daddr;
	struct dst_entry *dst = skb->dst;
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit;

	if (opt) {
		int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + ((dst->dev->hard_header_len + 15)&~15);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			kfree_skb(skb);
			skb = skb2;
			if (skb == NULL)
				return -ENOBUFS;
			if (sk)
				skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	hdr = skb->nh.ipv6h = (struct ipv6hdr*)skb_push(skb, sizeof(struct ipv6hdr));

	/*
	 *	Fill in the IPv6 header
	 */

	*(u32*)hdr = htonl(0x60000000) | fl->fl6_flowlabel;
	hlimit = -1;
	if (np)
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ((struct rt6_info*)dst)->rt6i_hoplimit;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, fl->nl_u.ip6_u.saddr);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	if (skb->len <= dst->pmtu) {
		IP6_INC_STATS(Ip6OutRequests);
		return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, ip6_maybe_reroute);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, dst->pmtu, skb->dev);
	kfree_skb(skb);
	return -EMSGSIZE;
}
Example no. 12
0
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.base.dst;
	struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
	unsigned char proto = fl6->flowi6_proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
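	/* Chain the remaining queued skbs onto the head's frag_list and absorb
	 * their length and truesize. */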
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	ipv6_addr_copy(final_dst, &fl6->daddr);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32*)hdr = fl6->flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl6->saddr);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
Example no. 13
0
/*
 *	Check if this packet is complete.
 *	Returns -1 on failure for any reason, and the offset of the
 *	current nexthdr field in the reassembled frame on success.
 *
 *	It is called with locked fq, and caller must check that
 *	queue is eligible for reassembly i.e. it is not COMPLETE,
 *	the last and the first frames arrived and all the bits are here.
 */
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff **skb_in,
			  struct net_device *dev)
{
	struct sk_buff *fp, *head = fq->fragments;
	struct inet6_dev *idev;
	int    remove_fraghdr = 0;
	int    payload_len;
	int    nhoff;

	fq_kill(fq);

	BUG_TRAP(head != NULL);
	BUG_TRAP(FRAG6_CB(head)->offset == 0);

	idev = in6_dev_get(dev);

	/* Unfragmented part is taken from the first segment. */
	payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len;
	nhoff = head->h.raw - head->nh.raw;

	if (payload_len > 65535) {
		if (payload_len > 65535 + 8)
			goto out_oversize;
		remove_fraghdr = 1;
	}

#ifdef CONFIG_IPV6_IPSEC
	remove_fraghdr = 1;
#endif /* CONFIG_IPV6_IPSEC */

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_oom;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_shinfo(head)->frag_list) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_oom;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_shinfo(head)->frag_list = NULL;
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &ip6_frag_mem);
	}

	/* Normally we do not remove frag header from datagram, but
	 * we have to do this and to relocate header, when payload
	 * is > 65535-8. */
	if (remove_fraghdr) {
		nhoff = fq->nhoffset;
		head->nh.raw[nhoff] = head->h.raw[0];
		memmove(head->head+8, head->head, (head->data-head->head)-8);
		head->mac.raw += 8;
		head->nh.raw += 8;
		payload_len -= 8;
	} else {
		((struct frag_hdr*)head->h.raw)->frag_off = 0;
	}

	skb_shinfo(head)->frag_list = head->next;
	head->h.raw = head->data;
	skb_push(head, head->data - head->nh.raw);
	atomic_sub(head->truesize, &ip6_frag_mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_HW)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &ip6_frag_mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->stamp = fq->stamp;
	head->nh.ipv6h->payload_len = htons(payload_len);

	*skb_in = head;

	/* Yes, and fold redundant checksum back. 8) */
	if (head->ip_summed == CHECKSUM_HW)
		head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);

	IP6_INC_STATS_BH(idev,Ip6ReasmOKs);
	if (idev)
		in6_dev_put(idev);
	fq->fragments = NULL;
	return nhoff;

out_oversize:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: payload len = %d\n", payload_len);
	goto out_fail;
out_oom:
	if (net_ratelimit())
		printk(KERN_DEBUG "ip6_frag_reasm: no memory for reassembly\n");
out_fail:
	IP6_INC_STATS_BH(idev,Ip6ReasmFails);
	if (idev)
		in6_dev_put(idev);
	return -1;
}
Example no. 14
0
static int clip_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct clip_priv *clip_priv = PRIV(dev);
	struct atmarp_entry *entry;
	struct atm_vcc *vcc;
	int old;
	unsigned long flags;

	pr_debug("clip_start_xmit (skb %p)\n", skb);
	if (!skb->dst) {
		printk(KERN_ERR "clip_start_xmit: skb->dst == NULL\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	if (!skb->dst->neighbour) {
#if 0
		skb->dst->neighbour = clip_find_neighbour(skb->dst, 1);
		if (!skb->dst->neighbour) {
			dev_kfree_skb(skb);	/* lost that one */
			clip_priv->stats.tx_dropped++;
			return 0;
		}
#endif
		printk(KERN_ERR "clip_start_xmit: NO NEIGHBOUR !\n");
		dev_kfree_skb(skb);
		clip_priv->stats.tx_dropped++;
		return 0;
	}
	entry = NEIGH2ENTRY(skb->dst->neighbour);
	if (!entry->vccs) {
		if (time_after(jiffies, entry->expires)) {
			/* should be resolved */
			entry->expires = jiffies + ATMARP_RETRY_DELAY * HZ;
			to_atmarpd(act_need, PRIV(dev)->number, entry->ip);
		}
		if (entry->neigh->arp_queue.qlen < ATMARP_MAX_UNRES_PACKETS)
			skb_queue_tail(&entry->neigh->arp_queue, skb);
		else {
			dev_kfree_skb(skb);
			clip_priv->stats.tx_dropped++;
		}
		return 0;
	}
	pr_debug("neigh %p, vccs %p\n", entry, entry->vccs);
	ATM_SKB(skb)->vcc = vcc = entry->vccs->vcc;
	pr_debug("using neighbour %p, vcc %p\n", skb->dst->neighbour, vcc);
	if (entry->vccs->encap) {
		void *here;

		here = skb_push(skb, RFC1483LLC_LEN);
		memcpy(here, llc_oui, sizeof(llc_oui));
		((__be16 *) here)[3] = skb->protocol;
	}
	atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
	ATM_SKB(skb)->atm_options = vcc->atm_options;
	entry->vccs->last_use = jiffies;
	pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
	old = xchg(&entry->vccs->xoff, 1);	/* assume XOFF ... */
	if (old) {
		printk(KERN_WARNING "clip_start_xmit: XOFF->XOFF transition\n");
		return 0;
	}
	clip_priv->stats.tx_packets++;
	clip_priv->stats.tx_bytes += skb->len;
	vcc->send(vcc, skb);
	if (atm_may_send(vcc, 0)) {
		entry->vccs->xoff = 0;
		return 0;
	}
	spin_lock_irqsave(&clip_priv->xoff_lock, flags);
	netif_stop_queue(dev);	/* XOFF -> throttle immediately */
	barrier();
	if (!entry->vccs->xoff)
		netif_start_queue(dev);
	/* Oh, we just raced with clip_pop. netif_start_queue should be
	   good enough, because nothing should really be asleep because
	   of the brief netif_stop_queue. If this isn't true or if it
	   changes, use netif_wake_queue instead. */
	spin_unlock_irqrestore(&clip_priv->xoff_lock, flags);
	return 0;
}
Example no. 15
0
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
				sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
			(sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for padding,
	   copy it into a new expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

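	/* Prepend the SDIO mux header; pkt_len excludes the header and pad_len
	 * records the alignment padding appended below. */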
	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* the caller should allocate room for the header and padding;
	   the header is fine, the padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}
Example no. 16
0
/**
@ingroup tx_functions
This function dispatches the IP packets with the given VCID
to the target via the host h/w interface.
@return zero (success) or a negative value (failure)
*/
INT SetupNextSend(PMINI_ADAPTER Adapter, /**<Logical Adapter*/
					struct sk_buff *Packet, /**<data buffer*/
					USHORT Vcid)			/**<VCID for this packet*/
{
	int		status=0;
#ifdef GDMA_INTERFACE  
	int dontfree = 0;
#endif
	BOOLEAN bHeaderSupressionEnabled = FALSE;
	B_UINT16            uiClassifierRuleID;
	int QueueIndex = NO_OF_QUEUES + 1;
	B_UINT32 time_spent_on_host = 0 ;
	if(!Adapter || !Packet)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Got NULL Adapter or Packet");
		return -EINVAL;
	}
	if(Packet->len > MAX_DEVICE_DESC_SIZE)
	{
		status = STATUS_FAILURE;
		goto errExit;
	}
	
	/* Get the Classifier Rule ID */
	uiClassifierRuleID = *((UINT32*) (Packet->cb)+SKB_CB_CLASSIFICATION_OFFSET);
	QueueIndex = SearchVcid( Adapter,Vcid);
	if(QueueIndex < NO_OF_QUEUES)
	{
		bHeaderSupressionEnabled = 
			Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
		bHeaderSupressionEnabled = 
			bHeaderSupressionEnabled & Adapter->bPHSEnabled;
	}
	if(Adapter->device_removed)
	{
		status = STATUS_FAILURE;
		goto errExit;
	}
	
	status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID, bHeaderSupressionEnabled, 
							(UINT *)&Packet->len, Adapter->PackInfo[QueueIndex].bEthCSSupport);

	if(status != STATUS_SUCCESS)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "PHS Transmit failed..\n");
		goto errExit;
	}

	Leader.Vcid	= Vcid;
	
    if(TCP_ACK == *((UINT32*) (Packet->cb) + SKB_CB_TCPACK_OFFSET ))
	{
        BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Sending TCP ACK\n");
		Leader.Status = LEADER_STATUS_TCP_ACK;
	}
	else
	{
		Leader.Status = LEADER_STATUS;
	}
	
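	/* For Ethernet CS a full leader is prepended in front of the frame;
	 * otherwise the leader is written over the tail of the Ethernet header
	 * that is being stripped. */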
	if(Adapter->PackInfo[QueueIndex].bEthCSSupport)
	{
		Leader.PLength = Packet->len;
		if(skb_headroom(Packet) < LEADER_SIZE)
        {
			if((status = skb_cow(Packet,LEADER_SIZE)))
			{
				BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,"bcm_transmit : Failed To Increase headRoom\n");
				goto errExit;
			}
		}
		skb_push(Packet, LEADER_SIZE);
		memcpy(Packet->data, &Leader, LEADER_SIZE);
	}
	
	else	
	{
		Leader.PLength = Packet->len - ETH_HLEN;
		memcpy((LEADER*)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)), &Leader, LEADER_SIZE);
	}

	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Packet->len = %d", Packet->len);
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Vcid = %d", Vcid);

#ifndef BCM_SHM_INTERFACE
	status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter, 
			Packet->data, (Leader.PLength + LEADER_SIZE));
#else
	status = tx_pkts_to_firmware(Packet,Packet->len,0);
#endif
	if(status)
	{
		BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL, "Tx Failed..\n");
	}
	else
	{
		Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
		atomic_add(Leader.PLength, &Adapter->GoodTxByteCount);
		atomic_inc(&Adapter->TxTotalPacketCount);
#ifdef GDMA_INTERFACE  
    dontfree = 1;
#endif
	}


	
	time_spent_on_host = jiffies - *((UINT32*) (Packet->cb) + SKB_CB_LATENCY_OFFSET);
	BCM_DEBUG_PRINT(Adapter,DBG_TYPE_TX, TX_TIME_SPENT_IN_HOST, DBG_LVL_ALL, "TIME SPENT ON HOST :%#X \n",time_spent_on_host);
	atomic_dec(&Adapter->CurrNumFreeTxDesc);

errExit:

	if(STATUS_SUCCESS == status)
	{
		Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
		Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
		Adapter->PackInfo[QueueIndex].uiSentPackets++;
		Adapter->PackInfo[QueueIndex].NumOfPacketsSent++;

		atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount);
#ifdef BCM_SHM_INTERFACE
		if(atomic_read(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount) < 0)
		{
			atomic_set(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount, 0);
		}
#endif
		Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength;
	}
	

#ifdef GDMA_INTERFACE  
  if(!dontfree){
  	bcm_kfree_skb(Packet);
  }
#else
  	bcm_kfree_skb(Packet);
#endif  
	return status;
}
Example no. 17
0
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

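	/* Account the length, checksum and truesize of every remaining fragment
	 * into the head skb. */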
	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;
	IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);

	iph = ip_hdr(head);
	iph->tot_len = htons(len);
	iph->tos |= ecn;

	/* When we set IP_DF on a refragmented skb we must also force a
	 * call to ip_fragment to avoid forwarding a DF-skb of size s while
	 * original sender only sent fragments of size f (where f < s).
	 *
	 * We only set DF/IPSKB_FRAG_PMTU if such DF fragment was the largest
	 * frag seen to avoid sending tiny DF-fragments in case skb was built
	 * from one very small df-fragment and one large non-df frag.
	 */
	if (qp->max_df_size == qp->q.max_size) {
		IPCB(head)->flags |= IPSKB_FRAG_PMTU;
		iph->frag_off = htons(IP_DF);
	} else {
		iph->frag_off = 0;
	}

	ip_send_check(iph);

	__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	return 0;

out_nomem:
	net_dbg_ratelimited("queue_glue: no memory for gluing queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
out_fail:
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	return err;
}
Example no. 18
0
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr          = iph->daddr,
		.saddr          = iph->saddr,
		.flowlabel      = ip6_flowinfo(iph),
		.flowi6_mark    = skb->mark,
		.flowi6_proto   = iph->nexthdr,
		.flowi6_iif     = ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

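		/* Deliver a copy to any packet taps bound to the VRF device,
		 * restoring skb->data afterwards. */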
		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}
Example no. 19
0
int ath10k_htc_send(struct ath10k_htc *htc,
		    enum ath10k_htc_ep_id eid,
		    struct sk_buff *skb)
{
	struct ath10k *ar = htc->ar;
	struct ath10k_htc_ep *ep = &htc->endpoint[eid];
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
	struct ath10k_hif_sg_item sg_item;
	struct device *dev = htc->ar->dev;
	int credits = 0;
	int ret;

	if (htc->ar->state == ATH10K_STATE_WEDGED)
		return -ECOMM;

	if (eid >= ATH10K_HTC_EP_COUNT) {
		ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
		return -ENOENT;
	}

	skb_push(skb, sizeof(struct ath10k_htc_hdr));

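	/* With credit-based flow control, reserve enough TX credits for this
	 * frame before handing it to the HIF layer. */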
	if (ep->tx_credit_flow_enabled) {
		credits = DIV_ROUND_UP(skb->len, htc->target_credit_size);
		spin_lock_bh(&htc->tx_lock);
		if (ep->tx_credits < credits) {
			ath10k_dbg(ar, ATH10K_DBG_HTC,
				   "htc insufficient credits ep %d required %d available %d\n",
				   eid, credits, ep->tx_credits);
			spin_unlock_bh(&htc->tx_lock);
			ret = -EAGAIN;
			goto err_pull;
		}
		ep->tx_credits -= credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d consumed %d credits (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);
	}

	ath10k_htc_prepare_tx_skb(ep, skb);

	skb_cb->eid = eid;
	skb_cb->paddr = dma_map_single(dev, skb->data, skb->len, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, skb_cb->paddr);
	if (ret) {
		ret = -EIO;
		goto err_credits;
	}

	sg_item.transfer_id = ep->eid;
	sg_item.transfer_context = skb;
	sg_item.vaddr = skb->data;
	sg_item.paddr = skb_cb->paddr;
	sg_item.len = skb->len;

	ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
	if (ep->tx_credit_flow_enabled) {
		spin_lock_bh(&htc->tx_lock);
		ep->tx_credits += credits;
		ath10k_dbg(ar, ATH10K_DBG_HTC,
			   "htc ep %d reverted %d credits back (total %d)\n",
			   eid, credits, ep->tx_credits);
		spin_unlock_bh(&htc->tx_lock);

		if (ep->ep_ops.ep_tx_credits)
			ep->ep_ops.ep_tx_credits(htc->ar);
	}
err_pull:
	skb_pull(skb, sizeof(struct ath10k_htc_hdr));
	return ret;
}
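
The credit flow is the part worth calling out: each send is charged DIV_ROUND_UP(skb->len, target_credit_size) credits up front, and the same amount is handed back on the error paths. Below is a minimal user-space sketch of that reserve/refund pattern, with hypothetical struct and function names (not the ath10k API).

#include <stdio.h>
#include <stdbool.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

struct ep_credits {
	int available;		/* credits currently held by the endpoint */
	int credit_size;	/* bytes covered by one credit */
};

/* Try to reserve enough credits for a frame of 'len' bytes.
 * Returns true on success; the caller must refund on a later failure. */
static bool credits_reserve(struct ep_credits *ep, int len, int *charged)
{
	int need = DIV_ROUND_UP(len, ep->credit_size);

	if (ep->available < need)
		return false;
	ep->available -= need;
	*charged = need;
	return true;
}

static void credits_refund(struct ep_credits *ep, int charged)
{
	ep->available += charged;
}

int main(void)
{
	struct ep_credits ep = { .available = 4, .credit_size = 1792 };
	int charged = 0;

	if (credits_reserve(&ep, 2000, &charged))	/* 2000 bytes -> 2 credits */
		printf("charged %d credits, %d left\n", charged, ep.available);

	credits_refund(&ep, charged);			/* e.g. DMA mapping failed */
	printf("after refund: %d credits\n", ep.available);
	return 0;
}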
Example no. 20
0
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return 0;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return 0;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel;
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS use
		 * an incompatible format: the FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return 0;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return 0;
}
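
The encapsulation choice near the top of the function follows the usual 802.11 rule: AppleTalk AARP and IPX payloads get the Bridge-Tunnel SNAP header, any other EtherType of 0x600 or above gets RFC 1042, and smaller values are treated as raw 802.3 length fields. A self-contained sketch of just that selection (the helper name is illustrative):

#include <stdio.h>
#include <stdint.h>

#define ETH_P_AARP 0x80F3
#define ETH_P_IPX  0x8137

static const uint8_t rfc1042_header[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_header[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Returns the SNAP header to prepend, or NULL for raw (802.3 length) frames. */
static const uint8_t *pick_encaps(uint16_t ethertype)
{
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX)
		return bridge_tunnel_header;
	if (ethertype >= 0x600)
		return rfc1042_header;
	return NULL;
}

int main(void)
{
	printf("IPv4 (0x0800) -> %s\n",
	       pick_encaps(0x0800) == rfc1042_header ? "RFC1042" : "other");
	printf("IPX  (0x8137) -> %s\n",
	       pick_encaps(ETH_P_IPX) == bridge_tunnel_header ? "Bridge-Tunnel" : "other");
	return 0;
}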
Example no. 21
0
void p2020_get_from_tdmdir_and_put_to_ethernet(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *) =
		dev->netdev_ops->ndo_start_xmit;
	__be16 protocol = htons(ETH_P_IP);
	struct sk_buff *skb;
	unsigned char *data;
	__u8 *eth;
	u16 len;
	netdev_tx_t ret;

	/* Fetch the next packet from the TDM direct buffer (plain bytes) */
	len = get_tdmdir_packet_length();
	data = get_tdmdir_packet_data();

	skb = netdev_alloc_skb(dev, len);
	if (!skb) {
		dev->stats.rx_dropped++;
		priv->extra_stats.rx_skbmissing++;
		printk(KERN_ERR "%s: cannot allocate virtual Ethernet skb\n",
		       dev->name);
		return;
	}

	/* Copy the received packet into the skb, then prepend a 14-byte
	 * Ethernet header: dst/src addresses taken from the data, protocol
	 * forced to IPv4 */
	memcpy(skb->data, data, len);
	eth = (__u8 *) skb_push(skb, 14);
	memcpy(eth, data, 12);
	*(__be16 *) &eth[12] = protocol;
	skb_put(skb, len);

	/* Tell the stack what kind of packet this is */
	skb->protocol = eth_type_trans(skb, dev);

	/* Hand the rebuilt frame to the driver's transmit routine */
	ret = (*xmit)(skb, dev);
	switch (ret) {
	case NETDEV_TX_OK:
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
		break;
	case NETDEV_TX_BUSY:
		printk(KERN_DEBUG "mpcdrv: NETDEV_TX_BUSY\n");
		priv->extra_stats.kernel_dropped++;
		break;
	default:
		printk(KERN_DEBUG "mpcdrv: unexpected transmit result %d\n", ret);
		priv->extra_stats.kernel_dropped++;
		break;
	}
}
Example no. 22
0
/* process a frame handed over to us from linux network layer. First byte
   semantics as defined in Documentation/networking/x25-iface.txt
*/
static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
	unsigned char firstbyte = skb->data[0];
	enum wan_states *state = &((ix25_pdata_t *)cprot->proto_data)->state;
	int ret = 0;
	IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
		  MY_DEVNAME(cprot->net_dev), firstbyte, *state);
	switch (firstbyte) {
	case X25_IFACE_DATA:
		if (*state == WAN_CONNECTED) {
			skb_pull(skb, 1);
			cprot->net_dev->trans_start = jiffies;
			ret = (cprot->dops->data_req(cprot, skb));
			/* prepare for future retransmissions */
			if (ret) skb_push(skb, 1);
			return ret;
		}
		illegal_state_warn(*state, firstbyte);
		break;
	case X25_IFACE_CONNECT:
		if (*state == WAN_DISCONNECTED) {
			*state = WAN_CONNECTING;
			ret = cprot->dops->connect_req(cprot);
			if (ret) {
				/* reset state and notify upper layer about
				 * immediately failed attempts */
				isdn_x25iface_disconn_ind(cprot);
			}
		} else {
			illegal_state_warn(*state, firstbyte);
		}
		break;
	case X25_IFACE_DISCONNECT:
		switch (*state) {
		case WAN_DISCONNECTED:
			/* Should not happen. However, give the upper layer a
			   chance to recover from the inconsistency, but don't
			   trust the lower layer sending the disconn_confirm
			   when already disconnected */
			printk(KERN_WARNING "isdn_x25iface_xmit: disconnect "
			       " requested while disconnected\n");
			isdn_x25iface_disconn_ind(cprot);
			break; /* prevent infinite loops */
		case WAN_CONNECTING:
		case WAN_CONNECTED:
			*state = WAN_DISCONNECTED;
			cprot->dops->disconn_req(cprot);
			break;
		default:
			illegal_state_warn(*state, firstbyte);
		}
		break;
	case X25_IFACE_PARAMS:
		printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
		       " options not yet supported\n");
		break;
	default:
		printk(KERN_WARNING "isdn_x25iface_xmit: frame with illegal"
		       " first byte %x ignored:\n", firstbyte);
	}
	dev_kfree_skb(skb);
	return 0;
}
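
The dispatch above follows the one-byte convention from Documentation/networking/x25-iface.txt: the first octet of each skb says whether the rest is data or a connect/disconnect/parameter request. A minimal sketch of that mapping, using the values as documented there (the helper is illustrative):

#include <stdio.h>

/* First-byte meanings per Documentation/networking/x25-iface.txt */
enum x25_iface_byte {
	X25_IFACE_DATA       = 0x00,	/* payload follows, pull this byte off */
	X25_IFACE_CONNECT    = 0x01,	/* establish the LAPB connection */
	X25_IFACE_DISCONNECT = 0x02,	/* tear the connection down */
	X25_IFACE_PARAMS     = 0x03,	/* LAPB parameters (not supported above) */
};

static const char *x25_first_byte_name(unsigned char b)
{
	switch (b) {
	case X25_IFACE_DATA:       return "data";
	case X25_IFACE_CONNECT:    return "connect";
	case X25_IFACE_DISCONNECT: return "disconnect";
	case X25_IFACE_PARAMS:     return "params";
	default:                   return "illegal";
	}
}

int main(void)
{
	for (unsigned char b = 0; b < 5; b++)
		printf("0x%02x -> %s\n", b, x25_first_byte_name(b));
	return 0;
}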
Example no. 23
0
/**
 * Transmit a packet.
 * This is a helper function for ctcm_tx().
 *
 *  ch		Channel to be used for sending.
 *  skb		Pointer to struct sk_buff of packet to send.
 *            The linklevel header has already been set up
 *            by ctcm_tx().
 *
 * returns 0 on success, -ERRNO on failure.
 */
static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
{
	unsigned long saveflags;
	struct ll_header header;
	int rc = 0;
	__u16 block_len;
	int ccw_idx;
	struct sk_buff *nskb;
	unsigned long hi;

	/* we need to acquire the lock for testing the state
	 * otherwise we can have an IRQ changing the state to
	 * TXIDLE after the test but before acquiring the lock.
	 */
	spin_lock_irqsave(&ch->collect_lock, saveflags);
	if (fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) {
		int l = skb->len + LL_HEADER_LENGTH;

		if (ch->collect_len + l > ch->max_bufsize - 2) {
			spin_unlock_irqrestore(&ch->collect_lock, saveflags);
			return -EBUSY;
		} else {
			atomic_inc(&skb->users);
			header.length = l;
			header.type = skb->protocol;
			header.unused = 0;
			memcpy(skb_push(skb, LL_HEADER_LENGTH), &header,
			       LL_HEADER_LENGTH);
			skb_queue_tail(&ch->collect_queue, skb);
			ch->collect_len += l;
		}
		spin_unlock_irqrestore(&ch->collect_lock, saveflags);
		goto done;
	}
	spin_unlock_irqrestore(&ch->collect_lock, saveflags);
	/*
	 * Protect skb against being freed by upper
	 * layers.
	 */
	atomic_inc(&skb->users);
	ch->prof.txlen += skb->len;
	header.length = skb->len + LL_HEADER_LENGTH;
	header.type = skb->protocol;
	header.unused = 0;
	memcpy(skb_push(skb, LL_HEADER_LENGTH), &header, LL_HEADER_LENGTH);
	block_len = skb->len + 2;
	*((__u16 *)skb_push(skb, 2)) = block_len;

	/*
	 * IDAL support in CTCM is broken, so we have to
	 * care about skb's above 2G ourselves.
	 */
	hi = ((unsigned long)skb_tail_pointer(skb) + LL_HEADER_LENGTH) >> 31;
	if (hi) {
		nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
		if (!nskb) {
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		} else {
			memcpy(skb_put(nskb, skb->len), skb->data, skb->len);
			atomic_inc(&nskb->users);
			atomic_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			skb = nskb;
		}
	}

	ch->ccw[4].count = block_len;
	if (set_normalized_cda(&ch->ccw[4], skb->data)) {
		/*
		 * idal allocation failed, try via copying to
		 * trans_skb. trans_skb usually has a pre-allocated
		 * idal.
		 */
		if (ctcm_checkalloc_buffer(ch)) {
			/*
			 * Remove our header. It gets added
			 * again on retransmit.
			 */
			atomic_dec(&skb->users);
			skb_pull(skb, LL_HEADER_LENGTH + 2);
			ctcm_clear_busy(ch->netdev);
			return -ENOMEM;
		}

		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = skb->len;
		skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
		atomic_dec(&skb->users);
		dev_kfree_skb_irq(skb);
		ccw_idx = 0;
	} else {
		skb_queue_tail(&ch->io_queue, skb);
		ccw_idx = 3;
	}
	ch->retry = 0;
	fsm_newstate(ch->fsm, CTC_STATE_TX);
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	ch->prof.send_stamp = current_kernel_time(); /* xtime */
	rc = ccw_device_start(ch->cdev, &ch->ccw[ccw_idx],
					(unsigned long)ch, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (ccw_idx == 3)
		ch->prof.doios_single++;
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "single skb TX");
		if (ccw_idx == 3)
			skb_dequeue_tail(&ch->io_queue);
		/*
		 * Remove our header. It gets added
		 * again on retransmit.
		 */
		skb_pull(skb, LL_HEADER_LENGTH + 2);
	} else if (ccw_idx == 0) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
	}
done:
	ctcm_clear_busy(ch->netdev);
	return rc;
}
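
Before the CCW is started, the frame is wrapped twice: an ll_header is pushed in front of the payload, then a 16-bit block length that counts itself (skb->len + 2). The stand-alone sketch below builds the same layout on a plain buffer; the header field widths are a simplification, not the driver's exact ll_header definition.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Simplified stand-in for the link-level header pushed above. */
struct ll_header_sketch {
	uint16_t length;	/* payload length + this header */
	uint16_t type;		/* protocol, e.g. skb->protocol */
	uint16_t unused;
};

int main(void)
{
	unsigned char frame[256];
	const char payload[] = "hello";
	size_t plen = sizeof(payload) - 1;

	/* Build from the back: payload last, headers pushed in front,
	 * mirroring the two skb_push() calls in the function above. */
	unsigned char *p = frame + sizeof(frame) - plen;
	memcpy(p, payload, plen);

	struct ll_header_sketch hdr = {
		.length = (uint16_t)(plen + sizeof(hdr)),
		.type = 0x0800,
		.unused = 0,
	};
	p -= sizeof(hdr);
	memcpy(p, &hdr, sizeof(hdr));

	uint16_t block_len = (uint16_t)(sizeof(hdr) + plen + 2);
	p -= 2;
	memcpy(p, &block_len, 2);

	printf("block_len=%u (payload %zu + ll header %zu + 2)\n",
	       (unsigned)block_len, plen, sizeof(hdr));
	return 0;
}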
Example no. 24
0
static inline void
Memhscx_int_main(struct IsdnCardState *cs, u_char val)
{

	u_char exval;
	struct BCState *bcs;

	if (val & 0x01) { // EXB
		bcs = cs->bcs + 1;
		exval = MemReadHSCX(cs, 1, HSCX_EXIR);
		if (exval & 0x40) {
			if (bcs->mode == 1)
				Memhscx_fill_fifo(bcs);
			else {
				/* Here we lost a TX interrupt, so
				 * restart transmitting the whole frame.
				 */
				if (bcs->tx_skb) {
					skb_push(bcs->tx_skb, bcs->hw.hscx.count);
					bcs->tx_cnt += bcs->hw.hscx.count;
					bcs->hw.hscx.count = 0;
				}
				MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX B EXIR %x Lost TX", exval);
			}
		} else if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX B EXIR %x", exval);
	}
	if (val & 0xf8) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX B interrupt %x", val);
		Memhscx_interrupt(cs, val, 1);
	}
	if (val & 0x02) {	// EXA
		bcs = cs->bcs;
		exval = MemReadHSCX(cs, 0, HSCX_EXIR);
		if (exval & 0x40) {
			if (bcs->mode == L1_MODE_TRANS)
				Memhscx_fill_fifo(bcs);
			else {
				/* Here we lost a TX interrupt, so
				 * restart transmitting the whole frame.
				 */
				if (bcs->tx_skb) {
					skb_push(bcs->tx_skb, bcs->hw.hscx.count);
					bcs->tx_cnt += bcs->hw.hscx.count;
					bcs->hw.hscx.count = 0;
				}
				MemWriteHSCXCMDR(cs, bcs->hw.hscx.hscx, 0x01);
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "HSCX A EXIR %x Lost TX", exval);
			}
		} else if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX A EXIR %x", exval);
	}
	if (val & 0x04) {	// ICA
		exval = MemReadHSCX(cs, 0, HSCX_ISTA);
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "HSCX A interrupt %x", exval);
		Memhscx_interrupt(cs, exval, 0);
	}
}
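
Both the EXB and EXA branches recover from a lost transmit interrupt the same way: the bytes already fed to the controller (hw.hscx.count) are pushed back onto the front of tx_skb and the whole frame is sent again from the start. A tiny illustrative sketch of that rewind on a plain cursor (the names are hypothetical, not the HiSax API):

#include <stdio.h>

/* Illustrative transmit cursor: 'consumed' bytes of 'len' already fed
 * to the FIFO.  On a lost TX interrupt the driver rewinds to offset 0
 * and retransmits the whole frame, which is what skb_push(tx_skb, count)
 * accomplishes in the handler above. */
struct tx_cursor {
	int len;	/* total frame length */
	int consumed;	/* bytes already handed to the controller */
};

static void rewind_frame(struct tx_cursor *c)
{
	c->consumed = 0;
}

int main(void)
{
	struct tx_cursor c = { .len = 300, .consumed = 96 };

	printf("before: %d/%d sent\n", c.consumed, c.len);
	rewind_frame(&c);	/* lost interrupt detected -> start over */
	printf("after rewind: %d/%d sent\n", c.consumed, c.len);
	return 0;
}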