Example #1
0
int 
ipsec_sadb_cleanup(__u8 proto)
{
	int i;
	int error = 0;
	struct ipsec_sa *ips, **ipsprev, *tdbdel;
	char sa[SATOA_BUF];
	size_t sa_len;

	KLIPS_PRINT(debug_xform,
		    "klips_debug:ipsec_tdbcleanup: "
		    "cleaning up proto=%d.\n",
		    proto);

	spin_lock_bh(&tdb_lock);

	for (i = 0; i < SADB_HASHMOD; i++) {
		ipsprev = &(ipsec_sadb_hash[i]);
		ips = ipsec_sadb_hash[i];
		for(; ips;) {
			sa_len = satoa(ips->ips_said, 0, sa, SATOA_BUF);
			KLIPS_PRINT(debug_xform,
				    "klips_debug:ipsec_tdbcleanup: "
				    "checking SA:%s, hash=%d",
				    sa_len ? sa : " (error)",
				    i);
			tdbdel = ips;
			ips = tdbdel->ips_hnext;
			if(ips) {
				sa_len = satoa(ips->ips_said, 0, sa, SATOA_BUF);
				KLIPS_PRINT(debug_xform,
					    ", hnext=%s",
					    sa_len ? sa : " (error)");
			}
			if(*ipsprev) {
				sa_len = satoa((*ipsprev)->ips_said, 0, sa, SATOA_BUF);
				KLIPS_PRINT(debug_xform,
					    ", *ipsprev=%s",
					    sa_len ? sa : " (error)");
				if((*ipsprev)->ips_hnext) {
					sa_len = satoa((*ipsprev)->ips_hnext->ips_said, 0, sa, SATOA_BUF);
					KLIPS_PRINT(debug_xform,
						    ", *ipsprev->ips_hnext=%s",
						    sa_len ? sa : " (error)");
				}
			}
			KLIPS_PRINT(debug_xform,
				    ".\n");
			if(!proto || (proto == tdbdel->ips_said.proto)) {
				sa_len = satoa(tdbdel->ips_said, 0, sa, SATOA_BUF);
				KLIPS_PRINT(debug_xform,
					    "klips_debug:ipsec_tdbcleanup: "
					    "deleting SA chain:%s.\n",
					    sa_len ? sa : " (error)");
				if((error = ipsec_sa_delchain(tdbdel))) {
					SENDERR(-error);
				}
				ipsprev = &(ipsec_sadb_hash[i]);
				ips = ipsec_sadb_hash[i];

				KLIPS_PRINT(debug_xform,
					    "klips_debug:ipsec_tdbcleanup: "
					    "deleted SA chain:%s",
					    sa_len ? sa : " (error)");
				if(ips) {
					sa_len = satoa(ips->ips_said, 0, sa, SATOA_BUF);
					KLIPS_PRINT(debug_xform,
						    ", tdbh[%d]=%s",
						    i,
						    sa_len ? sa : " (error)");
				}
				if(*ipsprev) {
					sa_len = satoa((*ipsprev)->ips_said, 0, sa, SATOA_BUF);
					KLIPS_PRINT(debug_xform,
						    ", *ipsprev=%s",
						    sa_len ? sa : " (error)");
					if((*ipsprev)->ips_hnext) {
						sa_len = satoa((*ipsprev)->ips_hnext->ips_said, 0, sa, SATOA_BUF);
						KLIPS_PRINT(debug_xform,
							    ", *ipsprev->ips_hnext=%s",
							    sa_len ? sa : " (error)");
					}
				}
				KLIPS_PRINT(debug_xform,
					    ".\n");
			} else {
				ipsprev = &tdbdel;
			}
		}
	}
 errlab:

	spin_unlock_bh(&tdb_lock);

	return(error);
}
Example #2
0
static void
autofw_expect(struct nf_conn *ct, struct nf_conntrack_expect *exp)
{
	struct nf_nat_range pre_range;
	u_int32_t newdstip, newsrcip;
	u_int16_t port;
	int ret;
	struct nf_conn_help *help;
	struct nf_conn *exp_ct = exp->master;
	struct nf_conntrack_expect *newexp;
	int count;

	/* The expect has been removed from the expect list, but it isn't freed yet. */
	help = nfct_help(exp_ct);
	DEBUGP("autofw_nat_expected: got ");
	NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);

	spin_lock_bh(&nf_nat_autofw_lock);

	port = ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all);
	newdstip = exp->tuple.dst.u3.ip;
	newsrcip = exp->tuple.src.u3.ip;
	if (port < ntohs(help->help.ct_autofw_info.dport[0]) ||
		port > ntohs(help->help.ct_autofw_info.dport[1])) {
		spin_unlock_bh(&nf_nat_autofw_lock);
		return;
	}

	/* Only need to do PRE_ROUTING */
	port -= ntohs(help->help.ct_autofw_info.dport[0]);
	port += ntohs(help->help.ct_autofw_info.to[0]);
	pre_range.flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED;
	pre_range.min_ip = pre_range.max_ip = newdstip;
	pre_range.min.all = pre_range.max.all = htons(port);
	nf_nat_setup_info(ct, &pre_range, NF_IP_PRE_ROUTING);

	spin_unlock_bh(&nf_nat_autofw_lock);

	/* Add expect again */

	/* alloc will set exp->master = exp_ct */
	newexp = nf_conntrack_expect_alloc(exp_ct);
	if (!newexp)
		return;

	newexp->tuple.src.u3.ip = exp->tuple.src.u3.ip;
	newexp->tuple.dst.protonum = exp->tuple.dst.protonum;
	newexp->mask.src.u3.ip = 0xFFFFFFFF;
	newexp->mask.dst.protonum = 0xFF;

	newexp->tuple.dst.u3.ip = exp->tuple.dst.u3.ip;
	newexp->mask.dst.u3.ip = 0x0;

	for (count = 1; count < NF_CT_TUPLE_L3SIZE; count++) {
		newexp->tuple.src.u3.all[count] = 0x0;
		newexp->tuple.dst.u3.all[count] = 0x0;
	}

	newexp->mask.dst.u.all = 0x0;
	newexp->mask.src.u.all = 0x0;
	newexp->mask.src.l3num = 0x0;

	newexp->expectfn = autofw_expect;
	newexp->helper = NULL;
	newexp->flags = 0;

	/*
	 * exp->timeout.expires will be set to
	 * (jiffies + helper->timeout * HZ) when the expectation is inserted.
	 */
	ret = nf_conntrack_expect_related(newexp);
	if (ret == 0)
		nf_conntrack_expect_put(newexp);
}
Example #3
0
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
				   &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}
Example #4
0
void dev_mc_upload(struct net_device *dev)
{
    spin_lock_bh(&dev->xmit_lock);
    __dev_mc_upload(dev);
    spin_unlock_bh(&dev->xmit_lock);
}
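
Example #4 is the smallest instance of the locking idiom shared by these snippets: spin_lock_bh() takes the lock and disables softirqs on the local CPU, so data that is also touched from bottom-half context stays consistent. Below is a minimal sketch of the same idiom; every identifier in it is hypothetical, not taken from the examples.

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock */
static int example_counter;		/* hypothetical shared state */

static void example_update(int delta)
{
	spin_lock_bh(&example_lock);	/* also blocks local softirqs */
	example_counter += delta;	/* keep the critical section short */
	spin_unlock_bh(&example_lock);
}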
Example #5
0
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char *optval, int optlen)
{
	struct sock *sk=sock->sk;
#ifdef CONFIG_FILTER
	struct sk_filter *filter;
#endif
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;
	
	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	switch(optname)
	{
		case SO_DONTLINGER:
			sk->linger=0;
			return 0;
	}
#endif	
		
  	if(optlen<sizeof(int))
  		return(-EINVAL);
  	
	if (get_user(val, (int *)optval))
		return -EFAULT;
	
  	valbool = val?1:0;

	lock_sock(sk);

  	switch(optname) 
  	{
		case SO_DEBUG:	
			if(val && !capable(CAP_NET_ADMIN))
			{
				ret = -EACCES;
			}
			else
				sk->debug=valbool;
			break;
		case SO_REUSEADDR:
			sk->reuse = valbool;
			break;
		case SO_TYPE:
		case SO_ERROR:
			ret = -ENOPROTOOPT;
		  	break;
		case SO_DONTROUTE:
			sk->localroute=valbool;
			break;
		case SO_BROADCAST:
			sk->broadcast=valbool;
			break;
		case SO_SNDBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			   
			if (val > sysctl_wmem_max)
				val = sysctl_wmem_max;

			sk->userlocks |= SOCK_SNDBUF_LOCK;
			if ((val * 2) < SOCK_MIN_SNDBUF)
				sk->sndbuf = SOCK_MIN_SNDBUF;
			else
				sk->sndbuf = (val * 2);

			/*
			 *	Wake up sending tasks if we
			 *	upped the value.
			 */
			sk->write_space(sk);
			break;

		case SO_RCVBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			  
			if (val > sysctl_rmem_max)
				val = sysctl_rmem_max;

			sk->userlocks |= SOCK_RCVBUF_LOCK;
			/* FIXME: is this lower bound the right one? */
			if ((val * 2) < SOCK_MIN_RCVBUF)
				sk->rcvbuf = SOCK_MIN_RCVBUF;
			else
				sk->rcvbuf = (val * 2);
			break;

		case SO_KEEPALIVE:
#ifdef CONFIG_INET
			if (sk->protocol == IPPROTO_TCP)
			{
				tcp_set_keepalive(sk, valbool);
			}
#endif
			sk->keepopen = valbool;
			break;

	 	case SO_OOBINLINE:
			sk->urginline = valbool;
			break;

	 	case SO_NO_CHECK:
			sk->no_check = valbool;
			break;

		case SO_PRIORITY:
			if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 
				sk->priority = val;
			else
				ret = -EPERM;
			break;

		case SO_LINGER:
			if(optlen<sizeof(ling)) {
				ret = -EINVAL;	/* 1003.1g */
				break;
			}
			if (copy_from_user(&ling,optval,sizeof(ling))) {
				ret = -EFAULT;
				break;
			}
			if(ling.l_onoff==0) {
				sk->linger=0;
			} else {
#if (BITS_PER_LONG == 32)
				if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
					sk->lingertime=MAX_SCHEDULE_TIMEOUT;
				else
#endif
					sk->lingertime=ling.l_linger*HZ;
				sk->linger=1;
			}
			break;

		case SO_BSDCOMPAT:
			sk->bsdism = valbool;
			break;

		case SO_PASSCRED:
			sock->passcred = valbool;
			break;

		case SO_TIMESTAMP:
			sk->rcvtstamp = valbool;
			break;

		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->rcvlowat = val ? : 1;
			break;

		case SO_RCVTIMEO:
			ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen);
			break;

		case SO_SNDTIMEO:
			ret = sock_set_timeout(&sk->sndtimeo, optval, optlen);
			break;

#ifdef CONFIG_NETDEVICES
		case SO_BINDTODEVICE:
		{
			char devname[IFNAMSIZ]; 

			/* Sorry... */ 
			if (!capable(CAP_NET_RAW)) {
				ret = -EPERM;
				break;
			}

			/* Bind this socket to a particular device like "eth0",
			 * as specified in the passed interface name. If the
			 * name is "" or the option length is zero the socket 
			 * is not bound. 
			 */ 

			if (!valbool) {
				sk->bound_dev_if = 0;
			} else {
				if (optlen > IFNAMSIZ) 
					optlen = IFNAMSIZ; 
				if (copy_from_user(devname, optval, optlen)) {
					ret = -EFAULT;
					break;
				}

				/* Remove any cached route for this socket. */
				sk_dst_reset(sk);

				if (devname[0] == '\0') {
					sk->bound_dev_if = 0;
				} else {
					struct net_device *dev = dev_get_by_name(devname);
					if (!dev) {
						ret = -ENODEV;
						break;
					}
					sk->bound_dev_if = dev->ifindex;
					dev_put(dev);
				}
			}
			break;
		}
#endif


#ifdef CONFIG_FILTER
		case SO_ATTACH_FILTER:
			ret = -EINVAL;
			if (optlen == sizeof(struct sock_fprog)) {
				struct sock_fprog fprog;

				ret = -EFAULT;
				if (copy_from_user(&fprog, optval, sizeof(fprog)))
					break;

				ret = sk_attach_filter(&fprog, sk);
			}
			break;

		case SO_DETACH_FILTER:
			spin_lock_bh(&sk->lock.slock);
			filter = sk->filter;
			if (filter) {
				sk->filter = NULL;
				spin_unlock_bh(&sk->lock.slock);
				sk_filter_release(sk, filter);
				break;
			}
			spin_unlock_bh(&sk->lock.slock);
			ret = -ENONET;
			break;
#endif
		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
		default:
		  	ret = -ENOPROTOOPT;
			break;
  	}
	release_sock(sk);
	return ret;
}
Example #6
0
static int mipi_dsi_off(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	struct msm_panel_info *pinfo;

	mfd = platform_get_drvdata(pdev);
	pinfo = &mfd->panel_info;

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		htc_mdp_sem_down(current, &mfd->dma->mutex);

	mdp4_overlay_dsi_state_set(ST_DSI_SUSPEND);

	mipi_dsi_clk_cfg(1);

	
	mipi_dsi_cmd_mdp_busy();

	mipi_dsi_op_mode_config(DSI_CMD_MODE);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (MDP_REV_303 != mdp_rev)
					gpio_free(vsync_gpio);
			}
			mipi_dsi_set_tear_off(mfd);
		}
	}

	if (panel_type != PANEL_ID_PROTOU_LG && panel_type != PANEL_ID_PROTODCG_LG)
		ret = panel_next_off(pdev);

#ifdef CONFIG_MSM_BUS_SCALING
	mdp_bus_scale_update_request(0);
#endif

	spin_lock_bh(&dsi_clk_lock);
	mipi_dsi_clk_disable();

	
	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, 0);

	mipi_dsi_phy_ctrl(0);

	mipi_dsi_ahb_ctrl(0);
	spin_unlock_bh(&dsi_clk_lock);

	mipi_dsi_unprepare_clocks();
	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(0);

	if (mdp_rev >= MDP_REV_41)
		mutex_unlock(&mfd->dma->ov_mutex);
	else
		htc_mdp_sem_up(&mfd->dma->mutex);

	pr_debug("%s-:\n", __func__);

	return ret;
}
Example #7
0
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
    spin_lock_bh(&linkfail_lock);
    hlist_del_init(&lf->lf_node);
    spin_unlock_bh(&linkfail_lock);
}
Example #8
0
static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
{
	struct ath_softc *sc = hw->priv;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ath_buf *bf;
	struct ath_vif *avp;
	struct sk_buff *skb;
	struct ath_txq *cabq;
	struct ieee80211_tx_info *info;
	int cabq_depth;

	ath9k_reset_beacon_status(sc);

	avp = (void *)vif->drv_priv;
	cabq = sc->beacon.cabq;

	if ((avp->av_bcbuf == NULL) || !avp->is_bslot_active)
		return NULL;

	/* Release the old beacon first */

	bf = avp->av_bcbuf;
	skb = bf->bf_mpdu;
	if (skb) {
		dma_unmap_single(sc->dev, bf->bf_buf_addr,
				 skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
		bf->bf_buf_addr = 0;
	}

	/* Get a new beacon from mac80211 */

	skb = ieee80211_beacon_get(hw, vif);
	bf->bf_mpdu = skb;
	if (skb == NULL)
		return NULL;
	((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
		avp->tsf_adjust;

	info = IEEE80211_SKB_CB(skb);
	if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
		/*
		 * TODO: make sure the seq# gets assigned properly (vs. other
		 * TX frames)
		 */
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
		sc->tx.seq_no += 0x10;
		hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
		hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
	}

	bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
					 skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
		dev_kfree_skb_any(skb);
		bf->bf_mpdu = NULL;
		bf->bf_buf_addr = 0;
		ath_err(common, "dma_mapping_error on beaconing\n");
		return NULL;
	}

	skb = ieee80211_get_buffered_bc(hw, vif);

	/*
	 * if the CABQ traffic from previous DTIM is pending and the current
	 *  beacon is also a DTIM.
	 *  1) if there is only one vif let the cab traffic continue.
	 *  2) if there are more than one vif and we are using staggered
	 *     beacons, then drain the cabq by dropping all the frames in
	 *     the cabq so that the current vifs cab traffic can be scheduled.
	 */
	spin_lock_bh(&cabq->axq_lock);
	cabq_depth = cabq->axq_depth;
	spin_unlock_bh(&cabq->axq_lock);

	if (skb && cabq_depth) {
		if (sc->nvifs > 1) {
			ath_dbg(common, ATH_DBG_BEACON,
				"Flushing previous cabq traffic\n");
			ath_draintxq(sc, cabq, false);
		}
	}

	ath_beacon_setup(sc, avp, bf, info->control.rates[0].idx);

	while (skb) {
		ath_tx_cabq(hw, skb);
		skb = ieee80211_get_buffered_bc(hw, vif);
	}

	return bf;
}
Example #9
0
/*
 * Returns true if the MPDU was buffered/dropped, false if it should be passed
 * to the upper layer.
 */
static bool iwl_mvm_reorder(struct iwl_mvm *mvm,
			    struct napi_struct *napi,
			    int queue,
			    struct ieee80211_sta *sta,
			    struct sk_buff *skb,
			    struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	struct iwl_mvm_sta *mvm_sta;
	struct iwl_mvm_baid_data *baid_data;
	struct iwl_mvm_reorder_buffer *buffer;
	struct sk_buff *tail;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	bool last_subframe =
		desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;
	u8 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
	u8 sub_frame_idx = desc->amsdu_info &
			   IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
	int index;
	u16 nssn, sn;
	u8 baid;

	baid = (reorder & IWL_RX_MPDU_REORDER_BAID_MASK) >>
		IWL_RX_MPDU_REORDER_BAID_SHIFT;

	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return false;

	/* no sta yet */
	if (WARN_ON(IS_ERR_OR_NULL(sta)))
		return false;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return false;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return false;

	baid_data = rcu_dereference(mvm->baid_map[baid]);
	if (WARN(!baid_data,
		 "Received baid %d, but no data exists for this BAID\n", baid))
		return false;
	if (WARN(tid != baid_data->tid || mvm_sta->sta_id != baid_data->sta_id,
		 "baid 0x%x is mapped to sta:%d tid:%d, but was received for sta:%d tid:%d\n",
		 baid, baid_data->sta_id, baid_data->tid, mvm_sta->sta_id,
		 tid))
		return false;

	nssn = reorder & IWL_RX_MPDU_REORDER_NSSN_MASK;
	sn = (reorder & IWL_RX_MPDU_REORDER_SN_MASK) >>
		IWL_RX_MPDU_REORDER_SN_SHIFT;

	buffer = &baid_data->reorder_buf[queue];

	spin_lock_bh(&buffer->lock);

	/*
	 * If there was a significant jump in the nssn - adjust.
	 * If the SN is smaller than the NSSN it might need to first go into
	 * the reorder buffer, in which case we just release up to it and the
	 * rest of the function will take care of storing it and releasing up to the
	 * nssn
	 */
	if (!iwl_mvm_is_sn_less(nssn, buffer->head_sn + buffer->buf_size,
				buffer->buf_size)) {
		u16 min_sn = ieee80211_sn_less(sn, nssn) ? sn : nssn;

		iwl_mvm_release_frames(mvm, sta, napi, buffer, min_sn);
	}

	/* drop any outdated packets */
	if (ieee80211_sn_less(sn, buffer->head_sn))
		goto drop;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (iwl_mvm_is_sn_less(buffer->head_sn, nssn,
				       buffer->buf_size) &&
		   (!amsdu || last_subframe))
			buffer->head_sn = nssn;
		/* No need to update AMSDU last SN - we are moving the head */
		spin_unlock_bh(&buffer->lock);
		return false;
	}

	index = sn % buffer->buf_size;

	/*
	 * Check if we have already stored this frame.
	 * Since an A-MSDU is either received in full or not at all, the logic
	 * is simple: if we have frames in that position in the buffer and the
	 * last frame originated from an A-MSDU with a different SN, then it is
	 * a retransmission. If the SN is the same, then an incrementing
	 * subframe index means it is the same A-MSDU - otherwise it is a
	 * retransmission.
	 */
	tail = skb_peek_tail(&buffer->entries[index]);
	if (tail && !amsdu)
		goto drop;
	else if (tail && (sn != buffer->last_amsdu ||
			  buffer->last_sub_index >= sub_frame_idx))
		goto drop;

	/* put in reorder buffer */
	__skb_queue_tail(&buffer->entries[index], skb);
	buffer->num_stored++;
	buffer->reorder_time[index] = jiffies;

	if (amsdu) {
		buffer->last_amsdu = sn;
		buffer->last_sub_index = sub_frame_idx;
	}

	/*
	 * We cannot trust NSSN for AMSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames arrive.
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mvm_release_frames(mvm, sta, napi, buffer, nssn);

	spin_unlock_bh(&buffer->lock);
	return true;

drop:
	kfree_skb(skb);
	spin_unlock_bh(&buffer->lock);
	return true;
}
Example #10
0
static int
ip6mr_cache_unresolved(mifi_t mifi, struct sk_buff *skb)
{
	int err;
	struct mfc6_cache *c;

	spin_lock_bh(&mfc_unres_lock);
	for (c = mfc_unres_queue; c; c = c->next) {
		if (ipv6_addr_equal(&c->mf6c_mcastgrp, &ipv6_hdr(skb)->daddr) &&
		    ipv6_addr_equal(&c->mf6c_origin, &ipv6_hdr(skb)->saddr))
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len) >= 10 ||
		    (c = ip6mr_cache_alloc_unres()) == NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mf6c_parent = -1;
		c->mf6c_origin = ipv6_hdr(skb)->saddr;
		c->mf6c_mcastgrp = ipv6_hdr(skb)->daddr;

		/*
		 *	Reflect first query at pim6sd
		 */
		if ((err = ip6mr_cache_report(skb, mifi, MRT6MSG_NOCACHE)) < 0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		ipmr_do_expire_process(1);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen > 3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved, skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
Example #11
0
static void
brcms_ops_bss_info_changed(struct ieee80211_hw *hw,
                           struct ieee80211_vif *vif,
                           struct ieee80211_bss_conf *info, u32 changed)
{
    struct brcms_info *wl = hw->priv;
    struct bcma_device *core = wl->wlc->hw->d11core;

    if (changed & BSS_CHANGED_ASSOC) {
        /* association status changed (associated/disassociated)
         * also implies a change in the AID.
         */
        brcms_err(core, "%s: %s: %sassociated\n", KBUILD_MODNAME,
                  __func__, info->assoc ? "" : "dis");
        spin_lock_bh(&wl->lock);
        brcms_c_associate_upd(wl->wlc, info->assoc);
        spin_unlock_bh(&wl->lock);
    }
    if (changed & BSS_CHANGED_ERP_SLOT) {
        s8 val;

        /* slot timing changed */
        if (info->use_short_slot)
            val = 1;
        else
            val = 0;
        spin_lock_bh(&wl->lock);
        brcms_c_set_shortslot_override(wl->wlc, val);
        spin_unlock_bh(&wl->lock);
    }

    if (changed & BSS_CHANGED_HT) {
        /* 802.11n parameters changed */
        u16 mode = info->ht_operation_mode;

        spin_lock_bh(&wl->lock);
        brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_CFG,
                               mode & IEEE80211_HT_OP_MODE_PROTECTION);
        brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_NONGF,
                               mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
        brcms_c_protection_upd(wl->wlc, BRCMS_PROT_N_OBSS,
                               mode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT);
        spin_unlock_bh(&wl->lock);
    }
    if (changed & BSS_CHANGED_BASIC_RATES) {
        struct ieee80211_supported_band *bi;
        u32 br_mask, i;
        u16 rate;
        struct brcm_rateset rs;
        int error;

        /* retrieve the current rates */
        spin_lock_bh(&wl->lock);
        brcms_c_get_current_rateset(wl->wlc, &rs);
        spin_unlock_bh(&wl->lock);

        br_mask = info->basic_rates;
        bi = hw->wiphy->bands[brcms_c_get_curband(wl->wlc)];
        for (i = 0; i < bi->n_bitrates; i++) {
            /* convert to internal rate value */
            rate = (bi->bitrates[i].bitrate << 1) / 10;

            /* set/clear basic rate flag */
            brcms_set_basic_rate(&rs, rate, br_mask & 1);
            br_mask >>= 1;
        }

        /* update the rate set */
        spin_lock_bh(&wl->lock);
        error = brcms_c_set_rateset(wl->wlc, &rs);
        spin_unlock_bh(&wl->lock);
        if (error)
            brcms_err(core, "changing basic rates failed: %d\n",
                      error);
    }
    if (changed & BSS_CHANGED_BEACON_INT) {
        /* Beacon interval changed */
        spin_lock_bh(&wl->lock);
        brcms_c_set_beacon_period(wl->wlc, info->beacon_int);
        spin_unlock_bh(&wl->lock);
    }
    if (changed & BSS_CHANGED_BSSID) {
        /* BSSID changed, for whatever reason (IBSS and managed mode) */
        spin_lock_bh(&wl->lock);
        brcms_c_set_addrmatch(wl->wlc, RCM_BSSID_OFFSET, info->bssid);
        spin_unlock_bh(&wl->lock);
    }
    if (changed & BSS_CHANGED_BEACON)
        /* Beacon data changed, retrieve new beacon (beaconing modes) */
        brcms_err(core, "%s: beacon changed\n", __func__);

    if (changed & BSS_CHANGED_BEACON_ENABLED) {
        /* Beaconing should be enabled/disabled (beaconing modes) */
        brcms_err(core, "%s: Beacon enabled: %s\n", __func__,
                  info->enable_beacon ? "true" : "false");
    }

    if (changed & BSS_CHANGED_CQM) {
        /* Connection quality monitor config changed */
        brcms_err(core, "%s: cqm change: threshold %d, hys %d "
                  " (implement)\n", __func__, info->cqm_rssi_thold,
                  info->cqm_rssi_hyst);
    }

    if (changed & BSS_CHANGED_IBSS) {
        /* IBSS join status changed */
        brcms_err(core, "%s: IBSS joined: %s (implement)\n",
                  __func__, info->ibss_joined ? "true" : "false");
    }

    if (changed & BSS_CHANGED_ARP_FILTER) {
        /* Hardware ARP filter address list or state changed */
        brcms_err(core, "%s: arp filtering: enabled %s, count %d"
                  " (implement)\n", __func__, info->arp_filter_enabled ?
                  "true" : "false", info->arp_addr_cnt);
    }

    if (changed & BSS_CHANGED_QOS) {
        /*
         * QoS for this association was enabled/disabled.
         * Note that it is only ever disabled for station mode.
         */
        brcms_err(core, "%s: qos enabled: %s (implement)\n",
                  __func__, info->qos ? "true" : "false");
    }
    return;
}
Example #12
0
static int
tcf_ipt_init(struct rtattr *rta, struct rtattr *est, struct tc_action *a,
             int ovr, int bind)
{
	struct rtattr *tb[TCA_IPT_MAX];
	struct tcf_ipt *p;
	struct ipt_entry_target *td, *t;
	char *tname;
	int ret = 0, err;
	u32 hook = 0;
	u32 index = 0;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_IPT_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_IPT_HOOK-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_IPT_HOOK-1]) < sizeof(u32))
		return -EINVAL;
	if (tb[TCA_IPT_TARG-1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_IPT_TARG-1]) < sizeof(*t))
		return -EINVAL;
	td = (struct ipt_entry_target *)RTA_DATA(tb[TCA_IPT_TARG-1]);
	if (RTA_PAYLOAD(tb[TCA_IPT_TARG-1]) < td->u.target_size)
		return -EINVAL;

	if (tb[TCA_IPT_INDEX-1] != NULL &&
	    RTA_PAYLOAD(tb[TCA_IPT_INDEX-1]) >= sizeof(u32))
		index = *(u32 *)RTA_DATA(tb[TCA_IPT_INDEX-1]);

	p = tcf_hash_check(index, a, ovr, bind);
	if (p == NULL) {
		p = tcf_hash_create(index, est, a, sizeof(*p), ovr, bind);
		if (p == NULL)
			return -ENOMEM;
		ret = ACT_P_CREATED;
	} else {
		if (!ovr) {
			tcf_ipt_release(p, bind);
			return -EEXIST;
		}
	}

	hook = *(u32 *)RTA_DATA(tb[TCA_IPT_HOOK-1]);

	err = -ENOMEM;
	tname = kmalloc(IFNAMSIZ, GFP_KERNEL);
	if (tname == NULL)
		goto err1;
	if (tb[TCA_IPT_TABLE - 1] == NULL ||
	    rtattr_strlcpy(tname, tb[TCA_IPT_TABLE-1], IFNAMSIZ) >= IFNAMSIZ)
		strcpy(tname, "mangle");

	t = kmalloc(td->u.target_size, GFP_KERNEL);
	if (t == NULL)
		goto err2;
	memcpy(t, td, td->u.target_size);

	if ((err = ipt_init_target(t, tname, hook)) < 0)
		goto err3;

	spin_lock_bh(&p->lock);
	if (ret != ACT_P_CREATED) {
		ipt_destroy_target(p->t);
		kfree(p->tname);
		kfree(p->t);
	}
	p->tname = tname;
	p->t     = t;
	p->hook  = hook;
	spin_unlock_bh(&p->lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(p);
	return ret;

err3:
	kfree(t);
err2:
	kfree(tname);
err1:
	kfree(p);
	return err;
}
Example #13
0
static int tcf_pedit_init(struct rtattr *rta, struct rtattr *est,
			  struct tc_action *a, int ovr, int bind)
{
	struct rtattr *tb[TCA_PEDIT_MAX];
	struct tc_pedit *parm;
	int ret = 0;
	struct tcf_pedit *p;
	struct tcf_common *pc;
	struct tc_pedit_key *keys = NULL;
	int ksize;

	if (rta == NULL || rtattr_parse_nested(tb, TCA_PEDIT_MAX, rta) < 0)
		return -EINVAL;

	if (tb[TCA_PEDIT_PARMS - 1] == NULL ||
	    RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm))
		return -EINVAL;
	parm = RTA_DATA(tb[TCA_PEDIT_PARMS-1]);
	ksize = parm->nkeys * sizeof(struct tc_pedit_key);
	if (RTA_PAYLOAD(tb[TCA_PEDIT_PARMS-1]) < sizeof(*parm) + ksize)
		return -EINVAL;

	pc = tcf_hash_check(parm->index, a, bind, &pedit_hash_info);
	if (!pc) {
		if (!parm->nkeys)
			return -EINVAL;
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &pedit_idx_gen, &pedit_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		p = to_pedit(pc);
		keys = kmalloc(ksize, GFP_KERNEL);
		if (keys == NULL) {
			kfree(pc);
			return -ENOMEM;
		}
		ret = ACT_P_CREATED;
	} else {
		p = to_pedit(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &pedit_hash_info);
			return -EEXIST;
		}
		if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
			keys = kmalloc(ksize, GFP_KERNEL);
			if (keys == NULL)
				return -ENOMEM;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->tcfp_flags = parm->flags;
	p->tcf_action = parm->action;
	if (keys) {
		kfree(p->tcfp_keys);
		p->tcfp_keys = keys;
		p->tcfp_nkeys = parm->nkeys;
	}
	memcpy(p->tcfp_keys, parm->keys, ksize);
	spin_unlock_bh(&p->tcf_lock);
	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &pedit_hash_info);
	return ret;
}
Example #14
0
static int help(struct sk_buff *skb,
		unsigned int protoff,
		struct nf_conn *ct,
		enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff, datalen;
	struct tcphdr _tcph, *th;
	char *sb_ptr;
	int ret = NF_ACCEPT;
	int dir = CTINFO2DIR(ctinfo);
	struct nf_ct_sane_master *ct_sane_info;
	struct nf_conntrack_expect *exp;
	struct nf_conntrack_tuple *tuple;
	struct sane_request *req;
	struct sane_reply_net_start *reply;
	int family = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num;

	ct_sane_info = &nfct_help(ct)->help.ct_sane_info;
	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED &&
	    ctinfo != IP_CT_ESTABLISHED_REPLY)
		return NF_ACCEPT;

	/* Not a full tcp header? */
	th = skb_header_pointer(skb, protoff, sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = protoff + th->doff * 4;
	if (dataoff >= skb->len)
		return NF_ACCEPT;

	datalen = skb->len - dataoff;

	spin_lock_bh(&nf_sane_lock);
	sb_ptr = skb_header_pointer(skb, dataoff, datalen, sane_buffer);
	BUG_ON(sb_ptr == NULL);

	if (dir == IP_CT_DIR_ORIGINAL) {
		if (datalen != sizeof(struct sane_request))
			goto out;

		req = (struct sane_request *)sb_ptr;
		if (req->RPC_code != htonl(SANE_NET_START)) {
			/* Not an interesting command */
			ct_sane_info->state = SANE_STATE_NORMAL;
			goto out;
		}

		/* We're interested in the next reply */
		ct_sane_info->state = SANE_STATE_START_REQUESTED;
		goto out;
	}

	/* Is it a reply to an uninteresting command? */
	if (ct_sane_info->state != SANE_STATE_START_REQUESTED)
		goto out;

	/* It's a reply to SANE_NET_START. */
	ct_sane_info->state = SANE_STATE_NORMAL;

	if (datalen < sizeof(struct sane_reply_net_start)) {
		pr_debug("nf_ct_sane: NET_START reply too short\n");
		goto out;
	}

	reply = (struct sane_reply_net_start *)sb_ptr;
	if (reply->status != htonl(SANE_STATUS_SUCCESS)) {
		/* saned refused the command */
		pr_debug("nf_ct_sane: unsuccessful SANE_STATUS = %u\n",
			 ntohl(reply->status));
		goto out;
	}

	/* Invalid saned reply? Ignore it. */
	if (reply->zero != 0)
		goto out;

	exp = nf_conntrack_expect_alloc(ct);
	if (exp == NULL) {
		ret = NF_DROP;
		goto out;
	}

	tuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
	nf_conntrack_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, family,
				 &tuple->src.u3, &tuple->dst.u3,
				 IPPROTO_TCP, NULL, &reply->port);

	pr_debug("nf_ct_sane: expect: ");
	NF_CT_DUMP_TUPLE(&exp->tuple);

	/* Can't expect this?  Best to drop packet now. */
	if (nf_conntrack_expect_related(exp) != 0)
		ret = NF_DROP;

	nf_conntrack_expect_put(exp);

out:
	spin_unlock_bh(&nf_sane_lock);
	return ret;
}
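
The helper above funnels every exit through the out: label so a single spin_unlock_bh() covers all return paths while the shared sane_buffer is in use. Below is a hedged sketch of that single-unlock-point shape; the buffer, lock, and function names are invented for illustration.

static DEFINE_SPINLOCK(scratch_lock);	/* hypothetical */
static char scratch_buf[64];		/* hypothetical shared scratch buffer */

static int copy_into_scratch(const char *src, size_t len)
{
	int ret = 0;

	spin_lock_bh(&scratch_lock);
	if (len >= sizeof(scratch_buf)) {
		ret = -EINVAL;
		goto out;		/* error path still hits the unlock */
	}
	memcpy(scratch_buf, src, len);
	scratch_buf[len] = '\0';
out:
	spin_unlock_bh(&scratch_lock);
	return ret;
}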
Example #15
0
int esp6_output(struct sk_buff *skb)
{
	int err;
	int hdr_len = 0;
	struct dst_entry *dst = skb->dst;
	struct xfrm_state *x  = dst->xfrm;
	struct ipv6hdr *iph = NULL, *top_iph;
	struct ipv6_esp_hdr *esph;
	struct crypto_tfm *tfm;
	struct esp_data *esp;
	struct sk_buff *trailer;
	int blksize;
	int clen;
	int alen;
	int nfrags;
	u8 *prevhdr;
	u8 nexthdr = 0;

	/* First, if the skb is not checksummed, complete checksum. */
	if (skb->ip_summed == CHECKSUM_HW && skb_checksum_help(skb) == NULL) {
		err = -EINVAL;
		goto error_nolock;
	}

	spin_lock_bh(&x->lock);
	err = xfrm_check_output(x, skb, AF_INET6);
	if (err)
		goto error;
	err = -ENOMEM;

	/* Strip IP header in transport mode. Save it. */

	if (!x->props.mode) {
		hdr_len = ip6_find_1stfragopt(skb, &prevhdr);
		nexthdr = *prevhdr;
		*prevhdr = IPPROTO_ESP;
		iph = kmalloc(hdr_len, GFP_ATOMIC);
		if (!iph) {
			err = -ENOMEM;
			goto error;
		}
		memcpy(iph, skb->nh.raw, hdr_len);
		__skb_pull(skb, hdr_len);
	}

	/* Now skb is pure payload to encrypt */

	/* Round to block size */
	clen = skb->len;

	esp = x->data;
	alen = esp->auth.icv_trunc_len;
	tfm = esp->conf.tfm;
	blksize = (crypto_tfm_alg_blocksize(tfm) + 3) & ~3;
	clen = (clen + 2 + blksize-1)&~(blksize-1);
	if (esp->conf.padlen)
		clen = (clen + esp->conf.padlen-1)&~(esp->conf.padlen-1);

	if ((nfrags = skb_cow_data(skb, clen-skb->len+alen, &trailer)) < 0) {
		if (!x->props.mode && iph) kfree(iph);
		goto error;
	}

	/* Fill padding... */
	do {
		int i;
		for (i=0; i<clen-skb->len - 2; i++)
			*(u8*)(trailer->tail + i) = i+1;
	} while (0);
	*(u8*)(trailer->tail + clen-skb->len - 2) = (clen - skb->len)-2;
	pskb_put(skb, trailer, clen - skb->len);

	if (x->props.mode) {
		iph = skb->nh.ipv6h;
		top_iph = (struct ipv6hdr*)skb_push(skb, x->props.header_len);
		esph = (struct ipv6_esp_hdr*)(top_iph+1);
		*(u8*)(trailer->tail - 1) = IPPROTO_IPV6;
		top_iph->version = 6;
		top_iph->priority = iph->priority;
		top_iph->flow_lbl[0] = iph->flow_lbl[0];
		top_iph->flow_lbl[1] = iph->flow_lbl[1];
		top_iph->flow_lbl[2] = iph->flow_lbl[2];
		if (x->props.flags & XFRM_STATE_NOECN)
			IP6_ECN_clear(top_iph);
		top_iph->nexthdr = IPPROTO_ESP;
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		top_iph->hop_limit = iph->hop_limit;
		ipv6_addr_copy(&top_iph->saddr,
			       (struct in6_addr *)&x->props.saddr);
		ipv6_addr_copy(&top_iph->daddr,
			       (struct in6_addr *)&x->id.daddr);
	} else { 
		esph = (struct ipv6_esp_hdr*)skb_push(skb, x->props.header_len);
		skb->h.raw = (unsigned char*)esph;
		top_iph = (struct ipv6hdr*)skb_push(skb, hdr_len);
		memcpy(top_iph, iph, hdr_len);
		kfree(iph);
		top_iph->payload_len = htons(skb->len + alen - sizeof(struct ipv6hdr));
		*(u8*)(trailer->tail - 1) = nexthdr;
	}

	esph->spi = x->id.spi;
	esph->seq_no = htonl(++x->replay.oseq);

	if (esp->conf.ivlen)
		crypto_cipher_set_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));

	do {
		struct scatterlist sgbuf[nfrags>MAX_SG_ONSTACK ? 0 : nfrags];
		struct scatterlist *sg = sgbuf;

		if (unlikely(nfrags > MAX_SG_ONSTACK)) {
			sg = kmalloc(sizeof(struct scatterlist)*nfrags, GFP_ATOMIC);
			if (!sg)
				goto error;
		}
		skb_to_sgvec(skb, sg, esph->enc_data+esp->conf.ivlen-skb->data, clen);
		crypto_cipher_encrypt(tfm, sg, sg, clen);
		if (unlikely(sg != sgbuf))
			kfree(sg);
	} while (0);

	if (esp->conf.ivlen) {
		memcpy(esph->enc_data, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
		crypto_cipher_get_iv(tfm, esp->conf.ivec, crypto_tfm_alg_ivsize(tfm));
	}

	if (esp->auth.icv_full_len) {
		esp->auth.icv(esp, skb, (u8*)esph-skb->data,
			sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen+clen, trailer->tail);
		pskb_put(skb, trailer, alen);
	}

	skb->nh.raw = skb->data;

	x->curlft.bytes += skb->len;
	x->curlft.packets++;
	spin_unlock_bh(&x->lock);
	if ((skb->dst = dst_pop(dst)) == NULL) {
		err = -EHOSTUNREACH;
		goto error_nolock;
	}
	return NET_XMIT_BYPASS;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}
Example #16
0
/*
 *	Handle MSG_ERRQUEUE
 */
int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sock_exterr_skb *serr;
	struct sk_buff *skb, *skb2;
	struct sockaddr_in6 *sin;
	struct {
		struct sock_extended_err ee;
		struct sockaddr_in6	 offender;
	} errhdr;
	int err;
	int copied;

	err = -EAGAIN;
	skb = skb_dequeue(&sk->sk_error_queue);
	if (skb == NULL)
		goto out;

	copied = skb->len;
	if (copied > len) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}
	err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
	if (err)
		goto out_free_skb;

	sock_recv_timestamp(msg, sk, skb);

	serr = SKB_EXT_ERR(skb);

	sin = (struct sockaddr_in6 *)msg->msg_name;
	if (sin) {
		const unsigned char *nh = skb_network_header(skb);
		sin->sin6_family = AF_INET6;
		sin->sin6_flowinfo = 0;
		sin->sin6_port = serr->port;
		sin->sin6_scope_id = 0;
		if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_addr_copy(&sin->sin6_addr,
				  (struct in6_addr *)(nh + serr->addr_offset));
			if (np->sndflow)
				sin->sin6_flowinfo =
					(*(__be32 *)(nh + serr->addr_offset - 24) &
					 IPV6_FLOWINFO_MASK);
			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				sin->sin6_scope_id = IP6CB(skb)->iif;
		} else {
			ipv6_addr_set_v4mapped(*(__be32 *)(nh + serr->addr_offset),
					       &sin->sin6_addr);
		}
	}

	memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
	sin = &errhdr.offender;
	sin->sin6_family = AF_UNSPEC;
	if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
		sin->sin6_family = AF_INET6;
		sin->sin6_flowinfo = 0;
		sin->sin6_scope_id = 0;
		if (skb->protocol == htons(ETH_P_IPV6)) {
			ipv6_addr_copy(&sin->sin6_addr, &ipv6_hdr(skb)->saddr);
			if (np->rxopt.all)
				datagram_recv_ctl(sk, msg, skb);
			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
				sin->sin6_scope_id = IP6CB(skb)->iif;
		} else {
			struct inet_sock *inet = inet_sk(sk);

			ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
					       &sin->sin6_addr);
			if (inet->cmsg_flags)
				ip_cmsg_recv(msg, skb);
		}
	}

	put_cmsg(msg, SOL_IPV6, IPV6_RECVERR, sizeof(errhdr), &errhdr);

	/* Now we could try to dump offended packet options */

	msg->msg_flags |= MSG_ERRQUEUE;
	err = copied;

	/* Reset and regenerate socket error */
	spin_lock_bh(&sk->sk_error_queue.lock);
	sk->sk_err = 0;
	if ((skb2 = skb_peek(&sk->sk_error_queue)) != NULL) {
		sk->sk_err = SKB_EXT_ERR(skb2)->ee.ee_errno;
		spin_unlock_bh(&sk->sk_error_queue.lock);
		sk->sk_error_report(sk);
	} else {
		spin_unlock_bh(&sk->sk_error_queue.lock);
	}

out_free_skb:
	kfree_skb(skb);
out:
	return err;
}
Example #17
0
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		err = xfrm_state_check_space(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		err = x->outer_mode->output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		spin_lock_bh(&x->lock);
		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
			XFRM_SKB_CB(skb)->seq.output = ++x->replay.oseq;
			if (unlikely(x->replay.oseq == 0)) {
				XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
				x->replay.oseq--;
				xfrm_audit_state_replay_overflow(x, skb);
				err = -EOVERFLOW;
				goto error;
			}
			if (xfrm_aevent_is_on(net))
				xfrm_replay_notify(x, XFRM_REPLAY_UPDATE);
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		err = x->type->output(x, skb);
		if (err == -EINPROGRESS)
			goto out_exit;

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set_noref(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL));

	err = 0;

out_exit:
	return err;
error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	goto out_exit;
}
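
Examples #15 and #17 share a two-label error convention: error drops the per-state lock before falling through to error_nolock, which frees the skb for paths that never took the lock. Here is a small sketch of that shape under assumed types; struct demo_state and its fields are made up.

struct demo_state {			/* hypothetical state object */
	spinlock_t lock;
	u64 packets;
};

static int demo_output(struct demo_state *x, struct sk_buff *skb)
{
	int err;

	if (!skb) {
		err = -EINVAL;
		goto error_nolock;	/* lock not held on this path */
	}

	spin_lock_bh(&x->lock);
	if (x->packets == U64_MAX) {
		err = -EOVERFLOW;
		goto error;		/* must release the lock first */
	}
	x->packets++;
	spin_unlock_bh(&x->lock);
	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
	return err;
}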
Example #18
0
static void poll_routine(unsigned long __opaque)
{
    struct timer_list *timer = &poll_timer;
    UserPtrs ufuncs;
    int offset = (2*HZ);


    //printk("Came to poll_routine with %x\n", (u32)(__opaque));

    /* Register with DMA in case it has not already been done */
    if(DriverState < POLLING)
    {
        spin_lock_bh(&RawLock);
        printk("Calling DmaRegister on engine %d and %d\n", ENGINE_TX, ENGINE_RX);
        DriverState = REGISTERED;

        ufuncs.UserInit = myInit;
        ufuncs.UserPutPkt = myPutTxPkt;
        ufuncs.UserSetState = mySetState;
        ufuncs.UserGetState = myGetState;
        ufuncs.privData = 0x54545454;
        spin_unlock_bh(&RawLock);

        if((handle[0] = DmaRegister(ENGINE_TX, MYBAR, &ufuncs, BUFSIZE)) == NULL)
        {
            printk("Register for engine %d failed. Stopping.\n", ENGINE_TX);
            spin_lock_bh(&RawLock);
            DriverState = UNINITIALIZED;
            spin_unlock_bh(&RawLock);
            return;     // This polling will not happen again.
        }
        
        printk("Handle for engine %d is %p\n", ENGINE_TX, handle[0]);

        spin_lock_bh(&RawLock);
        ufuncs.UserInit = myInit;
        ufuncs.UserPutPkt = myPutRxPkt;
        ufuncs.UserGetPkt = myGetRxPkt;
        ufuncs.UserSetState = mySetState;
        ufuncs.UserGetState = myGetState;
        ufuncs.privData = 0x54545456;
        spin_unlock_bh(&RawLock);

        if((handle[2] = DmaRegister(ENGINE_RX, MYBAR, &ufuncs, BUFSIZE)) == NULL)
        {
            printk("Register for engine %d failed. Stopping.\n", ENGINE_RX);
            spin_lock_bh(&RawLock);
            DriverState = UNINITIALIZED;
            spin_unlock_bh(&RawLock);
            return;     // This polling will not happen again.
        }
        printk("Handle for engine %d is %p\n", ENGINE_RX, handle[2]);

        /* Reschedule poll routine */
        timer->expires = jiffies + offset;
        add_timer(timer); //guodebug: add_timer
    }
    else if(DriverState == REGISTERED)
    {
        /* Only if the test mode is set, and only for TX direction */
        if((RawTestMode & TEST_START) && 
           (RawTestMode & (ENABLE_PKTCHK|ENABLE_LOOPBACK)))
        {
            spin_lock_bh(&RawLock);
            /* First change the state */
            RawTestMode &= ~TEST_START;
            RawTestMode |= TEST_IN_PROGRESS;
            spin_unlock_bh(&RawLock);

            /* Now, queue up one packet to start the transmission */
            DmaSetupTransmit(handle[0], 1);

            /* For the first packet, give some gap before the next TX */
            offset = HZ;
        }
        else if(RawTestMode & TEST_IN_PROGRESS)
        {
            int avail;
            int times;

            for(times = 0; times < 9; times++)
            {
                avail = (TxBufs.TotalNum - TxBufs.AllocNum);
                if(avail <= 0) break;

                /* Queue up many packets to continue the transmission */
                if(DmaSetupTransmit(handle[0], 100) == 0) break;
            }

            /* Do the next TX as soon as possible */
            offset = 0;
        }

        /* Reschedule poll routine */
        timer->expires = jiffies + offset;
        add_timer(timer); //guodebug: add_timer
    }
}
Example #19
0
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
    spin_lock_bh(&linkfail_lock);
    hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
    spin_unlock_bh(&linkfail_lock);
}
Example #20
0
int mySetState(void * hndl, UserState * ustate, unsigned int privdata)
{
    int val;
    static unsigned int testmode;

    log_verbose(KERN_INFO "Reached mySetState with privdata %x\n", privdata);
    
    /* Check driver state */
    if(DriverState != REGISTERED)
    {
        printk("Driver does not seem to be ready\n");
        return EFAULT;
    }

    /* Check handle value */
    if((hndl != handle[0]) && (hndl != handle[2]))
    {
        printk("Came with wrong handle\n");
        return EBADF;
    }

    /* Valid only for TX engine */
    if(privdata == 0x54545454)
    {
        spin_lock_bh(&RawLock);

        /* Set up the value to be written into the register */
        RawTestMode = ustate->TestMode;

        if(RawTestMode & TEST_START) 
        {
            testmode = 0;
            if(RawTestMode & ENABLE_LOOPBACK) testmode |= LOOPBACK;
#ifndef XAUI
            if(RawTestMode & ENABLE_PKTCHK) testmode |= PKTCHKR;
            if(RawTestMode & ENABLE_PKTGEN) testmode |= PKTGENR;
#endif
        }
        else
        {
            /* Deliberately not clearing the loopback bit, in case a
             * loopback test was going on - allows the loopback path
             * to drain off packets. Just stopping the source of packets.
             */
#ifndef XAUI
            if(RawTestMode & ENABLE_PKTCHK) testmode &= ~PKTCHKR;
            if(RawTestMode & ENABLE_PKTGEN) testmode &= ~PKTGENR;
#endif
        }

        printk("SetState TX with RawTestMode %x, reg value %x\n", 
                                                    RawTestMode, testmode);

        /* Now write the registers */
        if(RawTestMode & TEST_START)
        {
#ifndef XAUI
            if(!(RawTestMode & (ENABLE_PKTCHK|ENABLE_PKTGEN|ENABLE_LOOPBACK)))
            {
                printk("%s Driver: TX Test Start with wrong mode %x\n", 
                                                MYNAME, testmode);
                RawTestMode = 0;
                spin_unlock_bh(&RawLock);
                return EBADRQC;
            }
#endif

            printk("%s Driver: Starting the test - mode %x, reg %x\n", 
                                            MYNAME, RawTestMode, testmode);

            /* Next, set packet sizes. Ensure they don't exceed PKTSIZEs */
            RawMinPktSize = ustate->MinPktSize;
            RawMaxPktSize = ustate->MaxPktSize;

#ifndef XAUI
            /* Set RX packet size for memory path */
            val = RawMaxPktSize;
            if(val % BYTEMULTIPLE)
                val -= (val % BYTEMULTIPLE);
            printk("Reg %x = %x\n", PKT_SIZE_ADDRESS, val);
            RawMinPktSize = RawMaxPktSize = val;

            /* Now ensure the sizes remain within bounds */
            if(RawMaxPktSize > MAXPKTSIZE)
                RawMinPktSize = RawMaxPktSize = MAXPKTSIZE;
            if(RawMinPktSize < MINPKTSIZE)
                RawMinPktSize = RawMaxPktSize = MINPKTSIZE;
            if(RawMinPktSize > RawMaxPktSize)
                RawMinPktSize = RawMaxPktSize;
            val = RawMaxPktSize;

            printk("========Reg %x = %d\n", PKT_SIZE_ADDRESS, val);
            XIo_Out32(TXbarbase+PKT_SIZE_ADDRESS, val);
            printk("RxPktSize %d\n", val);
#else
            /* Now ensure the sizes remain within bounds */
            if(RawMaxPktSize > MAXPKTSIZE)
                RawMaxPktSize = MAXPKTSIZE;
            if(RawMinPktSize < MINPKTSIZE)
                RawMinPktSize = MINPKTSIZE;
            if(RawMinPktSize > RawMaxPktSize)
                RawMinPktSize = RawMaxPktSize;

            printk("MinPktSize %d MaxPktSize %d\n", 
                                RawMinPktSize, RawMaxPktSize);
#endif

/* In case the last test was a loopback test, that bit may not be cleared. */
            XIo_Out32(TXbarbase+TX_CONFIG_ADDRESS, 0);
            if(RawTestMode & (ENABLE_PKTCHK|ENABLE_LOOPBACK))
            {
                TxSeqNo = 0;
#ifdef XAUI
                RxSeqNo = 0;
#endif
                if(RawTestMode & ENABLE_LOOPBACK)
                    RxSeqNo = 0;
                printk("========Reg %x = %x\n", TX_CONFIG_ADDRESS, testmode);
                XIo_Out32(TXbarbase+TX_CONFIG_ADDRESS, testmode);
            }
#ifndef XAUI
            if(RawTestMode & ENABLE_PKTGEN)
            {
                RxSeqNo = 0;
                printk("========Reg %x = %x\n", RX_CONFIG_ADDRESS, testmode);
                XIo_Out32(TXbarbase+RX_CONFIG_ADDRESS, testmode);
            }
#endif

#ifdef XAUI
            /* Wait for the link status to be established */
            mdelay(300);

            /* Now, check if the link status is fine */
            val = XIo_In32(TXbarbase+LINK_STATUS_ADDRESS);
            printk("Link status is %x\n", val);
            if(!(val & (RX_LINK_UP|RX_ALIGNED)))
            {
                printk(KERN_ERR "Link status is down %x\n", val);
                XIo_Out32(TXbarbase+TX_CONFIG_ADDRESS, 0);
                RawTestMode = 0;
                spin_unlock_bh(&RawLock);
                return ENOLINK;
            }
#endif
        }
        /* Else, stop the test. Do not remove any loopback here because
         * the DMA queues and hardware FIFOs must drain first.
         */
        else
        {
            printk("%s Driver: Stopping the test, mode %x\n", MYNAME, testmode);
            printk("========Reg %x = %x\n", TX_CONFIG_ADDRESS, testmode);
            XIo_Out32(TXbarbase+TX_CONFIG_ADDRESS, testmode);
#ifndef XAUI
            printk("========Reg %x = %x\n", RX_CONFIG_ADDRESS, testmode);
            XIo_Out32(TXbarbase+RX_CONFIG_ADDRESS, testmode);
#endif

            /* Not resetting sequence numbers here - causes problems
             * in debugging. Instead, reset the sequence numbers when
             * starting a test.
             */
        }

        PrintSummary();
        spin_unlock_bh(&RawLock);
    }

    return 0;
}
Example #21
0
static int help(struct sk_buff **pskb,
		struct ip_conntrack *ct, enum ip_conntrack_info ctinfo)
{
	unsigned int dataoff;
	struct tcphdr _tcph, *th;
	char *data, *data_limit, *ib_ptr;
	int dir = CTINFO2DIR(ctinfo);
	struct ip_conntrack_expect *exp;
	u32 seq;
	u_int32_t dcc_ip;
	u_int16_t dcc_port;
	int i, ret = NF_ACCEPT;
	char *addr_beg_p, *addr_end_p;
	typeof(ip_nat_irc_hook) ip_nat_irc;

	DEBUGP("entered\n");

	/* If packet is coming from IRC server */
	if (dir == IP_CT_DIR_REPLY)
		return NF_ACCEPT;

	/* Until there's been traffic both ways, don't look in packets. */
	if (ctinfo != IP_CT_ESTABLISHED
	    && ctinfo != IP_CT_ESTABLISHED + IP_CT_IS_REPLY) {
		DEBUGP("Conntrackinfo = %u\n", ctinfo);
		return NF_ACCEPT;
	}

	/* Not a full tcp header? */
	th = skb_header_pointer(*pskb, (*pskb)->nh.iph->ihl*4,
				sizeof(_tcph), &_tcph);
	if (th == NULL)
		return NF_ACCEPT;

	/* No data? */
	dataoff = (*pskb)->nh.iph->ihl*4 + th->doff*4;
	if (dataoff >= (*pskb)->len)
		return NF_ACCEPT;

	spin_lock_bh(&irc_buffer_lock);
	ib_ptr = skb_header_pointer(*pskb, dataoff,
				    (*pskb)->len - dataoff, irc_buffer);
	BUG_ON(ib_ptr == NULL);

	data = ib_ptr;
	data_limit = ib_ptr + (*pskb)->len - dataoff;

	/* strlen("\1DCC SENT t AAAAAAAA P\1\n")=24
	 * 5+MINMATCHLEN+strlen("t AAAAAAAA P\1\n")=14 */
	while (data < (data_limit - (19 + MINMATCHLEN))) {
		if (memcmp(data, "\1DCC ", 5)) {
			data++;
			continue;
		}

		data += 5;
		/* we have at least (19+MINMATCHLEN)-5 bytes valid data left */

		DEBUGP("DCC found in master %u.%u.%u.%u:%u %u.%u.%u.%u:%u...\n",
			NIPQUAD(iph->saddr), ntohs(th->source),
			NIPQUAD(iph->daddr), ntohs(th->dest));

		for (i = 0; i < ARRAY_SIZE(dccprotos); i++) {
			if (memcmp(data, dccprotos[i], strlen(dccprotos[i]))) {
				/* no match */
				continue;
			}

			DEBUGP("DCC %s detected\n", dccprotos[i]);
			data += strlen(dccprotos[i]);
			/* we have at least
			 * (19+MINMATCHLEN)-5-dccprotos[i].matchlen bytes valid
			 * data left (== 14/13 bytes) */
			if (parse_dcc((char *)data, data_limit, &dcc_ip,
				       &dcc_port, &addr_beg_p, &addr_end_p)) {
				/* unable to parse */
				DEBUGP("unable to parse dcc command\n");
				continue;
			}
			DEBUGP("DCC bound ip/port: %u.%u.%u.%u:%u\n",
				HIPQUAD(dcc_ip), dcc_port);

			/* dcc_ip can be the internal OR external (NAT'ed) IP
			 * Tiago Sousa <*****@*****.**> */
			if (ct->tuplehash[dir].tuple.src.ip != htonl(dcc_ip)
			    && ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.ip != htonl(dcc_ip)) {
				if (net_ratelimit())
					printk(KERN_WARNING
						"Forged DCC command from "
						"%u.%u.%u.%u: %u.%u.%u.%u:%u\n",
				NIPQUAD(ct->tuplehash[dir].tuple.src.ip),
						HIPQUAD(dcc_ip), dcc_port);

				continue;
			}

			exp = ip_conntrack_expect_alloc(ct);
			if (exp == NULL) {
				ret = NF_DROP;
				goto out;
			}

			/* save position of address in dcc string,
			 * necessary for NAT */
			DEBUGP("tcph->seq = %u\n", th->seq);
			seq = ntohl(th->seq) + (addr_beg_p - ib_ptr);

			/* We refer to the reverse direction ("!dir")
			 * tuples here, because we're expecting
			 * something in the other direction.
			 * Doesn't matter unless NAT is happening.  */
			exp->tuple = ((struct ip_conntrack_tuple)
				{ { 0, { 0 } },
				  { ct->tuplehash[!dir].tuple.dst.ip,
				    { .tcp = { htons(dcc_port) } },
				    IPPROTO_TCP }});
Example #22
0
static int DmaSetupTransmit(void * hndl, int num)
{
    int i, result;
    static int pktsize=0;
    int total, bufsize, fragment;
    int bufindex;
    unsigned char * bufVA;
    PktBuf * pbuf;
    int origseqno;
    //static unsigned short lastno=0;

    log_verbose("Reached DmaSetupTransmit with handle %p, num %d\n", hndl, num);

    /* Check driver state */
    if(DriverState != REGISTERED)
    {
        printk("Driver does not seem to be ready\n");
        return 0;
    }

    /* Check handle value */
    if(hndl != handle[0])
    {
        printk("Came with wrong handle\n");
        return 0;
    }

    /* Check number of packets */
    if(!num)
    {
        printk("Came with 0 packets for sending\n");
        return 0;
    }

    /* Hold the spinlock only when calling the buffer management APIs. */
    spin_lock_bh(&RawLock);
    origseqno = TxSeqNo;
    for(i=0, bufindex=0; i<num; i++)            /* Total packets loop */
    {
        //printk("i %d bufindex %d\n", i, bufindex);

#ifdef XAUI
        /* Step the packet size between min and max (in BYTEMULTIPLE increments) */
        if(RawMinPktSize != RawMaxPktSize)
        {
            pktsize += BYTEMULTIPLE;
            if(pktsize % BYTEMULTIPLE)
                pktsize -= (pktsize % BYTEMULTIPLE);
            if(pktsize < RawMinPktSize) pktsize = RawMinPktSize;
            if(pktsize > RawMaxPktSize) pktsize = RawMaxPktSize;
        }
        else
#endif
            /* Fix the packet size to be the maximum entered in GUI */
            pktsize = RawMaxPktSize;

        //printk("pktsize is %d\n", pktsize);

        total = 0;
        fragment = 0;
        while(total < pktsize)      /* Packet fragments loop */
        {
            //printk("Buf loop total %d bufindex %d\n", total, bufindex);

            pbuf = &(pkts[bufindex]);

            /* Allocate a buffer. DMA driver will map to PCI space. */
            bufVA = AllocBuf(&TxBufs);

            log_verbose(KERN_INFO "TX: The buffer after alloc is at address %x size %d\n", (u32) bufVA, (u32) BUFSIZE);
            if (bufVA == NULL) 
            {
                //printk("TX: AllocBuf failed\n");
                //printk("[%d]", (num-i-1));
                break;
            }
            pbuf->pktBuf = bufVA;
            pbuf->bufInfo = bufVA;
            bufsize = ((total + BUFSIZE) > pktsize) ? 
                                        (pktsize - total) : BUFSIZE ;
            total += bufsize;

            //printk("bufsize %d total %d\n", bufsize, total);

            log_verbose(KERN_INFO "Calling FormatBuffer pktsize %d bufsize %d fragment %d\n", 
                                pktsize, bufsize, fragment);
            FormatBuffer(bufVA, pktsize, bufsize, fragment);

            pbuf->size = bufsize;
            pbuf->userInfo = TxSeqNo; 
            pbuf->flags = PKT_ALL;
            if(!fragment)
                pbuf->flags |= PKT_SOP;
            if(total == pktsize)
            {
                pbuf->flags |= PKT_EOP;
#ifdef XAUI
                pbuf->size = bufsize - 4;
#endif
            }

            //printk("flags %x\n", pbuf->flags);
            //printk("TxSeqNo %u\n", TxSeqNo);

            bufindex++;
            fragment++;
        }
        if(total < pktsize)
        {
            /* There must have been some error in the middle of the packet */

            if(fragment)
            {
                /* First, adjust the number of buffers to queue up or else 
                 * partial packets will get transmitted, which will cause a 
                 * problem later.
                 */
                bufindex -= fragment;

                /* Now, free any unused buffers from the partial packet, so 
                 * that buffers are not lost.
                 */
                log_normal(KERN_ERR "Tried to send pkt of size %d, only %d fragments possible\n", 
                                                pktsize, fragment);
                FreeUnusedBuf(&TxBufs, fragment);
            }
            break;
        }

#ifdef XAUI
        /* Reset size so that next time it starts from a low value */
        if(pktsize >= RawMaxPktSize) pktsize = 0;
#endif

        /* Increment packet sequence number */
        //if(lastno != TxSeqNo) printk(" %u-%u.", lastno, TxSeqNo);
        TxSeqNo++;
        //lastno = TxSeqNo;
    }
    spin_unlock_bh(&RawLock);

    //printk("[p%d-%d-%d] ", num, i, bufindex);

    if(i == 0)
        /* No buffers available */
        return 0;

    log_verbose("%s: Sending packet length %d seqno %d\n", 
                                        MYNAME, pktsize, TxSeqNo);
    result = DmaSendPkt(hndl, pkts, bufindex);
    TxBufCnt += result;
    if(result != bufindex)
    {
        log_normal(KERN_ERR "Tried to send %d pkts in %d buffers, sent only %d\n", 
                                    num, bufindex, result);
        //printk("[s%d-%d,%d-%d]", bufindex, result, TxSeqNo, origseqno);
        if(result) TxSeqNo = pkts[result].userInfo;
        else TxSeqNo = origseqno;
        //printk("-%u-", TxSeqNo);
        //lastno = TxSeqNo;

        spin_lock_bh(&RawLock);
        FreeUnusedBuf(&TxBufs, (bufindex-result));
        spin_unlock_bh(&RawLock);
        return 0;
    }
    else return 1;
}
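/*
 * A minimal user-space sketch of the fragmentation arithmetic used in
 * DmaSetupTransmit() above: a packet of pktsize bytes is split into
 * BUFSIZE-sized buffers, the first fragment is flagged SOP and the last
 * EOP. The constants and flag values below are illustrative assumptions,
 * not the driver's real definitions.
 */
#include <stdio.h>

#define SKETCH_BUFSIZE 4096   /* assumed per-buffer size */
#define SKETCH_PKT_SOP 0x1    /* assumed start-of-packet flag */
#define SKETCH_PKT_EOP 0x2    /* assumed end-of-packet flag */

static void sketch_fragment(int pktsize)
{
    int total = 0, fragment = 0;

    while (total < pktsize) {
        int bufsize = (total + SKETCH_BUFSIZE > pktsize) ?
                      (pktsize - total) : SKETCH_BUFSIZE;
        int flags = 0;

        if (fragment == 0)
            flags |= SKETCH_PKT_SOP;
        if (total + bufsize == pktsize)
            flags |= SKETCH_PKT_EOP;

        printf("fragment %d: %d bytes, flags 0x%x\n", fragment, bufsize, flags);
        total += bufsize;
        fragment++;
    }
}

int main(void)
{
    sketch_fragment(10000);   /* 4096 + 4096 + 1808 */
    return 0;
}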
Example #23
0
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	bool is_ip;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* Some applications using e.g. packet sockets will
		 * bypass the VLAN acceleration and create tagged
		 * ethernet frames directly.  We primarily look for
		 * the accelerated out-of-band tag, but fall back if
		 * required
		 */
		skb_reset_mac_header(skb);
		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
		    __vlan_get_tag(skb, &tci) == 0) {
			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
			skb_pull(skb, VLAN_ETH_HLEN);
		} else {
			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
			skb_pull(skb, ETH_HLEN);
		}

		/* Is IP session <0> tagged too? */
		if (info->flags & FLAG_IPS0_VLAN) {
			/* drop all untagged packets */
			if (!tci)
				goto error;
			/* map MBIM_IPS0_VID to IPS<0> */
			if (tci == MBIM_IPS0_VID)
				tci = 0;
		}

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4093 => unsupported, drop
		 *   4094       => IPS session <0> if FLAG_IPS0_VLAN
		 */

		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			if (!is_ip)
				goto error;
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			if (is_ip)
				goto error;
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);
	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
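/*
 * A hedged, standalone sketch of the VLAN-tag-to-MBIM-session mapping the
 * comment in cdc_mbim_tx_fixup() describes: tag 0-255 selects an IPS
 * session, 256-511 a DSS session, everything else is dropped (the real
 * driver also remaps VLAN 4094 to IPS<0> earlier and encodes the result
 * in the NDP16 signature). The enum and helper names are assumptions made
 * for illustration only.
 */
#include <stdio.h>

enum sketch_session { SKETCH_IPS, SKETCH_DSS, SKETCH_DROP };

static enum sketch_session sketch_map_tci(unsigned int tci, int *session_id)
{
	switch (tci & 0x0f00) {
	case 0x0000:			/* VLAN ID 0 - 255 -> IPS <tci> */
		*session_id = tci;
		return SKETCH_IPS;
	case 0x0100:			/* VLAN ID 256 - 511 -> DSS <tci - 256> */
		*session_id = tci - 256;
		return SKETCH_DSS;
	default:			/* 512 and above: unsupported */
		*session_id = -1;
		return SKETCH_DROP;
	}
}

int main(void)
{
	unsigned int samples[] = { 0, 7, 255, 256, 300, 1000 };
	unsigned int i;
	int id;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		enum sketch_session s = sketch_map_tci(samples[i], &id);

		printf("tci %u -> %s %d\n", samples[i],
		       s == SKETCH_IPS ? "IPS" : s == SKETCH_DSS ? "DSS" : "drop",
		       id);
	}
	return 0;
}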
Example #24
0
/**
 * iscsi_sw_tcp_xmit_segment - transmit segment
 * @tcp_conn: the iSCSI TCP connection
 * @segment: the buffer to transmit
 *
 * This function transmits as much of the buffer as
 * the network layer will accept, and returns the number of
 * bytes transmitted.
 *
 * If CRC hashing is enabled, the function will compute the
 * hash as it goes. When the entire segment has been transmitted,
 * it will retrieve the hash value and send it as well.
 */
static int iscsi_sw_tcp_xmit_segment(struct iscsi_tcp_conn *tcp_conn,
				     struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sk = tcp_sw_conn->sock;
	unsigned int copied = 0;
	int r = 0;

	while (!iscsi_tcp_segment_done(tcp_conn, segment, 0, r)) {
		struct scatterlist *sg;
		unsigned int offset, copy;
		int flags = 0;

		r = 0;
		offset = segment->copied;
		copy = segment->size - offset;

		if (segment->total_copied + segment->size < segment->total_size)
			flags |= MSG_MORE;

		/* Use sendpage if we can; else fall back to sendmsg */
		if (!segment->data) {
			sg = segment->sg;
			offset += segment->sg_offset + sg->offset;
			r = tcp_sw_conn->sendpage(sk, sg_page(sg), offset,
						  copy, flags);
		} else {
			struct msghdr msg = { .msg_flags = flags };
			struct kvec iov = {
				.iov_base = segment->data + offset,
				.iov_len = copy
			};

			r = kernel_sendmsg(sk, &msg, &iov, 1, copy);
		}

		if (r < 0) {
			iscsi_tcp_segment_unmap(segment);
			return r;
		}
		copied += r;
	}
	return copied;
}

/**
 * iscsi_sw_tcp_xmit - TCP transmit
 **/
static int iscsi_sw_tcp_xmit(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;
	unsigned int consumed = 0;
	int rc = 0;

	while (1) {
		rc = iscsi_sw_tcp_xmit_segment(tcp_conn, segment);
		/*
		 * We may not have been able to send data because the conn
		 * is getting stopped. libiscsi will know so propagate err
		 * for it to do the right thing.
		 */
		if (rc == -EAGAIN)
			return rc;
		else if (rc < 0) {
			rc = ISCSI_ERR_XMIT_FAILED;
			goto error;
		} else if (rc == 0)
			break;

		consumed += rc;

		if (segment->total_copied >= segment->total_size) {
			if (segment->done != NULL) {
				rc = segment->done(tcp_conn, segment);
				if (rc != 0)
					goto error;
			}
		}
	}

	ISCSI_SW_TCP_DBG(conn, "xmit %d bytes\n", consumed);

	conn->txdata_octets += consumed;
	return consumed;

error:
	/* Transmit error. We could initiate error recovery
	 * here. */
	ISCSI_SW_TCP_DBG(conn, "Error sending PDU, errno=%d\n", rc);
	iscsi_conn_failure(conn, rc);
	return -EIO;
}

/**
 * iscsi_sw_tcp_xmit_qlen - return the number of bytes queued for xmit
 */
static inline int iscsi_sw_tcp_xmit_qlen(struct iscsi_conn *conn)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct iscsi_segment *segment = &tcp_sw_conn->out.segment;

	return segment->total_copied - segment->total_size;
}

static int iscsi_sw_tcp_pdu_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	int rc;

	while (iscsi_sw_tcp_xmit_qlen(conn)) {
		rc = iscsi_sw_tcp_xmit(conn);
		if (rc == 0)
			return -EAGAIN;
		if (rc < 0)
			return rc;
	}

	return 0;
}

/*
 * This is called when we're done sending the header.
 * Simply copy the data_segment to the send segment, and return.
 */
static int iscsi_sw_tcp_send_hdr_done(struct iscsi_tcp_conn *tcp_conn,
				      struct iscsi_segment *segment)
{
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->out.segment = tcp_sw_conn->out.data_segment;
	ISCSI_SW_TCP_DBG(tcp_conn->iscsi_conn,
			 "Header done. Next segment size %u total_size %u\n",
			 tcp_sw_conn->out.segment.size,
			 tcp_sw_conn->out.segment.total_size);
	return 0;
}

static void iscsi_sw_tcp_send_hdr_prep(struct iscsi_conn *conn, void *hdr,
				       size_t hdrlen)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	ISCSI_SW_TCP_DBG(conn, "%s\n", conn->hdrdgst_en ?
			 "digest enabled" : "digest disabled");

	/* Clear the data segment - needs to be filled in by the
	 * caller using iscsi_tcp_send_data_prep() */
	memset(&tcp_sw_conn->out.data_segment, 0,
	       sizeof(struct iscsi_segment));

	/* If header digest is enabled, compute the CRC and
	 * place the digest into the same buffer. We make
	 * sure that both iscsi_tcp_task and mtask have
	 * sufficient room.
	 */
	if (conn->hdrdgst_en) {
		iscsi_tcp_dgst_header(&tcp_sw_conn->tx_hash, hdr, hdrlen,
				      hdr + hdrlen);
		hdrlen += ISCSI_DIGEST_SIZE;
	}

	/* Remember header pointer for later, when we need
	 * to decide whether there's a payload to go along
	 * with the header. */
	tcp_sw_conn->out.hdr = hdr;

	iscsi_segment_init_linear(&tcp_sw_conn->out.segment, hdr, hdrlen,
				  iscsi_sw_tcp_send_hdr_done, NULL);
}
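/*
 * Minimal sketch of the "digest in the same buffer" layout used by
 * iscsi_sw_tcp_send_hdr_prep() above: the digest is computed over the
 * header and written directly at hdr + hdrlen, and the segment length is
 * then extended to cover both. The checksum below is a stand-in
 * assumption, not the crc32c hash the driver actually configures.
 */
#include <stddef.h>
#include <stdint.h>

#define SKETCH_DIGEST_SIZE 4

static uint32_t sketch_digest(const unsigned char *buf, size_t len)
{
	uint32_t sum = 0;			/* stand-in checksum */

	while (len--)
		sum = (sum << 1) ^ *buf++;
	return sum;
}

/* buf must have room for hdrlen + SKETCH_DIGEST_SIZE bytes */
static size_t sketch_append_digest(unsigned char *buf, size_t hdrlen)
{
	uint32_t d = sketch_digest(buf, hdrlen);

	buf[hdrlen + 0] = d & 0xff;
	buf[hdrlen + 1] = (d >> 8) & 0xff;
	buf[hdrlen + 2] = (d >> 16) & 0xff;
	buf[hdrlen + 3] = (d >> 24) & 0xff;
	return hdrlen + SKETCH_DIGEST_SIZE;	/* new segment length */
}

int main(void)
{
	unsigned char buf[48 + SKETCH_DIGEST_SIZE] = { 0 };	/* 48-byte header */
	size_t total = sketch_append_digest(buf, 48);

	return total == 48 + SKETCH_DIGEST_SIZE ? 0 : 1;
}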

/*
 * Prepare the send buffer for the payload data.
 * Padding and checksumming will all be taken care
 * of by the iscsi_segment routines.
 */
static int
iscsi_sw_tcp_send_data_prep(struct iscsi_conn *conn, struct scatterlist *sg,
			    unsigned int count, unsigned int offset,
			    unsigned int len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "offset=%d, datalen=%d %s\n", offset, len,
			 conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	return iscsi_segment_seek_sg(&tcp_sw_conn->out.data_segment,
				     sg, count, offset, len,
				     NULL, tx_hash);
}

static void
iscsi_sw_tcp_send_linear_data_prep(struct iscsi_conn *conn, void *data,
				   size_t len)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct hash_desc *tx_hash = NULL;
	unsigned int hdr_spec_len;

	ISCSI_SW_TCP_DBG(conn, "datalen=%zd %s\n", len, conn->datadgst_en ?
			 "digest enabled" : "digest disabled");

	/* Make sure the datalen matches what the caller
	   said he would send. */
	hdr_spec_len = ntoh24(tcp_sw_conn->out.hdr->dlength);
	WARN_ON(iscsi_padded(len) != iscsi_padded(hdr_spec_len));

	if (conn->datadgst_en)
		tx_hash = &tcp_sw_conn->tx_hash;

	iscsi_segment_init_linear(&tcp_sw_conn->out.data_segment,
				data, len, NULL, tx_hash);
}

static int iscsi_sw_tcp_pdu_init(struct iscsi_task *task,
				 unsigned int offset, unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	int err = 0;

	iscsi_sw_tcp_send_hdr_prep(conn, task->hdr, task->hdr_len);

	if (!count)
		return 0;

	if (!task->sc)
		iscsi_sw_tcp_send_linear_data_prep(conn, task->data, count);
	else {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);

		err = iscsi_sw_tcp_send_data_prep(conn, sdb->table.sgl,
						  sdb->table.nents, offset,
						  count);
	}

	if (err) {
		/* got invalid offset/len */
		return -EIO;
	}
	return 0;
}

static int iscsi_sw_tcp_pdu_alloc(struct iscsi_task *task, uint8_t opcode)
{
	struct iscsi_tcp_task *tcp_task = task->dd_data;

	task->hdr = task->dd_data + sizeof(*tcp_task);
	task->hdr_max = sizeof(struct iscsi_sw_tcp_hdrbuf) - ISCSI_DIGEST_SIZE;
	return 0;
}

static struct iscsi_cls_conn *
iscsi_sw_tcp_conn_create(struct iscsi_cls_session *cls_session,
			 uint32_t conn_idx)
{
	struct iscsi_conn *conn;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;

	cls_conn = iscsi_tcp_conn_setup(cls_session, sizeof(*tcp_sw_conn),
					conn_idx);
	if (!cls_conn)
		return NULL;
	conn = cls_conn->dd_data;
	tcp_conn = conn->dd_data;
	tcp_sw_conn = tcp_conn->dd_data;

	tcp_sw_conn->tx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->tx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->tx_hash.tfm))
		goto free_conn;

	tcp_sw_conn->rx_hash.tfm = crypto_alloc_hash("crc32c", 0,
						     CRYPTO_ALG_ASYNC);
	tcp_sw_conn->rx_hash.flags = 0;
	if (IS_ERR(tcp_sw_conn->rx_hash.tfm))
		goto free_tx_tfm;
	tcp_conn->rx_hash = &tcp_sw_conn->rx_hash;

	return cls_conn;

free_tx_tfm:
	crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
free_conn:
	iscsi_conn_printk(KERN_ERR, conn,
			  "Could not create connection due to crc32c "
			  "loading error. Make sure the crc32c "
			  "module is built as a module or into the "
			  "kernel\n");
	iscsi_tcp_conn_teardown(cls_conn);
	return NULL;
}

static void iscsi_sw_tcp_release_conn(struct iscsi_conn *conn)
{
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	if (!sock)
		return;

	sock_hold(sock->sk);
	iscsi_sw_tcp_conn_restore_callbacks(tcp_sw_conn);
	sock_put(sock->sk);

	spin_lock_bh(&session->lock);
	tcp_sw_conn->sock = NULL;
	spin_unlock_bh(&session->lock);
	sockfd_put(sock);
}

static void iscsi_sw_tcp_conn_destroy(struct iscsi_cls_conn *cls_conn)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	iscsi_sw_tcp_release_conn(conn);

	if (tcp_sw_conn->tx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->tx_hash.tfm);
	if (tcp_sw_conn->rx_hash.tfm)
		crypto_free_hash(tcp_sw_conn->rx_hash.tfm);

	iscsi_tcp_conn_teardown(cls_conn);
}

static void iscsi_sw_tcp_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct socket *sock = tcp_sw_conn->sock;

	/* userspace may have goofed up and not bound us */
	if (!sock)
		return;

	if (sock->sk->sk_sleep) {
		sock->sk->sk_err = EIO;
		wake_up_interruptible(sock->sk->sk_sleep);
	}

	/* stop xmit side */
	iscsi_suspend_tx(conn);

	/* stop recv side and release socket */
	iscsi_sw_tcp_release_conn(conn);

	iscsi_conn_stop(cls_conn, flag);
}

static int
iscsi_sw_tcp_conn_bind(struct iscsi_cls_session *cls_session,
		       struct iscsi_cls_conn *cls_conn, uint64_t transport_eph,
		       int is_leading)
{
	struct iscsi_session *session = cls_session->dd_data;
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sock *sk;
	struct socket *sock;
	int err;

	/* lookup for existing socket */
	sock = sockfd_lookup((int)transport_eph, &err);
	if (!sock) {
		iscsi_conn_printk(KERN_ERR, conn,
				  "sockfd_lookup failed %d\n", err);
		return -EEXIST;
	}

	err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
	if (err)
		goto free_socket;

	spin_lock_bh(&session->lock);
	/* bind iSCSI connection and socket */
	tcp_sw_conn->sock = sock;
	spin_unlock_bh(&session->lock);

	/* setup Socket parameters */
	sk = sock->sk;
	sk->sk_reuse = 1;
	sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
	sk->sk_allocation = GFP_ATOMIC;

	iscsi_sw_tcp_conn_set_callbacks(conn);
	tcp_sw_conn->sendpage = tcp_sw_conn->sock->ops->sendpage;
	/*
	 * set receive state machine into initial state
	 */
	iscsi_tcp_hdr_recv_prep(tcp_conn);
	return 0;

free_socket:
	sockfd_put(sock);
	return err;
}

static int iscsi_sw_tcp_conn_set_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf,
				       int buflen)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_session *session = conn->session;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	int value;

	switch(param) {
	case ISCSI_PARAM_HDRDGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		break;
	case ISCSI_PARAM_DATADGST_EN:
		iscsi_set_param(cls_conn, param, buf, buflen);
		tcp_sw_conn->sendpage = conn->datadgst_en ?
			sock_no_sendpage : tcp_sw_conn->sock->ops->sendpage;
		break;
	case ISCSI_PARAM_MAX_R2T:
		sscanf(buf, "%d", &value);
		if (value <= 0 || !is_power_of_2(value))
			return -EINVAL;
		if (session->max_r2t == value)
			break;
		iscsi_tcp_r2tpool_free(session);
		iscsi_set_param(cls_conn, param, buf, buflen);
		if (iscsi_tcp_r2tpool_alloc(session))
			return -ENOMEM;
		break;
	default:
		return iscsi_set_param(cls_conn, param, buf, buflen);
	}

	return 0;
}

static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn,
				       enum iscsi_param param, char *buf)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;
	struct sockaddr_in6 addr;
	int rc, len;

	switch(param) {
	case ISCSI_PARAM_CONN_PORT:
	case ISCSI_PARAM_CONN_ADDRESS:
		spin_lock_bh(&conn->session->lock);
		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
			spin_unlock_bh(&conn->session->lock);
			return -ENOTCONN;
		}
		rc = kernel_getpeername(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&conn->session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_conn_get_param(cls_conn, param, buf);
	}

	return 0;
}

static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost,
				       enum iscsi_host_param param, char *buf)
{
	struct iscsi_sw_tcp_host *tcp_sw_host = iscsi_host_priv(shost);
	struct iscsi_session *session = tcp_sw_host->session;
	struct iscsi_conn *conn;
	struct iscsi_tcp_conn *tcp_conn;
	struct iscsi_sw_tcp_conn *tcp_sw_conn;
	struct sockaddr_in6 addr;
	int rc, len;

	switch (param) {
	case ISCSI_HOST_PARAM_IPADDRESS:
		spin_lock_bh(&session->lock);
		conn = session->leadconn;
		if (!conn) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}
		tcp_conn = conn->dd_data;

		tcp_sw_conn = tcp_conn->dd_data;
		if (!tcp_sw_conn->sock) {
			spin_unlock_bh(&session->lock);
			return -ENOTCONN;
		}

		rc = kernel_getsockname(tcp_sw_conn->sock,
					(struct sockaddr *)&addr, &len);
		spin_unlock_bh(&session->lock);
		if (rc)
			return rc;

		return iscsi_conn_get_addr_param((struct sockaddr_storage *)
						 &addr, param, buf);
	default:
		return iscsi_host_get_param(shost, param, buf);
	}

	return 0;
}

static void
iscsi_sw_tcp_conn_get_stats(struct iscsi_cls_conn *cls_conn,
			    struct iscsi_stats *stats)
{
	struct iscsi_conn *conn = cls_conn->dd_data;
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data;

	stats->custom_length = 3;
	strcpy(stats->custom[0].desc, "tx_sendpage_failures");
	stats->custom[0].value = tcp_sw_conn->sendpage_failures_cnt;
	strcpy(stats->custom[1].desc, "rx_discontiguous_hdr");
	stats->custom[1].value = tcp_sw_conn->discontiguous_hdr_cnt;
	strcpy(stats->custom[2].desc, "eh_abort_cnt");
	stats->custom[2].value = conn->eh_abort_cnt;

	iscsi_tcp_conn_get_stats(cls_conn, stats);
}

static struct iscsi_cls_session *
iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
			    uint16_t qdepth, uint32_t initial_cmdsn)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_session *session;
	struct iscsi_sw_tcp_host *tcp_sw_host;
	struct Scsi_Host *shost;

	if (ep) {
		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
		return NULL;
	}

	shost = iscsi_host_alloc(&iscsi_sw_tcp_sht,
				 sizeof(struct iscsi_sw_tcp_host), 1);
	if (!shost)
		return NULL;
	shost->transportt = iscsi_sw_tcp_scsi_transport;
	shost->cmd_per_lun = qdepth;
	shost->max_lun = iscsi_max_lun;
	shost->max_id = 0;
	shost->max_channel = 0;
	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

	if (iscsi_host_add(shost, NULL))
		goto free_host;

	cls_session = iscsi_session_setup(&iscsi_sw_tcp_transport, shost,
					  cmds_max, 0,
					  sizeof(struct iscsi_tcp_task) +
					  sizeof(struct iscsi_sw_tcp_hdrbuf),
					  initial_cmdsn, 0);
	if (!cls_session)
		goto remove_host;
	session = cls_session->dd_data;
	tcp_sw_host = iscsi_host_priv(shost);
	tcp_sw_host->session = session;

	shost->can_queue = session->scsi_cmds_max;
	if (iscsi_tcp_r2tpool_alloc(session))
		goto remove_session;
	return cls_session;

remove_session:
	iscsi_session_teardown(cls_session);
remove_host:
	iscsi_host_remove(shost);
free_host:
	iscsi_host_free(shost);
	return NULL;
}

static void iscsi_sw_tcp_session_destroy(struct iscsi_cls_session *cls_session)
{
	struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);

	iscsi_tcp_r2tpool_free(cls_session->dd_data);
	iscsi_session_teardown(cls_session);

	iscsi_host_remove(shost);
	iscsi_host_free(shost);
}

static mode_t iscsi_sw_tcp_attr_is_visible(int param_type, int param)
{
	switch (param_type) {
	case ISCSI_HOST_PARAM:
		switch (param) {
		case ISCSI_HOST_PARAM_NETDEV_NAME:
		case ISCSI_HOST_PARAM_HWADDRESS:
		case ISCSI_HOST_PARAM_IPADDRESS:
		case ISCSI_HOST_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	case ISCSI_PARAM:
		switch (param) {
		case ISCSI_PARAM_MAX_RECV_DLENGTH:
		case ISCSI_PARAM_MAX_XMIT_DLENGTH:
		case ISCSI_PARAM_HDRDGST_EN:
		case ISCSI_PARAM_DATADGST_EN:
		case ISCSI_PARAM_CONN_ADDRESS:
		case ISCSI_PARAM_CONN_PORT:
		case ISCSI_PARAM_EXP_STATSN:
		case ISCSI_PARAM_PERSISTENT_ADDRESS:
		case ISCSI_PARAM_PERSISTENT_PORT:
		case ISCSI_PARAM_PING_TMO:
		case ISCSI_PARAM_RECV_TMO:
		case ISCSI_PARAM_INITIAL_R2T_EN:
		case ISCSI_PARAM_MAX_R2T:
		case ISCSI_PARAM_IMM_DATA_EN:
		case ISCSI_PARAM_FIRST_BURST:
		case ISCSI_PARAM_MAX_BURST:
		case ISCSI_PARAM_PDU_INORDER_EN:
		case ISCSI_PARAM_DATASEQ_INORDER_EN:
		case ISCSI_PARAM_ERL:
		case ISCSI_PARAM_TARGET_NAME:
		case ISCSI_PARAM_TPGT:
		case ISCSI_PARAM_USERNAME:
		case ISCSI_PARAM_PASSWORD:
		case ISCSI_PARAM_USERNAME_IN:
		case ISCSI_PARAM_PASSWORD_IN:
		case ISCSI_PARAM_FAST_ABORT:
		case ISCSI_PARAM_ABORT_TMO:
		case ISCSI_PARAM_LU_RESET_TMO:
		case ISCSI_PARAM_TGT_RESET_TMO:
		case ISCSI_PARAM_IFACE_NAME:
		case ISCSI_PARAM_INITIATOR_NAME:
			return S_IRUGO;
		default:
			return 0;
		}
	}

	return 0;
}

static int iscsi_sw_tcp_slave_alloc(struct scsi_device *sdev)
{
	set_bit(QUEUE_FLAG_BIDI, &sdev->request_queue->queue_flags);
	return 0;
}
Example #25
0
static int ath9k_htc_start(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int ret = 0;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;

	mutex_lock(&priv->mutex);

	ath_dbg(common, ATH_DBG_CONFIG,
		"Starting driver with initial channel: %d MHz\n",
		curchan->center_freq);

	/* Ensure that HW is awake before flushing RX */
	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
	WMI_CMD(WMI_FLUSH_RECV_CMDID);

	/* setup initial channel */
	init_channel = ath9k_cmn_get_curchannel(hw, ah);

	ath9k_hw_htc_resetinit(ah);
	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			ret, curchan->center_freq);
		mutex_unlock(&priv->mutex);
		return ret;
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	mode = ath9k_htc_get_curmode(priv, init_channel);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	WMI_CMD(WMI_ATH_INIT_CMDID);
	WMI_CMD(WMI_START_RECV_CMDID);

	ath9k_host_rx_init(priv);

	ret = ath9k_htc_update_cap_target(priv, 0);
	if (ret)
		ath_dbg(common, ATH_DBG_CONFIG,
			"Failed to update capability in target\n");

	priv->op_flags &= ~OP_INVALID;
	htc_start(priv->htc);

	spin_lock_bh(&priv->tx.tx_lock);
	priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
	spin_unlock_bh(&priv->tx.tx_lock);

	ieee80211_wake_queues(hw);

	mod_timer(&priv->tx.cleanup_timer,
		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));

	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		ath9k_hw_btcoex_enable(ah);
		ath_htc_resume_btcoex_work(priv);
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
Example #26
0
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int header_length, i;
	u8 rate_idx, rate_flags, retry_rates;
	u8 skbdesc_flags = skbdesc->flags;
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Remove the extra tx headroom from the skb.
	 */
	skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Signal that the TX descriptor is no longer in the skb.
	 */
	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

	/*
	 * Determine the length of 802.11 header.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Remove L2 padding which was added during TX.
	 */
	if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/*
	 * If the IV/EIV data was stripped from the frame before it was
	 * passed to the hardware, we should now reinsert it again because
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
		rt2x00crypto_tx_insert_iv(entry->skb, header_length);

	/*
	 * Send frame to debugfs immediately; after this call completes
	 * we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);

	/*
	 * Determine if the frame has been successfully transmitted.
	 */
	success =
	    test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
	    test_bit(TXDONE_UNKNOWN, &txdesc->flags);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success += success;
	rt2x00dev->link.qual.tx_failed += !success;

	rate_idx = skbdesc->tx_rate_idx;
	rate_flags = skbdesc->tx_rate_flags;
	retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ?
	    (txdesc->retry + 1) : 1;

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;

	/*
	 * Frame was sent with retries: the hardware tried
	 * different rates to send out the frame, lowering
	 * the rate one step at each retry until the lowest
	 * rate was reached.
	 */
	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
		tx_info->status.rates[i].idx = rate_idx - i;
		tx_info->status.rates[i].flags = rate_flags;

		if (rate_idx - i == 0) {
			/*
			 * The lowest rate (index 0) was used until the
			 * number of max retries was reached.
			 */
			tx_info->status.rates[i].count = retry_rates - i;
			i++;
			break;
		}
		tx_info->status.rates[i].count = 1;
	}
	if (i < (IEEE80211_TX_MAX_RATES - 1))
		tx_info->status.rates[i].idx = -1; /* terminate */

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (success)
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	/*
	 * Every single frame has its own tx status, hence report
	 * every frame as ampdu of size 1.
	 *
	 * TODO: if we can find out how many frames were aggregated
	 * by the hw we could provide the real ampdu_len to mac80211
	 * which would allow the rc algorithm to better decide on
	 * which rates are suitable.
	 */
	if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
	    tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
		tx_info->status.ampdu_len = 1;
		tx_info->status.ampdu_ack_len = success ? 1 : 0;

		if (!success)
			tx_info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
	}

	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		if (success)
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}

	/*
	 * Only send the status report to mac80211 when it's a frame
	 * that originated in mac80211. If this was an extra frame coming
	 * through a mac80211 library call (RTS/CTS) then we should not
	 * send the status report back.
	 */
	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
		if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags))
			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
		else
			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
	} else
		dev_kfree_skb_any(entry->skb);

	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->clear_entry(entry);

	rt2x00queue_index_inc(entry, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished. This has to be
	 * serialized with rt2x00mac_tx(), otherwise we could wake up the
	 * queue before it has been stopped.
	 */
	spin_lock_bh(&entry->queue->tx_lock);
	if (!rt2x00queue_threshold(entry->queue))
		rt2x00queue_unpause_queue(entry->queue);
	spin_unlock_bh(&entry->queue->tx_lock);
}
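/*
 * A hedged sketch of the retry-rate reporting above: the hardware lowers
 * the rate one step per retry, so the status table is filled with
 * descending rate indices, and once index 0 is reached the remaining
 * attempts are lumped into that last entry. The array size and entry
 * layout are assumptions for illustration, not mac80211's definitions.
 */
#include <stdio.h>

#define SKETCH_MAX_RATES 4

struct sketch_rate { int idx; int count; };

static void sketch_fill_rates(struct sketch_rate *rates, int first_idx,
			      int retry_rates)
{
	int i;

	for (i = 0; i < retry_rates && i < SKETCH_MAX_RATES; i++) {
		rates[i].idx = first_idx - i;
		if (first_idx - i == 0) {
			/* lowest rate used for all remaining attempts */
			rates[i].count = retry_rates - i;
			i++;
			break;
		}
		rates[i].count = 1;
	}
	if (i < SKETCH_MAX_RATES - 1)
		rates[i].idx = -1;		/* terminate the list */
}

int main(void)
{
	struct sketch_rate rates[SKETCH_MAX_RATES] = { { 0, 0 } };
	int i;

	sketch_fill_rates(rates, 2, 5);		/* started at rate 2, 5 attempts */
	for (i = 0; i < SKETCH_MAX_RATES; i++)
		printf("rate[%d]: idx %d count %d\n", i, rates[i].idx, rates[i].count);
	return 0;
}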
Example #27
0
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* locally administered address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath9k_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}
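/*
 * Standalone sketch of the per-wiphy MAC derivation in ath9k_wiphy_add()
 * above: set the locally administered bit in the base address and XOR the
 * virtual wiphy index into the low-order bytes. Plain user-space C with an
 * assumed 6-byte address; the base address below is made up for the demo.
 */
#include <stdio.h>
#include <string.h>

#define SKETCH_ETH_ALEN 6

static void sketch_derive_addr(const unsigned char *base, unsigned int idx,
			       unsigned char *out)
{
	memcpy(out, base, SKETCH_ETH_ALEN);
	out[0] |= 0x02;				/* locally administered address */
	out[5] ^= idx & 0xff;
	out[4] ^= (idx & 0xff00) >> 8;
	out[3] ^= (idx & 0xff0000) >> 16;
}

int main(void)
{
	unsigned char base[SKETCH_ETH_ALEN] = { 0x00, 0x03, 0x7f, 0x12, 0x34, 0x56 };
	unsigned char addr[SKETCH_ETH_ALEN];

	sketch_derive_addr(base, 1, addr);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
	return 0;
}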
/* Our exported functions */
static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx,
				int do_cont_test)
{
	unsigned char *ptr = buf;
	unsigned int byte_count = (unsigned int)nbytes;
	int err;


	spin_lock_bh(&ctx->prng_lock);

	err = -EINVAL;
	if (ctx->flags & PRNG_NEED_RESET)
		goto done;

	/*
	 * If the FIXED_SIZE flag is on, only return whole blocks of
	 * pseudo random data
	 */
	err = -EINVAL;
	if (ctx->flags & PRNG_FIXED_SIZE) {
		if (nbytes < DEFAULT_BLK_SZ)
			goto done;
		byte_count = DEFAULT_BLK_SZ;
	}

	err = byte_count;

	dbgprint(KERN_CRIT "getting %d random bytes for context %p\n",
		byte_count, ctx);


remainder:
	if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
		if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
			memset(buf, 0, nbytes);
			err = -EINVAL;
			goto done;
		}
	}

	/*
	 * Copy any data less than an entire block
	 */
	if (byte_count < DEFAULT_BLK_SZ) {
empty_rbuf:
		while (ctx->rand_data_valid < DEFAULT_BLK_SZ) {
			*ptr = ctx->rand_data[ctx->rand_data_valid];
			ptr++;
			byte_count--;
			ctx->rand_data_valid++;
			if (byte_count == 0)
				goto done;
		}
	}

	/*
	 * Now copy whole blocks
	 */
	for (; byte_count >= DEFAULT_BLK_SZ; byte_count -= DEFAULT_BLK_SZ) {
		if (ctx->rand_data_valid == DEFAULT_BLK_SZ) {
			if (_get_more_prng_bytes(ctx, do_cont_test) < 0) {
				memset(buf, 0, nbytes);
				err = -EINVAL;
				goto done;
			}
		}
		if (ctx->rand_data_valid > 0)
			goto empty_rbuf;
		memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ);
		ctx->rand_data_valid += DEFAULT_BLK_SZ;
		ptr += DEFAULT_BLK_SZ;
	}

	/*
	 * Now go back and get any remaining partial block
	 */
	if (byte_count)
		goto remainder;

done:
	spin_unlock_bh(&ctx->prng_lock);
	dbgprint(KERN_CRIT "returning %d from get_prng_bytes in context %p\n",
		err, ctx);
	return err;
}
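/*
 * A simplified user-space sketch of the pattern in get_prng_bytes() above:
 * output is produced one fixed-size block at a time, a position tracks how
 * much of the current block has been handed out, and the block is
 * regenerated once it is exhausted. The refill function is a stand-in
 * assumption, not the cipher-based _get_more_prng_bytes(), and there is no
 * locking or FIXED_SIZE handling here.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SKETCH_BLK_SZ 16

struct sketch_prng {
	unsigned char block[SKETCH_BLK_SZ];
	unsigned int used;		/* bytes of the current block consumed */
};

static void sketch_refill(struct sketch_prng *p)
{
	unsigned int i;

	for (i = 0; i < SKETCH_BLK_SZ; i++)
		p->block[i] = (unsigned char)rand();	/* stand-in generator */
	p->used = 0;
}

static void sketch_get_bytes(struct sketch_prng *p, unsigned char *buf,
			     size_t nbytes)
{
	while (nbytes) {
		size_t avail, chunk;

		if (p->used == SKETCH_BLK_SZ)	/* block exhausted: refill */
			sketch_refill(p);
		avail = SKETCH_BLK_SZ - p->used;
		chunk = nbytes < avail ? nbytes : avail;
		memcpy(buf, p->block + p->used, chunk);
		p->used += chunk;
		buf += chunk;
		nbytes -= chunk;
	}
}

int main(void)
{
	struct sketch_prng prng = { .used = SKETCH_BLK_SZ };	/* force first refill */
	unsigned char out[40];

	sketch_get_bytes(&prng, out, sizeof(out));		/* spans three blocks */
	printf("got %zu random-ish bytes\n", sizeof(out));
	return 0;
}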
Example #29
0
void tipc_bclink_remove_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}
void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata, struct ieee80211_mgmt *mgmt,
			 size_t len, struct ieee80211_rx_status *rx_status)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee802_11_elems elems;
	struct sta_info *sta;
	enum plink_event event;
	enum plink_frame_type ftype;
	size_t baselen;
	u8 ie_len;
	u8 *baseaddr;
	__le16 plid, llid, reason;
#ifdef CONFIG_MAC80211_VERBOSE_MPL_DEBUG
	static const char *mplstates[] = {
		[PLINK_LISTEN] = "LISTEN",
		[PLINK_OPN_SNT] = "OPN-SNT",
		[PLINK_OPN_RCVD] = "OPN-RCVD",
		[PLINK_CNF_RCVD] = "CNF_RCVD",
		[PLINK_ESTAB] = "ESTAB",
		[PLINK_HOLDING] = "HOLDING",
		[PLINK_BLOCKED] = "BLOCKED"
	};
#endif

	/* need action_code, aux */
	if (len < IEEE80211_MIN_ACTION_SIZE + 3)
		return;

	if (is_multicast_ether_addr(mgmt->da)) {
		mpl_dbg("Mesh plink: ignore frame from multicast address");
		return;
	}

	baseaddr = mgmt->u.action.u.plink_action.variable;
	baselen = (u8 *) mgmt->u.action.u.plink_action.variable - (u8 *) mgmt;
	if (mgmt->u.action.u.plink_action.action_code == PLINK_CONFIRM) {
		baseaddr += 4;
		baselen += 4;
	}
	ieee802_11_parse_elems(baseaddr, len - baselen, &elems);
	if (!elems.peer_link) {
		mpl_dbg("Mesh plink: missing necessary peer link ie\n");
		return;
	}

	ftype = mgmt->u.action.u.plink_action.action_code;
	ie_len = elems.peer_link_len;
	if ((ftype == PLINK_OPEN && ie_len != 6) ||
	    (ftype == PLINK_CONFIRM && ie_len != 8) ||
	    (ftype == PLINK_CLOSE && ie_len != 8 && ie_len != 10)) {
		mpl_dbg("Mesh plink: incorrect plink ie length %d %d\n",
		    ftype, ie_len);
		return;
	}

	if (ftype != PLINK_CLOSE && (!elems.mesh_id || !elems.mesh_config)) {
		mpl_dbg("Mesh plink: missing necessary ie\n");
		return;
	}
	/* Note the lines below are correct, the llid in the frame is the plid
	 * from the point of view of this host.
	 */
	memcpy(&plid, PLINK_GET_LLID(elems.peer_link), 2);
	if (ftype == PLINK_CONFIRM || (ftype == PLINK_CLOSE && ie_len == 10))
		memcpy(&llid, PLINK_GET_PLID(elems.peer_link), 2);

	rcu_read_lock();

	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta && ftype != PLINK_OPEN) {
		mpl_dbg("Mesh plink: cls or cnf from unknown peer\n");
		rcu_read_unlock();
		return;
	}

	if (sta && sta->plink_state == PLINK_BLOCKED) {
		rcu_read_unlock();
		return;
	}

	/* Now we will figure out the appropriate event... */
	event = PLINK_UNDEFINED;
	if (ftype != PLINK_CLOSE && (!mesh_matches_local(&elems, sdata))) {
		switch (ftype) {
		case PLINK_OPEN:
			event = OPN_RJCT;
			break;
		case PLINK_CONFIRM:
			event = CNF_RJCT;
			break;
		case PLINK_CLOSE:
			/* avoid warning */
			break;
		}
		spin_lock_bh(&sta->lock);
	} else if (!sta) {
		/* ftype == PLINK_OPEN */
		u32 rates;

		rcu_read_unlock();

		if (!mesh_plink_free_count(sdata)) {
			mpl_dbg("Mesh plink error: no more free plinks\n");
			return;
		}

		rates = ieee80211_sta_get_rates(local, &elems, rx_status->band);
		sta = mesh_plink_alloc(sdata, mgmt->sa, rates);
		if (!sta) {
			mpl_dbg("Mesh plink error: plink table full\n");
			return;
		}
		if (sta_info_insert_rcu(sta)) {
			rcu_read_unlock();
			return;
		}
		event = OPN_ACPT;
		spin_lock_bh(&sta->lock);
	} else {
		spin_lock_bh(&sta->lock);
		switch (ftype) {
		case PLINK_OPEN:
			if (!mesh_plink_free_count(sdata) ||
			    (sta->plid && sta->plid != plid))
				event = OPN_IGNR;
			else
				event = OPN_ACPT;
			break;
		case PLINK_CONFIRM:
			if (!mesh_plink_free_count(sdata) ||
			    (sta->llid != llid || sta->plid != plid))
				event = CNF_IGNR;
			else
				event = CNF_ACPT;
			break;
		case PLINK_CLOSE:
			if (sta->plink_state == PLINK_ESTAB)
				/* Do not check for llid or plid. This does not
				 * follow the standard but since multiple plinks
				 * per sta are not supported, it is necessary in
				 * order to avoid a livelock when MP A sees an
				 * established peer link to MP B but MP B does not
				 * see it. This can be caused by a timeout in
				 * B's peer link establishment or B being
				 * restarted.
				 */
				event = CLS_ACPT;
			else if (sta->plid != plid)
				event = CLS_IGNR;
			else if (ie_len == 7 && sta->llid != llid)
				event = CLS_IGNR;
			else
				event = CLS_ACPT;
			break;
		default:
			mpl_dbg("Mesh plink: unknown frame subtype\n");
			spin_unlock_bh(&sta->lock);
			rcu_read_unlock();
			return;
		}
	}

	mpl_dbg("Mesh plink (peer, state, llid, plid, event): %pM %s %d %d %d\n",
		mgmt->sa, mplstates[sta->plink_state],
		le16_to_cpu(sta->llid), le16_to_cpu(sta->plid),
		event);
	reason = 0;
	switch (sta->plink_state) {
		/* spin_unlock as soon as state is updated at each case */
	case PLINK_LISTEN:
		switch (event) {
		case CLS_ACPT:
			mesh_plink_fsm_restart(sta);
			spin_unlock_bh(&sta->lock);
			break;
		case OPN_ACPT:
			sta->plink_state = PLINK_OPN_RCVD;
			sta->plid = plid;
			get_random_bytes(&llid, 2);
			sta->llid = llid;
			mesh_plink_timer_set(sta, dot11MeshRetryTimeout(sdata));
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_OPEN, sta->sta.addr, llid,
					    0, 0);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr,
					    llid, plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_OPN_SNT:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			/* retry timer is left untouched */
			sta->plink_state = PLINK_OPN_RCVD;
			sta->plid = plid;
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		case CNF_ACPT:
			sta->plink_state = PLINK_CNF_RCVD;
			if (!mod_plink_timer(sta,
					     dot11MeshConfirmTimeout(sdata)))
				sta->ignore_plink_timer = true;

			spin_unlock_bh(&sta->lock);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_OPN_RCVD:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		case CNF_ACPT:
			del_timer(&sta->plink_timer);
			sta->plink_state = PLINK_ESTAB;
			mesh_plink_inc_estab_count(sdata);
			spin_unlock_bh(&sta->lock);
			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
				sta->sta.addr);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_CNF_RCVD:
		switch (event) {
		case OPN_RJCT:
		case CNF_RJCT:
			reason = cpu_to_le16(MESH_CAPABILITY_POLICY_VIOLATION);
		case CLS_ACPT:
			if (!reason)
				reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			sta->plink_state = PLINK_HOLDING;
			if (!mod_plink_timer(sta,
					     dot11MeshHoldingTimeout(sdata)))
				sta->ignore_plink_timer = true;

			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			del_timer(&sta->plink_timer);
			sta->plink_state = PLINK_ESTAB;
			mesh_plink_inc_estab_count(sdata);
			spin_unlock_bh(&sta->lock);
			mpl_dbg("Mesh plink with %pM ESTABLISHED\n",
				sta->sta.addr);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;

	case PLINK_ESTAB:
		switch (event) {
		case CLS_ACPT:
			reason = cpu_to_le16(MESH_CLOSE_RCVD);
			sta->reason = reason;
			__mesh_plink_deactivate(sta);
			sta->plink_state = PLINK_HOLDING;
			llid = sta->llid;
			mod_plink_timer(sta, dot11MeshHoldingTimeout(sdata));
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr, llid,
					    plid, reason);
			break;
		case OPN_ACPT:
			llid = sta->llid;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CONFIRM, sta->sta.addr, llid,
					    plid, 0);
			break;
		default:
			spin_unlock_bh(&sta->lock);
			break;
		}
		break;
	case PLINK_HOLDING:
		switch (event) {
		case CLS_ACPT:
			if (del_timer(&sta->plink_timer))
				sta->ignore_plink_timer = 1;
			mesh_plink_fsm_restart(sta);
			spin_unlock_bh(&sta->lock);
			break;
		case OPN_ACPT:
		case CNF_ACPT:
		case OPN_RJCT:
		case CNF_RJCT:
			llid = sta->llid;
			reason = sta->reason;
			spin_unlock_bh(&sta->lock);
			mesh_plink_frame_tx(sdata, PLINK_CLOSE, sta->sta.addr,
					    llid, plid, reason);
			break;
		default:
			spin_unlock_bh(&sta->lock);
		}
		break;
	default:
		/* should not get here, PLINK_BLOCKED is dealt with at the
		 * beginning of the function
		 */
		spin_unlock_bh(&sta->lock);
		break;
	}

	rcu_read_unlock();
}