Code example #1
void ax25_linkfail_release(struct ax25_linkfail *lf)
{
    spin_lock_bh(&linkfail_lock);
    hlist_del_init(&lf->lf_node);
    spin_unlock_bh(&linkfail_lock);
}
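Every example in this listing follows the same basic pattern shown by ax25_linkfail_release() above: a short critical section bracketed by spin_lock_bh()/spin_unlock_bh(), which takes the spinlock and also disables bottom-half (softirq) processing on the local CPU, so the protected data cannot be touched concurrently from softirq context. A minimal, self-contained sketch of that pattern follows; the names (example_lock, example_list, example_add) are hypothetical and do not come from any of the examples.

#include <linux/spinlock.h>
#include <linux/list.h>

/* Hypothetical shared state that is also read/modified from softirq context. */
static LIST_HEAD(example_list);
static DEFINE_SPINLOCK(example_lock);

struct example_entry {
	struct list_head node;
	int value;
};

/* Add an entry to the shared list. spin_lock_bh() takes the lock and
 * disables bottom halves on this CPU; spin_unlock_bh() re-enables them. */
static void example_add(struct example_entry *e)
{
	spin_lock_bh(&example_lock);
	list_add_tail(&e->node, &example_list);
	spin_unlock_bh(&example_lock);
}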
Code example #2
File: agg-tx.c Project: AK101111/linux
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
					 struct sta_info *sta, u16 tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_OPERATIONAL,
		.tid = tid,
		.timeout = 0,
		.ssn = 0,
	};

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	params.buf_size = tid_tx->buf_size;
	params.amsdu = tid_tx->amsdu;

	ht_dbg(sta->sdata, "Aggregation is on for %pM tid %d\n",
	       sta->sta.addr, tid);

	drv_ampdu_action(local, sta->sdata, &params);

	/*
	 * synchronize with TX path, while splicing the TX path
	 * should block so it won't put more packets onto pending.
	 */
	spin_lock_bh(&sta->lock);

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);
	/*
	 * Now mark as operational. This will be visible
	 * in the TX path, and lets it go lock-free in
	 * the common case.
	 */
	set_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
	ieee80211_agg_splice_finish(sta->sdata, tid);

	spin_unlock_bh(&sta->lock);

	ieee80211_agg_start_txq(sta, tid, true);
}

void ieee80211_start_tx_ba_cb(struct ieee80211_vif *vif, u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;
	struct tid_ampdu_tx *tid_tx;

	trace_api_start_tx_ba_cb(sdata, ra, tid);

	if (tid >= IEEE80211_NUM_TIDS) {
		ht_dbg(sdata, "Bad TID value: tid = %d (>= %d)\n",
		       tid, IEEE80211_NUM_TIDS);
		return;
	}

	mutex_lock(&local->sta_mtx);
	sta = sta_info_get_bss(sdata, ra);
	if (!sta) {
		mutex_unlock(&local->sta_mtx);
		ht_dbg(sdata, "Could not find station: %pM\n", ra);
		return;
	}

	mutex_lock(&sta->ampdu_mlme.mtx);
	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	if (WARN_ON(!tid_tx)) {
		ht_dbg(sdata, "addBA was not requested!\n");
		goto unlock;
	}

	if (WARN_ON(test_and_set_bit(HT_AGG_STATE_DRV_READY, &tid_tx->state)))
		goto unlock;

	if (test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state))
		ieee80211_agg_tx_operational(local, sta, tid);

 unlock:
	mutex_unlock(&sta->ampdu_mlme.mtx);
	mutex_unlock(&local->sta_mtx);
}

void ieee80211_start_tx_ba_cb_irqsafe(struct ieee80211_vif *vif,
				      const u8 *ra, u16 tid)
{
	struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_ra_tid *ra_tid;
	struct sk_buff *skb = dev_alloc_skb(0);

	if (unlikely(!skb))
		return;

	ra_tid = (struct ieee80211_ra_tid *) &skb->cb;
	memcpy(&ra_tid->ra, ra, ETH_ALEN);
	ra_tid->tid = tid;

	skb->pkt_type = IEEE80211_SDATA_QUEUE_AGG_START;
	skb_queue_tail(&sdata->skb_queue, skb);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_cb_irqsafe);

int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				   enum ieee80211_agg_stop_reason reason)
{
	int ret;

	mutex_lock(&sta->ampdu_mlme.mtx);

	ret = ___ieee80211_stop_tx_ba_session(sta, tid, reason);

	mutex_unlock(&sta->ampdu_mlme.mtx);

	return ret;
}
Code example #3
static int mipi_dsi_off(struct platform_device *pdev)
{
	int ret = 0;
	struct msm_fb_data_type *mfd;
	struct msm_panel_info *pinfo;

	pr_debug("%s+:\n", __func__);

	mfd = platform_get_drvdata(pdev);
	pinfo = &mfd->panel_info;

	if (mdp_rev >= MDP_REV_41)
		mutex_lock(&mfd->dma->ov_mutex);
	else
		down(&mfd->dma->mutex);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		mipi_dsi_prepare_ahb_clocks();
		mipi_dsi_ahb_ctrl(1);
		mipi_dsi_clk_enable();

		/* make sure dsi_cmd_mdp is idle */
		mipi_dsi_cmd_mdp_busy();
	}

	/*
	 * Description: change to DSI_CMD_MODE since it is needed to
	 * tx the DCS display-off command to the panel
	 */
	mipi_dsi_op_mode_config(DSI_CMD_MODE);

	if (mfd->panel_info.type == MIPI_CMD_PANEL) {
		if (pinfo->lcd.vsync_enable) {
			if (pinfo->lcd.hw_vsync_mode && vsync_gpio >= 0) {
				if (MDP_REV_303 != mdp_rev)
					gpio_free(vsync_gpio);
			}
			mipi_dsi_set_tear_off(mfd);
		}
	}

	ret = panel_next_off(pdev);

	spin_lock_bh(&dsi_clk_lock);

	mipi_dsi_clk_disable();

	/* disable dsi engine */
	MIPI_OUTP(MIPI_DSI_BASE + 0x0000, 0);

	mipi_dsi_phy_ctrl(0);

	mipi_dsi_ahb_ctrl(0);
	spin_unlock_bh(&dsi_clk_lock);

	mipi_dsi_unprepare_clocks();
	mipi_dsi_unprepare_ahb_clocks();
	if (mipi_dsi_pdata && mipi_dsi_pdata->dsi_power_save)
		mipi_dsi_pdata->dsi_power_save(0);

	if (mdp_rev >= MDP_REV_41)
		mutex_unlock(&mfd->dma->ov_mutex);
	else
		up(&mfd->dma->mutex);

	printk("LCD%s-:\n", __func__);

	return ret;
}
Code example #4
File: br.c Project: AshishNamdev/linux
/*
 * Handle changes in state of network devices enslaved to a bridge.
 *
 * Note: don't care about up/down if bridge itself is down, because
 *     port state is checked when bridge is brought up.
 */
static int br_device_event(struct notifier_block *unused, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net_bridge_port *p;
	struct net_bridge *br;
	bool changed_addr;
	int err;

	/* register of bridge completed, add sysfs entries */
	if ((dev->priv_flags & IFF_EBRIDGE) && event == NETDEV_REGISTER) {
		br_sysfs_addbr(dev);
		return NOTIFY_DONE;
	}

	/* not a port of a bridge */
	p = br_port_get_rtnl(dev);
	if (!p)
		return NOTIFY_DONE;

	br = p->br;

	switch (event) {
	case NETDEV_CHANGEMTU:
		dev_set_mtu(br->dev, br_min_mtu(br));
		break;

	case NETDEV_CHANGEADDR:
		spin_lock_bh(&br->lock);
		br_fdb_changeaddr(p, dev->dev_addr);
		changed_addr = br_stp_recalculate_bridge_id(br);
		spin_unlock_bh(&br->lock);

		if (changed_addr)
			call_netdevice_notifiers(NETDEV_CHANGEADDR, br->dev);

		break;

	case NETDEV_CHANGE:
		br_port_carrier_check(p);
		break;

	case NETDEV_FEAT_CHANGE:
		netdev_update_features(br->dev);
		break;

	case NETDEV_DOWN:
		spin_lock_bh(&br->lock);
		if (br->dev->flags & IFF_UP)
			br_stp_disable_port(p);
		spin_unlock_bh(&br->lock);
		break;

	case NETDEV_UP:
		if (netif_running(br->dev) && netif_oper_up(dev)) {
			spin_lock_bh(&br->lock);
			br_stp_enable_port(p);
			spin_unlock_bh(&br->lock);
		}
		break;

	case NETDEV_UNREGISTER:
		br_del_if(br, dev);
		break;

	case NETDEV_CHANGENAME:
		err = br_sysfs_renameif(p);
		if (err)
			return notifier_from_errno(err);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid the underlying device from changing its type. */
		return NOTIFY_BAD;

	case NETDEV_RESEND_IGMP:
		/* Propagate to master device */
		call_netdevice_notifiers(event, br->dev);
		break;
	}

	/* Events that may cause spanning tree to refresh */
	if (event == NETDEV_CHANGEADDR || event == NETDEV_UP ||
	    event == NETDEV_CHANGE || event == NETDEV_DOWN)
		br_ifinfo_notify(RTM_NEWLINK, p);

	return NOTIFY_DONE;
}
Code example #5
File: agg-tx.c Project: AK101111/linux
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.action = IEEE80211_AMPDU_TX_START,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
	};
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	ieee80211_agg_stop_txq(sta, tid);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	params.ssn = sta->tid_seq[tid] >> 4;
	ret = drv_ampdu_action(local, sdata, &params);
	if (ret) {
		ht_dbg(sdata,
		       "BA request denied - HW unavailable for %pM tid %d\n",
		       sta->sta.addr, tid);
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(sdata, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(sdata, tid);
		spin_unlock_bh(&sta->lock);

		ieee80211_agg_start_txq(sta, tid, false);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
	ht_dbg(sdata, "activated addBA response timer on %pM tid %d\n",
	       sta->sta.addr, tid);

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.last_addba_req_time[tid] = jiffies;
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, params.ssn,
				     IEEE80211_MAX_AMPDU_BUF,
				     tid_tx->timeout);
}

/*
 * After accepting the AddBA Response we activated a timer,
 * resetting it after each frame that we send.
 */
static void sta_tx_agg_session_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and various sta_info are needed here, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u8 *ptid = (u8 *)data;
	u8 *timer_to_id = ptid - *ptid;
	struct sta_info *sta = container_of(timer_to_id, struct sta_info,
					 timer_to_tid[0]);
	struct tid_ampdu_tx *tid_tx;
	unsigned long timeout;

	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[*ptid]);
	if (!tid_tx || test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		rcu_read_unlock();
		return;
	}

	timeout = tid_tx->last_tx + TU_TO_JIFFIES(tid_tx->timeout);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&tid_tx->session_timer, timeout);
		rcu_read_unlock();
		return;
	}

	rcu_read_unlock();

	ht_dbg(sta->sdata, "tx session timer expired on %pM tid %d\n",
	       sta->sta.addr, (u16)*ptid);

	ieee80211_stop_tx_ba_session(&sta->sta, *ptid);
}
Code example #6
File: htt_tx.c Project: 19Dan01/linux
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txdesc;

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	cmd->hdr.msg_type         = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len        = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id    = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id    = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
Code example #7
File: mesh_hwmp.c Project: Jalil89/openwrt
static void hwmp_prep_frame_process(struct ieee80211_sub_if_data *sdata,
				    struct ieee80211_mgmt *mgmt,
				    const u8 *prep_elem, u32 metric)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_path *mpath;
	const u8 *target_addr, *orig_addr;
	u8 ttl, hopcount, flags;
	u8 next_hop[ETH_ALEN];
	u32 target_sn, orig_sn, lifetime;

	mhwmp_dbg(sdata, "received PREP from %pM\n",
		  PREP_IE_TARGET_ADDR(prep_elem));

	orig_addr = PREP_IE_ORIG_ADDR(prep_elem);
	if (ether_addr_equal(orig_addr, sdata->vif.addr))
		/* destination, no forwarding required */
		return;

	if (!ifmsh->mshcfg.dot11MeshForwarding)
		return;

	ttl = PREP_IE_TTL(prep_elem);
	if (ttl <= 1) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		return;
	}

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, orig_addr);
	if (mpath)
		spin_lock_bh(&mpath->state_lock);
	else
		goto fail;
	if (!(mpath->flags & MESH_PATH_ACTIVE)) {
		spin_unlock_bh(&mpath->state_lock);
		goto fail;
	}
	memcpy(next_hop, next_hop_deref_protected(mpath)->sta.addr, ETH_ALEN);
	spin_unlock_bh(&mpath->state_lock);
	--ttl;
	flags = PREP_IE_FLAGS(prep_elem);
	lifetime = PREP_IE_LIFETIME(prep_elem);
	hopcount = PREP_IE_HOPCOUNT(prep_elem) + 1;
	target_addr = PREP_IE_TARGET_ADDR(prep_elem);
	target_sn = PREP_IE_TARGET_SN(prep_elem);
	orig_sn = PREP_IE_ORIG_SN(prep_elem);

	mesh_path_sel_frame_tx(MPATH_PREP, flags, orig_addr, orig_sn, 0,
			       target_addr, target_sn, next_hop, hopcount,
			       ttl, lifetime, metric, 0, sdata);
	rcu_read_unlock();

	sdata->u.mesh.mshstats.fwded_unicast++;
	sdata->u.mesh.mshstats.fwded_frames++;
	return;

fail:
	rcu_read_unlock();
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
Code example #8
static int tipc_create(struct net *net, struct socket *sock, int protocol,
		       int kern)
{
	const struct proto_ops *ops;
	socket_state state;
	struct sock *sk;
	struct tipc_port *tp_ptr;

	/* Validate arguments */

	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		state = SS_UNCONNECTED;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		state = SS_READY;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */

	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto);
	if (sk == NULL)
		return -ENOMEM;

	/* Allocate TIPC port for socket to use */

	tp_ptr = tipc_createport_raw(sk, &dispatch, &wakeupdispatch,
				     TIPC_LOW_IMPORTANCE);
	if (unlikely(!tp_ptr)) {
		sk_free(sk);
		return -ENOMEM;
	}

	/* Finish initializing socket data structures */

	sock->ops = ops;
	sock->state = state;

	sock_init_data(sock, sk);
	sk->sk_backlog_rcv = backlog_rcv;
	tipc_sk(sk)->p = tp_ptr;
	tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT;

	spin_unlock_bh(tp_ptr->lock);

	if (sock->state == SS_READY) {
		tipc_set_portunreturnable(tp_ptr->ref, 1);
		if (sock->type == SOCK_DGRAM)
			tipc_set_portunreliable(tp_ptr->ref, 1);
	}

	return 0;
}
Code example #9
File: virtual.c Project: 12rafael/jellytimekernel
int ath9k_wiphy_add(struct ath_softc *sc)
{
	int i, error;
	struct ath_wiphy *aphy;
	struct ath_common *common = ath9k_hw_common(sc->sc_ah);
	struct ieee80211_hw *hw;
	u8 addr[ETH_ALEN];

	hw = ieee80211_alloc_hw(sizeof(struct ath_wiphy), &ath9k_ops);
	if (hw == NULL)
		return -ENOMEM;

	spin_lock_bh(&sc->wiphy_lock);
	for (i = 0; i < sc->num_sec_wiphy; i++) {
		if (sc->sec_wiphy[i] == NULL)
			break;
	}

	if (i == sc->num_sec_wiphy) {
		/* No empty slot available; increase array length */
		struct ath_wiphy **n;
		n = krealloc(sc->sec_wiphy,
			     (sc->num_sec_wiphy + 1) *
			     sizeof(struct ath_wiphy *),
			     GFP_ATOMIC);
		if (n == NULL) {
			spin_unlock_bh(&sc->wiphy_lock);
			ieee80211_free_hw(hw);
			return -ENOMEM;
		}
		n[i] = NULL;
		sc->sec_wiphy = n;
		sc->num_sec_wiphy++;
	}

	SET_IEEE80211_DEV(hw, sc->dev);

	aphy = hw->priv;
	aphy->sc = sc;
	aphy->hw = hw;
	sc->sec_wiphy[i] = aphy;
	spin_unlock_bh(&sc->wiphy_lock);

	memcpy(addr, common->macaddr, ETH_ALEN);
	addr[0] |= 0x02; /* Locally managed address */
	/*
	 * XOR virtual wiphy index into the least significant bits to generate
	 * a different MAC address for each virtual wiphy.
	 */
	addr[5] ^= i & 0xff;
	addr[4] ^= (i & 0xff00) >> 8;
	addr[3] ^= (i & 0xff0000) >> 16;

	SET_IEEE80211_PERM_ADDR(hw, addr);

	ath9k_set_hw_capab(sc, hw);

	error = ieee80211_register_hw(hw);

	if (error == 0) {
		/* Make sure wiphy scheduler is started (if enabled) */
		ath9k_wiphy_set_scheduler(sc, sc->wiphy_scheduler_int);
	}

	return error;
}
Code example #10
File: htc_drv_main.c Project: fleitner/net-next
static int ath9k_htc_start(struct ieee80211_hw *hw)
{
	struct ath9k_htc_priv *priv = hw->priv;
	struct ath_hw *ah = priv->ah;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_channel *curchan = hw->conf.channel;
	struct ath9k_channel *init_channel;
	int ret = 0;
	enum htc_phymode mode;
	__be16 htc_mode;
	u8 cmd_rsp;

	mutex_lock(&priv->mutex);

	ath_dbg(common, ATH_DBG_CONFIG,
		"Starting driver with initial channel: %d MHz\n",
		curchan->center_freq);

	/* Ensure that HW is awake before flushing RX */
	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
	WMI_CMD(WMI_FLUSH_RECV_CMDID);

	/* setup initial channel */
	init_channel = ath9k_cmn_get_curchannel(hw, ah);

	ath9k_hw_htc_resetinit(ah);
	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
	if (ret) {
		ath_err(common,
			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
			ret, curchan->center_freq);
		mutex_unlock(&priv->mutex);
		return ret;
	}

	ath9k_cmn_update_txpow(ah, priv->curtxpow, priv->txpowlimit,
			       &priv->curtxpow);

	mode = ath9k_htc_get_curmode(priv, init_channel);
	htc_mode = cpu_to_be16(mode);
	WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
	WMI_CMD(WMI_ATH_INIT_CMDID);
	WMI_CMD(WMI_START_RECV_CMDID);

	ath9k_host_rx_init(priv);

	ret = ath9k_htc_update_cap_target(priv, 0);
	if (ret)
		ath_dbg(common, ATH_DBG_CONFIG,
			"Failed to update capability in target\n");

	priv->op_flags &= ~OP_INVALID;
	htc_start(priv->htc);

	spin_lock_bh(&priv->tx.tx_lock);
	priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP;
	spin_unlock_bh(&priv->tx.tx_lock);

	ieee80211_wake_queues(hw);

	mod_timer(&priv->tx.cleanup_timer,
		  jiffies + msecs_to_jiffies(ATH9K_HTC_TX_CLEANUP_INTERVAL));

	if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE) {
		ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
					   AR_STOMP_LOW_WLAN_WGHT);
		ath9k_hw_btcoex_enable(ah);
		ath_htc_resume_btcoex_work(priv);
	}
	mutex_unlock(&priv->mutex);

	return ret;
}
Code example #11
File: nf_nat_autofw.c Project: 3sOx/asuswrt-merlin
static void
autofw_expect(struct nf_conn *ct, struct nf_conntrack_expect *exp)
{
	struct nf_nat_range pre_range;
	u_int32_t newdstip, newsrcip;
	u_int16_t port;
	int ret;
	struct nf_conn_help *help;
	struct nf_conn *exp_ct = exp->master;
	struct nf_conntrack_expect *newexp;
	int count;

	/* expect has been removed from the expect list, but expect isn't freed yet. */
	help = nfct_help(exp_ct);
	DEBUGP("autofw_nat_expected: got ");
	NF_CT_DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);

	spin_lock_bh(&nf_nat_autofw_lock);

	port = ntohs(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u.all);
	newdstip = exp->tuple.dst.u3.ip;
	newsrcip = exp->tuple.src.u3.ip;
	if (port < ntohs(help->help.ct_autofw_info.dport[0]) ||
	    port > ntohs(help->help.ct_autofw_info.dport[1])) {
		spin_unlock_bh(&nf_nat_autofw_lock);
		return;
	}

	/* Only need to do PRE_ROUTING */
	port -= ntohs(help->help.ct_autofw_info.dport[0]);
	port += ntohs(help->help.ct_autofw_info.to[0]);
	pre_range.flags = IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED;
	pre_range.min_ip = pre_range.max_ip = newdstip;
	pre_range.min.all = pre_range.max.all = htons(port);
	nf_nat_setup_info(ct, &pre_range, NF_IP_PRE_ROUTING);

	spin_unlock_bh(&nf_nat_autofw_lock);

	/* Add expect again */

	/* alloc will set exp->master = exp_ct */
	newexp = nf_conntrack_expect_alloc(exp_ct);
	if (!newexp)
		return;

	newexp->tuple.src.u3.ip = exp->tuple.src.u3.ip;
	newexp->tuple.dst.protonum = exp->tuple.dst.protonum;
	newexp->mask.src.u3.ip = 0xFFFFFFFF;
	newexp->mask.dst.protonum = 0xFF;

	newexp->tuple.dst.u3.ip = exp->tuple.dst.u3.ip;
	newexp->mask.dst.u3.ip = 0x0;

	for (count = 1; count < NF_CT_TUPLE_L3SIZE; count++) {
		newexp->tuple.src.u3.all[count] = 0x0;
		newexp->tuple.dst.u3.all[count] = 0x0;
	}

	newexp->mask.dst.u.all = 0x0;
	newexp->mask.src.u.all = 0x0;
	newexp->mask.src.l3num = 0x0;

	newexp->expectfn = autofw_expect;
	newexp->helper = NULL;
	newexp->flags = 0;

	/*
	 * exp->timeout.expires will be set to
	 * (jiffies + helper->timeout * HZ) when the expect is inserted.
	 */
	ret = nf_conntrack_expect_related(newexp);
	if (ret == 0)
		nf_conntrack_expect_put(newexp);
}
Code example #12
int sock_setsockopt(struct socket *sock, int level, int optname,
		    char *optval, int optlen)
{
	struct sock *sk=sock->sk;
#ifdef CONFIG_FILTER
	struct sk_filter *filter;
#endif
	int val;
	int valbool;
	struct linger ling;
	int ret = 0;
	
	/*
	 *	Options without arguments
	 */

#ifdef SO_DONTLINGER		/* Compatibility item... */
	switch(optname)
	{
		case SO_DONTLINGER:
			sk->linger=0;
			return 0;
	}
#endif	
		
  	if(optlen<sizeof(int))
  		return(-EINVAL);
  	
	if (get_user(val, (int *)optval))
		return -EFAULT;
	
  	valbool = val?1:0;

	lock_sock(sk);

  	switch(optname) 
  	{
		case SO_DEBUG:	
			if(val && !capable(CAP_NET_ADMIN))
			{
				ret = -EACCES;
			}
			else
				sk->debug=valbool;
			break;
		case SO_REUSEADDR:
			sk->reuse = valbool;
			break;
		case SO_TYPE:
		case SO_ERROR:
			ret = -ENOPROTOOPT;
		  	break;
		case SO_DONTROUTE:
			sk->localroute=valbool;
			break;
		case SO_BROADCAST:
			sk->broadcast=valbool;
			break;
		case SO_SNDBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it, this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			   
			if (val > sysctl_wmem_max)
				val = sysctl_wmem_max;

			sk->userlocks |= SOCK_SNDBUF_LOCK;
			if ((val * 2) < SOCK_MIN_SNDBUF)
				sk->sndbuf = SOCK_MIN_SNDBUF;
			else
				sk->sndbuf = (val * 2);

			/*
			 *	Wake up sending tasks if we
			 *	upped the value.
			 */
			sk->write_space(sk);
			break;

		case SO_RCVBUF:
			/* Don't error on this; BSD doesn't, and if you think
			   about it, this is right. Otherwise apps have to
			   play 'guess the biggest size' games. RCVBUF/SNDBUF
			   are treated in BSD as hints */
			  
			if (val > sysctl_rmem_max)
				val = sysctl_rmem_max;

			sk->userlocks |= SOCK_RCVBUF_LOCK;
			/* FIXME: is this lower bound the right one? */
			if ((val * 2) < SOCK_MIN_RCVBUF)
				sk->rcvbuf = SOCK_MIN_RCVBUF;
			else
				sk->rcvbuf = (val * 2);
			break;

		case SO_KEEPALIVE:
#ifdef CONFIG_INET
			if (sk->protocol == IPPROTO_TCP)
			{
				tcp_set_keepalive(sk, valbool);
			}
#endif
			sk->keepopen = valbool;
			break;

	 	case SO_OOBINLINE:
			sk->urginline = valbool;
			break;

	 	case SO_NO_CHECK:
			sk->no_check = valbool;
			break;

		case SO_PRIORITY:
			if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN)) 
				sk->priority = val;
			else
				ret = -EPERM;
			break;

		case SO_LINGER:
			if(optlen<sizeof(ling)) {
				ret = -EINVAL;	/* 1003.1g */
				break;
			}
			if (copy_from_user(&ling,optval,sizeof(ling))) {
				ret = -EFAULT;
				break;
			}
			if(ling.l_onoff==0) {
				sk->linger=0;
			} else {
#if (BITS_PER_LONG == 32)
				if (ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
					sk->lingertime=MAX_SCHEDULE_TIMEOUT;
				else
#endif
					sk->lingertime=ling.l_linger*HZ;
				sk->linger=1;
			}
			break;

		case SO_BSDCOMPAT:
			sk->bsdism = valbool;
			break;

		case SO_PASSCRED:
			sock->passcred = valbool;
			break;

		case SO_TIMESTAMP:
			sk->rcvtstamp = valbool;
			break;

		case SO_RCVLOWAT:
			if (val < 0)
				val = INT_MAX;
			sk->rcvlowat = val ? : 1;
			break;

		case SO_RCVTIMEO:
			ret = sock_set_timeout(&sk->rcvtimeo, optval, optlen);
			break;

		case SO_SNDTIMEO:
			ret = sock_set_timeout(&sk->sndtimeo, optval, optlen);
			break;

#ifdef CONFIG_NETDEVICES
		case SO_BINDTODEVICE:
		{
			char devname[IFNAMSIZ]; 

			/* Sorry... */ 
			if (!capable(CAP_NET_RAW)) {
				ret = -EPERM;
				break;
			}

			/* Bind this socket to a particular device like "eth0",
			 * as specified in the passed interface name. If the
			 * name is "" or the option length is zero the socket 
			 * is not bound. 
			 */ 

			if (!valbool) {
				sk->bound_dev_if = 0;
			} else {
				if (optlen > IFNAMSIZ) 
					optlen = IFNAMSIZ; 
				if (copy_from_user(devname, optval, optlen)) {
					ret = -EFAULT;
					break;
				}

				/* Remove any cached route for this socket. */
				sk_dst_reset(sk);

				if (devname[0] == '\0') {
					sk->bound_dev_if = 0;
				} else {
					struct net_device *dev = dev_get_by_name(devname);
					if (!dev) {
						ret = -ENODEV;
						break;
					}
					sk->bound_dev_if = dev->ifindex;
					dev_put(dev);
				}
			}
			break;
		}
#endif


#ifdef CONFIG_FILTER
		case SO_ATTACH_FILTER:
			ret = -EINVAL;
			if (optlen == sizeof(struct sock_fprog)) {
				struct sock_fprog fprog;

				ret = -EFAULT;
				if (copy_from_user(&fprog, optval, sizeof(fprog)))
					break;

				ret = sk_attach_filter(&fprog, sk);
			}
			break;

		case SO_DETACH_FILTER:
			spin_lock_bh(&sk->lock.slock);
			filter = sk->filter;
			if (filter) {
				sk->filter = NULL;
				spin_unlock_bh(&sk->lock.slock);
				sk_filter_release(sk, filter);
				break;
			}
			spin_unlock_bh(&sk->lock.slock);
			ret = -ENONET;
			break;
#endif
		/* We implement the SO_SNDLOWAT etc to
		   not be settable (1003.1g 5.3) */
		default:
		  	ret = -ENOPROTOOPT;
			break;
  	}
	release_sock(sk);
	return ret;
}
Code example #13
File: cdc_mbim.c Project: DenisLug/mptcp
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
{
	struct sk_buff *skb_out;
	struct cdc_mbim_state *info = (void *)&dev->data;
	struct cdc_ncm_ctx *ctx = info->ctx;
	__le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN);
	u16 tci = 0;
	bool is_ip;
	u8 *c;

	if (!ctx)
		goto error;

	if (skb) {
		if (skb->len <= ETH_HLEN)
			goto error;

		/* Some applications using e.g. packet sockets will
		 * bypass the VLAN acceleration and create tagged
		 * ethernet frames directly.  We primarily look for
		 * the accelerated out-of-band tag, but fall back if
		 * required
		 */
		skb_reset_mac_header(skb);
		if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN &&
		    __vlan_get_tag(skb, &tci) == 0) {
			is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto);
			skb_pull(skb, VLAN_ETH_HLEN);
		} else {
			is_ip = is_ip_proto(eth_hdr(skb)->h_proto);
			skb_pull(skb, ETH_HLEN);
		}

		/* Is IP session <0> tagged too? */
		if (info->flags & FLAG_IPS0_VLAN) {
			/* drop all untagged packets */
			if (!tci)
				goto error;
			/* map MBIM_IPS0_VID to IPS<0> */
			if (tci == MBIM_IPS0_VID)
				tci = 0;
		}

		/* mapping VLANs to MBIM sessions:
		 *   no tag     => IPS session <0> if !FLAG_IPS0_VLAN
		 *   1 - 255    => IPS session <vlanid>
		 *   256 - 511  => DSS session <vlanid - 256>
		 *   512 - 4093 => unsupported, drop
		 *   4094       => IPS session <0> if FLAG_IPS0_VLAN
		 */

		switch (tci & 0x0f00) {
		case 0x0000: /* VLAN ID 0 - 255 */
			if (!is_ip)
				goto error;
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		case 0x0100: /* VLAN ID 256 - 511 */
			if (is_ip)
				goto error;
			sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN);
			c = (u8 *)&sign;
			c[3] = tci;
			break;
		default:
			netif_err(dev, tx_err, dev->net,
				  "unsupported tci=0x%04x\n", tci);
			goto error;
		}
	}

	spin_lock_bh(&ctx->mtx);
	skb_out = cdc_ncm_fill_tx_frame(dev, skb, sign);
	spin_unlock_bh(&ctx->mtx);
	return skb_out;

error:
	if (skb)
		dev_kfree_skb_any(skb);

	return NULL;
}
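The VLAN-to-MBIM session mapping described in the comment inside cdc_mbim_tx_fixup() above can be illustrated as a small standalone helper. This is only a sketch of that mapping table; map_tci() and struct mbim_session are hypothetical names, and the IPS/DSS protocol check that the real driver performs on each frame is omitted.

#include <stdbool.h>

enum mbim_session_type { MBIM_IPS, MBIM_DSS, MBIM_UNSUPPORTED };

struct mbim_session {
	enum mbim_session_type type;
	unsigned int id;
};

/* Map a VLAN tag (tci) to an MBIM session per the table above:
 * 0 -> IPS 0 (unless IPS 0 is itself VLAN-tagged, then drop),
 * 1-255 -> IPS <vlanid>, 256-511 -> DSS <vlanid - 256>,
 * 512-4093 -> unsupported, 4094 -> IPS 0 when FLAG_IPS0_VLAN is set. */
static struct mbim_session map_tci(unsigned int tci, bool ips0_vlan)
{
	struct mbim_session s = { MBIM_UNSUPPORTED, 0 };

	if (ips0_vlan) {
		if (tci == 0)
			return s;	/* untagged frames are dropped */
		if (tci == 4094)	/* MBIM_IPS0_VID */
			tci = 0;
	}

	if (tci <= 255) {
		s.type = MBIM_IPS;
		s.id = tci;
	} else if (tci <= 511) {
		s.type = MBIM_DSS;
		s.id = tci - 256;
	}
	return s;
}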
Code example #14
void dev_mc_upload(struct net_device *dev)
{
    spin_lock_bh(&dev->xmit_lock);
    __dev_mc_upload(dev);
    spin_unlock_bh(&dev->xmit_lock);
}
Code example #15
File: ipsec_sa.c Project: doctaweeks/libreswan
int ipsec_sadb_cleanup(__u8 proto)
{
	unsigned i;
	int error = 0;
	struct ipsec_sa *ips;

	/* struct ipsec_sa *ipsnext, **ipsprev; */
	/* char sa[SATOT_BUF]; */
	/* size_t sa_len; */

	KLIPS_PRINT(debug_xform,
		    "klips_debug:ipsec_sadb_cleanup: "
		    "cleaning up proto=%d.\n",
		    proto);

	spin_lock_bh(&tdb_lock);

	for (i = 0; i < SADB_HASHMOD; i++) {
		ips = ipsec_sadb_hash[i];

		while (ips) {
			ipsec_sadb_hash[i] = ips->ips_hnext;
			ips->ips_hnext = NULL;
			ipsec_sa_put(ips, IPSEC_REFSAADD);

			ips = ipsec_sadb_hash[i];
		}
	}

/* errlab: */

	spin_unlock_bh(&tdb_lock);

#if IPSEC_SA_REF_CODE
	/* clean up SA reference table */

	/* go through the ref table and clean out all the SAs */
	KLIPS_PRINT(debug_xform,
		    "klips_debug:ipsec_sadb_cleanup: "
		    "removing SAref entries and tables.");
	{
		unsigned table, entry;
		for (table = 0; table < IPSEC_SA_REF_MAINTABLE_NUM_ENTRIES;
		     table++) {
			KLIPS_PRINT(debug_xform,
				    "klips_debug:ipsec_sadb_cleanup: "
				    "cleaning SAref table=%u.\n",
				    table);
			if (ipsec_sadb.refTable[table] == NULL) {
				printk("\n");
				KLIPS_PRINT(debug_xform,
					    "klips_debug:ipsec_sadb_cleanup: "
					    "cleaned %u used refTables.\n",
					    table);
				break;
			}
			for (entry = 0;
			     entry < IPSEC_SA_REF_SUBTABLE_NUM_ENTRIES;
			     entry++) {
				if (ipsec_sadb.refTable[table]->entry[entry] !=
				    NULL) {
					struct ipsec_sa *sa1 =
						ipsec_sadb.refTable[table]->
						entry[entry];
					ipsec_sa_put(sa1, IPSEC_REFOTHER);
					ipsec_sadb.refTable[table]->entry[entry
					] = NULL;
				}
			}
		}
	}
#endif  /* IPSEC_SA_REF_CODE */

	return error;
}
Code example #16
File: virtual.c Project: 12rafael/jellytimekernel
int ath9k_wiphy_select(struct ath_wiphy *aphy)
{
	struct ath_softc *sc = aphy->sc;
	bool now;

	spin_lock_bh(&sc->wiphy_lock);
	if (__ath9k_wiphy_scanning(sc)) {
		/*
		 * For now, we are using mac80211 sw scan and it expects to
		 * have full control over channel changes, so avoid wiphy
		 * scheduling during a scan. This could be optimized if the
		 * scanning control were moved into the driver.
		 */
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY;
	}
	if (__ath9k_wiphy_pausing(sc)) {
		if (sc->wiphy_select_failures == 0)
			sc->wiphy_select_first_fail = jiffies;
		sc->wiphy_select_failures++;
		if (time_after(jiffies, sc->wiphy_select_first_fail + HZ / 2))
		{
			printk(KERN_DEBUG "ath9k: Previous wiphy select timed "
			       "out; disable/enable hw to recover\n");
			__ath9k_wiphy_mark_all_paused(sc);
			/*
			 * TODO: this workaround to fix hardware is unlikely to
			 * be specific to virtual wiphy changes. It can happen
			 * on normal channel change, too, and as such, this
			 * should really be made more generic. For example,
			 * trigger radio disable/enable on GTT interrupt burst
			 * (say, 10 GTT interrupts received without any TX
			 * frame being completed)
			 */
			spin_unlock_bh(&sc->wiphy_lock);
			ath_radio_disable(sc, aphy->hw);
			ath_radio_enable(sc, aphy->hw);
			/* Only the primary wiphy hw is used for queuing work */
			ieee80211_queue_work(aphy->sc->hw,
				   &aphy->sc->chan_work);
			return -EBUSY; /* previous select still in progress */
		}
		spin_unlock_bh(&sc->wiphy_lock);
		return -EBUSY; /* previous select still in progress */
	}
	sc->wiphy_select_failures = 0;

	/* Store the new channel */
	sc->chan_idx = aphy->chan_idx;
	sc->chan_is_ht = aphy->chan_is_ht;
	sc->next_wiphy = aphy;

	__ath9k_wiphy_pause_all(sc);
	now = !__ath9k_wiphy_pausing(aphy->sc);
	spin_unlock_bh(&sc->wiphy_lock);

	if (now) {
		/* Ready to request channel change immediately */
		ieee80211_queue_work(aphy->sc->hw, &aphy->sc->chan_work);
	}

	/*
	 * wiphys will be unpaused in ath9k_tx_status() once channel has been
	 * changed if any wiphy needs time to become paused.
	 */

	return 0;
}
Code example #17
File: htt_tx.c Project: 19Dan01/linux
static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}
Code example #18
File: bcast.c Project: shimlee/mptcp_magw
void tipc_bclink_remove_node(u32 addr)
{
	spin_lock_bh(&bc_lock);
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
	spin_unlock_bh(&bc_lock);
}
Code example #19
File: htt_tx.c Project: 19Dan01/linux
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr;
	u32 frags_paddr;
	bool use_frags;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0) {
		spin_unlock_bh(&htt->tx_lock);
		goto err_tx_dec;
	}
	msdu_id = res;
	spin_unlock_bh(&htt->tx_lock);

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	/* Since HTT 3.0 there is no separate mgmt tx command. However, in case
	 * of mgmt tx using TX_FRM there is no tx fragment list; instead of a
	 * tx fragment list, the host driver specifies the frame pointer
	 * directly. */
	use_frags = htt->target_version_major < 3 ||
		    !ieee80211_is_mgmt(hdr->frame_control);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control))
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res)
		goto err_free_txbuf;

	if (likely(use_frags)) {
		frags = skb_cb->htt.txbuf->frags;

		frags[0].paddr = __cpu_to_le32(skb_cb->paddr);
		frags[0].len = __cpu_to_le32(msdu->len);
		frags[1].paddr = 0;
		frags[1].len = 0;

		flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->htt.txbuf_paddr;
	} else {
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

		frags_paddr = skb_cb->paddr;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance. */

	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (!ieee80211_has_protected(hdr->frame_control))
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
Code example #20
File: bcast.c Project: shimlee/mptcp_magw
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *crs;
	struct sk_buff *next;
	unsigned int released = 0;

	spin_lock_bh(&bc_lock);

	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (bclink->bcast_nodes.count)
			acked = bcl->fsm_msg_cnt;
		else
			acked = bcl->next_out_no;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(crs)) ||
		    less(bcl->fsm_msg_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
		crs = crs->next;

	/* Update packets that node is now acknowledging */

	while (crs && less_eq(buf_seqno(crs), acked)) {
		next = crs->next;

		if (crs != bcl->next_out)
			bcbuf_decr_acks(crs);
		else {
			bcbuf_set_acks(crs, 0);
			bcl->next_out = next;
			bclink_set_last_sent();
		}

		if (bcbuf_acks(crs) == 0) {
			bcl->first_out = next;
			bcl->out_queue_size--;
			kfree_skb(crs);
			released = 1;
		}
		crs = next;
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */

	if (unlikely(bcl->next_out)) {
		tipc_link_push_queue(bcl);
		bclink_set_last_sent();
	}
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
exit:
	spin_unlock_bh(&bc_lock);
}
Code example #21
File: mesh_hwmp.c Project: Jalil89/openwrt
/**
 * mesh_path_start_discovery - launch a path discovery from the PREQ queue
 *
 * @sdata: local mesh subif
 */
void mesh_path_start_discovery(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct mesh_preq_queue *preq_node;
	struct mesh_path *mpath;
	u8 ttl, target_flags;
	const u8 *da;
	u32 lifetime;

	spin_lock_bh(&ifmsh->mesh_preq_queue_lock);
	if (!ifmsh->preq_queue_len ||
		time_before(jiffies, ifmsh->last_preq +
				min_preq_int_jiff(sdata))) {
		spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);
		return;
	}

	preq_node = list_first_entry(&ifmsh->preq_queue.list,
			struct mesh_preq_queue, list);
	list_del(&preq_node->list);
	--ifmsh->preq_queue_len;
	spin_unlock_bh(&ifmsh->mesh_preq_queue_lock);

	rcu_read_lock();
	mpath = mesh_path_lookup(sdata, preq_node->dst);
	if (!mpath)
		goto enddiscovery;

	spin_lock_bh(&mpath->state_lock);
	mpath->flags &= ~MESH_PATH_REQ_QUEUED;
	if (preq_node->flags & PREQ_Q_F_START) {
		if (mpath->flags & MESH_PATH_RESOLVING) {
			spin_unlock_bh(&mpath->state_lock);
			goto enddiscovery;
		} else {
			mpath->flags &= ~MESH_PATH_RESOLVED;
			mpath->flags |= MESH_PATH_RESOLVING;
			mpath->discovery_retries = 0;
			mpath->discovery_timeout = disc_timeout_jiff(sdata);
		}
	} else if (!(mpath->flags & MESH_PATH_RESOLVING) ||
			mpath->flags & MESH_PATH_RESOLVED) {
		mpath->flags &= ~MESH_PATH_RESOLVING;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	ifmsh->last_preq = jiffies;

	if (time_after(jiffies, ifmsh->last_sn_update +
				net_traversal_jiffies(sdata)) ||
	    time_before(jiffies, ifmsh->last_sn_update)) {
		++ifmsh->sn;
		sdata->u.mesh.last_sn_update = jiffies;
	}
	lifetime = default_lifetime(sdata);
	ttl = sdata->u.mesh.mshcfg.element_ttl;
	if (ttl == 0) {
		sdata->u.mesh.mshstats.dropped_frames_ttl++;
		spin_unlock_bh(&mpath->state_lock);
		goto enddiscovery;
	}

	if (preq_node->flags & PREQ_Q_F_REFRESH)
		target_flags = MP_F_DO;
	else
		target_flags = MP_F_RF;

	spin_unlock_bh(&mpath->state_lock);
	da = (mpath->is_root) ? mpath->rann_snd_addr : broadcast_addr;
	mesh_path_sel_frame_tx(MPATH_PREQ, 0, sdata->vif.addr, ifmsh->sn,
			       target_flags, mpath->dst, mpath->sn, da, 0,
			       ttl, lifetime, 0, ifmsh->preq_id++, sdata);
	mod_timer(&mpath->timer, jiffies + mpath->discovery_timeout);

enddiscovery:
	rcu_read_unlock();
	kfree(preq_node);
}
Code example #22
File: bcast.c Project: shimlee/mptcp_magw
/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */
void tipc_bclink_recv_pkt(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred;

	/* Screen out unwanted broadcast messages */

	if (msg_mc_netid(msg) != tipc_net_id)
		goto exit;

	node = tipc_node_find(msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */

	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tipc_own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_node_unlock(node);
			spin_lock_bh(&bc_lock);
			bcl->stats.recv_nacks++;
			bclink->retransmit_to = node;
			bclink_retransmit_pkt(msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			spin_unlock_bh(&bc_lock);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(msg);
		}
		goto exit;
	}

	/* Handle in-sequence broadcast message */

	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */

		if (likely(msg_isdata(msg))) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			if (likely(msg_mcast(msg)))
				tipc_port_recv_mcast(buf, NULL);
			else
				kfree_skb(buf);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_link_recv_bundle(buf);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			int ret;
			ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
						      &node->bclink.reasm_tail,
						      &buf);
			if (ret == LINK_REASM_ERROR)
				goto unlock;
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_fragments++;
			if (ret == LINK_REASM_COMPLETE) {
				bcl->stats.recv_fragmented++;
				/* Point msg to inner header */
				msg = buf_msg(buf);
				spin_unlock_bh(&bc_lock);
				goto receive;
			}
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			tipc_named_recv(buf);
		} else {
			spin_lock_bh(&bc_lock);
			bclink_accept_pkt(node, seqno);
			spin_unlock_bh(&bc_lock);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */

		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (!node->bclink.deferred_head) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(node->bclink.deferred_head);
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */

		buf = node->bclink.deferred_head;
		node->bclink.deferred_head = buf->next;
		buf->next = NULL;
		node->bclink.deferred_size--;
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */

	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
					       &node->bclink.deferred_tail,
					       buf);
		node->bclink.deferred_size += deferred;
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	} else
		deferred = 0;

	spin_lock_bh(&bc_lock);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	spin_unlock_bh(&bc_lock);

unlock:
	tipc_node_unlock(node);
exit:
	kfree_skb(buf);
}
Code example #23
File: agg-tx.c Project: AK101111/linux
int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
				    enum ieee80211_agg_stop_reason reason)
{
	struct ieee80211_local *local = sta->local;
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_ampdu_params params = {
		.sta = &sta->sta,
		.tid = tid,
		.buf_size = 0,
		.amsdu = false,
		.timeout = 0,
		.ssn = 0,
	};
	int ret;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);

	switch (reason) {
	case AGG_STOP_DECLINED:
	case AGG_STOP_LOCAL_REQUEST:
	case AGG_STOP_PEER_REQUEST:
		params.action = IEEE80211_AMPDU_TX_STOP_CONT;
		break;
	case AGG_STOP_DESTROY_STA:
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH;
		break;
	default:
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	if (!tid_tx) {
		spin_unlock_bh(&sta->lock);
		return -ENOENT;
	}

	/*
	 * if we're already stopping ignore any new requests to stop
	 * unless we're destroying it in which case notify the driver
	 */
	if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
		spin_unlock_bh(&sta->lock);
		if (reason != AGG_STOP_DESTROY_STA)
			return -EALREADY;
		params.action = IEEE80211_AMPDU_TX_STOP_FLUSH_CONT;
		ret = drv_ampdu_action(local, sta->sdata, &params);
		WARN_ON_ONCE(ret);
		return 0;
	}

	if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
		/* not even started yet! */
		ieee80211_assign_tid_tx(sta, tid, NULL);
		spin_unlock_bh(&sta->lock);
		kfree_rcu(tid_tx, rcu_head);
		return 0;
	}

	set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);

	spin_unlock_bh(&sta->lock);

	ht_dbg(sta->sdata, "Tx BA session stop requested for %pM tid %u\n",
	       sta->sta.addr, tid);

	del_timer_sync(&tid_tx->addba_resp_timer);
	del_timer_sync(&tid_tx->session_timer);

	/*
	 * After this packets are no longer handed right through
	 * to the driver but are put onto tid_tx->pending instead,
	 * with locking to ensure proper access.
	 */
	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/*
	 * There might be a few packets being processed right now (on
	 * another CPU) that have already gotten past the aggregation
	 * check when it was still OPERATIONAL and consequently have
	 * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
	 * call into the driver at the same time or even before the
	 * TX paths calls into it, which could confuse the driver.
	 *
	 * Wait for all currently running TX paths to finish before
	 * telling the driver. New packets will not go through since
	 * the aggregation session is no longer OPERATIONAL.
	 */
	synchronize_net();

	tid_tx->stop_initiator = reason == AGG_STOP_PEER_REQUEST ?
					WLAN_BACK_RECIPIENT :
					WLAN_BACK_INITIATOR;
	tid_tx->tx_stop = reason == AGG_STOP_LOCAL_REQUEST;

	ret = drv_ampdu_action(local, sta->sdata, &params);

	/* HW shall not deny going back to legacy */
	if (WARN_ON(ret)) {
		/*
		 * We may have pending packets get stuck in this case...
		 * Not bothering with a workaround for now.
		 */
	}

	/*
	 * In the case of AGG_STOP_DESTROY_STA, the driver won't
	 * necessarily call ieee80211_stop_tx_ba_cb(), so this may
	 * seem like we can leave the tid_tx data pending forever.
	 * This is true, in a way, but "forever" is only until the
	 * station struct is actually destroyed. In the meantime,
	 * leaving it around ensures that we don't transmit packets
	 * to the driver on this TID which might confuse it.
	 */

	return 0;
}

/*
 * After sending add Block Ack request we activated a timer until
 * add Block Ack response will arrive from the recipient.
 * If this timer expires sta_addba_resp_timer_expired will be executed.
 */
static void sta_addba_resp_timer_expired(unsigned long data)
{
	/* not an elegant detour, but there is no choice as the timer passes
	 * only one argument, and both sta_info and TID are needed, so init
	 * flow in sta_info_create gives the TID as data, while the timer_to_id
	 * array gives the sta through container_of */
	u16 tid = *(u8 *)data;
	struct sta_info *sta = container_of((void *)data,
		struct sta_info, timer_to_tid[tid]);
	struct tid_ampdu_tx *tid_tx;

	/* check if the TID waits for addBA response */
	rcu_read_lock();
	tid_tx = rcu_dereference(sta->ampdu_mlme.tid_tx[tid]);
	if (!tid_tx ||
	    test_bit(HT_AGG_STATE_RESPONSE_RECEIVED, &tid_tx->state)) {
		rcu_read_unlock();
		ht_dbg(sta->sdata,
		       "timer expired on %pM tid %d but we are not (or no longer) expecting addBA response there\n",
		       sta->sta.addr, tid);
		return;
	}

	ht_dbg(sta->sdata, "addBA response timer expired on %pM tid %d\n",
	       sta->sta.addr, tid);

	ieee80211_stop_tx_ba_session(&sta->sta, tid);
	rcu_read_unlock();
}
Code example #24
/**
 * This function must be called by the user driver to hand a set of packet
 * buffers to the base DMA driver for transmission. After doing required
 * checks to verify the handle and engine state, the packet buffers are
 * mapped for DMA, attached to buffer descriptors (BDs), and queued to the
 * hardware for sending.
 *
 * @param handle is the handle which was assigned during the registration
 * process.
 * @param pkts is a PktBuf array, with the array of packets to be sent.
 * @param numpkts is the number of packets in the PktBuf array.
 *
 * @return 0 in case of any error.
 * @return Number of packets successfully queued for DMA send, in case of
 * success.
 *
 * @note There may not be enough BDs to accommodate the number of packets
 * requested by the user driver, so the returned value may not match the
 * requested numpkts. The user driver must be written to manage this
 * appropriately.
 *
 *****************************************************************************/
int DmaSendPkt(void * handle, PktBuf * pkts, int numpkts)
{
#if defined DEBUG_NORMAL || defined DEBUG_VERBOSE
    static int send_count=1;
#endif
    int free_bd_count ;
    Dma_Engine * eptr;
    Dma_BdRing * rptr;
    struct pci_dev *pdev;
    struct privData *lp = NULL;
    Dma_Bd *BdPtr, *BdCurPtr, *PartialBDPtr=NULL;
    dma_addr_t bufPA;
    int result;
    PktBuf * pbuf;
    u32 flags, uflags;
    int i, len;
    int partialBDcount = 0, partialOK = 0;

    log_verbose(KERN_INFO "User send pkt for engine %p, numpkts %d\n",
                handle, numpkts);
    if(DriverState != INITIALIZED)
    {
        printk(KERN_ERR "DMA driver state %d - not ready\n", DriverState);
        return 0;
    }

    eptr = (Dma_Engine *)handle;

    /* Check if this engine's pointer is valid before using it */
    if(eptr == NULL)
    {
        printk(KERN_ERR "Handle is a NULL value\n");
        return 0;
    }

    rptr = &(eptr->BdRing);

    /* Is the number valid? */
    if(numpkts <= 0) {
        log_normal(KERN_ERR "Packet count should be non-zero\n");
        return 0;
    }

    /* Is the engine assigned to any user? */
    if(eptr->EngineState != USER_ASSIGNED) {
        log_normal(KERN_ERR "Engine is not assigned to any user\n");
        return 0;
    }

    /* Is the engine an S2C one? */
    if(rptr->IsRxChannel)
    {
        log_normal(KERN_ERR "The requested engine cannot send packets\n");
        return 0;
    }

    pdev = eptr->pdev;
    lp = pci_get_drvdata(pdev);

    /* Protect this entry point from the handling of sent packets */
    spin_lock_bh(&DmaLock);

    /* Ensure that requested number of packets can be queued up */
    free_bd_count = Dma_mBdRingGetFreeCnt(rptr);

#if defined DEBUG_NORMAL || defined DEBUG_VERBOSE
    log_normal(KERN_INFO "DmaSendPkt: #%d \n", send_count);
    send_count += numpkts;
    log_verbose(KERN_INFO "BD ring %x Free BD count is %d\n",
                (u32)rptr, free_bd_count);
    //disp_frag((unsigned char *)bufVA, pktsize);
#endif

    /* Maintain a separation between start and end of BD ring. This is
     * required because DMA will stall if the two pointers coincide -
     * this will happen whether ring is full or empty.
     */
    if(free_bd_count > 2) free_bd_count -= 2;
    else
    {
        log_verbose(KERN_ERR "Not enough BDs to handle %d pkts\n", numpkts);
        spin_unlock_bh(&DmaLock);
        return 0;
    }

    log_normal("DmaSendPkt: numpkts %d free_bd_count %d\n", numpkts, free_bd_count);

    /* Fewer BDs are available than required */
    if(numpkts > free_bd_count)
        numpkts = free_bd_count;

    /* Allocate BDs from ring */
    result = Dma_BdRingAlloc(rptr, numpkts, &BdPtr);
    if (result != XST_SUCCESS) {
        /* we really shouldn't get this */
        printk(KERN_ERR "DmaSendPkt: BdRingAlloc unsuccessful (%d)\n", result);
        spin_unlock_bh(&DmaLock);
        return 0;
    }

    BdCurPtr = BdPtr;
    partialBDcount = 0;
    for(i=0; i<numpkts; i++)
    {
        pbuf = &(pkts[i]);
        bufPA = pci_map_single(pdev, pbuf->pktBuf, pbuf->size, PCI_DMA_TODEVICE);
        log_verbose(KERN_INFO "DmaSendPkt: BD %x buf PA %x VA %x size %d\n",
                    (u32)BdCurPtr, bufPA, (u32) (pbuf->pktBuf), pbuf->size);

        Dma_mBdSetBufAddr(BdCurPtr, bufPA);
        Dma_mBdSetCtrlLength(BdCurPtr, pbuf->size);
        Dma_mBdSetStatLength(BdCurPtr, pbuf->size); // Required for TX BDs
        Dma_mBdSetId(BdCurPtr, (unsigned long)pbuf->bufInfo); //guodebug: error prone

        uflags = pbuf->flags;
        flags = 0;
        if(uflags & DMA_BD_SOP_MASK)
        {
            flags |= DMA_BD_SOP_MASK;

            partialOK = (uflags & PKT_ALL) ? 0 : 1;
            if(!partialOK) PartialBDPtr = BdCurPtr;
        }

        /* Keep track of whether the buffer is the last one in a packet */
        if(uflags & DMA_BD_EOP_MASK)
        {
            flags |= DMA_BD_EOP_MASK;
            partialBDcount = 0;
        }
        else
            partialBDcount++;
        //printk("partialBDcount = %d partialOK = %d PartialBDPtr = %x\n",
        //                        partialBDcount, partialOK, (u32)PartialBDPtr);

        Dma_mBdSetCtrl(BdCurPtr, flags);                 // No ints also.
        Dma_mBdSetUserData(BdCurPtr, pbuf->userInfo);

#ifdef TH_BH_ISR
        /* Enable interrupts for errors and completion based on
         * coalesce count.
         */
        flags |= DMA_BD_INT_ERROR_MASK;
        if(!(eptr->intrCount % INT_COAL_CNT))
            flags |= DMA_BD_INT_COMP_MASK;
        eptr->intrCount += 1;
        Dma_mBdSetCtrl(BdCurPtr, flags);
#endif

        log_verbose("DmaSendPkt: free %d BD %x buf PA %x VA %x size %d flags %x\n",
                    free_bd_count, (u32)BdCurPtr, bufPA, (u32) (pbuf->pktBuf),
                    pbuf->size, flags);

        BdCurPtr = Dma_mBdRingNext(rptr, BdCurPtr);
    }

    /* Handle the case where the user requires all of a packet's fragments
     * to be handled together or not at all. For example, with an SG list
     * of multi-BD packets, the user might require all fragments of a
     * packet to be queued together.
     */
    if(partialBDcount && !partialOK)
        log_normal(KERN_ERR "Cannot accommodate %d buffers. Discarding %d.\n",
                   numpkts, partialBDcount);

    /* Enqueue the TX BDs with their attached buffers so that they are
     * ready for frame transmission.
     */
    result = Dma_BdRingToHw(rptr, (numpkts-partialBDcount), BdPtr);
    if((result != XST_SUCCESS) || partialBDcount)
    {
        int count=0;
        if(result != XST_SUCCESS)
        {
            /* We should not get here. In case of an error, unmap the
             * buffers, un-allocate the BDs, and return a zero count so
             * that the application driver can recover the unused buffers.
             */
            printk(KERN_ERR "DmaSendPkt: BdRingToHw unsuccessful (%d)\n",
                   result);
            BdCurPtr = BdPtr;
            count = numpkts;
        }
        else if(partialBDcount)
        {
            /* Don't queue partial packets for DMA if the user does not
             * want that. Unmap the buffers, un-allocate the BDs, and
             * return the count so that the application driver can
             * recover the unused buffers.
             */
            log_verbose(KERN_ERR "DmaSendPkt: Recovering partial buffers\n");

            BdPtr = BdCurPtr = PartialBDPtr;
            count = partialBDcount;
        }

        for(i=0; i<count; i++)
        {
            bufPA = Dma_mBdGetBufAddr(BdCurPtr);
            len = Dma_mBdGetCtrlLength(BdCurPtr);
            pci_unmap_single(pdev, bufPA, len, PCI_DMA_TODEVICE);
            Dma_mBdSetId(BdCurPtr, 0LL);
            BdCurPtr = Dma_mBdRingNext(rptr, BdCurPtr);
        }
        Dma_BdRingUnAlloc(rptr, count, BdPtr);
        numpkts -= count;
    }

    spin_unlock_bh(&DmaLock);

    log_verbose("DmaSendPkt: Successfully transmitted %d buffers\n", numpkts);
    return numpkts;
}
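
To make the SOP/EOP flag handling above concrete, here is a minimal caller-side sketch. It is illustrative only: the DmaSendPkt() prototype and the PktBuf type name are assumptions inferred from the transmit loop, not taken from the driver headers.

/* Hypothetical caller: queue one single-buffer packet on an S2C engine.
 * Field and flag names are taken from the transmit loop above; the
 * DmaSendPkt() prototype and PktBuf layout are assumptions.
 */
static int send_one_packet(void *handle, unsigned char *data, int len)
{
    PktBuf pkt;
    int queued;

    pkt.pktBuf   = data;              /* virtual address of the payload       */
    pkt.size     = len;               /* bytes to transmit                    */
    pkt.bufInfo  = data;              /* cookie handed back on completion     */
    pkt.userInfo = 0;                 /* opaque per-packet user data          */
    pkt.flags    = DMA_BD_SOP_MASK |  /* single buffer: both SOP and EOP set  */
                   DMA_BD_EOP_MASK;

    queued = DmaSendPkt(handle, &pkt, 1);
    return (queued == 1) ? 0 : -EBUSY;  /* 0 packets queued means retry later */
}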
Code example #25
0
File: agg-tx.c Project: AK101111/linux
int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid,
				  u16 timeout)
{
	struct sta_info *sta = container_of(pubsta, struct sta_info, sta);
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct ieee80211_local *local = sdata->local;
	struct tid_ampdu_tx *tid_tx;
	int ret = 0;

	trace_api_start_tx_ba_session(pubsta, tid);

	if (WARN(sta->reserved_tid == tid,
		 "Requested to start BA session on reserved tid=%d", tid))
		return -EINVAL;

	if (!pubsta->ht_cap.ht_supported)
		return -EINVAL;

	if (WARN_ON_ONCE(!local->ops->ampdu_action))
		return -EINVAL;

	if ((tid >= IEEE80211_NUM_TIDS) ||
	    !ieee80211_hw_check(&local->hw, AMPDU_AGGREGATION) ||
	    ieee80211_hw_check(&local->hw, TX_AMPDU_SETUP_IN_HW))
		return -EINVAL;

	ht_dbg(sdata, "Open BA session requested for %pM tid %u\n",
	       pubsta->addr, tid);

	if (sdata->vif.type != NL80211_IFTYPE_STATION &&
	    sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
	    sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
	    sdata->vif.type != NL80211_IFTYPE_AP &&
	    sdata->vif.type != NL80211_IFTYPE_ADHOC)
		return -EINVAL;

	if (test_sta_flag(sta, WLAN_STA_BLOCK_BA)) {
		ht_dbg(sdata,
		       "BA sessions blocked - Denying BA session request %pM tid %d\n",
		       sta->sta.addr, tid);
		return -EINVAL;
	}

	/*
	 * 802.11n-2009 11.5.1.1: If the initiating STA is an HT STA, is a
	 * member of an IBSS, and has no other existing Block Ack agreement
	 * with the recipient STA, then the initiating STA shall transmit a
	 * Probe Request frame to the recipient STA and shall not transmit an
	 * ADDBA Request frame unless it receives a Probe Response frame
	 * from the recipient within dot11ADDBAFailureTimeout.
	 *
	 * The probe request mechanism for ADDBA is currently not implemented,
	 * but we only build up Block Ack session with HT STAs. This information
	 * is set when we receive a bss info from a probe response or a beacon.
	 */
	if (sta->sdata->vif.type == NL80211_IFTYPE_ADHOC &&
	    !sta->sta.ht_cap.ht_supported) {
		ht_dbg(sdata,
		       "BA request denied - IBSS STA %pM does not advertise HT support\n",
		       pubsta->addr);
		return -EINVAL;
	}

	spin_lock_bh(&sta->lock);

	/* we have tried too many times, receiver does not want A-MPDU */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_MAX_RETRIES) {
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	/*
	 * if we have tried more than HT_AGG_BURST_RETRIES times we
	 * will spread our requests in time to avoid stalling connection
	 * for too long
	 */
	if (sta->ampdu_mlme.addba_req_num[tid] > HT_AGG_BURST_RETRIES &&
	    time_before(jiffies, sta->ampdu_mlme.last_addba_req_time[tid] +
			HT_AGG_RETRIES_PERIOD)) {
		ht_dbg(sdata,
		       "BA request denied - waiting a grace period after %d failed requests on %pM tid %u\n",
		       sta->ampdu_mlme.addba_req_num[tid], sta->sta.addr, tid);
		ret = -EBUSY;
		goto err_unlock_sta;
	}

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
	/* check if the TID is not in aggregation flow already */
	if (tid_tx || sta->ampdu_mlme.tid_start_tx[tid]) {
		ht_dbg(sdata,
		       "BA request denied - session is not idle on %pM tid %u\n",
		       sta->sta.addr, tid);
		ret = -EAGAIN;
		goto err_unlock_sta;
	}

	/* prepare A-MPDU MLME for Tx aggregation */
	tid_tx = kzalloc(sizeof(struct tid_ampdu_tx), GFP_ATOMIC);
	if (!tid_tx) {
		ret = -ENOMEM;
		goto err_unlock_sta;
	}

	skb_queue_head_init(&tid_tx->pending);
	__set_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	tid_tx->timeout = timeout;

	/* response timer */
	tid_tx->addba_resp_timer.function = sta_addba_resp_timer_expired;
	tid_tx->addba_resp_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer(&tid_tx->addba_resp_timer);

	/* tx timer */
	tid_tx->session_timer.function = sta_tx_agg_session_timer_expired;
	tid_tx->session_timer.data = (unsigned long)&sta->timer_to_tid[tid];
	init_timer_deferrable(&tid_tx->session_timer);

	/* assign a dialog token */
	sta->ampdu_mlme.dialog_token_allocator++;
	tid_tx->dialog_token = sta->ampdu_mlme.dialog_token_allocator;

	/*
	 * Finally, assign it to the start array; the work item will
	 * collect it and move it to the normal array.
	 */
	sta->ampdu_mlme.tid_start_tx[tid] = tid_tx;

	ieee80211_queue_work(&local->hw, &sta->ampdu_mlme.work);

	/* this flow continues off the work */
 err_unlock_sta:
	spin_unlock_bh(&sta->lock);
	return ret;
}
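
For context, mac80211 drivers (or their rate-control code) invoke this entry point when they decide that a TID should be aggregated. The following is a hedged sketch of such a caller; the frame-count threshold and the surrounding bookkeeping are hypothetical, and only the ieee80211_start_tx_ba_session() call itself comes from the function above.

/* Hypothetical driver-side trigger: once enough frames have been sent on
 * a TID, try to open a BA session. -EBUSY/-EAGAIN simply mean "try later".
 */
static void example_maybe_start_agg(struct ieee80211_sta *pubsta, u8 tid,
				    unsigned int frames_on_tid)
{
	if (frames_on_tid < 16)		/* hypothetical aggregation threshold */
		return;

	if (ieee80211_start_tx_ba_session(pubsta, tid, 0))
		return;			/* denied or pending; retry later */
}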
Code example #26
0
/**
 * This function must be called by the user driver to register itself with
 * the base DMA driver. After doing required checks to verify the choice
 * of engine and BAR register, it initializes the engine and the BD ring
 * associated with the engine, and enables interrupts if required.
 *
 * Only one user is supported per engine at any given time. In case the
 * engine has already been registered with another user driver, an error
 * will be returned.
 *
 * @param engine is the DMA engine the user driver wants to use.
 * @param bar is the BAR register the user driver wants to use.
 * @param uptr is a pointer to the function callbacks in the user driver.
 * @param pktsize is the size of packets that the user driver will normally
 *        use.
 *
 * @return NULL in case of any error in completing the registration.
 * @return Handle with which the user driver is registered.
 *
 * @note This function should not be called in an interrupt context
 *
 *****************************************************************************/
void * DmaRegister(int engine, int bar, UserPtrs * uptr, int pktsize)
{
    Dma_Engine * eptr;
    Xaddr barbase;
    int result;

    log_verbose(KERN_INFO "User register for engine %d, BAR %d, pktsize %d\n", engine, bar, pktsize);

    if(DriverState != INITIALIZED)
    {
        printk(KERN_ERR "DMA driver state %d - not ready\n", DriverState);
        return NULL;
    }

    if((bar < 0) || (bar > 5)) {
        printk(KERN_ERR "Requested BAR %d is not valid\n", bar);
        return NULL;
    }

    if(!((dmaData->engineMask) & (1LL << engine))) {
        printk(KERN_ERR "Requested engine %d does not exist\n", engine);
        return NULL;
    }
    eptr = &(dmaData->Dma[engine]);
    barbase = (Xaddr)(dmaData->barInfo[bar].baseVAddr);
    log_verbose("guodebug DmaReg: barbase = %p\n", barbase);

    if(eptr->EngineState != INITIALIZED) {
        printk(KERN_ERR "Requested engine %d is not free\n", engine);
        return NULL;
    }

    /* Later, add check for reasonable packet size !!!! */

    /* Later, add check for mandatory user function pointers. For optional
     * ones, assign a stub function pointer. This is better than doing
     * a NULL value check in the performance path. !!!!
     */

    /* Copy user-supplied parameters */
    eptr->user = *uptr;
    eptr->pktSize = pktsize;

    /* Should check for errors returned here !!!! */
    (uptr->UserInit)(barbase, uptr->privData);

    spin_lock_bh(&DmaLock);

    /* Should inform the user of the errors !!!! */
    result = descriptor_init(eptr->pdev, eptr);
    if (result) {
        /* At this point, the handle has not been returned to the user,
         * so the user driver has not been able to prepare any buffers.
         * The BD ring setup will be retried in the poll routine, so do
         * not abort here.
         */
        printk(KERN_ERR "Cannot create BD ring, will try again later.\n");
        //return NULL;
    }

    /* Change the state of the engine, and increment the user count */
    eptr->EngineState = USER_ASSIGNED;
    dmaData->userCount ++ ;

    /* Start the DMA engine */
    if (Dma_BdRingStart(&(eptr->BdRing)) == XST_FAILURE) {
        log_normal(KERN_ERR "DmaRegister: Could not start Dma channel\n");
        return NULL;
    }

#ifdef TH_BH_ISR
    printk("Now enabling interrupts\n");
    Dma_mEngIntEnable(eptr);
#endif

    spin_unlock_bh(&DmaLock);

    log_verbose(KERN_INFO "Returning user handle %p\n", eptr);

    return eptr;
}
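
A registration call from a user driver would look roughly as follows. This is only a sketch under assumptions: apart from UserInit and privData, which DmaRegister() visibly dereferences, the callbacks a real user driver must supply are omitted, the UserInit signature (return type in particular) is assumed, and the engine, BAR, and packet-size values are arbitrary.

/* Hypothetical user-driver registration. Only the UserPtrs members that
 * DmaRegister() above uses (UserInit, privData) are set here; the
 * remaining callbacks required by a real user driver are not shown.
 */
static int my_user_init(Xaddr barbase, void *priv)
{
    /* program the user logic through the selected BAR */
    return 0;
}

static void *my_dma_attach(void)
{
    static UserPtrs uptr = {
        .UserInit = my_user_init,
        .privData = NULL,
    };
    void *handle;

    handle = DmaRegister(0 /* engine */, 0 /* BAR */, &uptr, 4096 /* pktsize */);
    if (handle == NULL)
        return NULL;    /* driver not ready, engine busy, or invalid BAR */

    return handle;
}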
Code example #27
0
static __inline__ void icmpv6_xmit_unlock(struct sock *sk)
{
	spin_unlock_bh(&sk->sk_lock.slock);
}
Code example #28
0
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
    struct device *dev = htt->ar->dev;
    struct htt_cmd *cmd;
    struct htt_data_tx_desc_frag *tx_frags;
    struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
    struct sk_buff *txdesc = NULL;
    bool use_frags;
    u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
    u8 tid;
    int prefetch_len, desc_len;
    int msdu_id = -1;
    int res;
    u8 flags0;
    u16 flags1;

    res = ath10k_htt_tx_inc_pending(htt);
    if (res)
        goto err;

    spin_lock_bh(&htt->tx_lock);
    res = ath10k_htt_tx_alloc_msdu_id(htt);
    if (res < 0) {
        spin_unlock_bh(&htt->tx_lock);
        goto err_tx_dec;
    }
    msdu_id = res;
    htt->pending_tx[msdu_id] = msdu;
    spin_unlock_bh(&htt->tx_lock);

    prefetch_len = min(htt->prefetch_len, msdu->len);
    prefetch_len = roundup(prefetch_len, 4);

    desc_len = sizeof(cmd->hdr) + sizeof(cmd->data_tx) + prefetch_len;

    txdesc = ath10k_htc_alloc_skb(desc_len);
    if (!txdesc) {
        res = -ENOMEM;
        goto err_free_msdu_id;
    }

    /* Since HTT 3.0 there is no separate mgmt tx command. However, in the
     * case of mgmt tx using TX_FRM there is no tx fragment list. Instead of
     * a tx fragment list, the host driver specifies the frame pointer
     * directly. */
    use_frags = htt->target_version_major < 3 ||
                !ieee80211_is_mgmt(hdr->frame_control);

    if (!IS_ALIGNED((unsigned long)txdesc->data, 4)) {
        ath10k_warn("htt alignment check failed. dropping packet.\n");
        res = -EIO;
        goto err_free_txdesc;
    }

    if (use_frags) {
        skb_cb->htt.frag_len = sizeof(*tx_frags) * 2;
        skb_cb->htt.pad_len = (unsigned long)msdu->data -
                              round_down((unsigned long)msdu->data, 4);

        skb_push(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
    } else {
        skb_cb->htt.frag_len = 0;
        skb_cb->htt.pad_len = 0;
    }

    res = ath10k_skb_map(dev, msdu);
    if (res)
        goto err_pull_txfrag;

    if (use_frags) {
        dma_sync_single_for_cpu(dev, skb_cb->paddr, msdu->len,
                                DMA_TO_DEVICE);

        /* tx fragment list must be terminated with zero-entry */
        tx_frags = (struct htt_data_tx_desc_frag *)msdu->data;
        tx_frags[0].paddr = __cpu_to_le32(skb_cb->paddr +
                                          skb_cb->htt.frag_len +
                                          skb_cb->htt.pad_len);
        tx_frags[0].len   = __cpu_to_le32(msdu->len -
                                          skb_cb->htt.frag_len -
                                          skb_cb->htt.pad_len);
        tx_frags[1].paddr = __cpu_to_le32(0);
        tx_frags[1].len   = __cpu_to_le32(0);

        dma_sync_single_for_device(dev, skb_cb->paddr, msdu->len,
                                   DMA_TO_DEVICE);
    }

    ath10k_dbg(ATH10K_DBG_HTT, "msdu 0x%llx\n",
               (unsigned long long) ATH10K_SKB_CB(msdu)->paddr);
    ath10k_dbg_dump(ATH10K_DBG_HTT_DUMP, NULL, "msdu: ",
                    msdu->data, msdu->len);

    skb_put(txdesc, desc_len);
    cmd = (struct htt_cmd *)txdesc->data;

    tid = ATH10K_SKB_CB(msdu)->htt.tid;

    ath10k_dbg(ATH10K_DBG_HTT, "htt data tx using tid %hhu\n", tid);

    flags0  = 0;
    if (!ieee80211_has_protected(hdr->frame_control))
        flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;
    flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

    if (use_frags)
        flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
    else
        flags0 |= SM(ATH10K_HW_TXRX_MGMT,
                     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);

    flags1  = 0;
    flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
    flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
    flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
    flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;

    cmd->hdr.msg_type        = HTT_H2T_MSG_TYPE_TX_FRM;
    cmd->data_tx.flags0      = flags0;
    cmd->data_tx.flags1      = __cpu_to_le16(flags1);
    cmd->data_tx.len         = __cpu_to_le16(msdu->len -
                               skb_cb->htt.frag_len -
                               skb_cb->htt.pad_len);
    cmd->data_tx.id          = __cpu_to_le16(msdu_id);
    cmd->data_tx.frags_paddr = __cpu_to_le32(skb_cb->paddr);
    cmd->data_tx.peerid      = __cpu_to_le32(HTT_INVALID_PEERID);

    memcpy(cmd->data_tx.prefetch, hdr, prefetch_len);

    res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
    if (res)
        goto err_unmap_msdu;

    return 0;

err_unmap_msdu:
    ath10k_skb_unmap(dev, msdu);
err_pull_txfrag:
    skb_pull(msdu, skb_cb->htt.frag_len + skb_cb->htt.pad_len);
err_free_txdesc:
    dev_kfree_skb_any(txdesc);
err_free_msdu_id:
    spin_lock_bh(&htt->tx_lock);
    htt->pending_tx[msdu_id] = NULL;
    ath10k_htt_tx_free_msdu_id(htt, msdu_id);
    spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
    ath10k_htt_tx_dec_pending(htt);
err:
    return res;
}
Code example #29
0
File: mesh_hwmp.c Project: x56981973/exp-platform
/**
 * hwmp_route_info_get - Update routing info to originator and transmitter
 *
 * @sdata: local mesh subif
 * @mgmt: mesh management frame
 * @hwmp_ie: hwmp information element (PREP or PREQ)
 *
 * This function updates the path routing information to the originator and the
 * transmitter of a HWMP PREQ or PREP frame.
 *
 * Returns: metric to frame originator or 0 if the frame should not be further
 * processed
 *
 * Notes: this function is the only place (besides user-provided info) where
 * path routing information is updated.
 */
static u32 hwmp_route_info_get(struct ieee80211_sub_if_data *sdata,
			    struct ieee80211_mgmt *mgmt,
			    u8 *hwmp_ie, enum mpath_frame_type action)
{
	struct ieee80211_local *local = sdata->local;
	struct mesh_path *mpath;
	struct sta_info *sta;
	bool fresh_info;
	u8 *orig_addr, *ta;
	u32 orig_sn, orig_metric;
	unsigned long orig_lifetime, exp_time;
	u32 last_hop_metric, new_metric;
	bool process = true;

	rcu_read_lock();
	sta = sta_info_get(sdata, mgmt->sa);
	if (!sta) {
		rcu_read_unlock();
		return 0;
	}

	last_hop_metric = airtime_link_metric_get(local, sta);
	/* Update and check originator routing info */
	fresh_info = true;

	switch (action) {
	case MPATH_PREQ:
		orig_addr = PREQ_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREQ_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREQ_IE_LIFETIME(hwmp_ie);
		orig_metric = PREQ_IE_METRIC(hwmp_ie);
		break;
	case MPATH_PREP:
		/* Originator here refers to the MP that was the destination in
		 * the Path Request. The draft refers to that MP as the
		 * destination address, even though usually it is the origin of
		 * the PREP frame. We divert from the nomenclature in the draft
		 * so that we can easily use a single function to gather path
		 * information from both PREQ and PREP frames.
		 */
		orig_addr = PREP_IE_ORIG_ADDR(hwmp_ie);
		orig_sn = PREP_IE_ORIG_SN(hwmp_ie);
		orig_lifetime = PREP_IE_LIFETIME(hwmp_ie);
		orig_metric = PREP_IE_METRIC(hwmp_ie);
		break;
	default:
		rcu_read_unlock();
		return 0;
	}
	new_metric = orig_metric + last_hop_metric;
	if (new_metric < orig_metric)
		new_metric = MAX_METRIC;
	exp_time = TU_TO_EXP_TIME(orig_lifetime);

	if (memcmp(orig_addr, sdata->vif.addr, ETH_ALEN) == 0) {
		/* This MP is the originator, we are not interested in this
		 * frame, except for updating transmitter's path info.
		 */
		process = false;
		fresh_info = false;
	} else {
		mpath = mesh_path_lookup(orig_addr, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if (mpath->flags & MESH_PATH_FIXED)
				fresh_info = false;
			else if ((mpath->flags & MESH_PATH_ACTIVE) &&
			    (mpath->flags & MESH_PATH_SN_VALID)) {
				if (SN_GT(mpath->sn, orig_sn) ||
				    (mpath->sn == orig_sn &&
				     new_metric >= mpath->metric)) {
					process = false; //ymj
					fresh_info = false;
				}
				if (orig_metric > 0 &&
				    orig_metric < (min_metric / 5)) {
					mhwmp_dbg("mesh hwmp: get orig_metric = %d\n",
						  orig_metric); //ymj
					process = false;
					fresh_info = false;
				}
			}
		} else {
			mesh_path_add(orig_addr, sdata);
			mpath = mesh_path_lookup(orig_addr, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->flags |= MESH_PATH_SN_VALID;
			mpath->metric = new_metric;
			mpath->sn = orig_sn;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
			/* The draft says the preq_id should be saved too, but there
			 * does not seem to be any use for it, so skip it for now.
			 */
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	/* Update and check transmitter routing info */
	ta = mgmt->sa;
	if (memcmp(orig_addr, ta, ETH_ALEN) == 0)
		fresh_info = false;
	else {
		fresh_info = true;

		mpath = mesh_path_lookup(ta, sdata);
		if (mpath) {
			spin_lock_bh(&mpath->state_lock);
			if ((mpath->flags & MESH_PATH_FIXED) ||
				((mpath->flags & MESH_PATH_ACTIVE) &&
					(last_hop_metric > mpath->metric)))
				fresh_info = false;
		} else {
			mesh_path_add(ta, sdata);
			mpath = mesh_path_lookup(ta, sdata);
			if (!mpath) {
				rcu_read_unlock();
				return 0;
			}
			spin_lock_bh(&mpath->state_lock);
		}

		if (fresh_info) {
			mesh_path_assign_nexthop(mpath, sta);
			mpath->metric = last_hop_metric;
			mpath->exp_time = time_after(mpath->exp_time, exp_time)
					  ?  mpath->exp_time : exp_time;
			mesh_path_activate(mpath);
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_tx_pending(mpath);
		} else
			spin_unlock_bh(&mpath->state_lock);
	}

	rcu_read_unlock();

	return process ? new_metric : 0;
}
Code example #30
0
void ax25_linkfail_register(struct ax25_linkfail *lf)
{
    spin_lock_bh(&linkfail_lock);
    hlist_add_head(&lf->lf_node, &ax25_linkfail_list);
    spin_unlock_bh(&linkfail_lock);
}
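
A caller-side sketch of the registration, loosely following the pattern used by the NET/ROM and ROSE layers: a static struct ax25_linkfail carrying the callback is handed to ax25_linkfail_register(). The handler body is hypothetical, and the callback member name (func) should be verified against include/net/ax25.h.

/* Sketch: register a link-failure callback; the list insert above is
 * protected by linkfail_lock via spin_lock_bh().
 */
static void my_linkfail_handler(ax25_cb *ax25, int reason)
{
    /* tear down any state that depended on the failed AX.25 link */
}

static struct ax25_linkfail my_linkfail = {
    .func = my_linkfail_handler,
};

static int __init my_proto_init(void)
{
    ax25_linkfail_register(&my_linkfail);
    return 0;
}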