Example #1
/* wlc_lcnphy_tbl_init */
void b43_phy_lcn_tables_init(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	b43_phy_lcn_upload_static_tables(dev);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (sprom->boardflags_lo & B43_BFL_FEM)
			b43_phy_lcn_load_tx_gain_tab(dev,
				b43_lcntab_tx_gain_tbl_2ghz_ext_pa_rev0);
		else
			b43err(dev->wl,
			       "TX gain table unknown for this card\n");
	}

	if (sprom->boardflags_lo & B43_BFL_FEM &&
	    !(sprom->boardflags_hi & B43_BFH_FEM_BT))
		b43_lcntab_write_bulk(dev, B43_LCNTAB16(0xf, 0),
			ARRAY_SIZE(b43_lcntab_sw_ctl_4313_epa_rev0),
			b43_lcntab_sw_ctl_4313_epa_rev0);
	else
		b43err(dev->wl, "SW ctl table is unknown for this card\n");

	b43_phy_lcn_load_rfpower(dev);
	b43_phy_lcn_rewrite_rfpower_table(dev);
	b43_phy_lcn_clean_papd_comp_table(dev);
}
Example #2
int b43_phy_init(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	const struct b43_phy_operations *ops = phy->ops;
	int err;

	phy->channel = ops->get_default_chan(dev);

	ops->software_rfkill(dev, false);
	err = ops->init(dev);
	if (err) {
		b43err(dev->wl, "PHY init failed\n");
		goto err_block_rf;
	}
	/* Make sure to switch hardware and firmware (SHM) to
	 * the default channel. */
	err = b43_switch_channel(dev, ops->get_default_chan(dev));
	if (err) {
		b43err(dev->wl, "PHY init: Channel switch to default failed\n");
		goto err_phy_exit;
	}

	return 0;

err_phy_exit:
	if (ops->exit)
		ops->exit(dev);
err_block_rf:
	ops->software_rfkill(dev, true);

	return err;
}
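The ops vtable is what lets each PHY type plug in its own init, exit and rfkill behaviour. As a reading aid, here is a minimal sketch of the callback shapes implied by the calls above (plus interf_mitigation, used in a later example); the real struct b43_phy_operations has more members, and the exact signatures here are assumptions inferred from the call sites only.

struct b43_phy_operations_sketch {
	/* Hypothetical reconstruction; fields and signatures are inferred
	 * from the call sites in these examples, not from phy_common.h. */
	unsigned int (*get_default_chan)(struct b43_wldev *dev);
	void (*software_rfkill)(struct b43_wldev *dev, bool blocked);
	int (*init)(struct b43_wldev *dev);
	void (*exit)(struct b43_wldev *dev);
	int (*interf_mitigation)(struct b43_wldev *dev, int mode);
};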
Example #3
static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset)
{
	/* OFDM registers are base-registers for the A-PHY. */
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) {
		offset &= ~B43_PHYROUTE;
		offset |= B43_PHYROUTE_BASE;
	}

#if B43_DEBUG
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) {
		/* Ext-G registers are only available on G-PHYs */
		b43err(dev->wl, "Invalid EXT-G PHY access at "
		       "0x%04X on A-PHY\n", offset);
		dump_stack();
	}
	if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) {
		/* N-BMODE registers are only available on N-PHYs */
		b43err(dev->wl, "Invalid N-BMODE PHY access at "
		       "0x%04X on A-PHY\n", offset);
		dump_stack();
	}
#endif /* B43_DEBUG */

	return offset;
}
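The routing checks only make sense with the register layout in mind: bits 10-11 of a PHY register offset select a routing area, and the debug block rejects areas that do not exist on an A-PHY. A sketch of the constants involved follows; the values match what b43.h is commonly understood to define, but treat them as assumptions rather than a quote from this tree.

/* Assumed values; verify against b43.h in your tree. */
#define B43_PHYROUTE			0x0C00	/* register routing bits */
#define  B43_PHYROUTE_BASE		0x0000	/* base registers */
#define  B43_PHYROUTE_OFDM_GPHY	0x0400	/* OFDM registers */
#define  B43_PHYROUTE_EXT_GPHY		0x0800	/* Extended G-PHY registers */
#define  B43_PHYROUTE_N_BMODE		0x0C00	/* N-PHY B-mode registers */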
Example #4
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment. Testing
	 * has shown that 4K is sufficient for the latter as long as the buffer
	 * does not cross an 8K boundary.
	 *
	 * For unknown reasons - possibly a hardware error - the BCM4311 rev
	 * 02, which uses 64-bit DMA, needs the ring buffer in very low memory,
	 * which accounts for the GFP_DMA flag below.
	 *
	 * The flags here must match the flags in free_ringmemory below!
	 */
	if (ring->type == B43_DMA_64BIT)
		flags |= GFP_DMA;
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43_DMA_RINGMEMSIZE,
					    &(ring->dmabase), flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, B43_DMA_RINGMEMSIZE);

	return 0;
}
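The comment insists that the allocation flags match free_ringmemory. With the dma_alloc_coherent() call shown here the release side takes no GFP flags at all, so the matching free path would simply mirror the allocation; the flags remark presumably dates from older DMA wrappers that did take them. A sketch:

/* Sketch of the matching release path; mirrors the alloc call above. */
static void free_ringmemory(struct b43_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}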
Example #5
/* Initialise the TSSI->dBm lookup table */
static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev)
{
	struct b43_phy *phy = &dev->phy;
	struct b43_phy_a *aphy = phy->a;
	s16 pab0, pab1, pab2;

	pab0 = (s16) (dev->dev->bus->sprom.pa1b0);
	pab1 = (s16) (dev->dev->bus->sprom.pa1b1);
	pab2 = (s16) (dev->dev->bus->sprom.pa1b2);

	if (pab0 != 0 && pab1 != 0 && pab2 != 0 &&
	    pab0 != -1 && pab1 != -1 && pab2 != -1) {
		/* The pabX values are set in SPROM. Use them. */
		if ((s8) dev->dev->bus->sprom.itssi_a != 0 &&
		    (s8) dev->dev->bus->sprom.itssi_a != -1)
			aphy->tgt_idle_tssi =
			    (s8) (dev->dev->bus->sprom.itssi_a);
		else
			aphy->tgt_idle_tssi = 62;
		aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0,
							       pab1, pab2);
		if (!aphy->tssi2dbm)
			return -ENOMEM;
	} else {
		/* pabX values not set in SPROM,
		 * but APHY needs a generated table. */
		aphy->tssi2dbm = NULL;
		b43err(dev->wl, "Could not generate tssi2dBm "
		       "table (wrong SPROM info)!\n");
		return -ENODEV;
	}

	return 0;
}
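The generated table maps a raw TSSI reading directly to a dBm value, with the SPROM pa1b0..pa1b2 words acting as polynomial coefficients. A hypothetical lookup sketch (the 6-bit reading and the 64-entry table size are assumptions based on how such tables are used elsewhere in the driver):

/* Sketch only: table size and index width are assumptions. */
static s8 aphy_tssi_to_dbm(struct b43_phy_a *aphy, u8 tssi)
{
	return aphy->tssi2dbm[tssi & 0x3f];	/* assumed 64-entry table */
}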
Example #6
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
      out:
	return err;

      err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
Example #7
static int alloc_ringmemory(struct b43_dmaring *ring)
{
	gfp_t flags = GFP_KERNEL;

	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    flags);
	if (!ring->descbase) {
		b43err(ring->dev->wl, "DMA ringmemory allocation failed\n");
		return -ENOMEM;
	}
	memset(ring->descbase, 0, ring_mem_size);

	return 0;
}
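A useful detail behind this: the DMA API guarantees that dma_alloc_coherent() returns an address aligned to the smallest PAGE_SIZE order covering the requested size, so an 8K ring allocation can never have bit 0x1000 set in its bus address, which is exactly what the workaround relies on. A paranoid guard, purely as an illustration (it is not in the driver):

	/* Illustrative only: the BCM4331 hazard requires that bit 0x1000
	 * never be set in the 64-bit ring base address. */
	if (ring->type == B43_DMA_64BIT)
		B43_WARN_ON(ring->dmabase & 0x1000);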
Example #8
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
					bool blocked)
{
	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
		b43err(dev->wl, "MAC not suspended\n");

	/* In the following PHY ops we copy wl's dummy behaviour.
	 * TODO: Find out if reads (currently hidden in masks/masksets) are
	 * needed and replace following ops with just writes or w&r.
	 * Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can
	 * cause delayed (!) machine lock up. */
	if (blocked) {
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
	} else {
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
		b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1);
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
		b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2);

		if (dev->phy.radio_ver == 0x2059)
			b43_radio_2059_init(dev);
		else
			B43_WARN_ON(1);

		b43_switch_channel(dev, dev->phy.channel);
	}
}
Example #9
/* Calibrate resistors in LPF of PLL? */
static void b43_radio_2059_rcal(struct b43_wldev *dev)
{
	/* Enable */
	b43_radio_set(dev, R2059_C3 | R2059_RCAL_CONFIG, 0x1);
	usleep_range(10, 20);

	b43_radio_set(dev, R2059_C3 | 0x0BF, 0x1);
	b43_radio_maskset(dev, R2059_C3 | 0x19B, 0x3, 0x2);

	/* Start */
	b43_radio_set(dev, R2059_C3 | R2059_RCAL_CONFIG, 0x2);
	usleep_range(100, 200);

	/* Stop */
	b43_radio_mask(dev, R2059_C3 | R2059_RCAL_CONFIG, ~0x2);

	if (!b43_radio_wait_value(dev, R2059_C3 | R2059_RCAL_STATUS, 1, 1, 100,
				  1000000))
		b43err(dev->wl, "Radio 0x2059 rcal timeout\n");

	/* Disable */
	b43_radio_mask(dev, R2059_C3 | R2059_RCAL_CONFIG, ~0x1);

	b43_radio_set(dev, 0xa, 0x60);
}
Example #10
/* Calibrate the internal RC oscillator? */
static void b43_radio_2057_rccal(struct b43_wldev *dev)
{
	const u16 radio_values[3][2] = {
		{ 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
	};
	int i;

	for (i = 0; i < 3; i++) {
		b43_radio_write(dev, R2059_RCCAL_MASTER, radio_values[i][0]);
		b43_radio_write(dev, R2059_RCCAL_X1, 0x6E);
		b43_radio_write(dev, R2059_RCCAL_TRC0, radio_values[i][1]);

		/* Start */
		b43_radio_write(dev, R2059_RCCAL_START_R1_Q1_P1, 0x55);

		/* Wait */
		if (!b43_radio_wait_value(dev, R2059_RCCAL_DONE_OSCCAP, 2, 2,
					  500, 5000000))
			b43err(dev->wl, "Radio 0x2059 rccal timeout\n");

		/* Stop */
		b43_radio_write(dev, R2059_RCCAL_START_R1_Q1_P1, 0x15);
	}

	b43_radio_mask(dev, R2059_RCCAL_MASTER, ~0x1);
}
Example #11
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	/* Try to set the DMA mask. If it fails, try falling back to a
	 * lower mask, as we can always also support a lower one. */
	while (1) {
		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
		if (!err)
			break;
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
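The fallback chain (64 -> 32 -> 30 bit) is easier to see written as a table walk. A behavior-equivalent sketch, with a made-up helper name and minus the info message:

/* Sketch: try masks from widest to narrowest, never exceeding the
 * requested width. Not the driver's code. */
static int b43_dma_set_mask_sketch(struct b43_wldev *dev, u64 mask)
{
	static const u64 masks[] = {
		DMA_BIT_MASK(64), DMA_BIT_MASK(32), DMA_BIT_MASK(30),
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(masks); i++) {
		if (masks[i] > mask)
			continue;	/* don't widen beyond the request */
		if (!dma_set_mask_and_coherent(dev->dev->dma_dev, masks[i]))
			return 0;
	}
	return -EOPNOTSUPP;
}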
Example #12
/* wlc_lcnphy_tx_pwr_ctrl_init */
static void b43_phy_lcn_tx_pwr_ctl_init(struct b43_wldev *dev)
{
	struct lcn_tx_gains tx_gains;
	u8 bbmult;

	b43_mac_suspend(dev);

	if (!dev->phy.lcn->hw_pwr_ctl_capable) {
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			tx_gains.gm_gain = 4;
			tx_gains.pga_gain = 12;
			tx_gains.pad_gain = 12;
			tx_gains.dac_gain = 0;
			bbmult = 150;
		} else {
			tx_gains.gm_gain = 7;
			tx_gains.pga_gain = 15;
			tx_gains.pad_gain = 14;
			tx_gains.dac_gain = 0;
			bbmult = 150;
		}
		b43_phy_lcn_set_tx_gain(dev, &tx_gains);
		b43_phy_lcn_set_bbmult(dev, bbmult);
		b43_phy_lcn_sense_setup(dev, B43_SENSE_TEMP);
	} else {
		b43err(dev->wl, "TX power control not supported for this HW\n");
	}

	b43_mac_enable(dev);
}
Example #13
/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}
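Both halves of this function are the same poll-until-state loop, with i doubling as a timeout sentinel (-1 means success). The pattern generalizes to a small helper; the following is a sketch with a hypothetical name, simplified to a single acceptable state:

/* Hypothetical helper, not in the driver: poll a 32-bit register until
 * (value & mask) == want, sleeping about 1 ms between reads. */
static bool b43_poll32(struct b43_wldev *dev, u16 offset, u32 mask,
		       u32 want, int tries)
{
	while (tries--) {
		if ((b43_read32(dev, offset) & mask) == want)
			return true;
		msleep(1);
	}
	return false;
}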
Example #14
File: lo.c Project: Tigrouzen/k1099
static int assert_rfatt_and_bbatt(const struct b43_rfatt *rfatt,
				  const struct b43_bbatt *bbatt,
				  struct b43_wldev *dev)
{
	int err = 0;

	/* Check the attenuation values against the LO control array sizes. */
	if (unlikely(rfatt->att >= B43_NR_RF)) {
		b43err(dev->wl, "rfatt(%u) >= size of LO array\n", rfatt->att);
		err = -EINVAL;
	}
	if (unlikely(bbatt->att >= B43_NR_BB)) {
		b43err(dev->wl, "bbatt(%u) >= size of LO array\n", bbatt->att);
		err = -EINVAL;
	}

	return err;
}
Example #15
File: phy_ht.c Project: mbgg/linux
static void b43_phy_ht_rssi_select(struct b43_wldev *dev, u8 core_sel,
				   u8 rssi_type)
{
	static const u16 ctl_regs[3][2] = {
		{ B43_PHY_HT_AFE_C1, B43_PHY_HT_AFE_C1_OVER, },
		{ B43_PHY_HT_AFE_C2, B43_PHY_HT_AFE_C2_OVER, },
		{ B43_PHY_HT_AFE_C3, B43_PHY_HT_AFE_C3_OVER, },
	};
	static const u16 radio_r[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1, };
	int core;

	if (core_sel == 0) {
		b43err(dev->wl, "RSSI selection for core off not implemented yet\n");
	} else {
		for (core = 0; core < 3; core++) {
			/* Check if the caller requested one specific core */
			if ((core_sel == 1 && core != 0) ||
			    (core_sel == 2 && core != 1) ||
			    (core_sel == 3 && core != 2))
				continue;

			switch (rssi_type) {
			case 4:
				b43_phy_set(dev, ctl_regs[core][0], 0x3 << 8);
				b43_phy_set(dev, ctl_regs[core][0], 0x3 << 10);
				b43_phy_set(dev, ctl_regs[core][1], 0x1 << 9);
				b43_phy_set(dev, ctl_regs[core][1], 0x1 << 10);

				b43_radio_set(dev, R2059_RXRX1 | 0xbf, 0x1);
				b43_radio_write(dev, radio_r[core] | 0x159,
						0x11);
				break;
			default:
				b43err(dev->wl, "RSSI selection for type %d not implemented yet\n",
				       rssi_type);
			}
		}
	}
}
Example #16
static ssize_t b43_attr_interfmode_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	struct b43_wldev *wldev = dev_to_b43_wldev(dev);
	int err;
	int mode;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mode = get_integer(buf, count);
	switch (mode) {
	case 0:
		mode = B43_INTERFMODE_NONE;
		break;
	case 1:
		mode = B43_INTERFMODE_NONWLAN;
		break;
	case 2:
		mode = B43_INTERFMODE_MANUALWLAN;
		break;
	case 3:
		mode = B43_INTERFMODE_AUTOWLAN;
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&wldev->wl->mutex);

	if (wldev->phy.ops->interf_mitigation) {
		err = wldev->phy.ops->interf_mitigation(wldev, mode);
		if (err) {
			b43err(wldev->wl, "Interference Mitigation not "
			       "supported by device\n");
		}
	} else
		err = -ENOSYS;

	mmiowb();
	mutex_unlock(&wldev->wl->mutex);

	return err ? err : count;
}
Example #17
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev,
					bool blocked)
{
	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
		b43err(dev->wl, "MAC not suspended\n");

	if (blocked) {
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL_CMD,
			     ~B43_PHY_HT_RF_CTL_CMD_CHIP0_PU);
	} else {
		if (dev->phy.radio_ver == 0x2059)
			b43_radio_2059_init(dev);
		else
			B43_WARN_ON(1);

		b43_switch_channel(dev, dev->phy.channel);
	}
}
Example #18
static void b43_phy_ht_force_rf_sequence(struct b43_wldev *dev, u16 rf_seq)
{
	u8 i;

	u16 save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
	b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE, 0x3);

	b43_phy_set(dev, B43_PHY_HT_RF_SEQ_TRIG, rf_seq);
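	/* Poll for completion. Note that i doubles as a timeout flag: it is
	 * zeroed before the early break, so a non-zero value after the loop
	 * means the ~200 ms budget expired. */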
	for (i = 0; i < 200; i++) {
		if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & rf_seq)) {
			i = 0;
			break;
		}
		msleep(1);
	}
	if (i)
		b43err(dev->wl, "Forcing RF sequence timeout\n");

	b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
}
Example #19
File: phy_ht.c Project: mbgg/linux
static void b43_phy_ht_run_samples(struct b43_wldev *dev, u16 samps, u16 loops,
				   u16 wait)
{
	struct b43_phy_ht *phy_ht = dev->phy.ht;
	u16 save_seq_mode;
	int i;

	for (i = 0; i < 3; i++) {
		if (phy_ht->bb_mult_save[i] < 0)
			phy_ht->bb_mult_save[i] = b43_httab_read(dev, B43_HTTAB16(13, 0x63 + i * 4));
	}

	b43_phy_write(dev, B43_PHY_HT_SAMP_DEP_CNT, samps - 1);
	if (loops != 0xFFFF)
		loops--;
	b43_phy_write(dev, B43_PHY_HT_SAMP_LOOP_CNT, loops);
	b43_phy_write(dev, B43_PHY_HT_SAMP_WAIT_CNT, wait);

	save_seq_mode = b43_phy_read(dev, B43_PHY_HT_RF_SEQ_MODE);
	b43_phy_set(dev, B43_PHY_HT_RF_SEQ_MODE,
		    B43_PHY_HT_RF_SEQ_MODE_CA_OVER);

	/* TODO: find out mask bits! Do we need more function arguments? */
	b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
	b43_phy_mask(dev, B43_PHY_HT_SAMP_CMD, ~0);
	b43_phy_mask(dev, B43_PHY_HT_IQLOCAL_CMDGCTL, ~0);
	b43_phy_set(dev, B43_PHY_HT_SAMP_CMD, 0x1);

	for (i = 0; i < 100; i++) {
		if (!(b43_phy_read(dev, B43_PHY_HT_RF_SEQ_STATUS) & 1)) {
			i = 0;
			break;
		}
		udelay(10);
	}
	if (i)
		b43err(dev->wl, "run samples timeout\n");

	b43_phy_write(dev, B43_PHY_HT_RF_SEQ_MODE, save_seq_mode);
}
Example #20
static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}
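b43_poison_rx_buffer() is what later allows dma_rx() to detect a buffer the device never wrote (see the zero-length wait loop and the poison check there). Below is a sketch of what the helper plausibly does; the exact fill pattern and length are assumptions:

/* Sketch, reconstructed from the callers: mark a fresh buffer so an
 * untouched one is recognizable. Pattern and length are assumptions. */
static void b43_poison_rx_buffer_sketch(struct b43_dmaring *ring,
					struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr = (struct b43_rxhdr_fw4 *)skb->data;

	/* dma_rx() treats frame_len == 0 as "device not done yet". */
	rxhdr->frame_len = cpu_to_le16(0);
	/* The start of the frame area gets a value no real frame begins
	 * with, for b43_rx_buffer_is_poisoned() to test against. */
	memset(skb->data + ring->frameoffset, 0xff, 8);
}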
Example #21
static void b43_phy_lcn_op_software_rfkill(struct b43_wldev *dev,
					bool blocked)
{
	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
		b43err(dev->wl, "MAC not suspended\n");

	if (blocked) {
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL2, ~0x7c00);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL1, 0x1f00);

		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL5, ~0x7f00);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL4, ~0x2);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL3, 0x808);

		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL7, ~0x8);
		b43_phy_set(dev, B43_PHY_LCN_RF_CTL6, 0x8);
	} else {
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL1, ~0x1f00);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL3, ~0x808);
		b43_phy_mask(dev, B43_PHY_LCN_RF_CTL6, ~0x8);
	}
}
Example #22
/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}
Example #23
static int b43_dma_set_mask(struct b43_wldev *dev, u64 mask)
{
	u64 orig_mask = mask;
	bool fallback = false;
	int err;

	while (1) {
		err = dma_set_mask(dev->dev->dma_dev, mask);
		if (!err) {
			err = dma_set_coherent_mask(dev->dev->dma_dev, mask);
			if (!err)
				break;
		}
		if (mask == DMA_BIT_MASK(64)) {
			mask = DMA_BIT_MASK(32);
			fallback = true;
			continue;
		}
		if (mask == DMA_BIT_MASK(32)) {
			mask = DMA_BIT_MASK(30);
			fallback = true;
			continue;
		}
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n",
		       (unsigned int)dma_mask_to_engine_type(orig_mask));
		return -EOPNOTSUPP;
	}
	if (fallback) {
		b43info(dev->wl, "DMA mask fallback from %u-bit to %u-bit\n",
			(unsigned int)dma_mask_to_engine_type(orig_mask),
			(unsigned int)dma_mask_to_engine_type(mask));
	}

	return 0;
}
Example #24
static void b43_radio_2059_init(struct b43_wldev *dev)
{
	const u16 routing[] = { R2059_SYN, R2059_TXRX0, R2059_RXRX1 };
	const u16 radio_values[3][2] = {
		{ 0x61, 0xE9 }, { 0x69, 0xD5 }, { 0x73, 0x99 },
	};
	u16 i, j;

	b43_radio_write(dev, R2059_ALL | 0x51, 0x0070);
	b43_radio_write(dev, R2059_ALL | 0x5a, 0x0003);

	for (i = 0; i < ARRAY_SIZE(routing); i++)
		b43_radio_set(dev, routing[i] | 0x146, 0x3);

	b43_radio_set(dev, 0x2e, 0x0078);
	b43_radio_set(dev, 0xc0, 0x0080);
	msleep(2);
	b43_radio_mask(dev, 0x2e, ~0x0078);
	b43_radio_mask(dev, 0xc0, ~0x0080);

	if (1) { /* FIXME */
		b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x1);
		udelay(10);
		b43_radio_set(dev, R2059_RXRX1 | 0x0BF, 0x1);
		b43_radio_maskset(dev, R2059_RXRX1 | 0x19B, 0x3, 0x2);

		b43_radio_set(dev, R2059_RXRX1 | 0x4, 0x2);
		udelay(100);
		b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x2);

		for (i = 0; i < 10000; i++) {
			if (b43_radio_read(dev, R2059_RXRX1 | 0x145) & 1) {
				i = 0;
				break;
			}
			udelay(100);
		}
		if (i)
			b43err(dev->wl, "radio 0x945 timeout\n");

		b43_radio_mask(dev, R2059_RXRX1 | 0x4, ~0x1);
		b43_radio_set(dev, 0xa, 0x60);

		for (i = 0; i < 3; i++) {
			b43_radio_write(dev, 0x17F, radio_values[i][0]);
			b43_radio_write(dev, 0x13D, 0x6E);
			b43_radio_write(dev, 0x13E, radio_values[i][1]);
			b43_radio_write(dev, 0x13C, 0x55);

			for (j = 0; j < 10000; j++) {
				if (b43_radio_read(dev, 0x140) & 2) {
					j = 0;
					break;
				}
				udelay(500);
			}
			if (j)
				b43err(dev->wl, "radio 0x140 timeout\n");

			b43_radio_write(dev, 0x13C, 0x15);
		}

		b43_radio_mask(dev, 0x17F, ~0x1);
	}

	b43_radio_mask(dev, 0x11, ~0x0008);
}
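The raw register numbers in the calibration blocks of this function line up with the named R2059 constants used in the standalone rcal/rccal helpers shown earlier (the same 0x6E write, the same {0x61, 0xE9}... value table, the same start/stop values and done bit). The inferred mapping, stated as an assumption rather than quoted from a header:

/* Inferred from the matching values above; not quoted from the driver: */
#define R2059_RCCAL_MASTER		0x17f
#define R2059_RCCAL_X1			0x13d
#define R2059_RCCAL_TRC0		0x13e
#define R2059_RCCAL_START_R1_Q1_P1	0x13c
#define R2059_RCCAL_DONE_OSCCAP		0x140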
Example #25
/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;	/* poison every slot */

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CPTCFG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

      out:
	return ring;

      err_free_ringmemory:
	free_ringmemory(ring);
      err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
      err_kfree_meta:
	kfree(ring->meta);
      err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
Example #26
File: phy_ht.c Project: mbgg/linux
static int b43_phy_ht_op_init(struct b43_wldev *dev)
{
	struct b43_phy_ht *phy_ht = dev->phy.ht;
	u16 tmp;
	u16 clip_state[3];
	bool saved_tx_pwr_ctl;

	if (dev->dev->bus_type != B43_BUS_BCMA) {
		b43err(dev->wl, "HT-PHY is supported only on BCMA bus!\n");
		return -EOPNOTSUPP;
	}

	b43_phy_ht_tables_init(dev);

	b43_phy_mask(dev, 0x0be, ~0x2);
	b43_phy_set(dev, 0x23f, 0x7ff);
	b43_phy_set(dev, 0x240, 0x7ff);
	b43_phy_set(dev, 0x241, 0x7ff);

	b43_phy_ht_zero_extg(dev);

	b43_phy_mask(dev, B43_PHY_EXTG(0), ~0x3);

	b43_phy_write(dev, B43_PHY_HT_AFE_C1_OVER, 0);
	b43_phy_write(dev, B43_PHY_HT_AFE_C2_OVER, 0);
	b43_phy_write(dev, B43_PHY_HT_AFE_C3_OVER, 0);

	b43_phy_write(dev, B43_PHY_EXTG(0x103), 0x20);
	b43_phy_write(dev, B43_PHY_EXTG(0x101), 0x20);
	b43_phy_write(dev, 0x20d, 0xb8);
	b43_phy_write(dev, B43_PHY_EXTG(0x14f), 0xc8);
	b43_phy_write(dev, 0x70, 0x50);
	b43_phy_write(dev, 0x1ff, 0x30);

	if (0) /* TODO: condition */
		; /* TODO: PHY op on reg 0x217 */

	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
		b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN, 0);
	else
		b43_phy_ht_classifier(dev, B43_PHY_HT_CLASS_CTL_CCK_EN,
				      B43_PHY_HT_CLASS_CTL_CCK_EN);

	b43_phy_set(dev, 0xb1, 0x91);
	b43_phy_write(dev, 0x32f, 0x0003);
	b43_phy_write(dev, 0x077, 0x0010);
	b43_phy_write(dev, 0x0b4, 0x0258);
	b43_phy_mask(dev, 0x17e, ~0x4000);

	b43_phy_write(dev, 0x0b9, 0x0072);

	b43_httab_write_few(dev, B43_HTTAB16(7, 0x14e), 2, 0x010f, 0x010f);
	b43_httab_write_few(dev, B43_HTTAB16(7, 0x15e), 2, 0x010f, 0x010f);
	b43_httab_write_few(dev, B43_HTTAB16(7, 0x16e), 2, 0x010f, 0x010f);

	b43_phy_ht_afe_unk1(dev);

	b43_httab_write_few(dev, B43_HTTAB16(7, 0x130), 9, 0x777, 0x111, 0x111,
			    0x777, 0x111, 0x111, 0x777, 0x111, 0x111);

	b43_httab_write(dev, B43_HTTAB16(7, 0x120), 0x0777);
	b43_httab_write(dev, B43_HTTAB16(7, 0x124), 0x0777);

	b43_httab_write(dev, B43_HTTAB16(8, 0x00), 0x02);
	b43_httab_write(dev, B43_HTTAB16(8, 0x10), 0x02);
	b43_httab_write(dev, B43_HTTAB16(8, 0x20), 0x02);

	b43_httab_write_few(dev, B43_HTTAB16(8, 0x08), 4,
			    0x8e, 0x96, 0x96, 0x96);
	b43_httab_write_few(dev, B43_HTTAB16(8, 0x18), 4,
			    0x8f, 0x9f, 0x9f, 0x9f);
	b43_httab_write_few(dev, B43_HTTAB16(8, 0x28), 4,
			    0x8f, 0x9f, 0x9f, 0x9f);

	b43_httab_write_few(dev, B43_HTTAB16(8, 0x0c), 4, 0x2, 0x2, 0x2, 0x2);
	b43_httab_write_few(dev, B43_HTTAB16(8, 0x1c), 4, 0x2, 0x2, 0x2, 0x2);
	b43_httab_write_few(dev, B43_HTTAB16(8, 0x2c), 4, 0x2, 0x2, 0x2, 0x2);

	b43_phy_maskset(dev, 0x0280, 0xff00, 0x3e);
	b43_phy_maskset(dev, 0x0283, 0xff00, 0x3e);
	b43_phy_maskset(dev, B43_PHY_OFDM(0x0141), 0xff00, 0x46);
	b43_phy_maskset(dev, 0x0283, 0xff00, 0x40);

	b43_httab_write_few(dev, B43_HTTAB16(00, 0x8), 4,
			    0x09, 0x0e, 0x13, 0x18);
	b43_httab_write_few(dev, B43_HTTAB16(01, 0x8), 4,
			    0x09, 0x0e, 0x13, 0x18);
	/* TODO: Did wl mean 2 instead of 40? */
	b43_httab_write_few(dev, B43_HTTAB16(40, 0x8), 4,
			    0x09, 0x0e, 0x13, 0x18);

	b43_phy_maskset(dev, B43_PHY_OFDM(0x24), 0x3f, 0xd);
	b43_phy_maskset(dev, B43_PHY_OFDM(0x64), 0x3f, 0xd);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xa4), 0x3f, 0xd);

	b43_phy_set(dev, B43_PHY_EXTG(0x060), 0x1);
	b43_phy_set(dev, B43_PHY_EXTG(0x064), 0x1);
	b43_phy_set(dev, B43_PHY_EXTG(0x080), 0x1);
	b43_phy_set(dev, B43_PHY_EXTG(0x084), 0x1);

	/* Copy some tables entries */
	tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x144));
	b43_httab_write(dev, B43_HTTAB16(7, 0x14a), tmp);
	tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x154));
	b43_httab_write(dev, B43_HTTAB16(7, 0x15a), tmp);
	tmp = b43_httab_read(dev, B43_HTTAB16(7, 0x164));
	b43_httab_write(dev, B43_HTTAB16(7, 0x16a), tmp);

	/* Reset CCA */
	b43_phy_force_clock(dev, true);
	tmp = b43_phy_read(dev, B43_PHY_HT_BBCFG);
	b43_phy_write(dev, B43_PHY_HT_BBCFG, tmp | B43_PHY_HT_BBCFG_RSTCCA);
	b43_phy_write(dev, B43_PHY_HT_BBCFG, tmp & ~B43_PHY_HT_BBCFG_RSTCCA);
	b43_phy_force_clock(dev, false);

	b43_mac_phy_clock_set(dev, true);

	b43_phy_ht_pa_override(dev, false);
	b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RX2TX);
	b43_phy_ht_force_rf_sequence(dev, B43_PHY_HT_RF_SEQ_TRIG_RST2RX);
	b43_phy_ht_pa_override(dev, true);

	/* TODO: Should we restore it? Or store it in global PHY info? */
	b43_phy_ht_classifier(dev, 0, 0);
	b43_phy_ht_read_clip_detection(dev, clip_state);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		b43_phy_ht_bphy_init(dev);

	b43_httab_write_bulk(dev, B43_HTTAB32(0x1a, 0xc0),
			B43_HTTAB_1A_C0_LATE_SIZE, b43_httab_0x1a_0xc0_late);

	saved_tx_pwr_ctl = phy_ht->tx_pwr_ctl;
	b43_phy_ht_tx_power_fix(dev);
	b43_phy_ht_tx_power_ctl(dev, false);
	b43_phy_ht_tx_power_ctl_idle_tssi(dev);
	b43_phy_ht_tx_power_ctl_setup(dev);
	b43_phy_ht_tx_power_ctl(dev, saved_tx_pwr_ctl);

	return 0;
}
Example #27
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
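	/* The device wrote frameoffset bytes of RX header in front of the
	 * actual frame: grow the skb over everything written, then strip
	 * the header so skb->data points at the 802.11 frame. */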
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
Example #28
int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (unlikely(WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:

	return err;
}