Beispiel #1
0
/*
 * Re-program the device after a channel change: set the new channel,
 * reset the Tx rings and re-apply the WME queue parameters.
 * Returns 0 on success or a negative error code from the failing step.
 */
static int ar5523_switch_chan(struct ar5523 *ar)
{
	int error;

	error = ar5523_set_chan(ar);
	if (error) {
		ar5523_err(ar, "could not set chan, error %d\n", error);
		return error;
	}

	/* reset Tx rings */
	error = ar5523_reset_tx_queues(ar);
	if (error) {
		ar5523_err(ar, "could not reset Tx queues, error %d\n",
			   error);
		return error;
	}

	/* set Tx rings WME properties */
	error = ar5523_queue_init(ar);
	if (error)
		ar5523_err(ar, "could not init wme, error %d\n", error);

	return error;
}
Beispiel #2
0
/*
 * Work handler that replenishes the RX data URB pool.  For each entry on
 * ar->rx_data_free it attaches a freshly allocated skb and submits the
 * bulk-in URB, then moves the entry to ar->rx_data_used.  The free list
 * is only peeked under rx_data_list_lock; the entry is unlinked (via
 * list_move) after the URB has been prepared.  This appears to rely on
 * the refill work being single-threaded on ar->wq — NOTE(review):
 * confirm no other context consumes rx_data_free concurrently.
 */
static void ar5523_rx_refill_work(struct work_struct *work)
{
	struct ar5523 *ar = container_of(work, struct ar5523, rx_refill_work);
	struct ar5523_rx_data *data;
	unsigned long flags;
	int error;

	ar5523_dbg(ar, "%s\n", __func__);
	do {
		spin_lock_irqsave(&ar->rx_data_list_lock, flags);

		/* Peek at the first free entry without unlinking it yet. */
		if (!list_empty(&ar->rx_data_free))
			data = (struct ar5523_rx_data *) ar->rx_data_free.next;
		else
			data = NULL;
		spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);

		if (!data)
			goto done; /* pool fully refilled */

		/* On allocation failure the entry stays on the free list,
		 * so a later refill pass can retry. */
		data->skb = alloc_skb(ar->rxbufsz, GFP_KERNEL);
		if (!data->skb) {
			ar5523_err(ar, "could not allocate rx skbuff\n");
			return;
		}

		usb_fill_bulk_urb(data->urb, ar->dev,
				  ar5523_data_rx_pipe(ar->dev), data->skb->data,
				  ar->rxbufsz, ar5523_data_rx_cb, data);

		/* Publish the buffer as in-use before submission. */
		spin_lock_irqsave(&ar->rx_data_list_lock, flags);
		list_move(&data->list, &ar->rx_data_used);
		spin_unlock_irqrestore(&ar->rx_data_list_lock, flags);
		atomic_dec(&ar->rx_data_free_cnt);

		error = usb_submit_urb(data->urb, GFP_KERNEL);
		if (error) {
			/* Unwind: free the skb and return the entry to the
			 * free list so a later refill can retry. */
			kfree_skb(data->skb);
			if (error != -ENODEV)
				ar5523_err(ar, "Err sending rx data urb %d\n",
					   error);
			ar5523_rx_data_put(ar, data);
			atomic_inc(&ar->rx_data_free_cnt);
			return;
		}

	} while (true);
done:
	return;
}
Beispiel #3
0
/*
 * mac80211 bss_info_changed callback.  Only association state changes
 * are acted on: on association the connection, rate set and AID are
 * programmed into the firmware and the stats poll is started; on
 * disassociation the poll is stopped and the link LED turned off.
 */
static void ar5523_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *bss,
				    u32 changed)
{
	struct ar5523 *ar = hw->priv;
	int error;

	ar5523_dbg(ar, "bss_info_changed called\n");
	mutex_lock(&ar->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		if (!bss->assoc) {
			/* Disassociated: stop polling, drop the link. */
			cancel_delayed_work(&ar->stat_work);
			clear_bit(AR5523_CONNECTED, &ar->flags);
			ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_OFF);
			goto out_unlock;
		}

		error = ar5523_create_connection(ar, vif, bss);
		if (error) {
			ar5523_err(ar, "could not create connection\n");
			goto out_unlock;
		}

		error = ar5523_set_basic_rates(ar, bss);
		if (error) {
			ar5523_err(ar, "could not set negotiated rate set\n");
			goto out_unlock;
		}

		error = ar5523_write_associd(ar, bss);
		if (error) {
			ar5523_err(ar, "could not set association\n");
			goto out_unlock;
		}

		/* turn link LED on */
		ar5523_set_ledsteady(ar, UATH_LED_LINK, UATH_LED_ON);
		set_bit(AR5523_CONNECTED, &ar->flags);
		ieee80211_queue_delayed_work(hw, &ar->stat_work, HZ);
	}

out_unlock:
	mutex_unlock(&ar->mutex);

}
Beispiel #4
0
/*
 * TX watchdog work item: fired (via the tx_wd_timer) when the TX queues
 * stop completing.  Logs the stuck state and asks the firmware to reset
 * the dongle.
 */
static void ar5523_tx_wd_work(struct work_struct *work)
{
	struct ar5523 *ar = container_of(work, struct ar5523, tx_wd_work);
	int total, pending;

	/* Occasionally the TX queues stop responding. The only way to
	 * recover seems to be to reset the dongle.
	 */

	mutex_lock(&ar->mutex);
	total = atomic_read(&ar->tx_nr_total);
	pending = atomic_read(&ar->tx_nr_pending);
	ar5523_err(ar, "TX queue stuck (tot %d pend %d)\n", total, pending);

	ar5523_err(ar, "Will restart dongle.\n");
	ar5523_cmd_write(ar, WDCMSG_TARGET_RESET, NULL, 0, 0);
	mutex_unlock(&ar->mutex);
}
Beispiel #5
0
/*
 * Send a firmware command over the bulk-out command pipe and wait up to
 * two seconds for completion (signalled by the TX completion callback
 * for plain writes, or by the RX command path for reads).
 *
 * @code:  WDCMSG_* command code
 * @idata: optional command payload (may be NULL only when ilen == 0)
 * @ilen:  payload length in bytes
 * @odata: optional reply buffer, filled by the RX command path
 * @olen:  size of @odata in bytes
 * @flags: AR5523_CMD_FLAG_* modifiers
 *
 * Returns 0 on success or a negative error code.  Uses the single shared
 * ar->tx_cmd buffer, so callers must serialize (ar->mutex).
 */
static int ar5523_cmd(struct ar5523 *ar, u32 code, const void *idata,
		      int ilen, void *odata, int olen, int flags)
{
	struct ar5523_cmd_hdr *hdr;
	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
	int xferlen, error;

	/* always bulk-out a multiple of 4 bytes */
	xferlen = (sizeof(struct ar5523_cmd_hdr) + ilen + 3) & ~3;

	hdr = (struct ar5523_cmd_hdr *)cmd->buf_tx;
	memset(hdr, 0, sizeof(struct ar5523_cmd_hdr));
	hdr->len  = cpu_to_be32(xferlen);
	hdr->code = cpu_to_be32(code);
	hdr->priv = AR5523_CMD_ID;

	if (flags & AR5523_CMD_FLAG_MAGIC)
		hdr->magic = cpu_to_be32(1 << 24);
	/* Several callers pass idata == NULL with ilen == 0 (e.g. the
	 * TARGET_RESET and GET_STATS writes); memcpy() from a NULL
	 * pointer is undefined behavior even for a zero length, so only
	 * copy when there is a payload. */
	if (ilen)
		memcpy(hdr + 1, idata, ilen);

	cmd->odata = odata;
	cmd->olen = olen;
	cmd->flags = flags;

	ar5523_dbg(ar, "do cmd %02x\n", code);

	usb_fill_bulk_urb(cmd->urb_tx, ar->dev, ar5523_cmd_tx_pipe(ar->dev),
			  cmd->buf_tx, xferlen, ar5523_cmd_tx_cb, cmd);
	cmd->urb_tx->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	error = usb_submit_urb(cmd->urb_tx, GFP_KERNEL);
	if (error) {
		ar5523_err(ar, "could not send command 0x%x, error=%d\n",
			   code, error);
		return error;
	}

	if (!wait_for_completion_timeout(&cmd->done, 2 * HZ)) {
		cmd->odata = NULL;
		ar5523_err(ar, "timeout waiting for command %02x reply\n",
			   code);
		cmd->res = -ETIMEDOUT;
		/* NOTE(review): the TX urb is not killed on timeout, so a
		 * very late completion can still run ar5523_cmd_tx_cb —
		 * confirm whether usb_kill_urb() is needed here. */
	}
	return cmd->res;
}
Beispiel #6
0
/*
 * Read one ST_* status item from the device into odata (olen bytes).
 * Returns 0 on success or a negative error code.
 */
static int ar5523_get_status(struct ar5523 *ar, u32 which, void *odata,
			     int olen)
{
	int error;
	__be32 which_be;

	which_be = cpu_to_be32(which);
	error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_STATUS,
	    &which_be, sizeof(which_be), odata, olen, AR5523_CMD_FLAG_MAGIC);
	if (error != 0)
		/* The previous message claimed an EEPROM read; this is a
		 * GET_STATUS query, so report it as such. */
		ar5523_err(ar, "could not read status 0x%02x\n", which);
	return error;
}
Beispiel #7
0
/*
 * Push out anything still pending on the TX queue and wait (bounded by
 * AR5523_FLUSH_TIMEOUT) until the hardware has drained it.  Skipped when
 * the USB device is already gone.
 */
static void ar5523_flush_tx(struct ar5523 *ar)
{
	long remaining;

	ar5523_tx_work_locked(ar);

	/* Don't waste time trying to flush if USB is disconnected */
	if (test_bit(AR5523_USB_DISCONNECTED, &ar->flags))
		return;

	remaining = wait_event_timeout(ar->tx_flush_waitq,
				       !atomic_read(&ar->tx_nr_pending),
				       AR5523_FLUSH_TIMEOUT);
	if (!remaining)
		ar5523_err(ar, "flush timeout (tot %d pend %d)\n",
			   atomic_read(&ar->tx_nr_total),
			   atomic_read(&ar->tx_nr_pending));
}
Beispiel #8
0
/*
 * Query the MAC address and serial number from the device and record
 * them (MAC into the ieee80211 hw, serial into ar->serial).
 * Returns 0 on success or a negative error code.
 */
static int ar5523_get_devstatus(struct ar5523 *ar)
{
	u8 macaddr[ETH_ALEN];
	int ret;

	/* retrieve MAC address */
	ret = ar5523_get_status(ar, ST_MAC_ADDR, macaddr, ETH_ALEN);
	if (ret != 0) {
		ar5523_err(ar, "could not read MAC address\n");
		return ret;
	}
	SET_IEEE80211_PERM_ADDR(ar->hw, macaddr);

	/* retrieve the device serial number */
	ret = ar5523_get_status(ar, ST_SERIAL_NUMBER, &ar->serial[0],
				sizeof(ar->serial));
	if (ret != 0) {
		ar5523_err(ar, "could not read device serial number\n");
		return ret;
	}

	return 0;
}
Beispiel #9
0
/*
 * Write a single 32-bit value to target register 'reg'.
 * Returns 0 on success or a negative error code.
 */
static int ar5523_config(struct ar5523 *ar, u32 reg, u32 val)
{
	struct ar5523_write_mac write;
	__be32 beval = cpu_to_be32(val);
	int error;

	write.reg = cpu_to_be32(reg);
	write.len = cpu_to_be32(0);	/* 0 = single write */
	memcpy(write.data, &beval, sizeof(beval));

	/* reg word + len word + one data word */
	error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
				 3 * sizeof(u32), 0);
	if (error != 0)
		ar5523_err(ar, "could not write register 0x%02x\n", reg);
	return error;
}
Beispiel #10
0
/*
 * Ask the device for its maximum RX transfer chunk size and store it in
 * ar->rxbufsz, clamping obviously bogus values to AR5523_SANE_RXBUFSZ.
 * Returns 0 on success or a negative error code.
 */
static int ar5523_get_max_rxsz(struct ar5523 *ar)
{
	__be32 rxsize;
	int error;

	/* Get max rx size */
	error = ar5523_get_status(ar, ST_WDC_TRANSPORT_CHUNK_SIZE, &rxsize,
				  sizeof(rxsize));
	if (error != 0) {
		ar5523_err(ar, "could not read max RX size\n");
		return error;
	}

	ar->rxbufsz = be32_to_cpu(rxsize);
	if (ar->rxbufsz == 0 || ar->rxbufsz > AR5523_SANE_RXBUFSZ) {
		/* Device reported nonsense - fall back to a sane default. */
		ar5523_err(ar, "Bad rxbufsz from device. Using %d instead\n",
			   AR5523_SANE_RXBUFSZ);
		ar->rxbufsz = AR5523_SANE_RXBUFSZ;
	}

	ar5523_dbg(ar, "Max RX buf size: %d\n", ar->rxbufsz);
	return 0;
}
Beispiel #11
0
/*
 * Read one device capability word.  On success *val holds the value in
 * CPU byte order; on failure *val is left untouched.
 * Returns 0 on success or a negative error code.
 *
 * Fix: the old code stored a __be32 into the u32 parameter 'cap' and
 * byte-swapped *val in place after writing raw big-endian reply data
 * into it (sparse endianness violations, and *val was clobbered with
 * raw BE bytes on error).  Use proper __be32 locals instead.
 */
static int ar5523_get_capability(struct ar5523 *ar, u32 cap, u32 *val)
{
	__be32 cap_be = cpu_to_be32(cap);
	__be32 val_be = 0;
	int error;

	error = ar5523_cmd_read(ar, WDCMSG_TARGET_GET_CAPABILITY,
	    &cap_be, sizeof(cap_be), &val_be, sizeof(val_be),
	    AR5523_CMD_FLAG_MAGIC);
	if (error != 0) {
		ar5523_err(ar, "could not read capability %u\n", cap);
		return error;
	}
	*val = be32_to_cpu(val_be);
	return error;
}
Beispiel #12
0
/*
 * Write 'len' bytes of configuration data to target register 'reg'.
 * len == 0 performs a reset-style write carrying only the register word.
 * Returns 0 on success or a negative error code.
 */
static int ar5523_config_multi(struct ar5523 *ar, u32 reg, const void *data,
			       int len)
{
	struct ar5523_write_mac write;
	int error;

	/* 'len' is caller-supplied but write.data is a fixed-size buffer;
	 * reject anything that would overflow it (or a negative length). */
	if (len < 0 || len > (int)sizeof(write.data))
		return -EINVAL;

	write.reg = cpu_to_be32(reg);
	write.len = cpu_to_be32(len);
	if (len)
		memcpy(write.data, data, len);

	/* properly handle the case where len is zero (reset) */
	error = ar5523_cmd_write(ar, WDCMSG_TARGET_SET_CONFIG, &write,
	    (len == 0) ? sizeof(u32) : 2 * sizeof(u32) + len, 0);
	if (error != 0)
		ar5523_err(ar, "could not write %d bytes to register 0x%02x\n",
			   len, reg);
	return error;
}
Beispiel #13
0
/*
 * Command submitted cb
 *
 * Completion handler for the bulk-out command URB.  A USB error wakes
 * the waiter immediately with the error; otherwise write-only commands
 * complete here, while read commands are completed later by the RX
 * command path once the reply arrives.
 */
static void ar5523_cmd_tx_cb(struct urb *urb)
{
	struct ar5523_tx_cmd *cmd = urb->context;
	struct ar5523 *ar = cmd->ar;

	if (urb->status) {
		ar5523_err(ar, "Failed to TX command. Status = %d\n",
			   urb->status);
		cmd->res = urb->status;
		complete(&cmd->done);
		return;
	}

	/* Read commands wait for the reply on the RX side. */
	if (cmd->flags & AR5523_CMD_FLAG_READ)
		return;

	cmd->res = 0;
	complete(&cmd->done);
}
Beispiel #14
0
static int ar5523_submit_rx_cmd(struct ar5523 *ar)
{
	int error;

	usb_fill_bulk_urb(ar->rx_cmd_urb, ar->dev,
			  ar5523_cmd_rx_pipe(ar->dev), ar->rx_cmd_buf,
			  AR5523_MAX_RXCMDSZ, ar5523_cmd_rx_cb, ar);
	ar->rx_cmd_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	error = usb_submit_urb(ar->rx_cmd_urb, GFP_ATOMIC);
	if (error) {
		if (error != -ENODEV)
			ar5523_err(ar, "error %d when submitting rx urb\n",
				   error);
		return error;
	}
	return 0;
}
Beispiel #15
0
/*
 * This function is called periodically (every second) when associated to
 * query device statistics.
 */
static void ar5523_stat_work(struct work_struct *work)
{
	struct ar5523 *ar = container_of(work, struct ar5523, stat_work.work);
	int error;

	ar5523_dbg(ar, "%s\n", __func__);
	mutex_lock(&ar->mutex);

	/*
	 * Send request for statistics asynchronously once a second. This
	 * seems to be important. Throughput is a lot better if this is done.
	 */
	error = ar5523_cmd_write(ar, WDCMSG_TARGET_GET_STATS, NULL, 0, 0);
	if (error)
		ar5523_err(ar, "could not query stats, error %d\n", error);
	mutex_unlock(&ar->mutex);
	ieee80211_queue_delayed_work(ar->hw, &ar->stat_work, HZ);
}
Beispiel #16
0
/*
 * Parse a read-command reply received on the command pipe and copy the
 * payload into the waiting command's odata buffer, then wake the waiter.
 * The reply layout is: hdr, a 32-bit olen word, then olen bytes of data
 * (olen == 0 conventionally means one word).
 *
 * Fix: 'olen' comes straight from the device and was never validated
 * against the number of bytes actually received (dlen); a malicious or
 * buggy device could trigger an out-of-bounds read of up to cmd->olen
 * bytes (or a huge memcpy if olen went negative).  Clamp it to dlen.
 */
static void ar5523_read_reply(struct ar5523 *ar, struct ar5523_cmd_hdr *hdr,
			      struct ar5523_tx_cmd *cmd)
{
	int dlen, olen;
	__be32 *rp;

	dlen = be32_to_cpu(hdr->len) - sizeof(*hdr);

	if (dlen < 0) {
		WARN_ON(1);
		goto out;
	}

	ar5523_dbg(ar, "Code = %d len = %d\n", be32_to_cpu(hdr->code) & 0xff,
		   dlen);

	rp = (__be32 *)(hdr + 1);
	if (dlen >= sizeof(u32)) {
		olen = be32_to_cpu(rp[0]);
		dlen -= sizeof(u32);
		if (olen == 0) {
			/* convention is 0 =>'s one word */
			olen = sizeof(u32);
		}
	} else
		olen = 0;

	/* Never trust the device-supplied length beyond what we received. */
	if (olen < 0 || olen > dlen) {
		ar5523_err(ar, "bad reply olen %d (dlen %d)\n", olen, dlen);
		olen = dlen;
	}

	if (cmd->odata) {
		if (cmd->olen < olen) {
			ar5523_err(ar, "olen to small %d < %d\n",
				   cmd->olen, olen);
			cmd->olen = 0;
			cmd->res = -EOVERFLOW;
		} else {
			cmd->olen = olen;
			memcpy(cmd->odata, &rp[1], olen);
			cmd->res = 0;
		}
	}

out:
	complete(&cmd->done);
}
Beispiel #17
0
/*
 * Allocate the single shared TX command URB and its DMA-coherent buffer.
 * Returns 0 on success or -ENOMEM, releasing the URB if only the buffer
 * allocation failed.
 */
static int ar5523_alloc_tx_cmd(struct ar5523 *ar)
{
	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;

	cmd->ar = ar;
	init_completion(&cmd->done);

	cmd->urb_tx = usb_alloc_urb(0, GFP_KERNEL);
	if (!cmd->urb_tx) {
		ar5523_err(ar, "could not allocate urb\n");
		return -ENOMEM;
	}

	cmd->buf_tx = usb_alloc_coherent(ar->dev, AR5523_MAX_TXCMDSZ,
					 GFP_KERNEL,
					 &cmd->urb_tx->transfer_dma);
	if (cmd->buf_tx)
		return 0;

	/* Coherent buffer allocation failed - release the URB again. */
	usb_free_urb(cmd->urb_tx);
	return -ENOMEM;
}
Beispiel #18
0
/*
 * Allocate an URB for every entry of the fixed RX buffer pool and queue
 * the entries on the free list.  On failure everything allocated so far
 * is released via ar5523_free_rx_bufs().  Returns 0 or -ENOMEM.
 */
static int ar5523_alloc_rx_bufs(struct ar5523 *ar)
{
	struct ar5523_rx_data *data;
	int i;

	for (i = 0; i < AR5523_RX_DATA_COUNT; i++) {
		data = &ar->rx_data[i];

		data->ar = ar;
		data->urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!data->urb)
			goto err;
		list_add_tail(&data->list, &ar->rx_data_free);
		atomic_inc(&ar->rx_data_free_cnt);
	}
	return 0;

err:
	ar5523_err(ar, "could not allocate rx data urb\n");
	ar5523_free_rx_bufs(ar);
	return -ENOMEM;
}
Beispiel #19
0
static int ar5523_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *dev = interface_to_usbdev(intf);
	struct ieee80211_hw *hw;
	struct ar5523 *ar;
	int error = -ENOMEM;

	/*
	 * Load firmware if the device requires it.  This will return
	 * -ENXIO on success and we'll get called back afer the usb
	 * id changes to indicate that the firmware is present.
	 */
	if (id->driver_info & AR5523_FLAG_PRE_FIRMWARE)
		return ar5523_load_firmware(dev);


	hw = ieee80211_alloc_hw(sizeof(*ar), &ar5523_ops);
	if (!hw)
		goto out;
	SET_IEEE80211_DEV(hw, &intf->dev);

	ar = hw->priv;
	ar->hw = hw;
	ar->dev = dev;
	mutex_init(&ar->mutex);

	INIT_DELAYED_WORK(&ar->stat_work, ar5523_stat_work);
	init_timer(&ar->tx_wd_timer);
	setup_timer(&ar->tx_wd_timer, ar5523_tx_wd_timer, (unsigned long) ar);
	INIT_WORK(&ar->tx_wd_work, ar5523_tx_wd_work);
	INIT_WORK(&ar->tx_work, ar5523_tx_work);
	INIT_LIST_HEAD(&ar->tx_queue_pending);
	INIT_LIST_HEAD(&ar->tx_queue_submitted);
	spin_lock_init(&ar->tx_data_list_lock);
	atomic_set(&ar->tx_nr_total, 0);
	atomic_set(&ar->tx_nr_pending, 0);
	init_waitqueue_head(&ar->tx_flush_waitq);

	atomic_set(&ar->rx_data_free_cnt, 0);
	INIT_WORK(&ar->rx_refill_work, ar5523_rx_refill_work);
	INIT_LIST_HEAD(&ar->rx_data_free);
	INIT_LIST_HEAD(&ar->rx_data_used);
	spin_lock_init(&ar->rx_data_list_lock);

	ar->wq = create_singlethread_workqueue("ar5523");
	if (!ar->wq) {
		ar5523_err(ar, "Could not create wq\n");
		goto out_free_ar;
	}

	error = ar5523_alloc_rx_bufs(ar);
	if (error) {
		ar5523_err(ar, "Could not allocate rx buffers\n");
		goto out_free_wq;
	}

	error = ar5523_alloc_rx_cmd(ar);
	if (error) {
		ar5523_err(ar, "Could not allocate rx command buffers\n");
		goto out_free_rx_bufs;
	}

	error = ar5523_alloc_tx_cmd(ar);
	if (error) {
		ar5523_err(ar, "Could not allocate tx command buffers\n");
		goto out_free_rx_cmd;
	}

	error = ar5523_submit_rx_cmd(ar);
	if (error) {
		ar5523_err(ar, "Failed to submit rx cmd\n");
		goto out_free_tx_cmd;
	}

	/*
	 * We're now ready to send/receive firmware commands.
	 */
	error = ar5523_host_available(ar);
	if (error) {
		ar5523_err(ar, "could not initialize adapter\n");
		goto out_cancel_rx_cmd;
	}

	error = ar5523_get_max_rxsz(ar);
	if (error) {
		ar5523_err(ar, "could not get caps from adapter\n");
		goto out_cancel_rx_cmd;
	}

	error = ar5523_get_devcap(ar);
	if (error) {
		ar5523_err(ar, "could not get caps from adapter\n");
		goto out_cancel_rx_cmd;
	}

	error = ar5523_get_devstatus(ar);
	if (error != 0) {
		ar5523_err(ar, "could not get device status\n");
		goto out_cancel_rx_cmd;
	}

	ar5523_info(ar, "MAC/BBP AR5523, RF AR%c112\n",
			(id->driver_info & AR5523_FLAG_ABG) ? '5' : '2');

	ar->vif = NULL;
	hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		    IEEE80211_HW_SIGNAL_DBM |
		    IEEE80211_HW_HAS_RATE_CONTROL;
	hw->extra_tx_headroom = sizeof(struct ar5523_tx_desc) +
				sizeof(struct ar5523_chunk);
	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
	hw->queues = 1;

	error = ar5523_init_modes(ar);
	if (error)
		goto out_cancel_rx_cmd;

	usb_set_intfdata(intf, hw);

	error = ieee80211_register_hw(hw);
	if (error) {
		ar5523_err(ar, "could not register device\n");
		goto out_cancel_rx_cmd;
	}

	ar5523_info(ar, "Found and initialized AR5523 device\n");
	return 0;

out_cancel_rx_cmd:
	ar5523_cancel_rx_cmd(ar);
out_free_tx_cmd:
	ar5523_free_tx_cmd(ar);
out_free_rx_cmd:
	ar5523_free_rx_cmd(ar);
out_free_rx_bufs:
	ar5523_free_rx_bufs(ar);
out_free_wq:
	destroy_workqueue(ar->wq);
out_free_ar:
	ieee80211_free_hw(hw);
out:
	return error;
}
Beispiel #20
0
/*
 * Completion handler for the bulk-in command pipe.  Dispatches firmware
 * replies and notifications, and (except on the TARGET_START error
 * paths) resubmits the RX command URB so the channel keeps running.
 *
 * Fix: the WDCMSG_TARGET_START case copied into cmd->odata without
 * checking it.  An unsolicited TARGET_START reply (no read command
 * waiting, odata == NULL) would dereference a NULL pointer.
 */
static void ar5523_cmd_rx_cb(struct urb *urb)
{
	struct ar5523 *ar = urb->context;
	struct ar5523_tx_cmd *cmd = &ar->tx_cmd;
	struct ar5523_cmd_hdr *hdr = ar->rx_cmd_buf;
	int dlen;
	u32 code, hdrlen;

	if (urb->status) {
		if (urb->status != -ESHUTDOWN)
			ar5523_err(ar, "RX USB error %d.\n", urb->status);
		goto skip;
	}

	if (urb->actual_length < sizeof(struct ar5523_cmd_hdr)) {
		ar5523_err(ar, "RX USB to short.\n");
		goto skip;
	}

	ar5523_dbg(ar, "%s code %02x priv %d\n", __func__,
		   be32_to_cpu(hdr->code) & 0xff, hdr->priv);

	code = be32_to_cpu(hdr->code);
	hdrlen = be32_to_cpu(hdr->len);

	switch (code & 0xff) {
	default:
		/* reply to a read command */
		if (hdr->priv != AR5523_CMD_ID) {
			ar5523_err(ar, "Unexpected command id: %02x\n",
				   code & 0xff);
			goto skip;
		}
		ar5523_read_reply(ar, hdr, cmd);
		break;

	case WDCMSG_DEVICE_AVAIL:
		ar5523_dbg(ar, "WDCMSG_DEVICE_AVAIL\n");
		cmd->res = 0;
		cmd->olen = 0;
		complete(&cmd->done);
		break;

	case WDCMSG_SEND_COMPLETE:
		ar5523_dbg(ar, "WDCMSG_SEND_COMPLETE: %d pending\n",
			atomic_read(&ar->tx_nr_pending));
		if (!test_bit(AR5523_HW_UP, &ar->flags))
			ar5523_dbg(ar, "Unexpected WDCMSG_SEND_COMPLETE\n");
		else {
			/* re-arm the TX watchdog and release one packet */
			mod_timer(&ar->tx_wd_timer,
				  jiffies + AR5523_TX_WD_TIMEOUT);
			ar5523_data_tx_pkt_put(ar);

		}
		break;

	case WDCMSG_TARGET_START:
		/* This command returns a bogus id so it needs special
		   handling */
		dlen = hdrlen - sizeof(*hdr);
		if (dlen != (int)sizeof(u32)) {
			ar5523_err(ar, "Invalid reply to WDCMSG_TARGET_START");
			return;
		}
		/* An unsolicited reply has no reader waiting; copying
		 * would dereference a NULL odata pointer. */
		if (!cmd->odata) {
			ar5523_err(ar,
				   "Unexpected WDCMSG_TARGET_START reply");
			return;
		}
		memcpy(cmd->odata, hdr + 1, sizeof(u32));
		cmd->olen = sizeof(u32);
		cmd->res = 0;
		complete(&cmd->done);
		break;

	case WDCMSG_STATS_UPDATE:
		ar5523_dbg(ar, "WDCMSG_STATS_UPDATE\n");
		break;
	}

skip:
	ar5523_submit_rx_cmd(ar);
}
Beispiel #21
0
/*
 * Completion handler for data bulk-in URBs.  Validates the leading chunk
 * header and the trailing RX descriptor supplied by the firmware, strips
 * and re-aligns the 802.11 frame and hands it to mac80211.  On any error
 * the skb is dropped; the buffer entry is always recycled and the refill
 * worker is kicked once enough free buffers have accumulated.
 *
 * Fix: rxlen is device-controlled and was only checked against the upper
 * bound and zero.  A value in (0, sizeof(struct ar5523_rx_desc)) made
 * "rxlen - sizeof(desc)" underflow, so skb_put() was called with a huge
 * length and panicked the kernel.  Add the missing lower-bound check.
 */
static void ar5523_data_rx_cb(struct urb *urb)
{
	struct ar5523_rx_data *data = urb->context;
	struct ar5523 *ar = data->ar;
	struct ar5523_rx_desc *desc;
	struct ar5523_chunk *chunk;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_rx_status *rx_status;
	u32 rxlen;
	int usblen = urb->actual_length;
	int hdrlen, pad;

	ar5523_dbg(ar, "%s\n", __func__);
	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (urb->status != -ESHUTDOWN)
			ar5523_err(ar, "%s: USB err: %d\n", __func__,
				   urb->status);
		goto skip;
	}

	if (usblen < AR5523_MIN_RXBUFSZ) {
		ar5523_err(ar, "RX: wrong xfer size (usblen=%d)\n", usblen);
		goto skip;
	}

	chunk = (struct ar5523_chunk *) data->skb->data;

	/* Only complete single-chunk transfers are handled. */
	if (((chunk->flags & UATH_CFLAGS_FINAL) == 0) ||
		chunk->seqnum != 0) {
		ar5523_dbg(ar, "RX: No final flag. s: %d f: %02x l: %d\n",
			   chunk->seqnum, chunk->flags,
			   be16_to_cpu(chunk->length));
		goto skip;
	}

	/* Rx descriptor is located at the end, 32-bit aligned */
	desc = (struct ar5523_rx_desc *)
		(data->skb->data + usblen - sizeof(struct ar5523_rx_desc));

	rxlen = be32_to_cpu(desc->len);
	if (rxlen > ar->rxbufsz) {
		ar5523_dbg(ar, "RX: Bad descriptor (len=%d)\n",
			   be32_to_cpu(desc->len));
		goto skip;
	}

	if (!rxlen) {
		ar5523_dbg(ar, "RX: rxlen is 0\n");
		goto skip;
	}

	/* rxlen < sizeof(desc) would underflow the skb_put() length. */
	if (rxlen < sizeof(struct ar5523_rx_desc)) {
		ar5523_dbg(ar, "RX: rxlen too small (%d)\n", rxlen);
		goto skip;
	}

	if (be32_to_cpu(desc->status) != 0) {
		ar5523_dbg(ar, "Bad RX status (0x%x len = %d). Skip\n",
			   be32_to_cpu(desc->status), be32_to_cpu(desc->len));
		goto skip;
	}

	/* Drop the chunk header and expose the frame payload. */
	skb_reserve(data->skb, sizeof(*chunk));
	skb_put(data->skb, rxlen - sizeof(struct ar5523_rx_desc));

	/* mac80211 wants the 802.11 header 32-bit aligned; shift the
	 * header forward and keep the skb length constant. */
	hdrlen = ieee80211_get_hdrlen_from_skb(data->skb);
	if (!IS_ALIGNED(hdrlen, 4)) {
		ar5523_dbg(ar, "eek, alignment workaround activated\n");
		pad = ALIGN(hdrlen, 4) - hdrlen;
		memmove(data->skb->data + pad, data->skb->data, hdrlen);
		skb_pull(data->skb, pad);
		skb_put(data->skb, pad);
	}

	rx_status = IEEE80211_SKB_RXCB(data->skb);
	memset(rx_status, 0, sizeof(*rx_status));
	/* desc->channel appears to carry the frequency directly —
	 * NOTE(review): confirm units against the firmware interface. */
	rx_status->freq = be32_to_cpu(desc->channel);
	rx_status->band = hw->conf.chandef.chan->band;
	rx_status->signal = -95 + be32_to_cpu(desc->rssi);

	ieee80211_rx_irqsafe(hw, data->skb);
	data->skb = NULL;

skip:
	if (data->skb) {
		dev_kfree_skb_irq(data->skb);
		data->skb = NULL;
	}

	ar5523_rx_data_put(ar, data);
	if (atomic_inc_return(&ar->rx_data_free_cnt) >=
	    AR5523_RX_DATA_REFILL_COUNT &&
	    test_bit(AR5523_HW_UP, &ar->flags))
		queue_work(ar->wq, &ar->rx_refill_work);
}
Beispiel #22
0
/*
 * Drain ar->tx_queue_pending: for each queued skb build the ar5523 chunk
 * and TX descriptor headers in place, move the entry to
 * tx_queue_submitted and submit a bulk-out URB.  On submission failure
 * the entry is unwound and the skb freed.  List manipulation is
 * protected by tx_data_list_lock; the "_locked" suffix presumably refers
 * to a caller-held lock (ar->mutex?) — NOTE(review): confirm the locking
 * contract against the callers.
 */
static void ar5523_tx_work_locked(struct ar5523 *ar)
{
	struct ar5523_tx_data *data;
	struct ar5523_tx_desc *desc;
	struct ar5523_chunk *chunk;
	struct ieee80211_tx_info *txi;
	struct urb *urb;
	struct sk_buff *skb;
	int error = 0, paylen;
	u32 txqid;
	unsigned long flags;

	/* tx_data lives inside the skb control block - make sure it fits */
	BUILD_BUG_ON(sizeof(struct ar5523_tx_data) >
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE);

	ar5523_dbg(ar, "%s\n", __func__);
	do {
		/* Pop the next pending entry (if any) under the list lock. */
		spin_lock_irqsave(&ar->tx_data_list_lock, flags);
		if (!list_empty(&ar->tx_queue_pending)) {
			data = (struct ar5523_tx_data *)
				ar->tx_queue_pending.next;
			list_del(&data->list);
		} else
			data = NULL;
		spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);

		if (!data)
			break;

		/* Recover the tx_info and skb that embed this entry. */
		txi = container_of((void *)data, struct ieee80211_tx_info,
				   driver_data);
		txqid = 0;

		skb = container_of((void *)txi, struct sk_buff, cb);
		paylen = skb->len;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			ar5523_err(ar, "Failed to allocate TX urb\n");
			ieee80211_free_txskb(ar->hw, skb);
			continue;
		}

		data->ar = ar;
		data->urb = urb;

		/* Prepend transfer headers in front of the 802.11 frame. */
		desc = (struct ar5523_tx_desc *)skb_push(skb, sizeof(*desc));
		chunk = (struct ar5523_chunk *)skb_push(skb, sizeof(*chunk));

		chunk->seqnum = 0;
		chunk->flags = UATH_CFLAGS_FINAL;
		chunk->length = cpu_to_be16(skb->len);

		desc->msglen = cpu_to_be32(skb->len);
		desc->msgid  = AR5523_DATA_ID;
		desc->buflen = cpu_to_be32(paylen);
		desc->type   = cpu_to_be32(WDCMSG_SEND);
		desc->flags  = cpu_to_be32(UATH_TX_NOTIFY);

		if (test_bit(AR5523_CONNECTED, &ar->flags))
			desc->connid = cpu_to_be32(AR5523_ID_BSS);
		else
			desc->connid = cpu_to_be32(AR5523_ID_BROADCAST);

		if (txi->flags & IEEE80211_TX_CTL_USE_MINRATE)
			txqid |= UATH_TXQID_MINRATE;

		desc->txqid = cpu_to_be32(txqid);

		urb->transfer_flags = URB_ZERO_PACKET;
		usb_fill_bulk_urb(urb, ar->dev, ar5523_data_tx_pipe(ar->dev),
				  skb->data, skb->len, ar5523_data_tx_cb, skb);

		/* Publish the entry before submitting so the completion
		 * callback can find it on tx_queue_submitted. */
		spin_lock_irqsave(&ar->tx_data_list_lock, flags);
		list_add_tail(&data->list, &ar->tx_queue_submitted);
		spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
		mod_timer(&ar->tx_wd_timer, jiffies + AR5523_TX_WD_TIMEOUT);
		atomic_inc(&ar->tx_nr_pending);

		ar5523_dbg(ar, "TX Frame (%d pending)\n",
			   atomic_read(&ar->tx_nr_pending));
		error = usb_submit_urb(urb, GFP_KERNEL);
		if (error) {
			/* Unwind: unlink, drop counters, free resources. */
			ar5523_err(ar, "error %d when submitting tx urb\n",
				   error);
			spin_lock_irqsave(&ar->tx_data_list_lock, flags);
			list_del(&data->list);
			spin_unlock_irqrestore(&ar->tx_data_list_lock, flags);
			atomic_dec(&ar->tx_nr_pending);
			ar5523_data_tx_pkt_put(ar);
			usb_free_urb(urb);
			ieee80211_free_txskb(ar->hw, skb);
		}
	} while (true);
}