/*
 * Poll the mailbox event field until any of the bits in the mask is set or a
 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
 */
static int wl1271_cmd_wait_for_event(struct wl1271 *wl, u32 mask)
{
    u32 events_vector, event;
    unsigned long timeout;

    timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);

    do {
        if (time_after(jiffies, timeout)) {
            ieee80211_queue_work(wl->hw, &wl->recovery_work);
            return -ETIMEDOUT;
        }

        msleep(1);

        /* read from both event fields */
        wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
                    sizeof(events_vector), false);
        event = events_vector & mask;
        wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
                    sizeof(events_vector), false);
        event |= events_vector & mask;
    } while (!event);

    return 0;
}
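A minimal sketch of how a caller might pair this poll helper with a command. The command id, event bit, and wrapper name below are illustrative assumptions, not taken from the examples in this listing.

static int wl1271_example_cmd_then_wait(struct wl1271 *wl, void *cmd_buf,
					size_t cmd_len)
{
	int ret;

	/* EXAMPLE_CMD_ID and EXAMPLE_EVENT_ID are hypothetical placeholders */
	ret = wl1271_cmd_send(wl, EXAMPLE_CMD_ID, cmd_buf, cmd_len, 0);
	if (ret < 0)
		return ret;

	/*
	 * On timeout this variant of the wait helper has already queued
	 * recovery_work, so the caller only propagates the error.
	 */
	return wl1271_cmd_wait_for_event(wl, EXAMPLE_EVENT_ID);
}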
Example #2
/*
 * Poll the mailbox event field until any of the bits in the mask is set or a
 * timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
 */
static int wl1271_cmd_wait_for_event_or_timeout(struct wl1271 *wl, u32 mask)
{
	u32 events_vector, event;
	unsigned long timeout;

	timeout = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);

	do {
		if (time_after(jiffies, timeout)) {
			wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
				     (int)mask);
			return -ETIMEDOUT;
		}

		msleep(1);

		/* read from both event fields */
		wl1271_read(wl, wl->mbox_ptr[0], &events_vector,
			    sizeof(events_vector), false);
		event = events_vector & mask;
		wl1271_read(wl, wl->mbox_ptr[1], &events_vector,
			    sizeof(events_vector), false);
		event |= events_vector & mask;
	} while (!event);

	return 0;
}
Example #3
static int wl1271_boot_fw_version(struct wl1271 *wl)
{
    struct wl1271_static_data *static_data;
    int ret;

    static_data = kmalloc(sizeof(*static_data), GFP_KERNEL);
    if (!static_data)
        return -ENOMEM;

    ret = wl1271_read(wl, wl->cmd_box_addr, static_data,
                      sizeof(*static_data), false);
    if (ret < 0)
        goto out;

    strncpy(wl->chip.fw_ver_str, static_data->fw_version,
            sizeof(wl->chip.fw_ver_str));

    /* make sure the string is NULL-terminated */
    wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';

    wl1271_parse_fw_ver(wl);

out:
    kfree(static_data);
    return ret;
}
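For context, one plausible shape for the wl1271_parse_fw_ver() call above: a sketch that assumes the copied string has a "Rev x.x.x.x.x" form and that chip.fw_ver is a small integer array kept alongside the raw string. Both are assumptions here, not facts from this listing.

static void wl1271_parse_fw_ver(struct wl1271 *wl)
{
	int ret;

	/* skip an assumed "Rev " prefix before the dotted numbers */
	ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
		     &wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
		     &wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
		     &wl->chip.fw_ver[4]);
	if (ret != 5) {
		wl1271_warning("fw version incorrect value");
		memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
	}
}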
/*
 * send command to firmware
 *
 * @wl: wl struct
 * @id: command id
 * @buf: buffer containing the command, must work with dma
 * @len: length of the buffer
 * @res_len: length of the response to read back (0 reads only the header)
 */
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
                    size_t res_len)
{
    struct wl1271_cmd_header *cmd;
    unsigned long timeout;
    u32 intr;
    int ret = 0;
    u16 status;
    u16 poll_count = 0;

    cmd = buf;
    cmd->id = cpu_to_le16(id);
    cmd->status = 0;

    WARN_ON(len % 4 != 0);

    wl1271_write(wl, wl->cmd_box_addr, buf, len, false);

    wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);

    timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);

    intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
    while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
        if (time_after(jiffies, timeout)) {
            wl1271_error("command complete timeout");
            ret = -ETIMEDOUT;
            goto out;
        }

        poll_count++;
        if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
            udelay(10);
        else
            msleep(1);

        intr = wl1271_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
    }

    /* read back the status code of the command */
    if (res_len == 0)
        res_len = sizeof(struct wl1271_cmd_header);
    wl1271_read(wl, wl->cmd_box_addr, cmd, res_len, false);

    status = le16_to_cpu(cmd->status);
    if (status != CMD_STATUS_SUCCESS) {
        wl1271_error("command execute failure %d", status);
        ieee80211_queue_work(wl->hw, &wl->recovery_work);
        ret = -EIO;
    }

    wl1271_write32(wl, ACX_REG_INTERRUPT_ACK,
                   WL1271_ACX_INTR_CMD_COMPLETE);

out:
    return ret;
}
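As a usage illustration for wl1271_cmd_send(), a hedged sketch of a wrapper command. CMD_EXAMPLE, the struct layout, and the wrapper name are hypothetical; only the pattern follows the constraints visible above (a kmalloc'd DMA-able buffer, payload after the shared header, and a 4-byte-aligned length).

struct wl1271_cmd_example {
	struct wl1271_cmd_header header;
	__le32 value;
} __packed;

static int wl1271_cmd_example(struct wl1271 *wl, u32 value)
{
	struct wl1271_cmd_example *cmd;
	int ret;

	/* kmalloc'd memory satisfies the "must work with dma" requirement */
	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	cmd->value = cpu_to_le32(value);

	/* res_len == 0 reads back only the command header/status */
	ret = wl1271_cmd_send(wl, CMD_EXAMPLE, cmd, sizeof(*cmd), 0);
	if (ret < 0)
		wl1271_error("example command failed: %d", ret);

	kfree(cmd);
	return ret;
}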
static void wl1271_boot_fw_version(struct wl1271 *wl)
{
	struct wl1271_static_data static_data;

	wl1271_read(wl, wl->cmd_box_addr, &static_data, sizeof(static_data),
		    false);

	strncpy(wl->chip.fw_ver, static_data.fw_version,
		sizeof(wl->chip.fw_ver));

	/* make sure the string is NULL-terminated */
	wl->chip.fw_ver[sizeof(wl->chip.fw_ver) - 1] = '\0';
}
static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
{
	struct ieee80211_rx_status rx_status;
	struct wl1271_rx_descriptor *desc;
	struct sk_buff *skb;
	u16 *fc;
	u8 *buf;
	u8 beacon = 0;

	/*
	 * In PLT mode we seem to get frames and mac80211 warns about them,
	 * so work around this by not retrieving them at all.
	 */
	if (unlikely(wl->state == WL1271_STATE_PLT))
		return;

	skb = __dev_alloc_skb(length, GFP_KERNEL);
	if (!skb) {
		wl1271_error("Couldn't allocate RX frame");
		return;
	}

	buf = skb_put(skb, length);
	wl1271_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);

	/* the data read starts with the descriptor */
	desc = (struct wl1271_rx_descriptor *) buf;

	/* now we pull the descriptor out of the buffer */
	skb_pull(skb, sizeof(*desc));

	fc = (u16 *)skb->data;
	if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
		beacon = 1;

	wl1271_rx_status(wl, desc, &rx_status, beacon);

	wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
		     beacon ? "beacon" : "");

	skb_trim(skb, skb->len - desc->pad_len);

	memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
	ieee80211_rx_ni(wl->hw, skb);
}
Example #7
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
	struct event_mailbox mbox;
	int ret;

	wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);

	if (mbox_num > 1)
		return -EINVAL;

	/* first we read the mbox descriptor */
	wl1271_read(wl, wl->mbox_ptr[mbox_num], &mbox,
		    sizeof(struct event_mailbox), false);

	/* process the descriptor */
	ret = wl1271_event_process(wl, &mbox);
	if (ret < 0)
		return ret;

	/* then we let the firmware know it can go on...*/
	wl1271_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);

	return 0;
}
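For context, a sketch of the dispatch side that would call wl1271_event_handle() once per mailbox. The interrupt-status bit names are assumed; the two mailbox indices (0 and 1) match the bounds check above.

static void wl1271_example_dispatch_events(struct wl1271 *wl, u32 intr)
{
	/* WL1271_ACX_INTR_EVENT_A/B are assumed interrupt-status bit names */
	if (intr & WL1271_ACX_INTR_EVENT_A)
		wl1271_event_handle(wl, 0);

	if (intr & WL1271_ACX_INTR_EVENT_B)
		wl1271_event_handle(wl, 1);
}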
Example #8
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	u32 buf_size;
	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		/*
		 * Choose the block we want to read
		 * For aggregated packets, only the first memory block should
		 * be retrieved. The FW takes care of the rest.
		 */
		mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter);
		wl->rx_mem_pool_addr.addr = (mem_block << 8) +
			le32_to_cpu(wl_mem_map->packet_memory_pool_start);
		wl->rx_mem_pool_addr.addr_extra =
			wl->rx_mem_pool_addr.addr + 4;
		wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
				sizeof(wl->rx_mem_pool_addr), false);

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl1271_rx_get_buf_size(status,
					drv_rx_counter);
			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will just
			 * be dropped.
			 */
			wl1271_rx_handle_data(wl,
					      wl->aggr_buf + pkt_offset,
					      pkt_length);
			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}
	wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
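The aggregation loops above lean on small descriptor accessors. Below is a hedged sketch of what they might look like, assuming a packed rx_pkt_descs array in the firmware status block and the mask/shift constant names shown here; treat the names as assumptions rather than the driver's actual definitions.

static u32 wl1271_rx_get_mem_block(struct wl1271_fw_common_status *status,
				   u32 drv_rx_counter)
{
	/* RX_MEM_BLOCK_MASK is an assumed constant name */
	return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
	       RX_MEM_BLOCK_MASK;
}

static u32 wl1271_rx_get_buf_size(struct wl1271_fw_common_status *status,
				  u32 drv_rx_counter)
{
	/* RX_BUF_SIZE_MASK / RX_BUF_SIZE_SHIFT_DIV are assumed constant names */
	return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
		RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}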
Example #9
void wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	u32 buf_size;
	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;
	u8 hlid;
	bool unaligned = false;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		if (wl->chip.id != CHIP_ID_1283_PG20) {
			/*
			 * Choose the block we want to read
			 * For aggregated packets, only the first memory block
			 * should be retrieved. The FW takes care of the rest.
			 */
			mem_block = wl12xx_rx_get_mem_block(status,
							    drv_rx_counter);

			wl->rx_mem_pool_addr.addr = (mem_block << 8) +
			   le32_to_cpu(wl_mem_map->packet_memory_pool_start);

			wl->rx_mem_pool_addr.addr_extra =
				wl->rx_mem_pool_addr.addr + 4;

			wl1271_write(wl, WL1271_SLV_REG_DATA,
				     &wl->rx_mem_pool_addr,
				     sizeof(wl->rx_mem_pool_addr), false);
		}

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl12xx_rx_get_buf_size(status,
					drv_rx_counter);

			unaligned = wl12xx_rx_get_unaligned(status,
					drv_rx_counter);

			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will just
			 * be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_length, unaligned,
						  &hlid) == 1) {
				if (hlid < WL12XX_MAX_LINKS)
					__set_bit(hlid, active_hlids);
				else
					WARN(1,
					     "hlid exceeded WL12XX_MAX_LINKS "
					     "(%d)\n", hlid);
			}

			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions
	 */
	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);

	wl12xx_rearm_rx_streaming(wl, active_hlids);
}
Example #10
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	u32 buf_size;
	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	bool had_data = false;
	bool unaligned = false;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl1271_rx_get_buf_size(status, rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		if (wl->chip.id != CHIP_ID_1283_PG20) {
			/*
			 * Choose the block we want to read
			 * For aggregated packets, only the first memory block
			 * should be retrieved. The FW takes care of the rest.
			 */
			mem_block = wl1271_rx_get_mem_block(status,
							    drv_rx_counter);

			wl->rx_mem_pool_addr.addr = (mem_block << 8) +
			   le32_to_cpu(wl_mem_map->packet_memory_pool_start);

			wl->rx_mem_pool_addr.addr_extra =
				wl->rx_mem_pool_addr.addr + 4;

			wl1271_write(wl, WL1271_SLV_REG_DATA,
				     &wl->rx_mem_pool_addr,
				     sizeof(wl->rx_mem_pool_addr), false);
		}

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl1271_rx_get_buf_size(status,
					drv_rx_counter);

			unaligned = wl1271_rx_get_unaligned(status,
					drv_rx_counter);

			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will just
			 * be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_length, unaligned) == 1)
				had_data = true;

			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions
	 */
	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);

	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
		u32 timeout = wl->conf.rx_streaming.duration;

		/* restart rx streaming */
		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
			ieee80211_queue_work(wl->hw,
					     &wl->rx_streaming_enable_work);

		mod_timer(&wl->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}
Example #11
static int tester(void *data)
{
	struct wl1271 *wl = data;
	struct sdio_func *func = wl_to_func(wl);
	struct device *pdev = &func->dev;
	int ret = 0;
	bool rx_started = 0;
	bool tx_started = 0;
	uint8_t *tx_buf, *rx_buf;
	int test_size = PAGE_SIZE;
	u32 addr = 0;
	struct wl1271_partition_set partition;

	/* We assume chip is powered up and firmware fetched */

	memcpy(&partition, &part_down, sizeof(partition));
	partition.mem.start = addr;
	wl1271_set_partition(wl, &partition);

	tx_buf = kmalloc(test_size, GFP_KERNEL);
	rx_buf = kmalloc(test_size, GFP_KERNEL);
	if (!tx_buf || !rx_buf) {
		dev_err(pdev,
			"Could not allocate memory. Test will not run.\n");
		ret = -ENOMEM;
		goto free;
	}

	memset(tx_buf, 0x5a, test_size);

	/* write something in data area so we can read it back */
	wl1271_write(wl, addr, tx_buf, test_size, false);

	while (!kthread_should_stop()) {
		if (rx && !rx_started) {
			dev_info(pdev, "starting rx test\n");
			rx_started = 1;
		} else if (!rx && rx_started) {
			dev_info(pdev, "stopping rx test\n");
			rx_started = 0;
		}

		if (tx && !tx_started) {
			dev_info(pdev, "starting tx test\n");
			tx_started = 1;
		} else if (!tx && tx_started) {
			dev_info(pdev, "stopping tx test\n");
			tx_started = 0;
		}

		if (rx_started)
			wl1271_read(wl, addr, rx_buf, test_size, false);

		if (tx_started)
			wl1271_write(wl, addr, tx_buf, test_size, false);

		if (!rx_started && !tx_started)
			msleep(100);
	}

free:
	kfree(tx_buf);
	kfree(rx_buf);
	return ret;
}
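A hedged sketch of how the tester() kthread above might be started and stopped. The storage for the task pointer, the wrapper names, and the thread name are illustrative assumptions; only the kthread_run()/kthread_stop() pairing with the kthread_should_stop() loop above is the point.

static struct task_struct *test_kthread;	/* assumed storage location */

static int wl1271_example_start_test(struct wl1271 *wl)
{
	test_kthread = kthread_run(tester, wl, "wl1271_sdio_test");
	if (IS_ERR(test_kthread))
		return PTR_ERR(test_kthread);

	return 0;
}

static void wl1271_example_stop_test(void)
{
	/* makes kthread_should_stop() return true and waits for tester() */
	if (!IS_ERR_OR_NULL(test_kthread))
		kthread_stop(test_kthread);
}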
Example #12
int wl12xx_rx(struct wl1271 *wl, struct wl12xx_fw_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	u32 buf_size;
	u32 fw_rx_counter  = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;
	u8 hlid;
	bool unaligned = false;
	int ret = 0;

	while (drv_rx_counter != fw_rx_counter) {
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl12xx_rx_get_buf_size(status, rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		if (wl->chip.id != CHIP_ID_1283_PG20) {
			/*
			 * Choose the block we want to read
			 * For aggregated packets, only the first memory block
			 * should be retrieved. The FW takes care of the rest.
			 */
			mem_block = wl12xx_rx_get_mem_block(status,
							    drv_rx_counter);

			wl->rx_mem_pool_addr->addr = (mem_block << 8) +
			   le32_to_cpu(wl_mem_map->packet_memory_pool_start);

			wl->rx_mem_pool_addr->addr_extra =
				wl->rx_mem_pool_addr->addr + 4;

			ret = wl1271_write(wl, WL1271_SLV_REG_DATA,
					   wl->rx_mem_pool_addr,
					   sizeof(*wl->rx_mem_pool_addr),
					   false);
			if (ret < 0)
				goto out;
		}

		/* Read all available packets at once */
		ret = wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
				  buf_size, true);
		if (ret < 0)
			goto out;

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl12xx_rx_get_buf_size(status,
					drv_rx_counter);

			unaligned = wl12xx_rx_get_unaligned(status,
					drv_rx_counter);

			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will just
			 * be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_length, unaligned,
						  &hlid) == 1) {
				if (hlid < WL12XX_MAX_LINKS)
					__set_bit(hlid, active_hlids);
				else
					WARN(1,
					     "hlid exceeded WL12XX_MAX_LINKS "
					     "(%d)\n", hlid);
			}

			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions
	 */
	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION) {
		ret = wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS,
				     wl->rx_counter);
		if (ret < 0)
			goto out;
	}

	wl12xx_rearm_rx_streaming(wl, active_hlids);

out:
	return ret;
}