void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) { struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map; u32 buf_size; u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; u32 mem_block; while (drv_rx_counter != fw_rx_counter) { mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter); buf_size = wl1271_rx_get_buf_size(status, drv_rx_counter); if (buf_size == 0) { wl1271_warning("received empty data"); break; } wl->rx_mem_pool_addr.addr = (mem_block << 8) + le32_to_cpu(wl_mem_map->packet_memory_pool_start); wl->rx_mem_pool_addr.addr_extra = wl->rx_mem_pool_addr.addr + 4; /* Choose the block we want to read */ wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr, sizeof(wl->rx_mem_pool_addr), false); wl1271_rx_handle_data(wl, buf_size); wl->rx_counter++; drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; } wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); }
/*
 * Drain all pending RX packets from the firmware using aggregated reads.
 *
 * Instead of one bus transaction per packet, descriptors are summed into
 * the largest run that fits in the driver's aggregation buffer
 * (WL1271_AGGR_BUFFER_SIZE), read out in a single wl1271_read(), and then
 * split back into individual packets for wl1271_rx_handle_data().
 *
 * Counter roles:
 *   fw_rx_counter  - firmware's packet count (masked), the drain target
 *   drv_rx_counter - driver's masked position, advanced per handled packet
 *   rx_counter     - scratch cursor used only to size each aggregate run
 *   wl->rx_counter - unmasked running total, published to the FW at the end
 *
 * @wl:     driver state
 * @status: firmware status block holding RX descriptors and counters
 */
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_common_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	u32 buf_size;
	u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;

	while (drv_rx_counter != fw_rx_counter) {
		/*
		 * Accumulate consecutive descriptors until the aggregation
		 * buffer would overflow or we reach the firmware counter.
		 */
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl1271_rx_get_buf_size(status,
							    rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		/* A zero-sized run means a stale/corrupt descriptor;
		 * bail out instead of looping forever. */
		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		/*
		 * Choose the block we want to read
		 * For aggregated packets, only the first memory block should
		 * be retrieved. The FW takes care of the rest.
		 */
		mem_block = wl1271_rx_get_mem_block(status, drv_rx_counter);
		wl->rx_mem_pool_addr.addr = (mem_block << 8) +
			le32_to_cpu(wl_mem_map->packet_memory_pool_start);
		wl->rx_mem_pool_addr.addr_extra =
			wl->rx_mem_pool_addr.addr + 4;
		wl1271_write(wl, WL1271_SLV_REG_DATA, &wl->rx_mem_pool_addr,
			     sizeof(wl->rx_mem_pool_addr), false);

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
			    buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl1271_rx_get_buf_size(status,
							    drv_rx_counter);
			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will
			 * just be dropped.
			 */
			wl1271_rx_handle_data(wl,
					      wl->aggr_buf + pkt_offset,
					      pkt_length);
			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/* Publish the driver's (unmasked) counter back to the firmware. */
	wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
}
/*
 * Drain all pending RX packets from the firmware using aggregated reads.
 *
 * Descriptors are summed into the largest run fitting the aggregation
 * buffer (WL1271_AGGR_BUFFER_SIZE), fetched in a single wl1271_read(),
 * and split back into individual packets.  On chips other than the
 * 128x PG20 the memory block must be selected explicitly before the
 * read; on PG20 the FW handles block selection itself.
 *
 * Counter roles:
 *   fw_rx_counter  - firmware's packet count (masked), the drain target
 *   drv_rx_counter - driver's masked position, advanced per handled packet
 *   rx_counter     - scratch cursor used only to size each aggregate run
 *   wl->rx_counter - unmasked running total, written back to the FW only
 *                    on hardware with the END_OF_TRANSACTION quirk
 *
 * If any frame was actually delivered (had_data) and rx streaming is
 * configured, the rx-streaming timer is (re)armed at the end.
 *
 * @wl:     driver state
 * @status: firmware status block holding RX descriptors and counters
 */
void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
{
	struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
	u32 buf_size;
	u32 fw_rx_counter = status->fw_rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
	u32 rx_counter;
	u32 mem_block;
	u32 pkt_length;
	u32 pkt_offset;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);
	bool had_data = false;
	bool unaligned = false;

	while (drv_rx_counter != fw_rx_counter) {
		/*
		 * Accumulate consecutive descriptors until the aggregation
		 * buffer would overflow or we reach the firmware counter.
		 */
		buf_size = 0;
		rx_counter = drv_rx_counter;
		while (rx_counter != fw_rx_counter) {
			pkt_length = wl1271_rx_get_buf_size(status,
							    rx_counter);
			if (buf_size + pkt_length > WL1271_AGGR_BUFFER_SIZE)
				break;
			buf_size += pkt_length;
			rx_counter++;
			rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
		}

		/* A zero-sized run means a stale/corrupt descriptor;
		 * bail out instead of looping forever. */
		if (buf_size == 0) {
			wl1271_warning("received empty data");
			break;
		}

		if (wl->chip.id != CHIP_ID_1283_PG20) {
			/*
			 * Choose the block we want to read
			 * For aggregated packets, only the first memory block
			 * should be retrieved. The FW takes care of the rest.
			 */
			mem_block = wl1271_rx_get_mem_block(status,
							    drv_rx_counter);
			wl->rx_mem_pool_addr.addr = (mem_block << 8) +
				le32_to_cpu(wl_mem_map->packet_memory_pool_start);
			wl->rx_mem_pool_addr.addr_extra =
				wl->rx_mem_pool_addr.addr + 4;
			wl1271_write(wl, WL1271_SLV_REG_DATA,
				     &wl->rx_mem_pool_addr,
				     sizeof(wl->rx_mem_pool_addr), false);
		}

		/* Read all available packets at once */
		wl1271_read(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
			    buf_size, true);

		/* Split data into separate packets */
		pkt_offset = 0;
		while (pkt_offset < buf_size) {
			pkt_length = wl1271_rx_get_buf_size(status,
							    drv_rx_counter);
			unaligned = wl1271_rx_get_unaligned(status,
							    drv_rx_counter);
			/*
			 * the handle data call can only fail in memory-outage
			 * conditions, in that case the received frame will
			 * just be dropped.
			 */
			if (wl1271_rx_handle_data(wl,
						  wl->aggr_buf + pkt_offset,
						  pkt_length,
						  unaligned) == 1)
				had_data = true;
			wl->rx_counter++;
			drv_rx_counter++;
			drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
			pkt_offset += pkt_length;
		}
	}

	/*
	 * Write the driver's packet counter to the FW. This is only required
	 * for older hardware revisions
	 */
	if (wl->quirks & WL12XX_QUIRK_END_OF_TRANSACTION)
		wl1271_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);

	/*
	 * (Re)arm the rx streaming timer on STA interfaces that received
	 * data, either unconditionally (conf.rx_streaming.always) or while
	 * soft-gemini coexistence is active.
	 */
	if (!is_ap && wl->conf.rx_streaming.interval && had_data &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))) {
		u32 timeout = wl->conf.rx_streaming.duration;

		/* restart rx streaming */
		if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
			ieee80211_queue_work(wl->hw,
					     &wl->rx_streaming_enable_work);

		mod_timer(&wl->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}