/* Set the interworking mode of the device (auto, ad-hoc, managed or
 * monitor).  The netdev ARP hardware type is switched accordingly and
 * the new mode is stored under the ieee80211 lock.
 *
 * Returns 0 on success, -EINVAL for an unsupported mode, or the result
 * of reset_mode() when the interface is running.
 */
int zd_mac_set_mode(struct zd_mac *mac, u32 mode)
{
	struct ieee80211_device *ieee;

	/* Pick the ARP hardware type matching the requested mode. */
	if (mode == IW_MODE_MONITOR) {
		mac->netdev->type = ARPHRD_IEEE80211_RADIOTAP;
	} else if (mode == IW_MODE_AUTO || mode == IW_MODE_ADHOC ||
		   mode == IW_MODE_INFRA) {
		mac->netdev->type = ARPHRD_ETHER;
	} else {
		dev_dbg_f(zd_mac_dev(mac), "wrong mode %u\n", mode);
		return -EINVAL;
	}

	ieee = zd_mac_to_ieee80211(mac);
	ZD_ASSERT(!irqs_disabled());
	spin_lock_irq(&ieee->lock);
	ieee->iw_mode = mode;
	spin_unlock_irq(&ieee->lock);

	/* Re-program the RX filter/sniffer registers if the interface
	 * is currently up; otherwise they are set on open. */
	return netif_running(mac->netdev) ? reset_mode(mac) : 0;
}
/* Transmit every fragment of an ieee80211 tx buffer to the device.
 *
 * For each fragment a hardware control set is prepended via
 * fill_ctrlset() and the frame is handed to the USB layer.  On any
 * error, tx_dropped is incremented and the error is returned; note
 * the txb is NOT freed on the error paths -- presumably the ieee80211
 * layer retains ownership when this returns non-zero (NOTE(review):
 * confirm against the softmac caller's contract).
 *
 * Returns 0 on success or a negative error code from fill_ctrlset()
 * or zd_usb_tx().
 */
static int zd_mac_tx(struct zd_mac *mac, struct ieee80211_txb *txb, int pri)
{
	int i, r;
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);

	for (i = 0; i < txb->nr_frags; i++) {
		struct sk_buff *skb = txb->fragments[i];

		r = fill_ctrlset(mac, txb, i);
		if (r) {
			ieee->stats.tx_dropped++;
			return r;
		}
		r = zd_usb_tx(&mac->chip.usb, skb->data, skb->len);
		if (r) {
			ieee->stats.tx_dropped++;
			return r;
		}
	}

	/* FIXME: shouldn't this be handled by the upper layers? */
	mac->netdev->trans_start = jiffies;

	/* All fragments submitted; the txb is consumed here. */
	ieee80211_txb_free(txb);
	return 0;
}
/* Select a new regulatory domain.  A @regdomain of 0 selects the
 * device default.  The requested channel is re-validated against the
 * new domain and reset if it is no longer allowed.
 *
 * Returns 0 on success, -EINVAL for an unsupported domain, or an
 * error from zd_geo_init()/reset_channel().
 */
int zd_mac_set_regdomain(struct zd_mac *mac, u8 regdomain)
{
	u8 channel;
	int r;

	ZD_ASSERT(!irqs_disabled());
	spin_lock_irq(&mac->lock);
	if (!regdomain)
		regdomain = mac->default_regdomain;
	if (!zd_regdomain_supported(regdomain)) {
		spin_unlock_irq(&mac->lock);
		return -EINVAL;
	}
	mac->regdomain = regdomain;
	channel = mac->requested_channel;
	spin_unlock_irq(&mac->lock);

	r = zd_geo_init(zd_mac_to_ieee80211(mac), regdomain);
	if (r)
		return r;

	/* Fall back to a valid channel if the currently requested one
	 * is not permitted in the new domain. */
	if (!zd_regdomain_supports_channel(regdomain, channel))
		return reset_channel(mac);

	return 0;
}
/* One-time hardware bring-up: initialise the chip, program the MAC
 * address from the EEPROM, validate and store the default regulatory
 * domain, set the initial channel, and select software
 * encryption/decryption.
 *
 * Interrupts are enabled on the chip for the duration of the setup
 * and disabled again on all exit paths (goto-cleanup pattern).
 *
 * Returns 0 on success or a negative error code.
 */
int zd_mac_init_hw(struct zd_mac *mac, u8 device_type)
{
	int r;
	struct zd_chip *chip = &mac->chip;
	u8 addr[ETH_ALEN];
	u8 default_regdomain;

	r = zd_chip_enable_int(chip);
	if (r)
		goto out;
	r = zd_chip_init_hw(chip, device_type);
	if (r)
		goto disable_int;

	/* Program the EEPROM MAC address into the chip and mirror it
	 * into the netdev under the mac lock. */
	zd_get_e2p_mac_addr(chip, addr);
	r = zd_write_mac_addr(chip, addr);
	if (r)
		goto disable_int;
	ZD_ASSERT(!irqs_disabled());
	spin_lock_irq(&mac->lock);
	memcpy(mac->netdev->dev_addr, addr, ETH_ALEN);
	spin_unlock_irq(&mac->lock);

	r = zd_read_regdomain(chip, &default_regdomain);
	if (r)
		goto disable_int;
	if (!zd_regdomain_supported(default_regdomain)) {
		dev_dbg_f(zd_mac_dev(mac),
			  "Regulatory Domain %#04x is not supported.\n",
			  default_regdomain);
		r = -EINVAL;
		goto disable_int;
	}
	spin_lock_irq(&mac->lock);
	mac->regdomain = mac->default_regdomain = default_regdomain;
	spin_unlock_irq(&mac->lock);
	r = reset_channel(mac);
	if (r)
		goto disable_int;

	/* We must inform the device that we are doing encryption/decryption in
	 * software at the moment. */
	r = zd_set_encryption_type(chip, ENC_SNIFFER);
	if (r)
		goto disable_int;

	r = zd_geo_init(zd_mac_to_ieee80211(mac), mac->regdomain);
	if (r)
		goto disable_int;

	r = 0;
disable_int:
	zd_chip_disable_int(chip);
out:
	return r;
}
/* Account a "retry limit exceeded" device interrupt in both the
 * generic and the ieee80211-specific error counters.
 */
static inline void handle_retry_failed_int(struct urb *urb)
{
	struct zd_usb *usb = urb->context;
	struct ieee80211_device *ieee =
		zd_mac_to_ieee80211(zd_usb_to_mac(usb));

	ieee->stats.tx_errors++;
	ieee->ieee_stats.tx_retry_limit_exceeded++;
	dev_dbg_f(urb_dev(urb), "retry failed interrupt\n");
}
/* Read the current interworking mode under the ieee80211 lock.
 * Always succeeds and returns 0.
 */
int zd_mac_get_mode(struct zd_mac *mac, u32 *mode)
{
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
	unsigned long flags;

	spin_lock_irqsave(&ieee->lock, flags);
	*mode = ieee->iw_mode;
	spin_unlock_irqrestore(&ieee->lock, flags);
	return 0;
}
/* Process one received frame from the RX tasklet: validate the
 * minimum length, gather RX statistics, strip the PLCP header and the
 * FCS/rx_status trailer, update the signal-quality averages, apply
 * the software RX filter, and hand the frame to the ieee80211 layer
 * (with a radiotap header pushed in monitor mode).
 *
 * The skb is freed here on every path except a successful
 * ieee80211_rx(), which takes ownership of it.
 */
static void zd_mac_rx(struct zd_mac *mac, struct sk_buff *skb)
{
	int r;
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
	struct ieee80211_rx_stats stats;
	const struct rx_status *status;

	/* Frame must at least hold a PLCP header, one address, the FCS
	 * and the rx_status block the hardware appends at the end. */
	if (skb->len < ZD_PLCP_HEADER_SIZE + IEEE80211_1ADDR_LEN +
		       IEEE80211_FCS_LEN + sizeof(struct rx_status)) {
		ieee->stats.rx_errors++;
		ieee->stats.rx_length_errors++;
		goto free_skb;
	}

	r = fill_rx_stats(&stats, &status, mac, skb->data, skb->len);
	if (r) {
		/* Only packets with rx errors are included here.
		 * The error stats have already been set in fill_rx_stats. */
		goto free_skb;
	}

	/* Strip the PLCP header in front of the 802.11 frame and the
	 * FCS + rx_status trailer behind it.  Note that @status keeps
	 * pointing into the (still allocated) trimmed tail. */
	__skb_pull(skb, ZD_PLCP_HEADER_SIZE);
	__skb_trim(skb, skb->len -
			(IEEE80211_FCS_LEN + sizeof(struct rx_status)));

	update_qual_rssi(mac, skb->data, skb->len, stats.signal,
			 status->signal_strength);

	r = filter_rx(ieee, skb->data, skb->len, &stats);
	if (r <= 0) {
		if (r < 0) {
			ieee->stats.rx_errors++;
			dev_dbg_f(zd_mac_dev(mac), "Error in packet.\n");
		}
		/* r == 0: frame filtered out, silently dropped. */
		goto free_skb;
	}

	/* In monitor mode prepend a radiotap header; the headroom was
	 * reserved when the skb was allocated. */
	if (ieee->iw_mode == IW_MODE_MONITOR)
		fill_rt_header(skb_push(skb, sizeof(struct zd_rt_hdr)), mac,
			       &stats, status);

	r = ieee80211_rx(ieee, skb, &stats);
	if (r)
		/* ieee80211_rx() consumed the skb. */
		return;
free_skb:
	/* We are always in a soft irq. */
	dev_kfree_skb(skb);
}
/* Program the RX filter and sniffer registers to match the current
 * interworking mode.  Monitor mode accepts every frame and enables
 * the sniffer; all other modes use the station RX filter.
 */
static int reset_mode(struct zd_mac *mac)
{
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
	int monitor = (ieee->iw_mode == IW_MODE_MONITOR);
	struct zd_ioreq32 ioreqs[] = {
		{ CR_RX_FILTER,  monitor ? 0xffffffff : STA_RX_FILTER },
		{ CR_SNIFFER_ON, monitor ? 0x1 : 0U },
	};

	return zd_iowrite32a(&mac->chip, ioreqs, ARRAY_SIZE(ioreqs));
}
int zd_mac_rx_irq(struct zd_mac *mac, const u8 *buffer, unsigned int length) { struct sk_buff *skb; skb = dev_alloc_skb(sizeof(struct zd_rt_hdr) + length); if (!skb) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); dev_warn(zd_mac_dev(mac), "Could not allocate skb.\n"); ieee->stats.rx_dropped++; return -ENOMEM; } skb_reserve(skb, sizeof(struct zd_rt_hdr)); memcpy(__skb_put(skb, length), buffer, length); skb_queue_tail(&mac->rx_queue, skb); tasklet_schedule(&mac->rx_tasklet); return 0; }
/* Demultiplex one URB's worth of RX data.  A single USB transaction
 * may carry up to three merged frames, announced by a length-info
 * structure at the very end of the buffer; otherwise the buffer is a
 * single frame.  Each extracted frame is handed to zd_mac_rx_irq().
 */
static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
			     unsigned int length)
{
	int i;
	struct zd_mac *mac = zd_usb_to_mac(usb);
	const struct rx_length_info *length_info;

	if (length < sizeof(struct rx_length_info)) {
		/* It's not a complete packet anyhow. */
		struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
		ieee->stats.rx_errors++;
		ieee->stats.rx_length_errors++;
		return;
	}
	length_info = (struct rx_length_info *)
		(buffer + length - sizeof(struct rx_length_info));

	/* It might be that three frames are merged into a single URB
	 * transaction. We have to check for the length info tag.
	 *
	 * While testing we discovered that length_info might be unaligned,
	 * because if USB transactions are merged, the last packet will not
	 * be padded. Unaligned access might also happen if the length_info
	 * structure is not present.
	 */
	if (get_unaligned(&length_info->tag) ==
	    cpu_to_le16(RX_LENGTH_INFO_TAG)) {
		unsigned int l, k, n;
		/* Walk at most three sub-frames (i = 0..2); each one is
		 * padded to a 4-byte boundary, hence the (n+3) & ~3
		 * rounding of the next offset. */
		for (i = 0, l = 0;; i++) {
			k = le16_to_cpu(get_unaligned(
						&length_info->length[i]));
			if (k == 0)
				/* Zero length terminates the list. */
				return;
			n = l+k;
			/* Bail out if a declared length would run past
			 * the end of the URB buffer. */
			if (n > length)
				return;
			zd_mac_rx_irq(mac, buffer+l, k);
			if (i >= 2)
				return;
			l = (n+3) & ~3;
		}
	} else {
		/* No length-info tag: the buffer is a single frame. */
		zd_mac_rx_irq(mac, buffer, length);
	}
}
static int fill_rx_stats(struct ieee80211_rx_stats *stats, const struct rx_status **pstatus, struct zd_mac *mac, const u8 *buffer, unsigned int length) { const struct rx_status *status; *pstatus = status = zd_tail(buffer, length, sizeof(struct rx_status)); if (status->frame_status & ZD_RX_ERROR) { struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac); ieee->stats.rx_errors++; if (status->frame_status & ZD_RX_TIMEOUT_ERROR) ieee->stats.rx_missed_errors++; else if (status->frame_status & ZD_RX_FIFO_OVERRUN_ERROR) ieee->stats.rx_fifo_errors++; else if (status->frame_status & ZD_RX_DECRYPTION_ERROR) ieee->ieee_stats.rx_discards_undecryptable++; else if (status->frame_status & ZD_RX_CRC32_ERROR) { ieee->stats.rx_crc_errors++; ieee->ieee_stats.rx_fcs_errors++; } else if (status->frame_status & ZD_RX_CRC16_ERROR) ieee->stats.rx_crc_errors++; return -EINVAL; } memset(stats, 0, sizeof(struct ieee80211_rx_stats)); stats->len = length - (ZD_PLCP_HEADER_SIZE + IEEE80211_FCS_LEN + + sizeof(struct rx_status)); /* FIXME: 802.11a */ stats->freq = IEEE80211_24GHZ_BAND; stats->received_channel = _zd_chip_get_channel(&mac->chip); stats->rssi = zd_rx_strength_percent(status->signal_strength); stats->signal = zd_rx_qual_percent(buffer, length - sizeof(struct rx_status), status); stats->mask = IEEE80211_STATMASK_RSSI | IEEE80211_STATMASK_SIGNAL; stats->rate = zd_rx_rate(buffer, status); if (stats->rate) stats->mask |= IEEE80211_STATMASK_RATE; return 0; }
/* Request a channel change.  Refused in managed mode (the AP dictates
 * the channel) and for channels outside the current regulatory
 * domain.  The channel is only programmed into the chip while the
 * interface is running; otherwise it is merely recorded.
 *
 * Returns 0 on success, -EPERM in managed mode, -EINVAL for an
 * unsupported channel, or an error from zd_chip_set_channel().
 */
int zd_mac_request_channel(struct zd_mac *mac, u8 channel)
{
	struct ieee80211_device *ieee = zd_mac_to_ieee80211(mac);
	unsigned long flags;
	int supported;

	if (ieee->iw_mode == IW_MODE_INFRA)
		return -EPERM;

	spin_lock_irqsave(&mac->lock, flags);
	supported = zd_regdomain_supports_channel(mac->regdomain, channel);
	if (supported)
		mac->requested_channel = channel;
	spin_unlock_irqrestore(&mac->lock, flags);

	if (!supported)
		return -EINVAL;
	if (!netif_running(mac->netdev))
		return 0;
	return zd_chip_set_channel(&mac->chip, channel);
}
/* Record signal-quality and RSSI samples into the averaging ring
 * buffers, but only for frames whose transmitter address matches the
 * current BSSID.
 */
static void update_qual_rssi(struct zd_mac *mac,
			     const u8 *buffer, unsigned int length,
			     u8 qual_percent, u8 rssi_percent)
{
	const struct ieee80211_hdr_3addr *hdr =
		(const struct ieee80211_hdr_3addr *)buffer;
	unsigned long flags;
	int slot;

	/* Need at least addr1 and addr2 to identify the sender. */
	if (length < offsetof(struct ieee80211_hdr_3addr, addr3))
		return;
	/* Only frames from our BSS contribute to the statistics. */
	if (memcmp(hdr->addr2, zd_mac_to_ieee80211(mac)->bssid,
		   ETH_ALEN) != 0)
		return;

	spin_lock_irqsave(&mac->lock, flags);
	slot = mac->stats_count % ZD_MAC_STATS_BUFFER_SIZE;
	mac->qual_buffer[slot] = qual_percent;
	mac->rssi_buffer[slot] = rssi_percent;
	mac->stats_count++;
	spin_unlock_irqrestore(&mac->lock, flags);
}