static u16 index_to_pioqueue_base(struct b43_wldev *dev, unsigned int index) { static const u16 bases[] = { B43_MMIO_PIO_BASE0, B43_MMIO_PIO_BASE1, B43_MMIO_PIO_BASE2, B43_MMIO_PIO_BASE3, B43_MMIO_PIO_BASE4, B43_MMIO_PIO_BASE5, B43_MMIO_PIO_BASE6, B43_MMIO_PIO_BASE7, }; static const u16 bases_rev11[] = { B43_MMIO_PIO11_BASE0, B43_MMIO_PIO11_BASE1, B43_MMIO_PIO11_BASE2, B43_MMIO_PIO11_BASE3, B43_MMIO_PIO11_BASE4, B43_MMIO_PIO11_BASE5, }; if (dev->dev->id.revision >= 11) { B43_WARN_ON(index >= ARRAY_SIZE(bases_rev11)); return bases_rev11[index]; } B43_WARN_ON(index >= ARRAY_SIZE(bases)); return bases[index]; }
/* Write a single value into an LCN-PHY internal table.
 * @offset encodes both the table address (low 16 bits are kept) and the
 * element width via B43_LCNTAB_TYPEMASK; @value must fit the selected
 * width (warned otherwise). Unknown width types only trigger a warning. */
void b43_lcntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
	u32 type;

	type = offset & B43_LCNTAB_TYPEMASK;
	offset &= 0xFFFF;

	switch (type) {
	case B43_LCNTAB_8BIT:
		B43_WARN_ON(value & ~0xFF);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
		break;
	case B43_LCNTAB_16BIT:
		B43_WARN_ON(value & ~0xFFFF);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
		break;
	case B43_LCNTAB_32BIT:
		/* 32-bit entries: high half first, then low half. */
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, value >> 16);
		b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value & 0xFFFF);
		break;
	default:
		B43_WARN_ON(1);
	}

	return;
}
/* Decode a TX status cookie back into its PIO queue and packet slot.
 * The top nibble selects the queue, the low 12 bits index the packet.
 * Returns the queue and stores the packet in *pack, or returns NULL
 * (with a warning) on a malformed cookie. */
static struct b43_pio_txqueue *parse_cookie(struct b43_wldev *dev,
					    u16 cookie,
					    struct b43_pio_txpacket **pack)
{
	struct b43_pio *pio = &dev->pio;
	struct b43_pio_txqueue *queue;
	unsigned int slot;

	switch (cookie & 0xF000) {
	case 0x1000:
		queue = pio->tx_queue_AC_BK;
		break;
	case 0x2000:
		queue = pio->tx_queue_AC_BE;
		break;
	case 0x3000:
		queue = pio->tx_queue_AC_VI;
		break;
	case 0x4000:
		queue = pio->tx_queue_AC_VO;
		break;
	case 0x5000:
		queue = pio->tx_queue_mcast;
		break;
	default:
		queue = NULL;
		break;
	}
	if (B43_WARN_ON(!queue))
		return NULL;

	slot = cookie & 0x0FFF;
	if (B43_WARN_ON(slot >= ARRAY_SIZE(queue->packets)))
		return NULL;

	*pack = &queue->packets[slot];

	return queue;
}
static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx) { static const u16 map64[] = { B43_MMIO_DMA64_BASE0, B43_MMIO_DMA64_BASE1, B43_MMIO_DMA64_BASE2, B43_MMIO_DMA64_BASE3, B43_MMIO_DMA64_BASE4, B43_MMIO_DMA64_BASE5, }; static const u16 map32[] = { B43_MMIO_DMA32_BASE0, B43_MMIO_DMA32_BASE1, B43_MMIO_DMA32_BASE2, B43_MMIO_DMA32_BASE3, B43_MMIO_DMA32_BASE4, B43_MMIO_DMA32_BASE5, }; if (type == B43_DMA_64BIT) { B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map64))); return map64[controller_idx]; } B43_WARN_ON(!(controller_idx >= 0 && controller_idx < ARRAY_SIZE(map32))); return map32[controller_idx]; }
/* Read a single value from an LCN-PHY internal table.
 * @offset encodes the table address plus the element width via
 * B43_LCNTAB_TYPEMASK. Returns the zero-extended entry, or 0 (with a
 * warning) for an unknown width type. */
u32 b43_lcntab_read(struct b43_wldev *dev, u32 offset)
{
	u32 type, value;

	type = offset & B43_LCNTAB_TYPEMASK;
	offset &= ~B43_LCNTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	switch (type) {
	case B43_LCNTAB_8BIT:
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO) & 0xFF;
		break;
	case B43_LCNTAB_16BIT:
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
		break;
	case B43_LCNTAB_32BIT:
		/* 32-bit entries: low half first, then high half merged in. */
		b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
		value |= (b43_phy_read(dev, B43_PHY_LCN_TABLE_DATAHI) << 16);
		break;
	default:
		B43_WARN_ON(1);
		value = 0;
	}

	return value;
}
/* Measure LO feedthrough for the given LNA/PGA gains and TRSW-RX setting.
 * On G-mode PHYs this programs the RF override register in three timed
 * steps (widening the bandwidth each time); otherwise the PGA control
 * register is stepped directly. Returns the raw B43_PHY_LO_LEAKAGE value. */
static u16 lo_measure_feedthrough(struct b43_wldev *dev, u16 lna, u16 pga, u16 trsw_rx)
{
	struct b43_phy *phy = &dev->phy;
	u16 rfover;
	u16 feedthrough;

	if (phy->gmode) {
		/* Shift the gains into their RFOVERVAL field positions. */
		lna <<= B43_PHY_RFOVERVAL_LNA_SHIFT;
		pga <<= B43_PHY_RFOVERVAL_PGA_SHIFT;
		B43_WARN_ON(lna & ~B43_PHY_RFOVERVAL_LNA);
		B43_WARN_ON(pga & ~B43_PHY_RFOVERVAL_PGA);
		/*FIXME This assertion fails
		   B43_WARN_ON(trsw_rx & ~(B43_PHY_RFOVERVAL_TRSWRX |
		                           B43_PHY_RFOVERVAL_BW));
		 */
		trsw_rx &= (B43_PHY_RFOVERVAL_TRSWRX | B43_PHY_RFOVERVAL_BW);

		/* Construct the RF Override Value */
		rfover = B43_PHY_RFOVERVAL_UNK;
		rfover |= pga;
		rfover |= lna;
		rfover |= trsw_rx;
		if ((dev->dev->bus->sprom.boardflags_lo & B43_BFL_EXTLNA)
		    && phy->rev > 6)
			rfover |= B43_PHY_RFOVERVAL_EXTLNA;

		b43_phy_write(dev, B43_PHY_PGACTL, 0xE300);
		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover);
		udelay(10);
		rfover |= B43_PHY_RFOVERVAL_BW_LBW;
		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover);
		udelay(10);
		rfover |= B43_PHY_RFOVERVAL_BW_LPF;
		b43_phy_write(dev, B43_PHY_RFOVERVAL, rfover);
		udelay(10);
		b43_phy_write(dev, B43_PHY_PGACTL, 0xF300);
	} else {
		/* Non-gmode path: step the PGA control bits directly. */
		pga |= B43_PHY_PGACTL_UNKNOWN;
		b43_phy_write(dev, B43_PHY_PGACTL, pga);
		udelay(10);
		pga |= B43_PHY_PGACTL_LOWBANDW;
		b43_phy_write(dev, B43_PHY_PGACTL, pga);
		udelay(10);
		pga |= B43_PHY_PGACTL_LPF;
		b43_phy_write(dev, B43_PHY_PGACTL, pga);
	}
	udelay(21);
	feedthrough = b43_phy_read(dev, B43_PHY_LO_LEAKAGE);

	/* This is a good place to check if we need to relax a bit,
	 * as this is the main function called regularly
	 * in the LO calibration.
	 */
	cond_resched();

	return feedthrough;
}
/* Drop the PHY lock: clear the debug-only phy_locked flag and restore
 * power-saving control bits (unless operating in AP mode).
 * Warns on cores older than rev 3 — presumably the lock protocol does
 * not apply there; confirm against the matching lock function. */
void b43_phy_unlock(struct b43_wldev *dev)
{
#if B43_DEBUG
	B43_WARN_ON(!dev->phy.phy_locked);
	dev->phy.phy_locked = false;
#endif

	B43_WARN_ON(dev->dev->core_rev < 3);

	if (!b43_is_mode(dev->wl, NL80211_IFTYPE_AP))
		b43_power_saving_ctl_bits(dev, 0);
}
/* Push one frame onto a PIO TX queue.
 * Builds the device TX header in wl->pio_scratchspace, streams header
 * plus frame data into the hardware FIFO, and consumes one packet slot.
 * Returns 0 on success or a negative error from header generation. */
static int pio_tx_frame(struct b43_pio_txqueue *q, struct sk_buff *skb)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	struct b43_pio_txpacket *pack;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next, struct b43_pio_txpacket, list);

	/* The cookie lets the TX status report find this slot again. */
	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(dev);
	/* The scratch buffer must be able to hold the device TX header. */
	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb, info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	/* Core rev >= 8 has a 4-byte wide FIFO port, older cores 2 bytes. */
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

	/* Remove it from the list of available packet slots.
	 * It will be put back when we receive the status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}
/* Handle a TX status report for a PIO-transmitted frame: report the
 * status to mac80211, return the packet slot to the queue's free list
 * and wake the mac80211 queue if it had been stopped. */
void b43_pio_handle_txstatus(struct b43_wldev *dev, const struct b43_txstatus *status)
{
	struct b43_pio_txqueue *q;
	struct b43_pio_txpacket *pack = NULL;
	unsigned int total_len;
	struct ieee80211_tx_info *info;

	q = parse_cookie(dev, status->cookie, &pack);
	if (unlikely(!q))
		return;
	B43_WARN_ON(!pack);

	info = IEEE80211_SKB_CB(pack->skb);

	b43_fill_txstatus_report(dev, info, status);

	/* Undo the accounting done when the frame was queued
	 * (see pio_tx_frame: header + data, rounded up to 4 bytes). */
	total_len = pack->skb->len + b43_txhdr_size(dev);
	total_len = roundup(total_len, 4);
	q->buffer_used -= total_len;
	q->free_packet_slots += 1;

	/* ieee80211_tx_status() consumes the skb, so drop our reference. */
	ieee80211_tx_status(dev->wl->hw, pack->skb);
	pack->skb = NULL;
	list_add(&pack->list, &q->packets_list);

	if (q->stopped) {
		ieee80211_wake_queue(dev->wl->hw, q->queue_prio);
		q->stopped = 0;
	}
}
/* Static mapping of mac80211's queues (priorities) to b43 PIO queues.
 * Without QoS everything goes through the best-effort queue. An unknown
 * priority is warned about and treated as highest priority (VO). */
static struct b43_pio_txqueue *select_queue_by_priority(struct b43_wldev *dev,
							u8 queue_prio)
{
	if (!dev->qos_enabled)
		return dev->pio.tx_queue_AC_BE;

	/* 0 = highest priority */
	switch (queue_prio) {
	case 0:
		return dev->pio.tx_queue_AC_VO;
	case 1:
		return dev->pio.tx_queue_AC_VI;
	case 2:
		return dev->pio.tx_queue_AC_BE;
	case 3:
		return dev->pio.tx_queue_AC_BK;
	default:
		B43_WARN_ON(1);
		return dev->pio.tx_queue_AC_VO;
	}
}
/* http://bcm-v4.sipsolutions.net/802.11/Radio/Switch%20Radio */
/* Software rfkill for the HT PHY: when unblocking, poke the RF control
 * register in the documented sequence, (re)initialize the radio and
 * retune to the current channel. The MAC must already be suspended. */
static void b43_phy_ht_op_software_rfkill(struct b43_wldev *dev, bool blocked)
{
	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
		b43err(dev->wl, "MAC not suspended\n");

	/* In the following PHY ops we copy wl's dummy behaviour.
	 * TODO: Find out if reads (currently hidden in masks/masksets) are
	 * needed and replace following ops with just writes or w&r.
	 * Note: B43_PHY_HT_RF_CTL1 register is tricky, wrong operation can
	 * cause delayed (!) machine lock up. */
	if (blocked) {
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
	} else {
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
		b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x1);
		b43_phy_mask(dev, B43_PHY_HT_RF_CTL1, 0);
		b43_phy_maskset(dev, B43_PHY_HT_RF_CTL1, 0, 0x2);

		/* Only the 0x2059 radio is supported on HT PHY here. */
		if (dev->phy.radio_ver == 0x2059)
			b43_radio_2059_init(dev);
		else
			B43_WARN_ON(1);

		b43_switch_channel(dev, dev->phy.channel);
	}
}
/* Unmap and free every buffer still attached to the ring's descriptors.
 * TX rings may legitimately contain empty or poisoned slots (warned if
 * seen on an RX ring); RX slots use the fixed rx_buffersize mapping. */
static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			/* Empty slots are only expected on TX rings. */
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}
/* Request a slot for usage.
 * Advances the ring's current slot, bumps the usage accounting and
 * returns the newly claimed slot. Only valid on a running TX ring
 * that still has free slots. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int new_slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	new_slot = next_slot(ring, ring->current_slot);
	ring->current_slot = new_slot;
	update_max_used_slots(ring, ++ring->used_slots);

	return new_slot;
}
/* Fill in one 32-bit-style DMA descriptor for the given buffer.
 * @start/@end mark frame boundaries; @irq requests an interrupt on
 * completion of this descriptor. */
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	/* Derive the slot number from the descriptor's ring position. */
	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	/* The last slot terminates the descriptor table (ring wrap). */
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	/* Descriptors are little-endian in device memory. */
	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}
/* Return the slot following @slot, wrapping around to 0 at the end of
 * the ring. -1 is accepted as the "before the first slot" position. */
static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));

	return (slot == ring->nr_slots - 1) ? 0 : slot + 1;
}
/* Return the slot preceding @slot, wrapping around to the last slot
 * of the ring when @slot is 0. */
static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));

	return (slot == 0) ? ring->nr_slots - 1 : slot - 1;
}
/* Fill in one 64-bit-style DMA descriptor for the given buffer.
 * @start/@end mark frame boundaries; @irq requests an interrupt on
 * completion of this descriptor. */
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	/* Derive the slot number from the descriptor's ring position. */
	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	/* The last slot terminates the descriptor table (ring wrap). */
	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	/* Descriptors are little-endian in device memory. */
	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}
/* Static mapping of mac80211's queues (priorities) to b43 DMA rings.
 * Without QoS everything goes through the best-effort ring. An unknown
 * priority is warned about and treated as highest priority (VO). */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	if (!dev->qos_enabled)
		return dev->dma.tx_ring_AC_BE;

	/* 0 = highest priority */
	switch (queue_prio) {
	case 0:
		return dev->dma.tx_ring_AC_VO;
	case 1:
		return dev->dma.tx_ring_AC_VI;
	case 2:
		return dev->dma.tx_ring_AC_BE;
	case 3:
		return dev->dma.tx_ring_AC_BK;
	default:
		B43_WARN_ON(1);
		return dev->dma.tx_ring_AC_VO;
	}
}
/* Low-level A-PHY initialization. Also used as a sub-step of G-PHY
 * init, so only A/G PHY types are valid here and phy->a must not be
 * touched when called from G-PHY code. May sleep. */
void b43_phy_inita(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->bus;
	struct b43_phy *phy = &dev->phy;

	/* This lowlevel A-PHY init is also called from G-PHY init.
	 * So we must not access phy->a, if called from G-PHY code.
	 */
	B43_WARN_ON((phy->type != B43_PHYTYPE_A) &&
		    (phy->type != B43_PHYTYPE_G));

	might_sleep();

	if (phy->rev >= 6) {
		if (phy->type == B43_PHYTYPE_A)
			b43_phy_write(dev, B43_PHY_OFDM(0x1B),
				      b43_phy_read(dev, B43_PHY_OFDM(0x1B)) & ~0x1000);
		/* Toggle the Encore feature bits depending on current state. */
		if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN)
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) | 0x0010);
		else
			b43_phy_write(dev, B43_PHY_ENCORE,
				      b43_phy_read(dev, B43_PHY_ENCORE) & ~0x1010);
	}

	b43_wa_all(dev);

	if (phy->type == B43_PHYTYPE_A) {
		if (phy->gmode && (phy->rev < 3))
			b43_phy_write(dev, 0x0034,
				      b43_phy_read(dev, 0x0034) | 0x0001);
		b43_phy_rssiagc(dev, 0);

		b43_phy_write(dev, B43_PHY_CRS0,
			      b43_phy_read(dev, B43_PHY_CRS0) | B43_PHY_CRS0_EN);

		b43_radio_init2060(dev);

		if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) &&
		    ((bus->boardinfo.type == SSB_BOARD_BU4306) ||
		     (bus->boardinfo.type == SSB_BOARD_BU4309))) {
			; //TODO: A PHY LO
		}

		if (phy->rev >= 3)
			b43_phy_ww(dev);

		hardware_pctl_init_aphy(dev);

		//TODO: radar detection
	}

	if ((phy->type == B43_PHYTYPE_G) &&
	    (dev->dev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) {
		b43_phy_write(dev, B43_PHY_OFDM(0x6E),
			      (b43_phy_read(dev, B43_PHY_OFDM(0x6E)) & 0xE000) | 0x3CF);
	}
}
/* Hand an RX buffer back to the device after the CPU is done with it.
 * Only valid on RX rings (warned otherwise). */
static inline void sync_descbuffer_for_device(struct b43_dmaring *ring,
					      dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}
/* Write a 16-bit value to an A-PHY radio register: select the register
 * via the control port, then write the data port. */
static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
/* Push one frame onto a PIO TX queue.
 * NOTE(review): this is byte-for-byte the same logic as the other
 * pio_tx_frame definition in this chunk — likely two versions of the
 * same file concatenated; confirm and deduplicate at file level.
 * Builds the device TX header in wl->pio_scratchspace, streams header
 * plus frame data into the hardware FIFO, and consumes one packet slot.
 * Returns 0 on success or a negative error from header generation. */
static int pio_tx_frame(struct b43_pio_txqueue *q, struct sk_buff *skb)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	struct b43_pio_txpacket *pack;
	u16 cookie;
	int err;
	unsigned int hdrlen;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;

	B43_WARN_ON(list_empty(&q->packets_list));
	pack = list_entry(q->packets_list.next, struct b43_pio_txpacket, list);

	/* The cookie lets the TX status report find this slot again. */
	cookie = generate_cookie(q, pack);
	hdrlen = b43_txhdr_size(dev);
	/* The scratch buffer must be able to hold the device TX header. */
	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
	B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
	err = b43_generate_txhdr(dev, (u8 *)txhdr, skb, info, cookie);
	if (err)
		return err;

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware the cookie of the last mcast frame. */
		b43_shm_write16(dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}

	pack->skb = skb;
	/* Core rev >= 8 has a 4-byte wide FIFO port, older cores 2 bytes. */
	if (q->rev >= 8)
		pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
	else
		pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);

	/* The slot returns to the free list on the TX status report. */
	list_del(&pack->list);

	/* Update the queue statistics. */
	q->buffer_used += roundup(skb->len + hdrlen, 4);
	q->free_packet_slots -= 1;

	return 0;
}
/* Pack a DMA ring/slot pair into a TX cookie.
 * Top nibble: ring index + 1 (so 0 never appears as a valid cookie
 * prefix); low 12 bits: slot number. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	B43_WARN_ON(slot & ~0x0FFF);

	cookie = (((u16)ring->index + 1) << 12);
	cookie |= (u16)slot;

	return cookie;
}
/* Read a 16-bit value from an A-PHY radio register: select the register
 * (with the A-PHY read-access bit set) via the control port, then read
 * the data port. */
static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);
	/* A-PHY needs 0x40 for read access */
	reg |= 0x40;

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
}
/* Drain all completed RX frames from the ring, up to (excluding) the
 * slot the hardware is currently filling, then report our new position
 * back to the device. */
void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	/* Make sure all buffer updates are visible before the device
	 * sees the new RX slot index. */
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}
/* Read a 16-bit value from an A-PHY radio register.
 * NOTE(review): duplicate of the other b43_aphy_op_radio_read in this
 * chunk (that copy carries the explanatory comments) — likely two file
 * versions concatenated; confirm and deduplicate at file level. */
static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);
	/* A-PHY needs the 0x40 bit set for read access. */
	reg |= 0x40;

	b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
}
/* Translate a DMA address mask into the matching b43 DMA engine type.
 * An unexpected mask is warned about and falls back to the most
 * conservative (30-bit) engine. */
static enum b43_dmatype dma_mask_to_engine_type(u64 dmamask)
{
	switch (dmamask) {
	case DMA_BIT_MASK(30):
		return B43_DMA_30BIT;
	case DMA_BIT_MASK(32):
		return B43_DMA_32BIT;
	case DMA_BIT_MASK(64):
		return B43_DMA_64BIT;
	}
	B43_WARN_ON(1);

	return B43_DMA_30BIT;
}
/* Look up the 2057-radio channel table entry for @freq.
 * Exactly one of *tabent_r7 / *tabent_r7_2g is set on success (which
 * one depends on the PHY/radio revision pair); both stay NULL if the
 * frequency is not found or the revision combination is unknown
 * (the latter triggers a warning). */
void r2057_get_chantabent_rev7(struct b43_wldev *dev, u16 freq,
			       const struct b43_nphy_chantabent_rev7 **tabent_r7,
			       const struct b43_nphy_chantabent_rev7_2g **tabent_r7_2g)
{
	struct b43_phy *phy = &dev->phy;
	const struct b43_nphy_chantabent_rev7 *tab_r7 = NULL;
	const struct b43_nphy_chantabent_rev7_2g *tab_r7_2g = NULL;
	unsigned int tab_len = 0;
	unsigned int i;

	*tabent_r7 = NULL;
	*tabent_r7_2g = NULL;

	/* Pick the channel table matching this PHY/radio revision pair. */
	switch (phy->rev) {
	case 8:
		if (phy->radio_rev == 5) {
			tab_r7_2g = b43_nphy_chantab_phy_rev8_radio_rev5;
			tab_len = ARRAY_SIZE(b43_nphy_chantab_phy_rev8_radio_rev5);
		}
		break;
	case 16:
		if (phy->radio_rev == 9) {
			tab_r7 = b43_nphy_chantab_phy_rev16_radio_rev9;
			tab_len = ARRAY_SIZE(b43_nphy_chantab_phy_rev16_radio_rev9);
		}
		break;
	case 17:
		if (phy->radio_rev == 14) {
			tab_r7_2g = b43_nphy_chantab_phy_rev17_radio_rev14;
			tab_len = ARRAY_SIZE(b43_nphy_chantab_phy_rev17_radio_rev14);
		}
		break;
	default:
		break;
	}

	if (tab_r7) {
		for (i = 0; i < tab_len; i++) {
			if (tab_r7[i].freq == freq) {
				*tabent_r7 = &tab_r7[i];
				return;
			}
		}
	} else if (tab_r7_2g) {
		for (i = 0; i < tab_len; i++) {
			if (tab_r7_2g[i].freq == freq) {
				*tabent_r7_2g = &tab_r7_2g[i];
				return;
			}
		}
	} else {
		/* No channel table known for this revision combination. */
		B43_WARN_ON(1);
	}
}
/* Write @nr_elements consecutive values from @_data into an LCN-PHY
 * internal table. @offset encodes the start address plus the element
 * width via B43_LCNTAB_TYPEMASK; @_data is a packed array of elements
 * of that width. The address is programmed once up front. */
void b43_lcntab_write_bulk(struct b43_wldev *dev, u32 offset,
			   unsigned int nr_elements, const void *_data)
{
	u32 type, value;
	const u8 *data = _data;
	unsigned int i;

	type = offset & B43_LCNTAB_TYPEMASK;
	offset &= ~B43_LCNTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);

	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LCNTAB_8BIT:
			value = *data;
			data++;
			B43_WARN_ON(value & ~0xFF);
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
			break;
		case B43_LCNTAB_16BIT:
			value = *((u16 *)data);
			data += 2;
			B43_WARN_ON(value & ~0xFFFF);
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value);
			break;
		case B43_LCNTAB_32BIT:
			/* 32-bit entries: high half first, then low half. */
			value = *((u32 *)data);
			data += 4;
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATAHI, value >> 16);
			b43_phy_write(dev, B43_PHY_LCN_TABLE_DATALO, value & 0xFFFF);
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}
/* Read @nr_elements consecutive values from an LCN-PHY internal table
 * into @_data. @offset encodes the start address plus the element
 * width via B43_LCNTAB_TYPEMASK; @_data receives a packed array of
 * elements of that width. The address is programmed once up front. */
void b43_lcntab_read_bulk(struct b43_wldev *dev, u32 offset,
			  unsigned int nr_elements, void *_data)
{
	u32 type;
	u8 *data = _data;
	unsigned int i;

	type = offset & B43_LCNTAB_TYPEMASK;
	offset &= ~B43_LCNTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_PHY_LCN_TABLE_ADDR, offset);

	for (i = 0; i < nr_elements; i++) {
		switch (type) {
		case B43_LCNTAB_8BIT:
			*data = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO) & 0xFF;
			data++;
			break;
		case B43_LCNTAB_16BIT:
			*((u16 *)data) = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
			data += 2;
			break;
		case B43_LCNTAB_32BIT:
			/* 32-bit entries: high half first, then low half
			 * (note: opposite order from b43_lcntab_read). */
			*((u32 *)data) = b43_phy_read(dev, B43_PHY_LCN_TABLE_DATAHI);
			*((u32 *)data) <<= 16;
			*((u32 *)data) |= b43_phy_read(dev, B43_PHY_LCN_TABLE_DATALO);
			data += 4;
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}