static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			tasklet_schedule(&hdev->cmd_task);
	}
}
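/*
 * Stand-alone sketch (userspace C, not the kernel API) of the pattern the
 * handler above relies on: pull the fixed-size command-status header off
 * the front of the buffer and convert the little-endian opcode to host
 * order before dispatching.  The byte values below are invented.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* status, ncmd, opcode (LE), payload -- mirrors the hci_ev_cmd_status layout */
	uint8_t buf[] = { 0x00, 0x01, 0x01, 0x04, 0xaa };
	uint16_t opcode = (uint16_t)buf[2] | ((uint16_t)buf[3] << 8);
	size_t hdr = 4;
	size_t remaining = sizeof(buf) - hdr;	/* what skb_pull() would leave */

	printf("opcode=0x%04x status=%u ncmd=%u payload=%zu bytes\n",
	       opcode, buf[0], buf[1], remaining);
	return 0;
}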
int ax25_rebuild_header(struct sk_buff *skb)
{
	struct sk_buff *ourskb;
	unsigned char *bp = skb->data;
	ax25_route *route;
	struct net_device *dev = NULL;
	ax25_address *src, *dst;
	ax25_digi *digipeat = NULL;
	ax25_dev *ax25_dev;
	ax25_cb *ax25;
	char ip_mode = ' ';

	dst = (ax25_address *)(bp + 1);
	src = (ax25_address *)(bp + 8);

	if (arp_find(bp + 1, skb))
		return 1;

	route = ax25_get_route(dst, NULL);
	if (route) {
		digipeat = route->digipeat;
		dev = route->dev;
		ip_mode = route->ip_mode;
	}

	if (dev == NULL)
		dev = skb->dev;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		goto put;

	if (bp[16] == AX25_P_IP) {
		if (ip_mode == 'V' ||
		    (ip_mode == ' ' && ax25_dev->values[AX25_VALUES_IPDEFMODE])) {
			/*
			 * We copy the buffer and release the original, thereby
			 * keeping it straight.
			 *
			 * Note: we report 1 back so the caller will
			 * not feed the frame directly to the physical device.
			 * We don't want that to happen. (It won't be upset
			 * as we have pulled the frame from the queue by
			 * freeing it.)
			 *
			 * NB: TCP modifies buffers that are still
			 * on a device queue, thus we use skb_copy()
			 * instead of skb_clone() until this
			 * gets fixed.
			 */
			ax25_address src_c;
			ax25_address dst_c;

			if ((ourskb = skb_copy(skb, GFP_ATOMIC)) == NULL) {
				kfree_skb(skb);
				goto put;
			}

			if (skb->sk != NULL)
				skb_set_owner_w(ourskb, skb->sk);

			kfree_skb(skb);

			/* dl9sau: bugfix
			 * After kfree_skb(), dst and src, which pointed into
			 * bp (part of skb->data), are no longer valid.  Copy
			 * the addresses by value so that dst_c and src_c stay
			 * valid across skb_pull(ourskb, ...).
			 */
			bp = ourskb->data;
			dst_c = *(ax25_address *)(bp + 1);
			src_c = *(ax25_address *)(bp + 8);

			skb_pull(ourskb, AX25_HEADER_LEN - 1);	/* Keep PID */
			skb_reset_network_header(ourskb);

			ax25 = ax25_send_frame(ourskb,
					       ax25_dev->values[AX25_VALUES_PACLEN],
					       &src_c, &dst_c, digipeat, dev);
			if (ax25)
				ax25_cb_put(ax25);

			goto put;
		}
	}

	bp[7]  &= ~AX25_CBIT;
	bp[7]  &= ~AX25_EBIT;
	bp[7]  |= AX25_SSSID_SPARE;

	bp[14] &= ~AX25_CBIT;
	bp[14] |= AX25_EBIT;
	bp[14] |= AX25_SSSID_SPARE;

	skb_pull(skb, AX25_KISS_HEADER_LEN);

	if (digipeat != NULL) {
		if ((ourskb = ax25_rt_build_path(skb, src, dst,
						 route->digipeat)) == NULL) {
			kfree_skb(skb);
			goto put;
		}
		skb = ourskb;
	}

	ax25_queue_xmit(skb, dev);

put:
	if (route)
		ax25_put_route(route);

	return 1;
}
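/*
 * Stand-alone illustration (plain userspace C, not the sk_buff API) of the
 * dl9sau bugfix commented above: pointers into a packet buffer die with
 * the buffer, so the addresses are copied by value before the original
 * buffer is freed.  Names and sizes here are invented.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct addr { unsigned char a[7]; };	/* stand-in for ax25_address */

int main(void)
{
	unsigned char *frame = malloc(32);
	struct addr dst_c;

	if (!frame)
		return 1;
	memcpy(frame + 1, "DESTXX", 7);

	/* copy by value, like dst_c/src_c in the function above */
	memcpy(&dst_c, frame + 1, sizeof(dst_c));

	free(frame);	/* any `struct addr *dst = frame + 1` is now dangling */
	printf("copied address survives: %.4s\n", (char *)dst_c.a);
	return 0;
}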
static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not
		 * overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet. */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}
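/*
 * Userspace sketch of the poison/recycle idea behind
 * b43_poison_rx_buffer()/b43_rx_buffer_is_poisoned() above (the real
 * driver checks only part of the buffer; constants here are invented):
 * fill an RX buffer with a known pattern before handing it to the
 * device, and treat an untouched pattern on completion as "the DMA
 * never wrote here".
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define RX_POISON 0x6a
#define BUF_SIZE  64

static void poison_buf(unsigned char *buf, size_t len)
{
	memset(buf, RX_POISON, len);
}

static bool buf_is_poisoned(const unsigned char *buf, size_t len)
{
	for (size_t i = 0; i < len; i++)
		if (buf[i] != RX_POISON)
			return false;	/* device wrote real data */
	return true;
}

int main(void)
{
	unsigned char buf[BUF_SIZE];

	poison_buf(buf, sizeof(buf));
	printf("untouched: %s\n",
	       buf_is_poisoned(buf, sizeof(buf)) ? "drop+recycle" : "pass up");

	buf[0] = 0x42;	/* simulate the device writing a frame */
	printf("written:   %s\n",
	       buf_is_poisoned(buf, sizeof(buf)) ? "drop+recycle" : "pass up");
	return 0;
}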
/*
 * Poll a virtual card's message queue.
 * If there are new status replies from the card, copy them to the
 * ringbuffer for reading on /dev/isdnctrl and call
 * isdnloop_parse_status() to process them. Watch for the special
 * firmware boot message and parse it to get the D-channel protocol.
 * If there are B-channels open, initiate a timer callback to
 * isdnloop_pollbchan().
 * This routine is called periodically via timer interrupt.
 *
 * Parameter:
 *   data = pointer to card struct
 */
static void isdnloop_polldchan(unsigned long data)
{
	isdnloop_card *card = (isdnloop_card *) data;
	struct sk_buff *skb;
	int avail;
	int left;
	u_char c;
	int ch;
	unsigned long flags;
	u_char *p;
	isdn_ctrl cmd;

	if ((skb = skb_dequeue(&card->dqueue)))
		avail = skb->len;
	else
		avail = 0;
	for (left = avail; left > 0; left--) {
		c = *skb->data;
		skb_pull(skb, 1);
		isdnloop_putmsg(card, c);
		card->imsg[card->iptr] = c;
		if (card->iptr < 59)
			card->iptr++;
		if (!skb->len) {
			avail++;
			isdnloop_putmsg(card, '\n');
			card->imsg[card->iptr] = 0;
			card->iptr = 0;
			if (card->imsg[0] == '0' && card->imsg[1] >= '0' &&
			    card->imsg[1] <= '2' && card->imsg[2] == ';') {
				ch = (card->imsg[1] - '0') - 1;
				p = &card->imsg[3];
				isdnloop_parse_status(p, ch, card);
			} else {
				p = card->imsg;
				if (!strncmp(p, "DRV1.", 5)) {
					printk(KERN_INFO "isdnloop: (%s) %s\n", CID, p);
					if (!strncmp(p + 7, "TC", 2)) {
						card->ptype = ISDN_PTYPE_1TR6;
						card->interface.features |= ISDN_FEATURE_P_1TR6;
						printk(KERN_INFO
						       "isdnloop: (%s) 1TR6-Protocol loaded and running\n", CID);
					}
					if (!strncmp(p + 7, "EC", 2)) {
						card->ptype = ISDN_PTYPE_EURO;
						card->interface.features |= ISDN_FEATURE_P_EURO;
						printk(KERN_INFO
						       "isdnloop: (%s) Euro-Protocol loaded and running\n", CID);
					}
					continue;
				}
			}
		}
	}
	if (avail) {
		cmd.command = ISDN_STAT_STAVAIL;
		cmd.driver = card->myid;
		cmd.arg = avail;
		card->interface.statcallb(&cmd);
	}
	if (card->flags & (ISDNLOOP_FLAGS_B1ACTIVE | ISDNLOOP_FLAGS_B2ACTIVE))
		if (!(card->flags & ISDNLOOP_FLAGS_RBTIMER)) {
			/* schedule B-channel polling */
			card->flags |= ISDNLOOP_FLAGS_RBTIMER;
			spin_lock_irqsave(&card->isdnloop_lock, flags);
			del_timer(&card->rb_timer);
			card->rb_timer.function = isdnloop_pollbchan;
			card->rb_timer.data = (unsigned long) card;
			card->rb_timer.expires = jiffies + ISDNLOOP_TIMER_BCREAD;
			add_timer(&card->rb_timer);
			spin_unlock_irqrestore(&card->isdnloop_lock, flags);
		}
	/* schedule again */
	spin_lock_irqsave(&card->isdnloop_lock, flags);
	card->st_timer.expires = jiffies + ISDNLOOP_TIMER_DCREAD;
	add_timer(&card->st_timer);
	spin_unlock_irqrestore(&card->isdnloop_lock, flags);
}
static int ccmni_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ccci_port *port = *((struct ccci_port **)netdev_priv(dev));
	struct ccci_request *req = NULL;
	struct ccci_header *ccci_h;
	int ret;
	int skb_len = skb->len;
	static int tx_busy_retry_cnt = 0;
	int tx_queue, tx_channel;
#ifndef FEATURE_SEQ_CHECK_EN
	struct netdev_entity *nent = (struct netdev_entity *)port->private_data;

	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d, curr_seq=%d\n",
		     port->name, skb_headroom(skb), skb->len, nent->tx_seq_num);
#else
	CCCI_DBG_MSG(port->modem->index, NET, "write on %s, len=%d/%d\n",
		     port->name, skb_headroom(skb), skb->len);
#endif

	if (unlikely(skb->len > CCMNI_MTU)) {
		CCCI_ERR_MSG(port->modem->index, NET, "exceeds MTU(%d) with %d/%d\n",
			     CCMNI_MTU, dev->mtu, skb->len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(skb_headroom(skb) < sizeof(struct ccci_header))) {
		CCCI_ERR_MSG(port->modem->index, NET,
			     "not enough header room on %s, len=%d header=%d hard_header=%d\n",
			     port->name, skb->len, skb_headroom(skb), dev->hard_header_len);
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	if (unlikely(port->modem->md_state != READY)) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	req = ccci_alloc_req(OUT, -1, 1, 0);
	if (req) {
		if (likely(port->rx_ch != CCCI_CCMNI3_RX)) {
			if (unlikely(skb_is_ack(skb))) {
				tx_channel = port->tx_ch == CCCI_CCMNI1_TX ?
					CCCI_CCMNI1_DL_ACK : CCCI_CCMNI2_DL_ACK;
				tx_queue = NET_ACK_TXQ_INDEX(port);
			} else {
				tx_channel = port->tx_ch;
				tx_queue = NET_DAT_TXQ_INDEX(port);
			}
		} else {
			tx_channel = port->tx_ch;
			tx_queue = NET_DAT_TXQ_INDEX(port);
		}

		req->skb = skb;
		req->policy = FREE;
		ccci_h = (struct ccci_header *)skb_push(skb, sizeof(struct ccci_header));
		ccci_h->channel = tx_channel;
		ccci_h->data[0] = 0;
		/* skb->len already includes the ccci_header after skb_push */
		ccci_h->data[1] = skb->len;
#ifndef FEATURE_SEQ_CHECK_EN
		ccci_h->reserved = nent->tx_seq_num++;
#else
		ccci_h->reserved = 0;
#endif
		ret = port->modem->ops->send_request(port->modem, tx_queue, req);
		if (ret) {
			/* undo the header; on the next retry we will push it again */
			skb_pull(skb, sizeof(struct ccci_header));
			/* when returning busy, do NOT free the skb as the
			 * network stack may still use it */
			req->policy = NOOP;
			ccci_free_req(req);
			goto tx_busy;
		}
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += skb_len;
		tx_busy_retry_cnt = 0;
	} else {
		CCCI_ERR_MSG(port->modem->index, NET, "fail to alloc request\n");
		goto tx_busy;
	}
	return NETDEV_TX_OK;

tx_busy:
	if (unlikely(!(port->modem->capability & MODEM_CAP_TXBUSY_STOP))) {
		if ((++tx_busy_retry_cnt) % 20000 == 0)
			CCCI_INF_MSG(port->modem->index, NET, "%s TX busy: retry_times=%d\n",
				     port->name, tx_busy_retry_cnt);
	} else {
		port->tx_busy_count++;
	}
	return NETDEV_TX_BUSY;
}
/*
 * This routine is the centralised routine for parsing the control
 * information for the different frame formats.
 */
int lapb_decode(struct lapb_cb *lapb, struct sk_buff *skb,
		struct lapb_frame *frame)
{
	frame->type = LAPB_ILLEGAL;

#if LAPB_DEBUG > 2
	printk(KERN_DEBUG "lapb: (%p) S%d RX %02X %02X %02X\n",
	       lapb->dev, lapb->state,
	       skb->data[0], skb->data[1], skb->data[2]);
#endif

	/* We always need to look at 2 bytes, sometimes we need
	 * to look at 3 and those cases are handled below.
	 */
	if (!pskb_may_pull(skb, 2))
		return -1;

	if (lapb->mode & LAPB_MLP) {
		if (lapb->mode & LAPB_DCE) {
			if (skb->data[0] == LAPB_ADDR_D)
				frame->cr = LAPB_COMMAND;
			if (skb->data[0] == LAPB_ADDR_C)
				frame->cr = LAPB_RESPONSE;
		} else {
			if (skb->data[0] == LAPB_ADDR_C)
				frame->cr = LAPB_COMMAND;
			if (skb->data[0] == LAPB_ADDR_D)
				frame->cr = LAPB_RESPONSE;
		}
	} else {
		if (lapb->mode & LAPB_DCE) {
			if (skb->data[0] == LAPB_ADDR_B)
				frame->cr = LAPB_COMMAND;
			if (skb->data[0] == LAPB_ADDR_A)
				frame->cr = LAPB_RESPONSE;
		} else {
			if (skb->data[0] == LAPB_ADDR_A)
				frame->cr = LAPB_COMMAND;
			if (skb->data[0] == LAPB_ADDR_B)
				frame->cr = LAPB_RESPONSE;
		}
	}

	skb_pull(skb, 1);

	if (lapb->mode & LAPB_EXTENDED) {
		if (!(skb->data[0] & LAPB_S)) {
			if (!pskb_may_pull(skb, 2))
				return -1;
			/*
			 * I frame - carries NR/NS/PF
			 */
			frame->type = LAPB_I;
			frame->ns = (skb->data[0] >> 1) & 0x7F;
			frame->nr = (skb->data[1] >> 1) & 0x7F;
			frame->pf = skb->data[1] & LAPB_EPF;
			frame->control[0] = skb->data[0];
			frame->control[1] = skb->data[1];
			skb_pull(skb, 2);
		} else if ((skb->data[0] & LAPB_U) == 1) {
			if (!pskb_may_pull(skb, 2))
				return -1;
			/*
			 * S frame - take out PF/NR
			 */
			frame->type = skb->data[0] & 0x0F;
			frame->nr = (skb->data[1] >> 1) & 0x7F;
			frame->pf = skb->data[1] & LAPB_EPF;
			frame->control[0] = skb->data[0];
			frame->control[1] = skb->data[1];
			skb_pull(skb, 2);
		} else if ((skb->data[0] & LAPB_U) == 3) {
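/*
 * Stand-alone sketch of the control-field classification done above
 * (modulo-128 numbering): low bit clear means an I frame, bottom two
 * bits 0b01 an S frame, 0b11 a U frame.  The constants below mirror
 * LAPB_S/LAPB_U; sample bytes are invented.
 */
#include <stdio.h>

#define CTL_S 0x01
#define CTL_U 0x03

static const char *classify(unsigned char ctl)
{
	if (!(ctl & CTL_S))
		return "I";		/* information */
	if ((ctl & CTL_U) == 1)
		return "S";		/* supervisory */
	return "U";			/* unnumbered */
}

int main(void)
{
	unsigned char samples[] = { 0x00, 0x01, 0x03, 0x2e };

	for (unsigned i = 0; i < sizeof(samples); i++)
		printf("%02x -> %s frame\n", samples[i], classify(samples[i]));
	return 0;
}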
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;

	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* append the secret part of the key after the 3 IV bytes */
	memcpy(key + 3, wep->key, wep->key_len);

	/* payload length without IV (4 bytes) and ICV (4 bytes) */
	plen = skb->len - hdr_len - 8;

	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
	sg_init_one(&sg, pos, plen + 4);

	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
		return -7;

	crc = ~crc32_le(~0, pos, plen);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}

	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}

static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}

static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}

static char *prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;

	p += sprintf(p, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
	return p;
}

static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4,	/* IV */
	.extra_postfix_len	= 4,	/* ICV */
	.owner			= THIS_MODULE,
};

int ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}

void ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}

void ieee80211_wep_null(void)
{
	// return;
}
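/*
 * The ICV check above is a CRC-32 (IEEE 802.3 polynomial) over the
 * decrypted payload, stored little-endian after it; the kernel helper
 * is crc32_le() and the driver computes ~crc32_le(~0, ...).  A minimal
 * stand-alone model of that check with a bitwise CRC (payload invented):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_ieee(const uint8_t *p, size_t len)
{
	uint32_t crc = ~0u;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	uint8_t payload[] = "plaintext";
	size_t plen = sizeof(payload) - 1;
	uint32_t crc = crc32_ieee(payload, plen);
	/* sender appends the CRC little-endian, like icv[0..3] above */
	uint8_t icv[4] = { crc, crc >> 8, crc >> 16, crc >> 24 };
	uint8_t check[4];
	uint32_t rx;

	/* receiver recomputes over the decrypted bytes and compares */
	rx = crc32_ieee(payload, plen);
	check[0] = rx; check[1] = rx >> 8; check[2] = rx >> 16; check[3] = rx >> 24;
	printf("intact:    %s\n", memcmp(icv, check, 4) ? "mismatch" : "ok");

	payload[0] ^= 1;	/* single bit flip */
	rx = crc32_ieee(payload, plen);
	check[0] = rx; check[1] = rx >> 8; check[2] = rx >> 16; check[3] = rx >> 24;
	printf("corrupted: %s\n", memcmp(icv, check, 4) ? "mismatch" : "ok");
	return 0;
}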
/* process a frame handed over to us from the linux network layer. First byte
   semantics as defined in Documentation/networking/x25-iface.txt */
static int isdn_x25iface_xmit(struct concap_proto *cprot, struct sk_buff *skb)
{
	unsigned char firstbyte = skb->data[0];
	enum wan_states *state = &((ix25_pdata_t *)cprot->proto_data)->state;
	int ret = 0;

	IX25DEBUG("isdn_x25iface_xmit: %s first=%x state=%d\n",
		  MY_DEVNAME(cprot->net_dev), firstbyte, *state);
	switch (firstbyte) {
	case X25_IFACE_DATA:
		if (*state == WAN_CONNECTED) {
			skb_pull(skb, 1);
			cprot->net_dev->trans_start = jiffies;
			ret = (cprot->dops->data_req(cprot, skb));
			/* prepare for future retransmissions */
			if (ret)
				skb_push(skb, 1);
			return ret;
		}
		illegal_state_warn(*state, firstbyte);
		break;
	case X25_IFACE_CONNECT:
		if (*state == WAN_DISCONNECTED) {
			*state = WAN_CONNECTING;
			ret = cprot->dops->connect_req(cprot);
			if (ret) {
				/* reset state and notify upper layer about
				 * immediately failed attempts */
				isdn_x25iface_disconn_ind(cprot);
			}
		} else {
			illegal_state_warn(*state, firstbyte);
		}
		break;
	case X25_IFACE_DISCONNECT:
		switch (*state) {
		case WAN_DISCONNECTED:
			/* Should not happen. However, give the upper layer a
			   chance to recover from the inconsistency, but don't
			   trust the lower layer sending the disconn_confirm
			   when already disconnected */
			printk(KERN_WARNING "isdn_x25iface_xmit: disconnect "
			       "requested while disconnected\n");
			isdn_x25iface_disconn_ind(cprot);
			break;	/* prevent infinite loops */
		case WAN_CONNECTING:
		case WAN_CONNECTED:
			*state = WAN_DISCONNECTED;
			cprot->dops->disconn_req(cprot);
			break;
		default:
			illegal_state_warn(*state, firstbyte);
		}
		break;
	case X25_IFACE_PARAMS:
		printk(KERN_WARNING "isdn_x25iface_xmit: setting of lapb"
		       " options not yet supported\n");
		break;
	default:
		printk(KERN_WARNING "isdn_x25iface_xmit: frame with illegal"
		       " first byte %x ignored:\n", firstbyte);
	}
	dev_kfree_skb(skb);
	return 0;
}
/* This function handles a received packet. Necessary action is taken based
 * on cmd/event/data.
 */
static int mwifiex_usb_recv(struct mwifiex_adapter *adapter,
			    struct sk_buff *skb, u8 ep)
{
	u32 recv_type;
	__le32 tmp;
	int ret;

	if (adapter->hs_activated)
		mwifiex_process_hs_config(adapter);

	if (skb->len < INTF_HEADER_LEN) {
		mwifiex_dbg(adapter, ERROR,
			    "%s: invalid skb->len\n", __func__);
		return -1;
	}

	switch (ep) {
	case MWIFIEX_USB_EP_CMD_EVENT:
		mwifiex_dbg(adapter, EVENT,
			    "%s: EP_CMD_EVENT\n", __func__);
		skb_copy_from_linear_data(skb, &tmp, INTF_HEADER_LEN);
		recv_type = le32_to_cpu(tmp);
		skb_pull(skb, INTF_HEADER_LEN);

		switch (recv_type) {
		case MWIFIEX_USB_TYPE_CMD:
			if (skb->len > MWIFIEX_SIZE_OF_CMD_BUFFER) {
				mwifiex_dbg(adapter, ERROR,
					    "CMD: skb->len too large\n");
				ret = -1;
				goto exit_restore_skb;
			} else if (!adapter->curr_cmd) {
				mwifiex_dbg(adapter, WARN, "CMD: no curr_cmd\n");
				if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
					mwifiex_process_sleep_confirm_resp(
							adapter, skb->data,
							skb->len);
					ret = 0;
					goto exit_restore_skb;
				}
				ret = -1;
				goto exit_restore_skb;
			}

			adapter->curr_cmd->resp_skb = skb;
			adapter->cmd_resp_received = true;
			break;
		case MWIFIEX_USB_TYPE_EVENT:
			if (skb->len < sizeof(u32)) {
				mwifiex_dbg(adapter, ERROR,
					    "EVENT: skb->len too small\n");
				ret = -1;
				goto exit_restore_skb;
			}
			skb_copy_from_linear_data(skb, &tmp, sizeof(u32));
			adapter->event_cause = le32_to_cpu(tmp);
			mwifiex_dbg(adapter, EVENT, "event_cause %#x\n",
				    adapter->event_cause);

			if (skb->len > MAX_EVENT_SIZE) {
				mwifiex_dbg(adapter, ERROR,
					    "EVENT: event body too large\n");
				ret = -1;
				goto exit_restore_skb;
			}

			memcpy(adapter->event_body, skb->data +
			       MWIFIEX_EVENT_HEADER_LEN, skb->len);

			adapter->event_received = true;
			adapter->event_skb = skb;
			break;
		default:
			mwifiex_dbg(adapter, ERROR,
				    "unknown recv_type %#x\n", recv_type);
			return -1;
		}
		break;
	case MWIFIEX_USB_EP_DATA:
		mwifiex_dbg(adapter, DATA, "%s: EP_DATA\n", __func__);
		if (skb->len > MWIFIEX_RX_DATA_BUF_SIZE) {
			mwifiex_dbg(adapter, ERROR,
				    "DATA: skb->len too large\n");
			return -1;
		}

		skb_queue_tail(&adapter->rx_data_q, skb);
		adapter->data_received = true;
		atomic_inc(&adapter->rx_pending);
		break;
	default:
		mwifiex_dbg(adapter, ERROR, "%s: unknown endpoint %#x\n",
			    __func__, ep);
		return -1;
	}

	return -EINPROGRESS;

exit_restore_skb:
	/* The buffer will be reused for further cmds/events */
	skb_push(skb, INTF_HEADER_LEN);

	return ret;
}
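/*
 * Sketch (plain userspace C, not the sk_buff API) of the pull/push pairing
 * used above: the interface header is pulled off for parsing, and pushed
 * back on the error paths so the buffer can be reused intact.
 */
#include <assert.h>
#include <stdio.h>

struct buf {
	unsigned char *data;
	unsigned int len;
};

static void buf_pull(struct buf *b, unsigned int n)
{
	b->data += n;	/* advance past a header */
	b->len -= n;
}

static void buf_push(struct buf *b, unsigned int n)
{
	b->data -= n;	/* expose the header again */
	b->len += n;
}

int main(void)
{
	unsigned char frame[16] = { 0 };
	struct buf b = { frame, sizeof(frame) };

	buf_pull(&b, 4);	/* strip a 4-byte interface header */
	/* ...parse the payload... */
	buf_push(&b, 4);	/* restore it before handing the buffer back */
	assert(b.data == frame && b.len == sizeof(frame));
	printf("header restored, len=%u\n", b.len);
	return 0;
}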
static void tx_iso_complete(struct urb *urb, struct pt_regs *regs)
{
	iso_urb_struct *context_iso_urb = (iso_urb_struct *) urb->context;
	usb_fifo *fifo = context_iso_urb->owner_fifo;
	hfcusb_data *hfc = fifo->hfc;
	int k, tx_offset, num_isoc_packets, sink, len, current_len, errcode;
	int frame_complete, transp_mode, fifon, status;
	__u8 threshbit;
	__u8 threshtable[8] = { 1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80 };

	fifon = fifo->fifonum;
	status = urb->status;
	tx_offset = 0;

	if (fifo->active && !status) {
		transp_mode = 0;
		if (fifon < 4 && hfc->b_mode[fifon / 2] == L1_MODE_TRANS)
			transp_mode = TRUE;

		/* is the FifoFull threshold set for our channel? */
		threshbit = threshtable[fifon] & hfc->threshold_mask;
		num_isoc_packets = iso_packets[fifon];

		/* predict dataflow to avoid fifo overflow */
		if (fifon >= HFCUSB_D_TX)
			sink = (threshbit) ? SINK_DMIN : SINK_DMAX;
		else
			sink = (threshbit) ? SINK_MIN : SINK_MAX;

		fill_isoc_urb(urb, fifo->hfc->dev, fifo->pipe,
			      context_iso_urb->buffer, num_isoc_packets,
			      fifo->usb_packet_maxlen, fifo->intervall,
			      tx_iso_complete, urb->context);
		memset(context_iso_urb->buffer, 0,
		       sizeof(context_iso_urb->buffer));
		frame_complete = FALSE;

		/* generate the next ISO packets */
		for (k = 0; k < num_isoc_packets; ++k) {
			if (fifo->skbuff) {
				len = fifo->skbuff->len;
				/* we lower the data margin every msec */
				fifo->bit_line -= sink;
				current_len = (0 - fifo->bit_line) / 8;
				/* a maximum of 14 payload bytes per ISO packet
				 * makes our life easier */
				if (current_len > 14)
					current_len = 14;
				current_len = (len <= current_len) ? len : current_len;
				/* how many bits do we put on the line? */
				fifo->bit_line += current_len * 8;

				context_iso_urb->buffer[tx_offset] = 0;
				if (current_len == len) {
					if (!transp_mode) {
						/* here frame completion */
						context_iso_urb->buffer[tx_offset] = 1;
						/* add 2 flag bytes and a 16-bit
						 * CRC at the end of the ISDN
						 * frame */
						fifo->bit_line += 32;
					}
					frame_complete = TRUE;
				}

				memcpy(context_iso_urb->buffer + tx_offset + 1,
				       fifo->skbuff->data, current_len);
				skb_pull(fifo->skbuff, current_len);

				/* define packet delimiters within the URB buffer */
				urb->iso_frame_desc[k].offset = tx_offset;
				urb->iso_frame_desc[k].length = current_len + 1;

				tx_offset += (current_len + 1);
			} else {
				urb->iso_frame_desc[k].offset = tx_offset++;
				urb->iso_frame_desc[k].length = 1;
				/* we lower the data margin every msec */
				fifo->bit_line -= sink;
				if (fifo->bit_line < BITLINE_INF)
					fifo->bit_line = BITLINE_INF;
			}

			if (frame_complete) {
				fifo->delete_flg = TRUE;
				fifo->hif->l1l2(fifo->hif, PH_DATA | CONFIRM,
						(void *) fifo->skbuff->truesize);
				if (fifo->skbuff && fifo->delete_flg) {
					dev_kfree_skb_any(fifo->skbuff);
					fifo->skbuff = NULL;
					fifo->delete_flg = FALSE;
				}
				frame_complete = FALSE;
			}
		}
		errcode = usb_submit_urb(urb, GFP_ATOMIC);
		if (errcode < 0) {
			printk(KERN_INFO
			       "HFC-S USB: error submitting ISO URB: %d\n",
			       errcode);
		}
	} else {
		if (status && !hfc->disc_flag) {
			printk(KERN_INFO
			       "HFC-S USB: tx_iso_complete: urb->status %s (%i), fifonum=%d\n",
			       symbolic(urb_errlist, status), status, fifon);
		}
	}
}				/* tx_iso_complete */
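/*
 * Stand-alone model of the "bit_line" pacing above (numbers invented):
 * every millisecond the margin drops by the sink rate, each byte put on
 * the line adds 8 bits back, and the per-packet payload is capped so the
 * sender never runs ahead of the line rate.
 */
#include <stdio.h>

int main(void)
{
	int bit_line = 0;
	int sink = 16;		/* bits drained per msec */
	int pending = 40;	/* bytes still queued */

	for (int msec = 0; msec < 5 && pending > 0; msec++) {
		bit_line -= sink;			/* budget grows with time */
		int room = (0 - bit_line) / 8;		/* bytes the line can absorb */
		if (room > 14)
			room = 14;			/* ISO packet payload cap */
		if (room > pending)
			room = pending;
		if (room < 0)
			room = 0;
		bit_line += room * 8;
		pending -= room;
		printf("ms=%d sent=%d pending=%d bit_line=%d\n",
		       msec, room, pending, bit_line);
	}
	return 0;
}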
static __inline__ int
rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
	struct rtnetlink_link *link;
	struct rtnetlink_link *link_tab;
	int sz_idx, kind;
	int min_len;
	int family;
	int type;
	int err;

	/* Only requests are handled by the kernel now */
	if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
		return 0;

	type = nlh->nlmsg_type;

	/* A control message: ignore them */
	if (type < RTM_BASE)
		return 0;

	/* Unknown message: reply with EINVAL */
	if (type > RTM_MAX)
		goto err_inval;

	type -= RTM_BASE;

	/* All the messages must have at least 1 byte length */
	if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
		return 0;

	family = ((struct rtgenmsg *)NLMSG_DATA(nlh))->rtgen_family;
	if (family >= NPROTO) {
		*errp = -EAFNOSUPPORT;
		return -1;
	}

	link_tab = rtnetlink_links[family];
	if (link_tab == NULL)
		link_tab = rtnetlink_links[PF_UNSPEC];
	link = &link_tab[type];

	sz_idx = type >> 2;
	kind = type & 3;

	if (kind != 2 && security_netlink_recv(skb)) {
		*errp = -EPERM;
		return -1;
	}

	if (kind == 2 && nlh->nlmsg_flags & NLM_F_DUMP) {
		u32 rlen;

		if (link->dumpit == NULL)
			link = &(rtnetlink_links[PF_UNSPEC][type]);

		if (link->dumpit == NULL)
			goto err_inval;

		if ((*errp = netlink_dump_start(rtnl, skb, nlh,
						link->dumpit,
						rtnetlink_done)) != 0)
			return -1;

		rlen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (rlen > skb->len)
			rlen = skb->len;
		skb_pull(skb, rlen);
		return -1;
	}

	memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));

	min_len = rtm_min[sz_idx];
	if (nlh->nlmsg_len < min_len)
		goto err_inval;

	if (nlh->nlmsg_len > min_len) {
		int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > rta_max[sz_idx])
					goto err_inval;
				rta_buf[flavor - 1] = attr;
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}

	if (link->doit == NULL)
		link = &(rtnetlink_links[PF_UNSPEC][type]);
	if (link->doit == NULL)
		goto err_inval;
	err = link->doit(skb, nlh, (void *)&rta_buf[0]);

	*errp = err;
	return err;

err_inval:
	*errp = -EINVAL;
	return -1;
}
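/*
 * The RTA_OK()/RTA_NEXT() loop above is a plain TLV walk with aligned
 * steps.  A self-contained model of the same idea (layout and constants
 * here are illustrative, not the netlink ABI): each attribute carries
 * its total length, and the cursor advances by that length rounded up
 * to a 4-byte boundary, stopping when the remainder can no longer hold
 * a header.
 */
#include <stdint.h>
#include <stdio.h>

struct tlv {
	uint16_t len;	/* total length, header included */
	uint16_t type;
};

#define TLV_ALIGN(n)	(((n) + 3u) & ~3u)

int main(void)
{
	/* two attributes laid out as len,type pairs plus payload/padding */
	uint16_t buf[] = { 6, 1, 0xcdab, 0,	/* type 1: len 6 -> 2 data bytes, 2 pad */
			   4, 2 };		/* type 2: len 4 -> header only */
	long rem = sizeof(buf);			/* signed, like the kernel's attrlen */
	struct tlv *a = (struct tlv *)buf;

	while (rem >= (long)sizeof(*a) && a->len >= sizeof(*a) && a->len <= rem) {
		printf("type=%u len=%u\n", (unsigned)a->type, (unsigned)a->len);
		rem -= TLV_ALIGN(a->len);
		a = (struct tlv *)((uint8_t *)a + TLV_ALIGN(a->len));
	}
	return 0;
}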
void rt2x00lib_txdone(struct queue_entry *entry,
		      struct txdone_entry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int header_length, i;
	u8 rate_idx, rate_flags, retry_rates;
	u8 skbdesc_flags = skbdesc->flags;
	bool success;

	/*
	 * Unmap the skb.
	 */
	rt2x00queue_unmap_skb(entry);

	/*
	 * Remove the extra tx headroom from the skb.
	 */
	skb_pull(entry->skb, rt2x00dev->ops->extra_tx_headroom);

	/*
	 * Signal that the TX descriptor is no longer in the skb.
	 */
	skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;

	/*
	 * Determine the length of the 802.11 header.
	 */
	header_length = ieee80211_get_hdrlen_from_skb(entry->skb);

	/*
	 * Remove the L2 padding which was added during transmission.
	 */
	if (test_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags))
		rt2x00queue_remove_l2pad(entry->skb, header_length);

	/*
	 * If the IV/EIV data was stripped from the frame before it was
	 * passed to the hardware, we should now reinsert it again because
	 * mac80211 will expect the same data to be present in the
	 * frame as it was passed to us.
	 */
	if (test_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags))
		rt2x00crypto_tx_insert_iv(entry->skb, header_length);

	/*
	 * Send the frame to debugfs immediately; after this call is
	 * completed we are going to overwrite the skb->cb array.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry->skb);

	/*
	 * Determine if the frame has been successfully transmitted.
	 */
	success = test_bit(TXDONE_SUCCESS, &txdesc->flags) ||
		  test_bit(TXDONE_UNKNOWN, &txdesc->flags);

	/*
	 * Update TX statistics.
	 */
	rt2x00dev->link.qual.tx_success += success;
	rt2x00dev->link.qual.tx_failed += !success;

	rate_idx = skbdesc->tx_rate_idx;
	rate_flags = skbdesc->tx_rate_flags;
	retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ?
		      (txdesc->retry + 1) : 1;

	/*
	 * Initialize TX status
	 */
	memset(&tx_info->status, 0, sizeof(tx_info->status));
	tx_info->status.ack_signal = 0;

	/*
	 * The frame was sent with retries: the hardware tried
	 * different rates to send out the frame; at each
	 * retry it lowered the rate 1 step except when the
	 * lowest rate was used.
	 */
	for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) {
		tx_info->status.rates[i].idx = rate_idx - i;
		tx_info->status.rates[i].flags = rate_flags;

		if (rate_idx - i == 0) {
			/*
			 * The lowest rate (index 0) was used until the
			 * number of max retries was reached.
			 */
			tx_info->status.rates[i].count = retry_rates - i;
			i++;
			break;
		}
		tx_info->status.rates[i].count = 1;
	}
	if (i < (IEEE80211_TX_MAX_RATES - 1))
		tx_info->status.rates[i].idx = -1; /* terminate */

	if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		if (success)
			tx_info->flags |= IEEE80211_TX_STAT_ACK;
		else
			rt2x00dev->low_level_stats.dot11ACKFailureCount++;
	}

	/*
	 * Every single frame has its own tx status, hence report
	 * every frame as an ampdu of size 1.
	 *
	 * TODO: if we can find out how many frames were aggregated
	 * by the hw we could provide the real ampdu_len to mac80211
	 * which would allow the rc algorithm to better decide on
	 * which rates are suitable.
	 */
	if (test_bit(TXDONE_AMPDU, &txdesc->flags) ||
	    tx_info->flags & IEEE80211_TX_CTL_AMPDU) {
		tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
		tx_info->status.ampdu_len = 1;
		tx_info->status.ampdu_ack_len = success ? 1 : 0;
		/*
		 * TODO: Need to tear down BA session here
		 * if not successful.
		 */
	}

	if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
		if (success)
			rt2x00dev->low_level_stats.dot11RTSSuccessCount++;
		else
			rt2x00dev->low_level_stats.dot11RTSFailureCount++;
	}

	/*
	 * Only send the status report to mac80211 when it's a frame
	 * that originated in mac80211. If this was an extra frame coming
	 * through a mac80211 library call (RTS/CTS) then we should not
	 * send the status report back.
	 */
	if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
		if (test_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags))
			ieee80211_tx_status(rt2x00dev->hw, entry->skb);
		else
			ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
	} else {
		dev_kfree_skb_any(entry->skb);
	}

	/*
	 * Make this entry available for reuse.
	 */
	entry->skb = NULL;
	entry->flags = 0;

	rt2x00dev->ops->lib->clear_entry(entry);

	rt2x00queue_index_inc(entry, Q_INDEX_DONE);

	/*
	 * If the data queue was below the threshold before the txdone
	 * handler we must make sure the packet queue in the mac80211 stack
	 * is reenabled when the txdone handler has finished. This has to be
	 * serialized with rt2x00mac_tx(), otherwise we can wake up the
	 * queue before it was stopped.
	 */
	spin_lock_bh(&entry->queue->tx_lock);
	if (!rt2x00queue_threshold(entry->queue))
		rt2x00queue_unpause_queue(entry->queue);
	spin_unlock_bh(&entry->queue->tx_lock);
}
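/*
 * Stand-alone model of the retry-rate reconstruction above: the hardware
 * starts at rate index R and steps down one index per retry, pinning at
 * index 0, and the status array is terminated with idx = -1 when a slot
 * is left.  A cap of 4 slots stands in for IEEE80211_TX_MAX_RATES; the
 * sample numbers are invented.
 */
#include <stdio.h>

#define MAX_RATES 4

int main(void)
{
	int rate_idx = 1, retry_rates = 2;	/* 1 try + 1 retry */
	int i;

	for (i = 0; i < retry_rates && i < MAX_RATES; i++) {
		if (rate_idx - i == 0) {
			/* lowest rate absorbs all remaining attempts */
			printf("idx 0 x%d\n", retry_rates - i);
			i++;
			break;
		}
		printf("idx %d x1\n", rate_idx - i);
	}
	if (i < MAX_RATES - 1)
		printf("terminator at slot %d\n", i);
	return 0;
}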
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->tc_redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->tc_from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}
}
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				netdev_err(bgmac->net_dev, "Found poisoned packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				netdev_err(bgmac->net_dev, "Found oversized packet at slot %d, DMA issue!\n",
					   ring->start);
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_length_errors++;
				bgmac->net_dev->stats.rx_errors++;
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			if (unlikely(!skb)) {
				netdev_err(bgmac->net_dev, "build_skb failed\n");
				put_page(virt_to_head_page(buf));
				bgmac->net_dev->stats.rx_errors++;
				break;
			}
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			bgmac->net_dev->stats.rx_bytes += len;
			bgmac->net_dev->stats.rx_packets++;
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
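/*
 * The outer loop above is a budgeted ring consumer (NAPI-style): walk
 * from the driver's cursor to the slot the hardware last wrote, wrapping
 * at the ring size, and stop early once the budget ("weight") is spent.
 * A stand-alone model with invented sizes:
 */
#include <stdio.h>

#define RING_SLOTS 8

static int consume_ring(int *start, int end, int budget)
{
	int handled = 0;

	while (*start != end && handled < budget) {
		printf("handle slot %d\n", *start);
		handled++;
		if (++*start >= RING_SLOTS)
			*start = 0;	/* wrap around */
	}
	return handled;
}

int main(void)
{
	int start = 6;

	/* hw produced slots 6,7,0,1 -> end slot 2; budget of 3 stops early */
	printf("handled=%d\n", consume_ring(&start, 2, 3));
	printf("handled=%d\n", consume_ring(&start, 2, 3));
	return 0;
}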
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert the Ethernet header into a suitable IEEE 802.11 header depending
 * on device configuration. */
int hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return 0;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return 0;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return 0;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on used mode; includes dst_addr and
	 * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel),
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS use
		 * an incompatible format: the FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA) */

			/* SA from skb->data + ETH_ALEN will be added after
			 * the frame payload; use hdr.addr4 as a temporary
			 * buffer */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr, ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return 0;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return 0;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME)
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return 0;
}
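/*
 * Sketch of the SNAP encapsulation choice made above: Ethernet II frames
 * get an RFC 1042 SNAP header, except AARP and IPX which use the
 * bridge-tunnel OUI.  The header bytes are the standard constants and the
 * selection mirrors the function; everything else here is illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static const uint8_t rfc1042_hdr[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

static const uint8_t *pick_encaps(uint16_t ethertype)
{
	if (ethertype == 0x80f3 /* AARP */ || ethertype == 0x8137 /* IPX */)
		return bridge_tunnel_hdr;
	if (ethertype >= 0x0600)	/* Ethernet II, not an 802.3 length */
		return rfc1042_hdr;
	return NULL;			/* raw 802.3: no SNAP header */
}

int main(void)
{
	uint16_t types[] = { 0x0800 /* IPv4 */, 0x8137 /* IPX */, 0x0100 };

	for (unsigned i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
		const uint8_t *h = pick_encaps(types[i]);
		printf("0x%04x -> %s\n", types[i],
		       h == rfc1042_hdr ? "RFC1042" :
		       h == bridge_tunnel_hdr ? "bridge-tunnel" : "none");
	}
	return 0;
}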
static int sendup_buffer(struct net_device *dev)
{
	/* on entry, command is in ltdmacbuf, data in ltdmabuf */
	/* called from idle, non-reentrant */

	int dnode, snode, llaptype, len;
	int sklen;
	struct sk_buff *skb;
	struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats;
	struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

	if (ltc->command != LT_RCVLAP) {
		printk("unknown command 0x%02x from ltpc card\n", ltc->command);
		return -1;
	}
	dnode = ltc->dnode;
	snode = ltc->snode;
	llaptype = ltc->laptype;
	len = ltc->length;

	sklen = len;
	if (llaptype == 1)
		sklen += 8;	/* correct for short ddp */
	if (sklen > 800) {
		printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
		       dev->name, sklen);
		return -1;
	}

	if ((llaptype == 0) || (llaptype > 2)) {
		printk(KERN_INFO "%s: unknown LLAP type: %d\n", dev->name, llaptype);
		return -1;
	}

	skb = dev_alloc_skb(3 + sklen);
	if (skb == NULL) {
		printk("%s: dropping packet due to memory squeeze.\n", dev->name);
		return -1;
	}
	skb->dev = dev;

	if (sklen > len)
		skb_reserve(skb, 8);
	skb_put(skb, len + 3);
	skb->protocol = htons(ETH_P_LOCALTALK);
	/* add LLAP header */
	skb->data[0] = dnode;
	skb->data[1] = snode;
	skb->data[2] = llaptype;

	skb_reset_mac_header(skb);	/* save pointer to llap header */
	skb_pull(skb, 3);

	/* copy ddp(s,e)hdr + contents */
	skb_copy_to_linear_data(skb, ltdmabuf, len);
	skb_reset_transport_header(skb);

	stats->rx_packets++;
	stats->rx_bytes += skb->len;

	/* toss it onwards */
	netif_rx(skb);
	dev->last_rx = jiffies;
	return 0;
}
u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	__u16 appl_id;
	int _len, _len2;
	__u8 msghead[64];
	hycapictrl_info *cinfo = ctrl->driverdata;
	u16 retval = CAPI_NOERROR;

	appl_id = CAPIMSG_APPID(skb->data);
	switch (_hycapi_appCheck(appl_id, ctrl->cnr)) {
	case 0:
/*		printk(KERN_INFO "Need to register\n"); */
		hycapi_register_internal(ctrl, appl_id,
					 &(hycapi_applications[appl_id - 1].rp));
		break;
	case 1:
		break;
	default:
		printk(KERN_ERR "HYCAPI: Controller mixup!\n");
		retval = CAPI_ILLAPPNR;
		goto out;
	}
	switch (CAPIMSG_CMD(skb->data)) {
	case CAPI_DISCONNECT_B3_RESP:
		capilib_free_ncci(&cinfo->ncci_head, appl_id,
				  CAPIMSG_NCCI(skb->data));
		break;
	case CAPI_DATA_B3_REQ:
		_len = CAPIMSG_LEN(skb->data);
		if (_len > 22) {
			_len2 = _len - 22;
			memcpy(msghead, skb->data, 22);
			memcpy(skb->data + _len2, msghead, 22);
			skb_pull(skb, _len2);
			CAPIMSG_SETLEN(skb->data, 22);
			retval = capilib_data_b3_req(&cinfo->ncci_head,
						     CAPIMSG_APPID(skb->data),
						     CAPIMSG_NCCI(skb->data),
						     CAPIMSG_MSGID(skb->data));
		}
		break;
	case CAPI_LISTEN_REQ:
		if (hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]) {
			kfree_skb(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1]);
			hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] = NULL;
		}
		if (!(hycapi_applications[appl_id - 1].listen_req[ctrl->cnr - 1] =
		      skb_copy(skb, GFP_ATOMIC))) {
			printk(KERN_ERR "HYSDN: memory squeeze in private_listen\n");
		}
		break;
	default:
		break;
	}
out:
	if (retval == CAPI_NOERROR)
		hycapi_sendmsg_internal(ctrl, skb);
	else
		dev_kfree_skb_any(skb);

	return retval;
}
/**
 * @ingroup tx_functions
 * This function dispatches the IP packets with the given vcid
 * to the target via the host h/w interface.
 * @return zero (success) or -ve value (failure)
 */
INT SetupNextSend(PMINI_ADAPTER Adapter,	/**< Logical Adapter */
		  struct sk_buff *Packet,	/**< data buffer */
		  USHORT Vcid)			/**< VCID for this packet */
{
	int status = 0;
#ifdef GDMA_INTERFACE
	int dontfree = 0;
#endif
	BOOLEAN bHeaderSupressionEnabled = FALSE;
	B_UINT16 uiClassifierRuleID;
	int QueueIndex = NO_OF_QUEUES + 1;
	B_UINT32 time_spent_on_host = 0;

	if (!Adapter || !Packet) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
				"Got NULL Adapter or Packet");
		return -EINVAL;
	}

	if (Packet->len > MAX_DEVICE_DESC_SIZE) {
		status = STATUS_FAILURE;
		goto errExit;
	}

	/* Get the Classifier Rule ID */
	uiClassifierRuleID = *((UINT32 *) (Packet->cb) + SKB_CB_CLASSIFICATION_OFFSET);

	QueueIndex = SearchVcid(Adapter, Vcid);
	if (QueueIndex < NO_OF_QUEUES) {
		bHeaderSupressionEnabled =
			Adapter->PackInfo[QueueIndex].bHeaderSuppressionEnabled;
		bHeaderSupressionEnabled =
			bHeaderSupressionEnabled & Adapter->bPHSEnabled;
	}

	if (Adapter->device_removed) {
		status = STATUS_FAILURE;
		goto errExit;
	}

	status = PHSTransmit(Adapter, &Packet, Vcid, uiClassifierRuleID,
			     bHeaderSupressionEnabled, (UINT *)&Packet->len,
			     Adapter->PackInfo[QueueIndex].bEthCSSupport);
	if (status != STATUS_SUCCESS) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
				"PHS Transmit failed..\n");
		goto errExit;
	}

	Leader.Vcid = Vcid;

	if (TCP_ACK == *((UINT32 *) (Packet->cb) + SKB_CB_TCPACK_OFFSET)) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
				"Sending TCP ACK\n");
		Leader.Status = LEADER_STATUS_TCP_ACK;
	} else {
		Leader.Status = LEADER_STATUS;
	}

	if (Adapter->PackInfo[QueueIndex].bEthCSSupport) {
		Leader.PLength = Packet->len;
		if (skb_headroom(Packet) < LEADER_SIZE) {
			if ((status = skb_cow(Packet, LEADER_SIZE))) {
				BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
						"bcm_transmit : Failed To Increase headRoom\n");
				goto errExit;
			}
		}
		skb_push(Packet, LEADER_SIZE);
		memcpy(Packet->data, &Leader, LEADER_SIZE);
	} else {
		Leader.PLength = Packet->len - ETH_HLEN;
		memcpy((LEADER *)skb_pull(Packet, (ETH_HLEN - LEADER_SIZE)),
		       &Leader, LEADER_SIZE);
	}

	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
			"Packet->len = %d", Packet->len);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
			"Vcid = %d", Vcid);

#ifndef BCM_SHM_INTERFACE
	status = Adapter->interface_transmit(Adapter->pvInterfaceAdapter,
					     Packet->data,
					     (Leader.PLength + LEADER_SIZE));
#else
	status = tx_pkts_to_firmware(Packet, Packet->len, 0);
#endif
	if (status) {
		BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, NEXT_SEND, DBG_LVL_ALL,
				"Tx Failed..\n");
	} else {
		Adapter->PackInfo[QueueIndex].uiTotalTxBytes += Leader.PLength;
		atomic_add(Leader.PLength, &Adapter->GoodTxByteCount);
		atomic_inc(&Adapter->TxTotalPacketCount);
#ifdef GDMA_INTERFACE
		dontfree = 1;
#endif
	}

	time_spent_on_host = jiffies - *((UINT32 *) (Packet->cb) + SKB_CB_LATENCY_OFFSET);
	BCM_DEBUG_PRINT(Adapter, DBG_TYPE_TX, TX_TIME_SPENT_IN_HOST, DBG_LVL_ALL,
			"TIME SPENT ON HOST :%#X \n", time_spent_on_host);

	atomic_dec(&Adapter->CurrNumFreeTxDesc);

errExit:
	if (STATUS_SUCCESS == status) {
		Adapter->PackInfo[QueueIndex].uiCurrentTokenCount -= Leader.PLength << 3;
		Adapter->PackInfo[QueueIndex].uiSentBytes += (Packet->len);
		Adapter->PackInfo[QueueIndex].uiSentPackets++;
		Adapter->PackInfo[QueueIndex].NumOfPacketsSent++;
		atomic_dec(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount);
#ifdef BCM_SHM_INTERFACE
		if (atomic_read(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount) < 0) {
			atomic_set(&Adapter->PackInfo[QueueIndex].uiPerSFTxResourceCount, 0);
		}
#endif
		Adapter->PackInfo[QueueIndex].uiThisPeriodSentBytes += Leader.PLength;
	}

#ifdef GDMA_INTERFACE
	if (!dontfree)
		bcm_kfree_skb(Packet);
#else
	bcm_kfree_skb(Packet);
#endif
	return status;
}
/*
 * Update the dynamic parts of a beacon frame based on the current state.
 */
int ieee80211_beacon_update(struct ieee80211_node *ni,
			    struct ieee80211_beacon_offsets *bo,
			    struct sk_buff *skb, int mcast, int *is_dtim)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ieee80211com *ic = ni->ni_ic;
	int len_changed = 0;
	u_int16_t capinfo;

	IEEE80211_LOCK_IRQ(ic);

	/* Check if we need to change channel right now */
	if ((ic->ic_flags & IEEE80211_F_DOTH) &&
	    (vap->iv_flags & IEEE80211_F_CHANSWITCH)) {
		struct ieee80211_channel *c =
			ieee80211_doth_findchan(vap, ic->ic_chanchange_chan);

		if (!vap->iv_chanchange_count && !c) {
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;
		} else if (vap->iv_chanchange_count &&
			   ((!ic->ic_chanchange_tbtt) ||
			    (vap->iv_chanchange_count == ic->ic_chanchange_tbtt))) {
			u_int8_t *frm;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
					  "%s: reinit beacon\n", __func__);

			/* NB: ic_bsschan is in the DSPARMS beacon IE, so must
			 * set this prior to the beacon re-init, below. */
			if (c == NULL) {
				/* Requested channel invalid; drop the channel
				 * switch announcement and do nothing. */
				IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
						  "%s: find channel failure\n",
						  __func__);
			} else
				ic->ic_bsschan = c;

			skb_pull(skb, sizeof(struct ieee80211_frame));
			skb_trim(skb, 0);
			frm = skb->data;
			skb_put(skb, ieee80211_beacon_init(ni, bo, frm) - frm);
			skb_push(skb, sizeof(struct ieee80211_frame));

			vap->iv_chanchange_count = 0;
			vap->iv_flags &= ~IEEE80211_F_CHANSWITCH;
			ic->ic_flags &= ~IEEE80211_F_CHANSWITCH;

			/* NB: Only for the first VAP to get here, and when we
			 * have a valid channel to which to change. */
			if (c && (ic->ic_curchan != c)) {
				ic->ic_curchan = c;
				ic->ic_set_channel(ic);
			}

			len_changed = 1;
		}
	}

	/* XXX faster to recalculate entirely or just changes? */
	if (vap->iv_opmode == IEEE80211_M_IBSS)
		capinfo = IEEE80211_CAPINFO_IBSS;
	else
		capinfo = IEEE80211_CAPINFO_ESS;
	if (vap->iv_flags & IEEE80211_F_PRIVACY)
		capinfo |= IEEE80211_CAPINFO_PRIVACY;
	if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
	    IEEE80211_IS_CHAN_2GHZ(ic->ic_bsschan))
		capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE;
	if (ic->ic_flags & IEEE80211_F_SHSLOT)
		capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME;
	if (ic->ic_flags & IEEE80211_F_DOTH)
		capinfo |= IEEE80211_CAPINFO_SPECTRUM_MGMT;

	*bo->bo_caps = htole16(capinfo);

	if (vap->iv_flags & IEEE80211_F_WME) {
		struct ieee80211_wme_state *wme = &ic->ic_wme;

		/*
		 * Check for aggressive mode change. When there is
		 * significant high priority traffic in the BSS
		 * throttle back BE traffic by using conservative
		 * parameters. Otherwise BE uses aggressive params
		 * to optimize performance of legacy/non-QoS traffic.
		 */
		if (wme->wme_flags & WME_F_AGGRMODE) {
			if (wme->wme_hipri_traffic >
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					       "%s: traffic %u, disable aggressive mode",
					       __func__, wme->wme_hipri_traffic);
				wme->wme_flags &= ~WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
			} else
				wme->wme_hipri_traffic = 0;
		} else {
			if (wme->wme_hipri_traffic <=
			    wme->wme_hipri_switch_thresh) {
				IEEE80211_NOTE(vap, IEEE80211_MSG_WME, ni,
					       "%s: traffic %u, enable aggressive mode",
					       __func__, wme->wme_hipri_traffic);
				wme->wme_flags |= WME_F_AGGRMODE;
				ieee80211_wme_updateparams_locked(vap);
				wme->wme_hipri_traffic = 0;
			} else
				wme->wme_hipri_traffic =
					wme->wme_hipri_switch_hysteresis;
		}
		/* XXX multi-bss */
		if (vap->iv_flags & IEEE80211_F_WMEUPDATE) {
			(void) ieee80211_add_wme_param(bo->bo_wme, wme,
						       IEEE80211_VAP_UAPSD_ENABLED(vap));
			vap->iv_flags &= ~IEEE80211_F_WMEUPDATE;
		}
	}

	if (vap->iv_opmode == IEEE80211_M_HOSTAP) {	/* NB: no IBSS support */
		struct ieee80211_tim_ie *tie =
			(struct ieee80211_tim_ie *) bo->bo_tim;

		if (vap->iv_flags & IEEE80211_F_TIMUPDATE) {
			u_int timlen, timoff, i;

			/*
			 * ATIM/DTIM needs updating. If it fits in the
			 * current space allocated then just copy in the
			 * new bits. Otherwise we need to move any trailing
			 * data to make room. Note that we know there is
			 * contiguous space because ieee80211_beacon_allocate
			 * ensures there is space in the mbuf to write a
			 * maximal-size virtual bitmap (based on ic_max_aid).
			 */

			/*
			 * Calculate the bitmap size and offset, copy any
			 * trailer out of the way, and then copy in the
			 * new bitmap and update the information element.
			 * Note that the tim bitmap must contain at least
			 * one byte and any offset must be even.
			 */
			if (vap->iv_ps_pending != 0) {
				timoff = 128;	/* impossibly large */
				for (i = 0; i < vap->iv_tim_len; i++)
					if (vap->iv_tim_bitmap[i]) {
						timoff = i & BITCTL_BUFD_UCAST_AID_MASK;
						break;
					}
				KASSERT(timoff != 128, ("tim bitmap empty!"));
				for (i = vap->iv_tim_len - 1; i >= timoff; i--)
					if (vap->iv_tim_bitmap[i])
						break;
				timlen = 1 + (i - timoff);
			} else {
				timoff = 0;
				timlen = 1;
			}
			if (timlen != bo->bo_tim_len) {
				/* copy up/down trailer */
				int trailer_adjust =
					(tie->tim_bitmap + timlen) - (bo->bo_tim_trailer);

				memmove(tie->tim_bitmap + timlen, bo->bo_tim_trailer,
					bo->bo_tim_trailerlen);
				bo->bo_tim_trailer = tie->tim_bitmap + timlen;
				bo->bo_chanswitch += trailer_adjust;
				bo->bo_wme += trailer_adjust;
				bo->bo_erp += trailer_adjust;
				bo->bo_ath_caps += trailer_adjust;
				bo->bo_xr += trailer_adjust;

				if (timlen > bo->bo_tim_len)
					skb_put(skb, timlen - bo->bo_tim_len);
				else
					skb_trim(skb, skb->len - (bo->bo_tim_len - timlen));
				bo->bo_tim_len = timlen;

				/* update information element */
				tie->tim_len = 3 + timlen;
				tie->tim_bitctl = timoff;
				len_changed = 1;
			}
			memcpy(tie->tim_bitmap, vap->iv_tim_bitmap + timoff,
			       bo->bo_tim_len);

			vap->iv_flags &= ~IEEE80211_F_TIMUPDATE;

			IEEE80211_NOTE(vap, IEEE80211_MSG_POWER, ni,
				       "%s: TIM updated, pending %u, off %u, len %u",
				       __func__, vap->iv_ps_pending, timoff, timlen);
		}

		/* count down DTIM period */
		if (tie->tim_count == 0)
			tie->tim_count = tie->tim_period - 1;
		else
			tie->tim_count--;

		/* update state for buffered multicast frames on DTIM */
		if (mcast && (tie->tim_count == 0))
			tie->tim_bitctl |= BITCTL_BUFD_MCAST;
		else
			tie->tim_bitctl &= ~BITCTL_BUFD_MCAST;

		*is_dtim = (tie->tim_count == 0);
	}

	/* Whenever we want to switch to a new channel, we need to follow the
	 * following steps:
	 *
	 * - iv_chanchange_count = number of beacon intervals elapsed (0)
	 * - ic_chanchange_tbtt = number of beacon intervals before switching
	 * - ic_chanchange_chan = IEEE channel number after switching
	 * - ic_flags |= IEEE80211_F_CHANSWITCH
	 */
	if (IEEE80211_IS_MODE_BEACON(vap->iv_opmode)) {
		if ((ic->ic_flags & IEEE80211_F_DOTH) &&
		    (ic->ic_flags & IEEE80211_F_CHANSWITCH)) {
			struct ieee80211_ie_csa *csa_ie =
				(struct ieee80211_ie_csa *)bo->bo_chanswitch;

			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
					  "%s: Sending 802.11h chanswitch IE: %d/%d\n",
					  __func__, ic->ic_chanchange_chan,
					  ic->ic_chanchange_tbtt);

			if (!vap->iv_chanchange_count) {
				vap->iv_flags |= IEEE80211_F_CHANSWITCH;

				/* copy out trailer to open up a slot */
				memmove(bo->bo_chanswitch + sizeof(*csa_ie),
					bo->bo_chanswitch,
					bo->bo_chanswitch_trailerlen);

				/* add ie in opened slot */
				csa_ie->csa_id = IEEE80211_ELEMID_CHANSWITCHANN;
				/* fixed length */
				csa_ie->csa_len = sizeof(*csa_ie) - 2;
				/* STA shall transmit no further frames */
				csa_ie->csa_mode = 1;
				csa_ie->csa_chan = ic->ic_chanchange_chan;
				csa_ie->csa_count = ic->ic_chanchange_tbtt;

				/* update the trailer lens */
				bo->bo_chanswitch_trailerlen += sizeof(*csa_ie);
				bo->bo_tim_trailerlen += sizeof(*csa_ie);
				bo->bo_wme += sizeof(*csa_ie);
				bo->bo_erp += sizeof(*csa_ie);
				bo->bo_ath_caps += sizeof(*csa_ie);
				bo->bo_xr += sizeof(*csa_ie);

				/* indicate new beacon length so other layers
				 * may manage memory */
				skb_put(skb, sizeof(*csa_ie));
				len_changed = 1;
			} else if (csa_ie->csa_count)
				csa_ie->csa_count--;

			vap->iv_chanchange_count++;
			IEEE80211_DPRINTF(vap, IEEE80211_MSG_DOTH,
					  "%s: CHANSWITCH IE, change in %d TBTT\n",
					  __func__, csa_ie->csa_count);
		}
#ifdef ATH_SUPERG_XR
		if (vap->iv_flags & IEEE80211_F_XRUPDATE) {
			if (vap->iv_xrvap)
				(void)ieee80211_add_xr_param(bo->bo_xr, vap);
			vap->iv_flags &= ~IEEE80211_F_XRUPDATE;
		}
#endif
		if ((ic->ic_flags_ext & IEEE80211_FEXT_ERPUPDATE) &&
		    (bo->bo_erp != NULL)) {
			(void)ieee80211_add_erp(bo->bo_erp, ic);
			ic->ic_flags_ext &= ~IEEE80211_FEXT_ERPUPDATE;
		}
	}

	/* if it is a mode change beacon for the dynamic turbo case */
	if (((ic->ic_ath_cap & IEEE80211_ATHC_BOOST) != 0) ^
	    IEEE80211_IS_CHAN_TURBO(ic->ic_curchan))
		ieee80211_add_athAdvCap(bo->bo_ath_caps,
					vap->iv_bss->ni_ath_flags,
					vap->iv_bss->ni_ath_defkeyindex);

	/* add the APP_IE buffer if the app updated it */
	if (vap->iv_flags_ext & IEEE80211_FEXT_APPIE_UPDATE) {
		/* adjust the buffer size if the size has changed */
		if (vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length !=
		    bo->bo_appie_buf_len) {
			int diff_len;

			diff_len = vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length -
				bo->bo_appie_buf_len;
			if (diff_len > 0)
				skb_put(skb, diff_len);
			else
				skb_trim(skb, skb->len + diff_len);

			bo->bo_appie_buf_len =
				vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length;
			/* update the trailer lens */
			bo->bo_chanswitch_trailerlen += diff_len;
			bo->bo_tim_trailerlen += diff_len;

			len_changed = 1;
		}
		memcpy(bo->bo_appie_buf,
		       vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].ie,
		       vap->app_ie[IEEE80211_APPIE_FRAME_BEACON].length);

		vap->iv_flags_ext &= ~IEEE80211_FEXT_APPIE_UPDATE;
	}

	IEEE80211_UNLOCK_IRQ(ic);

	return len_changed;
}
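/*
 * Stand-alone model of the TIM bitmap trimming done above: transmit only
 * the span from the first to the last nonzero byte of the virtual bitmap,
 * with the offset kept even (as the comment in the function requires).
 * The bitmap contents below are invented.
 */
#include <stdio.h>

#define TIM_LEN 16

int main(void)
{
	unsigned char bitmap[TIM_LEN] = { 0 };
	unsigned timoff = 0, timlen, i;

	bitmap[5] = 0x10;
	bitmap[9] = 0x02;

	for (i = 0; i < TIM_LEN; i++)
		if (bitmap[i]) {
			timoff = i & ~1u;	/* offset must be even */
			break;
		}
	for (i = TIM_LEN - 1; i >= timoff; i--)
		if (bitmap[i])
			break;
	timlen = 1 + (i - timoff);

	printf("offset=%u len=%u\n", timoff, timlen);	/* 4 and 6 */
	return 0;
}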
static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; struct crypto_blkcipher *tfm = hcon->hdev->tfm; int ret; u8 key[16], res[16], random[16], confirm[16]; swap128(skb->data, random); skb_pull(skb, sizeof(random)); if (conn->hcon->out) ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp, 0, conn->src, hcon->dst_type, conn->dst, res); else ret = smp_c1(tfm, hcon->tk, random, hcon->preq, hcon->prsp, hcon->dst_type, conn->dst, 0, conn->src, res); if (ret) return SMP_UNSPECIFIED; BT_DBG("conn %p %s", conn, conn->hcon->out ? "master" : "slave"); swap128(res, confirm); if (memcmp(hcon->pcnf, confirm, sizeof(hcon->pcnf)) != 0) { BT_ERR("Pairing failed (confirmation values mismatch)"); return SMP_CONFIRM_FAILED; } if (conn->hcon->out) { u8 stk[16], rand[8]; __le16 ediv; memset(rand, 0, sizeof(rand)); ediv = 0; smp_s1(tfm, hcon->tk, random, hcon->prnd, key); swap128(key, stk); memset(stk + hcon->smp_key_size, 0, SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size); hci_le_start_enc(hcon, ediv, rand, stk); hcon->enc_key_size = hcon->smp_key_size; } else { u8 stk[16], r[16], rand[8]; __le16 ediv; memset(rand, 0, sizeof(rand)); ediv = 0; swap128(hcon->prnd, r); smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(r), r); smp_s1(tfm, hcon->tk, hcon->prnd, random, key); swap128(key, stk); memset(stk + hcon->smp_key_size, 0, SMP_MAX_ENC_KEY_SIZE - hcon->smp_key_size); hci_add_ltk(conn->hcon->hdev, 0, conn->dst, hcon->dst_type, hcon->smp_key_size, hcon->auth, ediv, rand, stk); } return 0; }
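/*
 * After s1() produces the 128-bit STK above, both branches zero the bytes
 * beyond the negotiated encryption key size so the link key has exactly the
 * agreed strength. A minimal sketch of that step, assuming the real
 * SMP_MAX_ENC_KEY_SIZE of 16; smp_truncate_stk() is an illustrative name.
 */
#include <stdio.h>
#include <string.h>

#define SMP_MAX_ENC_KEY_SIZE 16

static void smp_truncate_stk(unsigned char stk[SMP_MAX_ENC_KEY_SIZE],
			     unsigned int key_size)
{
	if (key_size < SMP_MAX_ENC_KEY_SIZE)
		memset(stk + key_size, 0, SMP_MAX_ENC_KEY_SIZE - key_size);
}

int main(void)
{
	unsigned char stk[SMP_MAX_ENC_KEY_SIZE];
	unsigned int i;

	memset(stk, 0xaa, sizeof(stk));
	smp_truncate_stk(stk, 7);	/* e.g. a 56-bit negotiated key */
	for (i = 0; i < sizeof(stk); i++)
		printf("%02x", stk[i]);	/* first 7 bytes keep 0xaa, rest zero */
	printf("\n");
	return 0;
}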
/*---------------------------------------------------------------- * p80211knetdev_hard_start_xmit * * Linux netdevice method for transmitting a frame. * * Arguments: * skb Linux sk_buff containing the frame. * netdev Linux netdevice. * * Side effects: * If the lower layers report that buffers are full, netdev->tbusy * will be set to prevent higher layers from sending more traffic. * * Note: If this function returns non-zero, higher layers retain * ownership of the skb. * * Returns: * zero on success, non-zero on failure. *---------------------------------------------------------------- */ static netdev_tx_t p80211knetdev_hard_start_xmit(struct sk_buff *skb, struct net_device *netdev) { int result = 0; int txresult = -1; struct wlandevice *wlandev = netdev->ml_priv; union p80211_hdr p80211_hdr; struct p80211_metawep p80211_wep; p80211_wep.data = NULL; if (!skb) return NETDEV_TX_OK; if (wlandev->state != WLAN_DEVICE_OPEN) { result = 1; goto failed; } memset(&p80211_hdr, 0, sizeof(p80211_hdr)); memset(&p80211_wep, 0, sizeof(p80211_wep)); if (netif_queue_stopped(netdev)) { netdev_dbg(netdev, "called when queue stopped.\n"); result = 1; goto failed; } netif_stop_queue(netdev); /* Check to see that a valid mode is set */ switch (wlandev->macmode) { case WLAN_MACMODE_IBSS_STA: case WLAN_MACMODE_ESS_STA: case WLAN_MACMODE_ESS_AP: break; default: /* Mode isn't set yet, just drop the frame * and return success. * TODO: we need a saner way to handle this */ if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) { netif_start_queue(wlandev->netdev); netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n"); netdev->stats.tx_dropped++; result = 0; goto failed; } break; } /* Check for raw transmits */ if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) { if (!capable(CAP_NET_ADMIN)) { result = 1; goto failed; } /* move the header over */ memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr)); skb_pull(skb, sizeof(p80211_hdr)); } else { if (skb_ether_to_p80211 (wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0) { /* convert failed */ netdev_dbg(netdev, "ether_to_80211(%d) failed.\n", wlandev->ethconv); result = 1; goto failed; } } if (!wlandev->txframe) { result = 1; goto failed; } netif_trans_update(netdev); netdev->stats.tx_packets++; /* count only the packet payload */ netdev->stats.tx_bytes += skb->len; txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep); if (txresult == 0) { /* success and more buf avail, re: hw_txdata */ netif_wake_queue(wlandev->netdev); result = NETDEV_TX_OK; } else if (txresult == 1) { /* success, no more avail */ netdev_dbg(netdev, "txframe success, no more bufs\n"); /* netdev->tbusy = 1; don't set here, irqhdlr may have already cleared it */ result = NETDEV_TX_OK; } else if (txresult == 2) { /* alloc failure, drop frame */ netdev_dbg(netdev, "txframe returned alloc_fail\n"); result = NETDEV_TX_BUSY; } else { /* buffer full or queue busy, drop frame. */ netdev_dbg(netdev, "txframe returned full or busy\n"); result = NETDEV_TX_BUSY; } failed: /* Free up the WEP buffer if it's not the same as the skb */ if ((p80211_wep.data) && (p80211_wep.data != skb->data)) kzfree(p80211_wep.data); /* we always free the skb here, never in a lower level. */ if (!result) dev_kfree_skb(skb); return result; }
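/*
 * The txresult handling above encodes the netdev ownership contract: a
 * return of NETDEV_TX_OK means the driver consumed the skb (and frees it
 * at the bottom of the function), while NETDEV_TX_BUSY hands the buffer
 * back to the stack for requeueing. An illustrative userspace model of
 * that mapping; classify_txframe_result() is a hypothetical helper.
 */
#include <stdio.h>

enum tx_verdict { TX_OK_CONSUMED, TX_BUSY_REQUEUE };

static enum tx_verdict classify_txframe_result(int txresult)
{
	switch (txresult) {
	case 0:		/* sent, hardware buffers still available */
	case 1:		/* sent, hardware buffers now full */
		return TX_OK_CONSUMED;
	case 2:		/* allocation failure inside the driver */
	default:	/* queue busy or buffers full */
		return TX_BUSY_REQUEUE;
	}
}

int main(void)
{
	int r;

	for (r = 0; r <= 3; r++)
		printf("txframe result %d -> %s\n", r,
		       classify_txframe_result(r) == TX_OK_CONSUMED ?
		       "NETDEV_TX_OK (driver frees skb)" :
		       "NETDEV_TX_BUSY (stack keeps skb)");
	return 0;
}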
int smp_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb) { struct hci_conn *hcon = conn->hcon; __u8 code = skb->data[0]; __u8 reason; int err = 0; if (IS_ERR(hcon->hdev->tfm)) { err = PTR_ERR(hcon->hdev->tfm); reason = SMP_PAIRING_NOTSUPP; BT_ERR("SMP_PAIRING_NOTSUPP %p", hcon->hdev->tfm); goto done; } hcon->smp_conn = conn; skb_pull(skb, sizeof(code)); switch (code) { case SMP_CMD_PAIRING_REQ: reason = smp_cmd_pairing_req(conn, skb); break; case SMP_CMD_PAIRING_FAIL: reason = 0; err = -EPERM; del_timer(&hcon->smp_timer); clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend); mgmt_auth_failed(hcon->hdev->id, conn->dst, skb->data[0]); hci_conn_put(hcon); break; case SMP_CMD_PAIRING_RSP: reason = smp_cmd_pairing_rsp(conn, skb); break; case SMP_CMD_SECURITY_REQ: reason = smp_cmd_security_req(conn, skb); break; case SMP_CMD_PAIRING_CONFIRM: reason = smp_cmd_pairing_confirm(conn, skb); break; case SMP_CMD_PAIRING_RANDOM: reason = smp_cmd_pairing_random(conn, skb); break; case SMP_CMD_ENCRYPT_INFO: reason = smp_cmd_encrypt_info(conn, skb); break; case SMP_CMD_MASTER_IDENT: reason = smp_cmd_master_ident(conn, skb); break; case SMP_CMD_IDENT_INFO: case SMP_CMD_IDENT_ADDR_INFO: case SMP_CMD_SIGN_INFO: /* Just ignored */ reason = 0; break; default: BT_DBG("Unknown command code 0x%2.2x", code); reason = SMP_CMD_NOTSUPP; err = -EOPNOTSUPP; goto done; } done: if (reason) { BT_ERR("SMP_CMD_PAIRING_FAIL: %d", reason); smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), &reason); del_timer(&hcon->smp_timer); clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->pend); mgmt_auth_failed(hcon->hdev->id, conn->dst, reason); hci_conn_put(hcon); } kfree_skb(skb); return err; }
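/*
 * Any nonzero reason returned by the handlers above becomes the payload of
 * a Pairing Failed PDU sent back to the peer. A sketch of that two-byte
 * PDU; build_pairing_fail() is an illustrative helper, while the opcode
 * (0x05) and the "confirm value failed" reason (0x04) are the standard SMP
 * values.
 */
#include <stdio.h>
#include <stdint.h>

#define SMP_CMD_PAIRING_FAIL 0x05

static size_t build_pairing_fail(uint8_t *buf, uint8_t reason)
{
	buf[0] = SMP_CMD_PAIRING_FAIL;	/* SMP code byte, as pulled above */
	buf[1] = reason;		/* e.g. confirm value failed */
	return 2;
}

int main(void)
{
	uint8_t pdu[2];
	size_t n = build_pairing_fail(pdu, 0x04);

	printf("pairing-fail PDU: %02x %02x (%zu bytes)\n", pdu[0], pdu[1], n);
	return 0;
}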
static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { struct sk_buff *skb_out; struct cdc_mbim_state *info = (void *)&dev->data; struct cdc_ncm_ctx *ctx = info->ctx; __le32 sign = cpu_to_le32(USB_CDC_MBIM_NDP16_IPS_SIGN); u16 tci = 0; bool is_ip; u8 *c; if (!ctx) goto error; if (skb) { if (skb->len <= ETH_HLEN) goto error; /* Some applications using e.g. packet sockets will * bypass the VLAN acceleration and create tagged * ethernet frames directly. We primarily look for * the accelerated out-of-band tag, but fall back if * required */ skb_reset_mac_header(skb); if (vlan_get_tag(skb, &tci) < 0 && skb->len > VLAN_ETH_HLEN && __vlan_get_tag(skb, &tci) == 0) { is_ip = is_ip_proto(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); skb_pull(skb, VLAN_ETH_HLEN); } else { is_ip = is_ip_proto(eth_hdr(skb)->h_proto); skb_pull(skb, ETH_HLEN); } /* mapping VLANs to MBIM sessions: * no tag => IPS session <0> * 1 - 255 => IPS session <vlanid> * 256 - 511 => DSS session <vlanid - 256> * 512 - 4095 => unsupported, drop */ switch (tci & 0x0f00) { case 0x0000: /* VLAN ID 0 - 255 */ if (!is_ip) goto error; c = (u8 *)&sign; c[3] = tci; break; case 0x0100: /* VLAN ID 256 - 511 */ sign = cpu_to_le32(USB_CDC_MBIM_NDP16_DSS_SIGN); c = (u8 *)&sign; c[3] = tci; break; default: netif_err(dev, tx_err, dev->net, "unsupported tci=0x%04x\n", tci); goto error; } } spin_lock_bh(&ctx->mtx); skb_out = cdc_ncm_fill_tx_frame(ctx, skb, sign); spin_unlock_bh(&ctx->mtx); return skb_out; error: if (skb) dev_kfree_skb_any(skb); return NULL; }
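/*
 * A userspace sketch of the VLAN-to-session mapping in the switch above:
 * VLAN IDs 0-255 select IPS session <id>, 256-511 select DSS session
 * <id - 256>, and higher IDs are rejected. The signature constants follow
 * the USB_CDC_MBIM_NDP16_*_SIGN values; patching the session id into the
 * top byte of the host-order word mirrors the c[3] store on a
 * little-endian host.
 */
#include <stdio.h>
#include <stdint.h>

#define NDP16_IPS_SIGN 0x00535049u	/* "IPS\0", session id in byte 3 */
#define NDP16_DSS_SIGN 0x00535344u	/* "DSS\0" */

static int tci_to_sign(uint16_t tci, uint32_t *sign)
{
	uint8_t session = tci & 0xff;

	switch (tci & 0x0f00) {
	case 0x0000:			/* VLAN 0-255: IPS session */
		*sign = NDP16_IPS_SIGN | ((uint32_t)session << 24);
		return 0;
	case 0x0100:			/* VLAN 256-511: DSS session */
		*sign = NDP16_DSS_SIGN | ((uint32_t)session << 24);
		return 0;
	default:			/* VLAN 512-4095: unsupported */
		return -1;
	}
}

int main(void)
{
	uint32_t sign;

	if (tci_to_sign(300, &sign) == 0)
		printf("tci 300 -> DSS session %u\n", sign >> 24); /* 44 */
	return 0;
}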
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev, int ifindex) { const struct ipv6hdr *iph = ipv6_hdr(skb); struct flowi6 fl6 = { .daddr = iph->daddr, .saddr = iph->saddr, .flowlabel = ip6_flowinfo(iph), .flowi6_mark = skb->mark, .flowi6_proto = iph->nexthdr, .flowi6_iif = ifindex, }; struct net *net = dev_net(vrf_dev); struct rt6_info *rt6; rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE); if (unlikely(!rt6)) return; if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst)) return; skb_dst_set(skb, &rt6->dst); } static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { int orig_iif = skb->skb_iif; bool need_strict; /* loopback traffic; do not push through packet taps again. * Reset pkt_type for upper layers to process skb */ if (skb->pkt_type == PACKET_LOOPBACK) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; skb->pkt_type = PACKET_HOST; goto out; } /* if packet is NDISC or addressed to multicast or link-local * then keep the ingress interface */ need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); if (!ipv6_ndisc_frame(skb) && !need_strict) { vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; if (!list_empty(&vrf_dev->ptype_all)) { skb_push(skb, skb->mac_len); dev_queue_xmit_nit(skb, vrf_dev); skb_pull(skb, skb->mac_len); } IP6CB(skb)->flags |= IP6SKB_L3SLAVE; } if (need_strict) vrf_ip6_input_dst(skb, vrf_dev, orig_iif); skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev); out: return skb; }
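/*
 * The "strict" test above keeps the ingress interface for traffic that must
 * not be re-routed through the VRF. A minimal userspace model of
 * rt6_need_strict(), which in the kernel checks the destination for
 * multicast, link-local, or loopback scope; needs_strict_iface() is an
 * illustrative name.
 */
#include <stdbool.h>
#include <stdio.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static bool needs_strict_iface(const struct in6_addr *daddr)
{
	return IN6_IS_ADDR_MULTICAST(daddr) ||
	       IN6_IS_ADDR_LINKLOCAL(daddr) ||
	       IN6_IS_ADDR_LOOPBACK(daddr);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "fe80::1", &a);
	printf("fe80::1 strict: %d\n", needs_strict_iface(&a));	/* 1 */
	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1 strict: %d\n", needs_strict_iface(&a));	/* 0 */
	return 0;
}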
static void bluecard_write_wakeup(bluecard_info_t *info) { if (!info) { BT_ERR("Unknown device"); return; } if (!test_bit(XMIT_SENDING_READY, &(info->tx_state))) return; if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } do { register unsigned int iobase = info->p_dev->resource[0]->start; register unsigned int offset; register unsigned char command; register unsigned long ready_bit; register struct sk_buff *skb; register int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); if (!pcmcia_dev_present(info->p_dev)) return; if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) { if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state))) break; offset = 0x10; command = REG_COMMAND_TX_BUF_TWO; ready_bit = XMIT_BUF_TWO_READY; } else { if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state))) break; offset = 0x00; command = REG_COMMAND_TX_BUF_ONE; ready_bit = XMIT_BUF_ONE_READY; } if (!(skb = skb_dequeue(&(info->txq)))) break; if (bt_cb(skb)->pkt_type & 0x80) { /* Disable RTS */ info->ctrl_reg |= REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); } /* Activate LED */ bluecard_enable_activity_led(info); /* Send frame */ len = bluecard_write(iobase, offset, skb->data, skb->len); /* Tell the FPGA to send the data */ outb_p(command, iobase + REG_COMMAND); /* Mark the buffer as dirty */ clear_bit(ready_bit, &(info->tx_state)); if (bt_cb(skb)->pkt_type & 0x80) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); DEFINE_WAIT(wait); unsigned char baud_reg; switch (bt_cb(skb)->pkt_type) { case PKT_BAUD_RATE_460800: baud_reg = REG_CONTROL_BAUD_RATE_460800; break; case PKT_BAUD_RATE_230400: baud_reg = REG_CONTROL_BAUD_RATE_230400; break; case PKT_BAUD_RATE_115200: baud_reg = REG_CONTROL_BAUD_RATE_115200; break; case PKT_BAUD_RATE_57600: /* Fall through... */ default: baud_reg = REG_CONTROL_BAUD_RATE_57600; break; } /* Wait until the command reaches the baseband */ prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(HZ/10); finish_wait(&wq, &wait); /* Set baud on baseband */ info->ctrl_reg &= ~0x03; info->ctrl_reg |= baud_reg; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Enable RTS */ info->ctrl_reg &= ~REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Wait before the next HCI packet can be sent */ prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE); schedule_timeout(HZ); finish_wait(&wq, &wait); } if (len == skb->len) { kfree_skb(skb); } else { skb_pull(skb, len); skb_queue_head(&(info->txq), skb); } info->hdev->stat.byte_tx += len; /* Change buffer */ change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state)); } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); clear_bit(XMIT_SENDING, &(info->tx_state)); }
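/*
 * The transmit path above ping-pongs between two FPGA buffers: one state
 * bit selects the buffer, each buffer has a ready flag that is cleared
 * when filled and re-armed by the hardware, and the bit is flipped after
 * every frame. A minimal model of that bookkeeping with illustrative
 * names:
 */
#include <stdbool.h>
#include <stdio.h>

struct txbuf_state {
	bool use_buf_two;	/* mirrors XMIT_BUFFER_NUMBER */
	bool buf_ready[2];	/* mirrors XMIT_BUF_ONE/TWO_READY */
};

/* Returns the buffer index to fill, or -1 if that buffer is still busy. */
static int pick_tx_buffer(struct txbuf_state *st)
{
	int idx = st->use_buf_two ? 1 : 0;

	if (!st->buf_ready[idx])
		return -1;
	st->buf_ready[idx] = false;	/* dirty until the hardware drains it */
	st->use_buf_two = !st->use_buf_two;
	return idx;
}

int main(void)
{
	struct txbuf_state st = { false, { true, true } };
	int first = pick_tx_buffer(&st);	/* 0 */
	int second = pick_tx_buffer(&st);	/* 1 */
	int third = pick_tx_buffer(&st);	/* -1 until re-armed */

	printf("%d %d %d\n", first, second, third);
	return 0;
}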
static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_ev_cmd_complete *ev = (void *) skb->data; __u16 opcode; skb_pull(skb, sizeof(*ev)); opcode = __le16_to_cpu(ev->opcode); switch (opcode) { case HCI_OP_INQUIRY_CANCEL: hci_cc_inquiry_cancel(hdev, skb); break; case HCI_OP_EXIT_PERIODIC_INQ: hci_cc_exit_periodic_inq(hdev, skb); break; case HCI_OP_REMOTE_NAME_REQ_CANCEL: hci_cc_remote_name_req_cancel(hdev, skb); break; case HCI_OP_ROLE_DISCOVERY: hci_cc_role_discovery(hdev, skb); break; case HCI_OP_READ_LINK_POLICY: hci_cc_read_link_policy(hdev, skb); break; case HCI_OP_WRITE_LINK_POLICY: hci_cc_write_link_policy(hdev, skb); break; case HCI_OP_READ_DEF_LINK_POLICY: hci_cc_read_def_link_policy(hdev, skb); break; case HCI_OP_WRITE_DEF_LINK_POLICY: hci_cc_write_def_link_policy(hdev, skb); break; case HCI_OP_RESET: hci_cc_reset(hdev, skb); break; case HCI_OP_WRITE_LOCAL_NAME: hci_cc_write_local_name(hdev, skb); break; case HCI_OP_READ_LOCAL_NAME: hci_cc_read_local_name(hdev, skb); break; case HCI_OP_WRITE_AUTH_ENABLE: hci_cc_write_auth_enable(hdev, skb); break; case HCI_OP_WRITE_ENCRYPT_MODE: hci_cc_write_encrypt_mode(hdev, skb); break; case HCI_OP_WRITE_SCAN_ENABLE: hci_cc_write_scan_enable(hdev, skb); break; case HCI_OP_READ_CLASS_OF_DEV: hci_cc_read_class_of_dev(hdev, skb); break; case HCI_OP_WRITE_CLASS_OF_DEV: hci_cc_write_class_of_dev(hdev, skb); break; case HCI_OP_READ_VOICE_SETTING: hci_cc_read_voice_setting(hdev, skb); break; case HCI_OP_WRITE_VOICE_SETTING: hci_cc_write_voice_setting(hdev, skb); break; case HCI_OP_HOST_BUFFER_SIZE: hci_cc_host_buffer_size(hdev, skb); break; case HCI_OP_READ_SSP_MODE: hci_cc_read_ssp_mode(hdev, skb); break; case HCI_OP_WRITE_SSP_MODE: hci_cc_write_ssp_mode(hdev, skb); break; case HCI_OP_READ_LOCAL_VERSION: hci_cc_read_local_version(hdev, skb); break; case HCI_OP_READ_LOCAL_COMMANDS: hci_cc_read_local_commands(hdev, skb); break; case HCI_OP_READ_LOCAL_FEATURES: hci_cc_read_local_features(hdev, skb); break; case HCI_OP_READ_BUFFER_SIZE: hci_cc_read_buffer_size(hdev, skb); break; case HCI_OP_READ_BD_ADDR: hci_cc_read_bd_addr(hdev, skb); break; default: BT_DBG("%s opcode 0x%x", hdev->name, opcode); break; } if (ev->ncmd) { atomic_set(&hdev->cmd_cnt, 1); if (!skb_queue_empty(&hdev->cmd_q)) tasklet_schedule(&hdev->cmd_task); } }
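/*
 * The handler above follows a fixed decode pattern: strip the event header,
 * convert the opcode from little-endian wire order, then dispatch on it. A
 * userspace sketch of that step; the struct is a simplified stand-in for
 * struct hci_ev_cmd_complete, and the sample packet carries the real
 * HCI_Reset opcode 0x0c03.
 */
#include <stdio.h>
#include <stdint.h>

struct cmd_complete_hdr {
	uint8_t ncmd;		/* HCI command credits the controller grants */
	uint8_t opcode[2];	/* little-endian opcode of the completed command */
};

static uint16_t parse_cmd_complete(const uint8_t **data, size_t *len)
{
	const struct cmd_complete_hdr *ev = (const void *)*data;
	uint16_t opcode = (uint16_t)ev->opcode[0] |
			  ((uint16_t)ev->opcode[1] << 8);

	*data += sizeof(*ev);	/* the skb_pull() step */
	*len -= sizeof(*ev);
	return opcode;
}

int main(void)
{
	const uint8_t pkt[] = { 0x01, 0x03, 0x0c, 0x00 };	/* ncmd=1, HCI_Reset, status */
	const uint8_t *p = pkt;
	size_t n = sizeof(pkt);

	printf("opcode 0x%04x, %zu payload bytes left\n",
	       parse_cmd_complete(&p, &n), n);	/* opcode 0x0c03, 1 byte left */
	return 0;
}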