static inline int l2cap_disconnect_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, __u8 *data)
{
    l2cap_disconn_req *req = (l2cap_disconn_req *) data;
    l2cap_disconn_rsp rsp;
    __u16 dcid, scid;
    struct sock *sk;

    scid = __le16_to_cpu(req->scid);
    dcid = __le16_to_cpu(req->dcid);

    BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

    if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
        return 0;

    rsp.dcid = __cpu_to_le16(l2cap_pi(sk)->scid);
    rsp.scid = __cpu_to_le16(l2cap_pi(sk)->dcid);
    l2cap_send_rsp(conn, cmd->ident, L2CAP_DISCONN_RSP,
            L2CAP_DISCONN_RSP_SIZE, &rsp);

    sk->shutdown = SHUTDOWN_MASK;

    l2cap_chan_del(sk, ECONNRESET);
    bh_unlock_sock(sk);

    l2cap_sock_kill(sk);
    return 0;
}

const void *build_dot11_frame_header(uint16_t fc, size_t *len)
{
    struct dot11_frame_header *header = NULL;
    const void *buf = NULL;
    static uint16_t frag_seq;

    buf = malloc(sizeof(struct dot11_frame_header));
    if(buf)
    {
        *len = sizeof(struct dot11_frame_header);
        memset((void *) buf, 0, sizeof(struct dot11_frame_header));
        header = (struct dot11_frame_header *) buf;

        frag_seq += SEQ_MASK;

        header->duration = __cpu_to_le16(DEFAULT_DURATION);
        header->fc = __cpu_to_le16(fc);
        header->frag_seq = __cpu_to_le16(frag_seq);

        memcpy((void *) header->addr1, get_bssid(), MAC_ADDR_LEN);
        memcpy((void *) header->addr2, get_mac(), MAC_ADDR_LEN);
        memcpy((void *) header->addr3, get_bssid(), MAC_ADDR_LEN);
    }

    return buf;
}

void fio_net_cmd_crc_pdu(struct fio_net_cmd *cmd, const void *pdu)
{
    uint32_t pdu_len;

    cmd->cmd_crc16 = __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ));

    pdu_len = le32_to_cpu(cmd->pdu_len);
    cmd->pdu_crc16 = __cpu_to_le16(fio_crc16(pdu, pdu_len));
}

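/*
 * Hedged sketch, not from the source: the receiving side would mirror
 * fio_net_cmd_crc_pdu() by recomputing both checksums over the same spans
 * and comparing them with the stored little-endian values. The helper name
 * is hypothetical; it assumes FIO_NET_CMD_CRC_SZ covers the header up to
 * (but not including) the two crc16 fields, as the fill above implies.
 */
static int fio_net_cmd_crc_check(struct fio_net_cmd *cmd, const void *pdu)
{
    uint32_t pdu_len = le32_to_cpu(cmd->pdu_len);

    /* header checksum over the CRC'd prefix of the command */
    if (cmd->cmd_crc16 != __cpu_to_le16(fio_crc16(cmd, FIO_NET_CMD_CRC_SZ)))
        return -1;

    /* payload checksum over pdu_len bytes */
    if (cmd->pdu_crc16 != __cpu_to_le16(fio_crc16(pdu, pdu_len)))
        return -1;

    return 0;
}
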
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
            __u8 code, __u8 ident, __u16 dlen, void *data)
{
    struct sk_buff *skb, **frag;
    l2cap_cmd_hdr *cmd;
    l2cap_hdr *lh;
    int len, count;

    BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
            conn, code, ident, dlen);

    len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
    count = MIN(conn->mtu, len);

    skb = bluez_skb_alloc(count, GFP_ATOMIC);
    if (!skb)
        return NULL;

    lh = (l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
    lh->len = __cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
    lh->cid = __cpu_to_le16(0x0001);

    cmd = (l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
    cmd->code  = code;
    cmd->ident = ident;
    cmd->len   = __cpu_to_le16(dlen);

    if (dlen) {
        count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
        memcpy(skb_put(skb, count), data, count);
        data += count;
    }

    len -= skb->len;

    /* Continuation fragments (no L2CAP header) */
    frag = &skb_shinfo(skb)->frag_list;
    while (len) {
        count = MIN(conn->mtu, len);

        *frag = bluez_skb_alloc(count, GFP_ATOMIC);
        if (!*frag)
            goto fail;

        memcpy(skb_put(*frag, count), data, count);

        len  -= count;
        data += count;

        frag = &(*frag)->next;
    }

    return skb;

fail:
    kfree_skb(skb);
    return NULL;
}

/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
              void *per_transfer_context,
              u32 buffer,
              unsigned int nbytes,
              unsigned int transfer_id,
              unsigned int flags)
{
    struct ath10k *ar = ce_state->ar;
    struct ath10k_ce_ring *src_ring = ce_state->src_ring;
    struct ce_desc *desc, sdesc;
    unsigned int nentries_mask = src_ring->nentries_mask;
    unsigned int sw_index = src_ring->sw_index;
    unsigned int write_index = src_ring->write_index;
    u32 ctrl_addr = ce_state->ctrl_addr;
    u32 desc_flags = 0;
    int ret = 0;

    if (nbytes > ce_state->src_sz_max)
        ath10k_warn(ar, "%s: attempt to send more than max (nbytes: %d, max: %d)\n",
                __func__, nbytes, ce_state->src_sz_max);

    if (unlikely(CE_RING_DELTA(nentries_mask,
                write_index, sw_index - 1) <= 0)) {
        ret = -ENOSR;
        goto exit;
    }

    desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
                write_index);

    desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

    if (flags & CE_SEND_FLAG_GATHER)
        desc_flags |= CE_DESC_FLAGS_GATHER;
    if (flags & CE_SEND_FLAG_BYTE_SWAP)
        desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

    sdesc.addr   = __cpu_to_le32(buffer);
    sdesc.nbytes = __cpu_to_le16(nbytes);
    sdesc.flags  = __cpu_to_le16(desc_flags);

    *desc = sdesc;

    src_ring->per_transfer_context[write_index] = per_transfer_context;

    /* Update Source Ring Write Index */
    write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

    /* WORKAROUND */
    if (!(flags & CE_SEND_FLAG_GATHER))
        ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

    src_ring->write_index = write_index;
exit:
    return ret;
}

static int init_device (void)
{
    char buf [4096], *cp = &buf [0];
    int fd;
    int result;

#ifdef AIO
    if (iso)
        result = iso_autoconfig ();
    else
#endif
        result = autoconfig ();
    if (result < 0) {
        fprintf (stderr, "?? don't recognize %s %s device\n",
            gadgetfs, iso ? "iso" : "bulk");
        return result;
    }

    fd = open (DEVNAME, O_RDWR);
    if (fd < 0) {
        perror (DEVNAME);
        return -errno;
    }

    *(__u32 *)cp = 0;	/* tag for this format */
    cp += 4;

    /* write full then high speed configs */
    cp = build_config (cp, fs_eps);
    if (HIGHSPEED)
        cp = build_config (cp, hs_eps);

    device_desc.idVendor = __cpu_to_le16 (vendorid);
    device_desc.idProduct = __cpu_to_le16 (productid);
    if (verbose) {
        fprintf (stderr, "idVendor=%04X idProduct=%04X\n",
            vendorid, productid);
    }

    /* and device descriptor at the end */
    memcpy (cp, &device_desc, sizeof device_desc);
    cp += sizeof device_desc;

    result = write (fd, &buf [0], cp - &buf [0]);
    if (result < 0) {
        perror ("write dev descriptors");
        close (fd);
        return result;
    } else if (result != (cp - buf)) {
        fprintf (stderr, "dev init, wrote %d expected %ld\n",
            result, (long int) (cp - buf));
        close (fd);
        return -EIO;
    }
    return fd;
}

static void write_superblock(struct imgspec* const spec, char* base, const __u64 sz)
{
    __u64 padding = superblock_offset(spec);
    __u64 offset = padding + sizeof(struct microfs_sb)
        + spec->sp_lib->hl_info->li_dd_sz;

    struct microfs_sb* sb = (struct microfs_sb*)(base + padding);
    sb->s_magic = __cpu_to_le32(MICROFS_MAGIC);
    sb->s_size = sz == MICROFS_MAXIMGSIZE ? 0 : __cpu_to_le32(sz);
    sb->s_crc = 0;
    sb->s_blocks = __cpu_to_le32((sz - 1) / spec->sp_blksz + 1);
    sb->s_files = __cpu_to_le16(spec->sp_files);
    sb->s_blkshift = __cpu_to_le16(spec->sp_blkshift);

    if (sb->s_size == 0) {
        warning("this image is exactly %llu bytes (as big as is possible),"
            " this special case is not well tested", MICROFS_MAXIMGSIZE);
    }

    struct timespec nowish;
    if (clock_gettime(CLOCK_REALTIME, &nowish) < 0) {
        error("failed to get the current time: %s", strerror(errno));
    }
    sb->s_ctime = __cpu_to_le32(nowish.tv_sec);

    __u32 flags = spec->sp_lib->hl_info->li_id;
    sb->s_flags = __cpu_to_le32(flags);

    memcpy(sb->s_signature, MICROFS_SIGNATURE, sizeof(sb->s_signature));
    memcpy(sb->s_name, spec->sp_name, sizeof(sb->s_name));

    sb->s_root.i_mode = __cpu_to_le16(spec->sp_root->e_mode);
    sb->s_root.i_uid = __cpu_to_le16(spec->sp_root->e_uid);
    sb->s_root.i_gid = __cpu_to_le16(spec->sp_root->e_gid);
    i_setsize(&sb->s_root, spec->sp_root->e_size);
    sb->s_root.i_offset = spec->sp_root->e_firstchild ?
        __cpu_to_le32(offset) : 0;

    /* With everything in place it is possible to calculate the
     * crc32 checksum for the image.
     */
    __u32 crc = hostprog_lib_zlib_crc32(base + padding, sz - padding);
    sb->s_crc = __cpu_to_le32(crc);

    message(VERBOSITY_0, "CRC: %x", crc);
}

static inline int l2cap_information_req(struct l2cap_conn *conn, l2cap_cmd_hdr *cmd, u8 *data)
{
    l2cap_info_req *req = (l2cap_info_req *) data;
    l2cap_info_rsp rsp;
    u16 type;

    type = __le16_to_cpu(req->type);

    BT_DBG("type 0x%4.4x", type);

    rsp.type = __cpu_to_le16(type);
    rsp.result = __cpu_to_le16(L2CAP_IR_NOTSUPP);
    l2cap_send_rsp(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), &rsp);

    return 0;
}

int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
    struct ath10k *ar = htt->ar;
    struct sk_buff *skb;
    struct htt_cmd *cmd;
    struct htt_frag_desc_bank_cfg *cfg;
    int ret, size;
    u8 info;

    if (!ar->hw_params.continuous_frag_desc)
        return 0;

    if (!htt->frag_desc.paddr) {
        ath10k_warn(ar, "invalid frag desc memory\n");
        return -EINVAL;
    }

    size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
    skb = ath10k_htc_alloc_skb(ar, size);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, size);
    cmd = (struct htt_cmd *)skb->data;
    cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

    info = 0;
    info |= SM(htt->tx_q_state.type,
            HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

    if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL, ar->fw_features))
        info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

    cfg = &cmd->frag_desc_bank_cfg;
    cfg->info = info;
    cfg->num_banks = 1;
    cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
    cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
    cfg->bank_id[0].bank_min_id = 0;
    cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 1);

    cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
    cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
    cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
    cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
    cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

    ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

    ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
    if (ret) {
        ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
                ret);
        dev_kfree_skb_any(skb);
        return ret;
    }

    return 0;
}

int ath10k_htc_start(struct ath10k_htc *htc)
{
    struct ath10k *ar = htc->ar;
    struct sk_buff *skb;
    int status = 0;
    struct ath10k_htc_msg *msg;

    skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
    memset(skb->data, 0, skb->len);

    msg = (struct ath10k_htc_msg *)skb->data;
    msg->hdr.message_id =
        __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

    ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

    status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
    if (status) {
        kfree_skb(skb);
        return status;
    }

    return 0;
}

static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
    struct hci_cp_remote_name_req *cp;
    struct hci_conn *conn;

    BT_DBG("%s status 0x%x", hdev->name, status);

    /* If successful wait for the name req complete event before
     * checking for the need to do authentication */
    if (!status)
        return;

    cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
    if (!cp)
        return;

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
    if (conn && hci_outgoing_auth_needed(hdev, conn)) {
        struct hci_cp_auth_requested cp;
        cp.handle = __cpu_to_le16(conn->handle);
        hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
    }

    hci_dev_unlock(hdev);
}

static struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
                gfp_t gfp_mask)
{
    struct ieee80211_txb *txb;
    int i;

    txb = kmalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
            gfp_mask);
    if (!txb)
        return NULL;

    memset(txb, 0, sizeof(struct ieee80211_txb));
    txb->nr_frags = nr_frags;
    txb->frag_size = __cpu_to_le16(txb_size);

    for (i = 0; i < nr_frags; i++) {
        txb->fragments[i] = dev_alloc_skb(txb_size);
        if (unlikely(!txb->fragments[i])) {
            i--;
            break;
        }
        memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
    }

    if (unlikely(i != nr_frags)) {
        while (i >= 0)
            dev_kfree_skb_any(txb->fragments[i--]);
        kfree(txb);
        return NULL;
    }

    return txb;
}

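/*
 * Counterpart sketch (hedged, not part of this section): freeing a txb
 * allocated above means releasing every fragment skb and then the container
 * itself. Only names already used by ieee80211_alloc_txb() are assumed.
 */
static void ieee80211_txb_free(struct ieee80211_txb *txb)
{
    int i;

    if (unlikely(!txb))
        return;

    /* release each fragment skb, then the txb itself */
    for (i = 0; i < txb->nr_frags; i++)
        if (txb->fragments[i])
            dev_kfree_skb_any(txb->fragments[i]);

    kfree(txb);
}
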
int ath10k_htc_start(struct ath10k_htc *htc)
{
    struct ath10k *ar = htc->ar;
    struct sk_buff *skb;
    int status = 0;
    struct ath10k_htc_msg *msg;

    skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
    if (!skb)
        return -ENOMEM;

    skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
    memset(skb->data, 0, skb->len);

    msg = (struct ath10k_htc_msg *)skb->data;
    msg->hdr.message_id =
        __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);

    if (ar->hif.bus == ATH10K_BUS_SDIO) {
        /* Extra setup params used by SDIO */
        msg->setup_complete_ext.flags =
            __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
        msg->setup_complete_ext.max_msgs_per_bundled_recv =
            htc->max_msgs_per_htc_bundle;
    }

    ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");

    status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
    if (status) {
        kfree_skb(skb);
        return status;
    }

    return 0;
}

static struct urb *construct_urb(struct usb_device *dev, int endpoint_type,
                unsigned long pipe, void *buffer, int len,
                struct devrequest *setup, int interval)
{
    int epnum = usb_pipeendpoint(pipe);
    int is_in = usb_pipein(pipe);

    memset(&urb, 0, sizeof(struct urb));
    memset(&hep, 0, sizeof(struct usb_host_endpoint));
    INIT_LIST_HEAD(&hep.urb_list);
    INIT_LIST_HEAD(&urb.urb_list);

    urb.ep = &hep;
    urb.complete = musb_host_complete_urb;
    urb.status = -EINPROGRESS;
    urb.dev = dev;
    urb.pipe = pipe;
    urb.transfer_buffer = buffer;
    urb.transfer_dma = (unsigned long)buffer;
    urb.transfer_buffer_length = len;
    urb.setup_packet = (unsigned char *)setup;

    urb.ep->desc.wMaxPacketSize =
        __cpu_to_le16(is_in ? dev->epmaxpacketin[epnum] :
                dev->epmaxpacketout[epnum]);
    urb.ep->desc.bmAttributes = endpoint_type;
    urb.ep->desc.bEndpointAddress =
        (is_in ? USB_DIR_IN : USB_DIR_OUT) | epnum;
    urb.ep->desc.bInterval = interval;

    return &urb;
}

/* Authentication Complete */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
    evt_auth_complete *ac = (evt_auth_complete *) skb->data;
    struct hci_conn *conn = NULL;
    __u16 handle = __le16_to_cpu(ac->handle);

    BT_DBG("%s status %d", hdev->name, ac->status);

    hci_dev_lock(hdev);

    conn = conn_hash_lookup_handle(hdev, handle);
    if (conn) {
        if (!ac->status)
            conn->link_mode |= HCI_LM_AUTH;

        clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

        hci_proto_auth_cfm(conn, ac->status);

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
            if (!ac->status) {
                set_conn_encrypt_cp ce;
                ce.handle  = __cpu_to_le16(conn->handle);
                ce.encrypt = 1;
                hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                        OCF_SET_CONN_ENCRYPT,
                        SET_CONN_ENCRYPT_CP_SIZE, &ce);
            } else {
                clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
                hci_proto_encrypt_cfm(conn, ac->status);
            }
        }
    }

    hci_dev_unlock(hdev);
}

static void l2cap_conn_ready(struct l2cap_conn *conn)
{
    struct l2cap_chan_list *l = &conn->chan_list;
    struct sock *sk;

    BT_DBG("conn %p", conn);

    read_lock(&l->lock);

    for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
        bh_lock_sock(sk);

        if (sk->type != SOCK_SEQPACKET) {
            l2cap_sock_clear_timer(sk);
            sk->state = BT_CONNECTED;
            sk->state_change(sk);
        } else if (sk->state == BT_CONNECT) {
            l2cap_conn_req req;
            req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
            req.psm  = l2cap_pi(sk)->psm;
            l2cap_send_req(conn, L2CAP_CONN_REQ,
                    L2CAP_CONN_REQ_SIZE, &req);
        }

        bh_unlock_sock(sk);
    }

    read_unlock(&l->lock);
}

static char *
build_config (char *cp, const struct usb_endpoint_descriptor **ep)
{
    struct usb_config_descriptor *c;
    int i;

    c = (struct usb_config_descriptor *) cp;

    memcpy (cp, &config, config.bLength);
    cp += config.bLength;
    memcpy (cp, &source_sink_intf, sizeof source_sink_intf);
    cp += sizeof source_sink_intf;

    // Append vendor class specification
    memcpy (cp, &ccid_desc, sizeof ccid_desc);
    cp += sizeof ccid_desc;

    for (i = 0; i < source_sink_intf.bNumEndpoints; i++) {
        memcpy (cp, ep [i], USB_DT_ENDPOINT_SIZE);
        cp += USB_DT_ENDPOINT_SIZE;
    }

    c->wTotalLength = __cpu_to_le16 (cp - (char *) c);
    return cp;
}

/* Authentication Complete */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
    struct hci_ev_auth_complete *ev = (struct hci_ev_auth_complete *) skb->data;
    struct hci_conn *conn;

    BT_DBG("%s status %d", hdev->name, ev->status);

    hci_dev_lock(hdev);

    conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
    if (conn) {
        if (!ev->status)
            conn->link_mode |= HCI_LM_AUTH;

        clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);

        hci_auth_cfm(conn, ev->status);

        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
            if (!ev->status) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle  = __cpu_to_le16(conn->handle);
                cp.encrypt = 1;
                hci_send_cmd(conn->hdev, OGF_LINK_CTL,
                        OCF_SET_CONN_ENCRYPT, sizeof(cp), &cp);
            } else {
                clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
                hci_encrypt_cfm(conn, ev->status, 0x00);
            }
        }
    }

    hci_dev_unlock(hdev);
}

static void l2cap_add_conf_opt(void **ptr, __u8 type, __u8 len, unsigned long val)
{
    register l2cap_conf_opt *opt = *ptr;

    BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

    opt->type = type;
    opt->len  = len;

    switch (len) {
    case 1:
        *((__u8 *) opt->val) = val;
        break;

    case 2:
        *((__u16 *) opt->val) = __cpu_to_le16(val);
        break;

    case 4:
        *((__u32 *) opt->val) = __cpu_to_le32(val);
        break;

    default:
        memcpy(opt->val, (void *) val, len);
        break;
    }

    *ptr += L2CAP_CONF_OPT_SIZE + len;
}

static int l2cap_auth_cfm(struct hci_conn *hcon, __u8 status)
{
    struct l2cap_chan_list *l;
    struct l2cap_conn *conn;
    l2cap_conn_rsp rsp;
    struct sock *sk;
    int result;

    if (!(conn = hcon->l2cap_data))
        return 0;

    l = &conn->chan_list;

    BT_DBG("conn %p", conn);

    read_lock(&l->lock);

    for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
        bh_lock_sock(sk);

        if (sk->state != BT_CONNECT2 ||
                (l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT)) {
            bh_unlock_sock(sk);
            continue;
        }

        if (!status) {
            sk->state = BT_CONFIG;
            result = 0;
        } else {
            sk->state = BT_DISCONN;
            l2cap_sock_set_timer(sk, HZ/10);
            result = L2CAP_CR_SEC_BLOCK;
        }

        rsp.scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
        rsp.dcid   = __cpu_to_le16(l2cap_pi(sk)->scid);
        rsp.result = __cpu_to_le16(result);
        rsp.status = __cpu_to_le16(0);
        l2cap_send_rsp(conn, l2cap_pi(sk)->ident,
                L2CAP_CONN_RSP, L2CAP_CONN_RSP_SIZE, &rsp);

        bh_unlock_sock(sk);
    }

    read_unlock(&l->lock);
    return 0;
}

static void bpa10x_wakeup(struct bpa10x_data *data)
{
    struct urb *urb;
    struct sk_buff *skb;
    int err;

    BT_DBG("data %p", data);

    urb = data->cmd_urb;
    if (urb->status == -EINPROGRESS)
        skb = NULL;
    else
        skb = skb_dequeue(&data->cmd_queue);

    if (skb) {
        struct usb_ctrlrequest *cr;

        if (skb->len > BPA10X_CMD_BUF_SIZE) {
            BT_ERR("%s command packet with size %d is too big",
                    data->hdev->name, skb->len);
            kfree_skb(skb);
            return;
        }

        cr = (struct usb_ctrlrequest *) urb->setup_packet;
        cr->wLength = __cpu_to_le16(skb->len);

        memcpy(urb->transfer_buffer, skb->data, skb->len);
        urb->transfer_buffer_length = skb->len;

        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err < 0 && err != -ENODEV) {
            BT_ERR("%s submit failed for command urb %p with error %d",
                    data->hdev->name, urb, err);
            skb_queue_head(&data->cmd_queue, skb);
        } else
            kfree_skb(skb);
    }

    urb = data->tx_urb;
    if (urb->status == -EINPROGRESS)
        skb = NULL;
    else
        skb = skb_dequeue(&data->tx_queue);

    if (skb) {
        memcpy(urb->transfer_buffer, skb->data, skb->len);
        urb->transfer_buffer_length = skb->len;

        err = usb_submit_urb(urb, GFP_ATOMIC);
        if (err < 0 && err != -ENODEV) {
            BT_ERR("%s submit failed for transmit urb %p with error %d",
                    data->hdev->name, urb, err);
            skb_queue_head(&data->tx_queue, skb);
        } else
            kfree_skb(skb);
    }
}

const void *build_association_management_frame(size_t *len)
{
    struct association_request_management_frame *frame = NULL;
    const void *buf = NULL;

    buf = malloc(sizeof(struct association_request_management_frame));
    if(buf)
    {
        *len = sizeof(struct association_request_management_frame);
        memset((void *) buf, 0, *len);
        frame = (struct association_request_management_frame *) buf;

        frame->capability = __cpu_to_le16(get_ap_capability());
        frame->listen_interval = __cpu_to_le16(LISTEN_INTERVAL);
    }

    return buf;
}

/*
 * TODO:
 * -Optimize
 * -Rewrite cleaner
 */
static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
            u32 size_bytes)
{
    u32 i = 0;
    u32 __iomem *ptr = mem_addr_start;
    const u16 *buf16;

    if (unlikely(!ptr || !buf))
        return 0;

    /* shortcut for extremely often used cases */
    switch (size_bytes) {
    case 2:		/* 2 bytes */
        buf16 = (const u16 *)buf;
        writew(__cpu_to_le16(*buf16), ptr);
        return 2;
    case 1:
        /*
         * also needs to write 4 bytes in this case
         * so falling through..
         */
    case 4:		/* 4 bytes */
        writel(__cpu_to_le32(*buf), ptr);
        return 4;
    }

    while (i < size_bytes) {
        if (size_bytes - i == 2) {
            /* 2 bytes */
            buf16 = (const u16 *)buf;
            writew(__cpu_to_le16(*buf16), ptr);
            i += 2;
        } else {
            /* 4 bytes */
            writel(__cpu_to_le32(*buf), ptr);
            i += 4;
        }
        buf++;
        ptr++;
    }
    return i;
}

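/*
 * Hedged mirror sketch, not taken from this section: a read counterpart
 * that applies the inverse byte-order handling of write_mem32() above,
 * including the 2-byte tail case. The shortcut switch is omitted for
 * brevity, and the name read_mem32 is assumed here.
 */
static u32 read_mem32(u32 *buf, const void __iomem *mem_addr_start,
            u32 size_bytes)
{
    u32 i = 0;
    const u32 __iomem *ptr = mem_addr_start;
    u16 *buf16;

    if (unlikely(!ptr || !buf))
        return 0;

    while (i < size_bytes) {
        if (size_bytes - i == 2) {
            /* 2-byte tail, mirrors the writew() path above */
            buf16 = (u16 *)buf;
            *buf16 = __le16_to_cpu(readw(ptr));
            i += 2;
        } else {
            /* full 4-byte word */
            *buf = __le32_to_cpu(readl(ptr));
            i += 4;
        }
        buf++;
        ptr++;
    }
    return i;
}
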
const void *build_authentication_management_frame(size_t *len)
{
    struct authentication_management_frame *frame = NULL;
    const void *buf = NULL;

    buf = malloc(sizeof(struct authentication_management_frame));
    if(buf)
    {
        *len = sizeof(struct authentication_management_frame);
        memset((void *) buf, 0, *len);
        frame = (struct authentication_management_frame *) buf;

        frame->algorithm = __cpu_to_le16(OPEN_SYSTEM);
        frame->sequence = __cpu_to_le16(1);
        frame->status = 0;
    }

    return buf;
}

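/*
 * Hedged glue sketch showing how the builders above could be combined into
 * a single transmit buffer: an 802.11 header followed by the authentication
 * body. Not from the source; build_auth_packet and FC_AUTHENTICATION are
 * assumed names, the latter standing in for whatever frame-control value
 * the caller uses for authentication frames.
 */
const void *build_auth_packet(size_t *len)
{
    size_t hdr_len = 0, body_len = 0;
    const void *hdr = NULL, *body = NULL;
    unsigned char *pkt = NULL;

    hdr = build_dot11_frame_header(FC_AUTHENTICATION, &hdr_len);
    body = build_authentication_management_frame(&body_len);

    if (hdr && body) {
        pkt = malloc(hdr_len + body_len);
        if (pkt) {
            /* header first, then the management body */
            memcpy(pkt, hdr, hdr_len);
            memcpy(pkt + hdr_len, body, body_len);
            *len = hdr_len + body_len;
        }
    }

    free((void *) hdr);
    free((void *) body);

    return pkt;
}
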
static int l2cap_build_conf_rsp(struct sock *sk, void *data, int *result)
{
    l2cap_conf_rsp *rsp = (l2cap_conf_rsp *) data;
    void *ptr = rsp->data;
    u16 flags = 0;

    BT_DBG("sk %p complete %d", sk, result ? 1 : 0);

    if (result)
        *result = l2cap_conf_output(sk, &ptr);
    else
        flags |= 0x0001;

    rsp->scid   = __cpu_to_le16(l2cap_pi(sk)->dcid);
    rsp->result = __cpu_to_le16(result ? *result : 0);
    rsp->flags  = __cpu_to_le16(flags);

    return ptr - data;
}

static int l2cap_do_connect(struct sock *sk)
{
    bdaddr_t *src = &bluez_pi(sk)->src;
    bdaddr_t *dst = &bluez_pi(sk)->dst;
    struct l2cap_conn *conn;
    struct hci_conn *hcon;
    struct hci_dev *hdev;
    int err = 0;

    BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
            l2cap_pi(sk)->psm);

    if (!(hdev = hci_get_route(dst, src)))
        return -EHOSTUNREACH;

    hci_dev_lock_bh(hdev);

    err = -ENOMEM;

    hcon = hci_connect(hdev, ACL_LINK, dst);
    if (!hcon)
        goto done;

    conn = l2cap_conn_add(hcon, 0);
    if (!conn) {
        hci_conn_put(hcon);
        goto done;
    }

    err = 0;

    /* Update source addr of the socket */
    bacpy(src, conn->src);

    l2cap_chan_add(conn, sk, NULL);

    sk->state = BT_CONNECT;
    l2cap_sock_set_timer(sk, sk->sndtimeo);

    if (hcon->state == BT_CONNECTED) {
        if (sk->type == SOCK_SEQPACKET) {
            l2cap_conn_req req;
            req.scid = __cpu_to_le16(l2cap_pi(sk)->scid);
            req.psm  = l2cap_pi(sk)->psm;
            l2cap_send_req(conn, L2CAP_CONN_REQ,
                    L2CAP_CONN_REQ_SIZE, &req);
        } else {
            l2cap_sock_clear_timer(sk);
            sk->state = BT_CONNECTED;
        }
    }

done:
    hci_dev_unlock_bh(hdev);
    hci_dev_put(hdev);
    return err;
}

static inline int max17050_write_16regs(struct max17050_chip *max17050, u8 reg,
                u16 *buf)
{
    u16 tmp[16];
    int i;

    for (i = 0; i < 16; i++)
        tmp[i] = __cpu_to_le16(buf[i]);

    return i2c_smbus_write_i2c_block_data(max17050->i2c, reg,
                16 * sizeof(u16), (u8 *)tmp);
}

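/*
 * Hedged counterpart sketch (hypothetical helper, not from the driver):
 * reading the same 16-register window back and converting each word from
 * the bus's little-endian layout to host order, mirroring the write above.
 * Note 16 * sizeof(u16) = 32 bytes, the SMBus block transfer maximum.
 */
static inline int max17050_read_16regs(struct max17050_chip *max17050, u8 reg,
                u16 *buf)
{
    u16 tmp[16];
    int i, ret;

    ret = i2c_smbus_read_i2c_block_data(max17050->i2c, reg,
                16 * sizeof(u16), (u8 *)tmp);
    if (ret < 0)
        return ret;

    for (i = 0; i < 16; i++)
        buf[i] = __le16_to_cpu(tmp[i]);

    return 0;
}
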
int PackGetInfoCurrentSTA(char *pdubuf, char *mac, char *password)
{
    PKT_GET_INFO_EX1 *body;
    DWORD tid;

    tid = PackCmdHdr(pdubuf, NET_CMD_ID_GETINFO_EX, mac, password);

    body = (PKT_GET_INFO_EX1 *)(pdubuf + sizeof(IBOX_COMM_PKT_HDR_EX));
    body->FieldCount = 1;
    body->FieldID = __cpu_to_le16(FIELD_GENERAL_CURRENT_STA);

    return (tid);
}

static int l2cap_build_conf_req(struct sock *sk, void *data)
{
    struct l2cap_pinfo *pi = l2cap_pi(sk);
    l2cap_conf_req *req = (l2cap_conf_req *) data;
    void *ptr = req->data;

    BT_DBG("sk %p", sk);

    if (pi->imtu != L2CAP_DEFAULT_MTU)
        l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

    /* FIXME. Need actual value of the flush timeout */
    //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
    //	l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

    req->dcid  = __cpu_to_le16(pi->dcid);
    req->flags = __cpu_to_le16(0);

    return ptr - data;
}

static int nilfs2_image(const void *buf, unsigned long long *bytes)
{
    const struct nilfs_super_block *sb =
        (const struct nilfs_super_block *)buf;

    if (sb->s_magic == __cpu_to_le16(NILFS_SUPER_MAGIC) &&
            sb->s_rev_level == __cpu_to_le32(2)) {
        *bytes = (unsigned long long)__le64_to_cpu(sb->s_dev_size);
        return 1;
    }

    return 0;
}

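/*
 * Hedged usage sketch, not part of the source: nilfs2_image() expects `buf`
 * to point at the superblock itself, so a caller would read from the
 * conventional 1 KiB offset first. The helper name and buffer size are
 * illustrative; requires <stdio.h> and <unistd.h>.
 */
static int probe_nilfs2(int fd)
{
    char sb[4096];
    unsigned long long bytes = 0;

    /* the primary nilfs2 superblock conventionally sits 1024 bytes in */
    if (pread(fd, sb, sizeof(sb), 1024) != (ssize_t) sizeof(sb))
        return 0;

    if (nilfs2_image(sb, &bytes)) {
        printf("nilfs2 image, device size %llu bytes\n", bytes);
        return 1;
    }

    return 0;
}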