/*
 * Parse a firmware code-swap binary and copy its payloads into the
 * host memory previously allocated for this segment.
 *
 * The binary is a sequence of TLV records; a record with length 0 is
 * the tail sentinel carrying a magic signature and the target BMI
 * write address. Returns 0 on success or -EINVAL on a malformed file.
 */
static int ath10k_swap_code_seg_fill(struct ath10k *ar,
				     struct ath10k_swap_code_seg_info *seg_info,
				     const void *data, size_t data_len)
{
	u8 *virt_addr = seg_info->virt_address[0];
	/* Zero-initialized buffer: the tail's magic_signature is expected
	 * to be all zeroes.
	 */
	u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
	const u8 *fw_data = data;
	union ath10k_swap_code_seg_item *swap_item;
	u32 length = 0;
	u32 payload_len;
	u32 total_payload_len = 0;
	u32 size_left = data_len;

	/* Parse swap bin and copy the content to host allocated memory.
	 * The format is Address, length and value. The last 4-bytes is
	 * target write address. Currently address field is not used.
	 */
	seg_info->target_addr = -1; /* sentinel: "no tail record seen yet" */
	while (size_left >= sizeof(*swap_item)) {
		swap_item = (union ath10k_swap_code_seg_item *)fw_data;
		payload_len = __le32_to_cpu(swap_item->tlv.length);
		/* A zero length is only legal for the tail record, which
		 * must be exactly the last sizeof(tail) bytes of the file.
		 */
		if ((payload_len > size_left) ||
		    (payload_len == 0 &&
		     size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
			ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
				   payload_len);
			return -EINVAL;
		}

		if (payload_len == 0) {
			if (memcmp(swap_item->tail.magic_signature, swap_magic,
				   ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
				ath10k_err(ar, "refusing an invalid swap file\n");
				return -EINVAL;
			}
			seg_info->target_addr =
				__le32_to_cpu(swap_item->tail.bmi_write_addr);
			break;
		}

		/* Copy the payload and advance past header + payload. */
		memcpy(virt_addr, swap_item->tlv.data, payload_len);
		virt_addr += payload_len;
		length = payload_len +
			 sizeof(struct ath10k_swap_code_seg_tlv);
		size_left -= length;
		fw_data += length;
		total_payload_len += payload_len;
	}

	/* Loop ended without ever reaching a valid tail record. */
	if (seg_info->target_addr == -1) {
		ath10k_err(ar, "failed to parse invalid swap file\n");
		return -EINVAL;
	}
	seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);

	return 0;
}
/*
 * Guts of ath10k_ce_send.
 * The caller takes responsibility for any needed locking.
 */
int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
			  void *per_transfer_context,
			  u32 buffer,
			  unsigned int nbytes,
			  unsigned int transfer_id,
			  unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	/* Oversized sends are only warned about here, not rejected. */
	if (nbytes > ce_state->src_sz_max)
		ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	/* No room left on the source ring. */
	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		ret = -ENOSR;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	/* Build the descriptor in a local copy first, then publish it to
	 * the hardware-visible ring in a single struct assignment.
	 */
	sdesc.addr = __cpu_to_le32(buffer);
	sdesc.nbytes = __cpu_to_le16(nbytes);
	sdesc.flags = __cpu_to_le16(desc_flags);

	*desc = sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND: for gather segments the hardware write index is only
	 * advanced on the final (non-GATHER) descriptor of the batch.
	 */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	return ret;
}
/*
 * TODO:
 * -Optimize
 * -Rewrite cleaner
 */
/*
 * Write size_bytes from buf to the 32-bit-wide memory-mapped region at
 * mem_addr_start. Sizes of 1, 2 and 4 bytes take a fast single-access
 * path; larger buffers are written 4 bytes at a time, with a 2-byte
 * access only for a trailing 2-byte remainder.
 *
 * Returns the number of bytes actually written (rounded up to the
 * access width for the 1-byte case), or 0 on NULL arguments.
 *
 * NOTE(review): the values are passed through __cpu_to_le16/32 before
 * writew/writel; on Linux those accessors are already little-endian,
 * so on big-endian hosts this looks like a double swap — behavior kept
 * as-is, confirm against the target platform.
 *
 * Fix vs. original: removed the unreachable `break` statements that
 * followed `return` inside the switch (dead code).
 */
static u32 write_mem32(void __iomem *mem_addr_start, const u32 *buf,
		       u32 size_bytes)
{
	u32 i = 0;
	u32 __iomem *ptr = mem_addr_start;
	const u16 *buf16;

	if (unlikely(!ptr || !buf))
		return 0;

	/* shortcut for extremely often used cases */
	switch (size_bytes) {
	case 2:	/* 2 bytes */
		buf16 = (const u16 *)buf;
		writew(__cpu_to_le16(*buf16), ptr);
		return 2;

	case 1:	/*
		 * also needs to write 4 bytes in this case
		 * so falling through..
		 */
	case 4:	/* 4 bytes */
		writel(__cpu_to_le32(*buf), ptr);
		return 4;
	}

	while (i < size_bytes) {
		if (size_bytes - i == 2) {
			/* trailing 2 bytes */
			buf16 = (const u16 *)buf;
			writew(__cpu_to_le16(*buf16), ptr);
			i += 2;
		} else {
			/* 4 bytes */
			writel(__cpu_to_le32(*buf), ptr);
			i += 4;
		}
		buf++;
		ptr++;
	}

	return i;
}
/**
 * \brief Convert the simple instrument to byte stream
 * \param simple Simple instrument handle
 * \param name Simple instrument name
 * \param __data Result - allocated byte stream (caller frees via the
 *               instrument header API)
 * \param __size Result - size of allocated byte stream
 * \return 0 on success otherwise a negative error code
 */
int snd_instr_simple_convert_to_stream(snd_instr_simple_t *simple,
				       const char *name,
				       snd_instr_header_t **__data,
				       size_t *__size)
{
	snd_instr_header_t *put;
	int size;
	char *ptr;
	simple_instrument_t *instr;
	simple_xinstrument_t *xinstr;

	if (simple == NULL || __data == NULL)
		return -EINVAL;
	instr = (simple_instrument_t *)simple;
	*__data = NULL;
	*__size = 0;
	/* Size of the raw sample data that follows the xinstrument
	 * record in the stream.
	 */
	size = simple_size(simple);
	if (snd_instr_header_malloc(&put, sizeof(simple_xinstrument_t) + size) < 0)
		return -ENOMEM;
	/* build header */
	if (name)
		snd_instr_header_set_name(put, name);
	snd_instr_header_set_type(put, SND_SEQ_INSTR_ATYPE_DATA);
	snd_instr_header_set_format(put, SND_SEQ_INSTR_ID_SIMPLE);
	/* build data section: all multi-byte fields are serialized
	 * little-endian; loop_repeat is a 16-bit field, the effect
	 * bytes are copied verbatim.
	 */
	xinstr = (simple_xinstrument_t *)snd_instr_header_get_data(put);
	xinstr->stype = SIMPLE_STRU_INSTR;
	xinstr->share_id[0] = __cpu_to_le32(instr->share_id[0]);
	xinstr->share_id[1] = __cpu_to_le32(instr->share_id[1]);
	xinstr->share_id[2] = __cpu_to_le32(instr->share_id[2]);
	xinstr->share_id[3] = __cpu_to_le32(instr->share_id[3]);
	xinstr->format = __cpu_to_le32(instr->format);
	xinstr->size = __cpu_to_le32(instr->size);
	xinstr->start = __cpu_to_le32(instr->start);
	xinstr->loop_start = __cpu_to_le32(instr->loop_start);
	xinstr->loop_end = __cpu_to_le32(instr->loop_end);
	xinstr->loop_repeat = __cpu_to_le16(instr->loop_repeat);
	xinstr->effect1 = instr->effect1;
	xinstr->effect1_depth = instr->effect1_depth;
	xinstr->effect2 = instr->effect2;
	xinstr->effect2_depth = instr->effect2_depth;
	/* Raw sample bytes immediately follow the xinstrument record. */
	ptr = (char *)(xinstr + 1);
	memcpy(ptr, instr->address.ptr, size);
	/* write result */
	*__data = put;
	*__size = sizeof(*put) + sizeof(simple_xinstrument_t) + size;
	return 0;
}
/*
 * Probe for an f2fs superblock. Returns 1 (and *bytes = 0, the size is
 * not derived from this superblock) when the magic matches, 0 otherwise.
 */
static int f2fs_image(const void *buf, unsigned long long *bytes)
{
	const struct f2fs_super_block *sb = buf;

	if (sb->magic != __cpu_to_le32(F2FS_SUPER_MAGIC))
		return 0;

	*bytes = 0;
	return 1;
}
/*
 * Read 'length' bytes of target memory at 'address' into 'buffer',
 * issuing BMI_READ_MEMORY commands in BMI_MAX_DATA_SIZE chunks.
 * Disallowed once BMI_DONE has been sent.
 */
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, char *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 chunk;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi read address 0x%x length %d\n",
		   address, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		chunk = MIN(length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(chunk);

		/* chunk is in/out: it comes back as the actual
		 * response length.
		 */
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &chunk);
		if (ret) {
			ath10k_warn(ar, "unable to read from the device (%d)\n",
				    ret);
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, chunk);
		address += chunk;
		buffer += chunk;
		length -= chunk;
	}

	return 0;
}
/*
 * Fill the grain directory with consecutive grain-table base sectors
 * (stored little-endian) and return the first sector past the last
 * grain table.
 */
static SectorType prefillGD(SparseGTInfo *gtInfo, SectorType gtBase)
{
	uint32_t idx = 0;

	while (idx < gtInfo->GTs) {
		gtInfo->gd[idx++] = __cpu_to_le32(gtBase);
		gtBase += gtInfo->GTsectors;
	}
	return gtBase;
}
/*
 * Emit one special (marker) sector: zero the whole sector, stamp the
 * marker type and value, then write it out.
 *
 * NOTE(review): the 'length' argument is stored into the header's lba
 * field — in the sparse marker layout that field carries the marker
 * value rather than a disk LBA; confirm against the on-disk format spec.
 */
static bool writeSpecial(SparseVmdkWriter *writer, uint32_t marker,
			 SectorType length)
{
	SparseSpecialLBAHeaderOnDisk *hdr = writer->zlibBuffer.specialHdr;

	memset(writer->zlibBuffer.data, 0, VMDK_SECTOR_SIZE);
	hdr->type = __cpu_to_le32(marker);
	hdr->lba = __cpu_to_le64(length);
	return safeWrite(writer->fd, hdr, VMDK_SECTOR_SIZE);
}
/*
 * Probe for a nilfs2 (revision 2) superblock. On a match, reports the
 * device size recorded in the superblock and returns 1; otherwise 0.
 */
static int nilfs2_image(const void *buf, unsigned long long *bytes)
{
	const struct nilfs_super_block *sb = buf;

	if (sb->s_magic != __cpu_to_le16(NILFS_SUPER_MAGIC))
		return 0;
	if (sb->s_rev_level != __cpu_to_le32(2))
		return 0;

	*bytes = (unsigned long long)__le64_to_cpu(sb->s_dev_size);
	return 1;
}
/*
 * Serialize a firmware description into a newly malloc'd byte stream:
 * 32-byte name, little-endian version and descriptor count, then for
 * each descriptor its type (u32 LE), id (u64 LE), size (u32 LE) and
 * raw data. The caller owns and frees *r_data.
 *
 * Fixes vs. original:
 *  - malloc() result is checked; on failure *r_data is set to NULL and
 *    *r_size to 0 instead of dereferencing NULL.
 *  - multi-byte fields are written with memcpy() instead of casted
 *    pointer stores: after variable-sized descriptor data the write
 *    cursor is no longer aligned, so *(__u32 *)p / *(__u64 *)p were
 *    undefined behavior on strict-alignment targets.
 */
static void write_firmware(struct firmware *f, unsigned char **r_data,
			   off_t *r_size)
{
	off_t size;
	unsigned int i;
	unsigned char *data;
	unsigned char *p;
	__u16 v16;
	__u32 v32;
	__u64 v64;

	size = HEADER_LENGTH + f->nr_desc * DESC_HEADER_LENGTH;
	for (i = 0; i < f->nr_desc; ++i)
		size += f->desc[i].size;

	data = malloc(size);
	if (!data) {
		*r_data = NULL;
		*r_size = 0;
		return;
	}
	p = data;

	memcpy(p, f->name, 32);
	p += 32;

	v16 = __cpu_to_le16(f->version);
	memcpy(p, &v16, sizeof(f->version));
	p += sizeof(f->version);

	v16 = __cpu_to_le16(f->nr_desc);
	memcpy(p, &v16, sizeof(f->nr_desc));
	p += sizeof(f->nr_desc);

	for (i = 0; i < f->nr_desc; ++i) {
		v32 = __cpu_to_le32(f->desc[i].type);
		memcpy(p, &v32, sizeof(f->desc[i].type));
		p += sizeof(f->desc[i].type);

		v64 = __cpu_to_le64(f->desc[i].id);
		memcpy(p, &v64, sizeof(f->desc[i].id));
		p += sizeof(f->desc[i].id);

		v32 = __cpu_to_le32(f->desc[i].size);
		memcpy(p, &v32, sizeof(f->desc[i].size));
		p += sizeof(f->desc[i].size);

		memcpy(p, f->desc[i].data, f->desc[i].size);
		p += f->desc[i].size;
	}

	*r_data = data;
	*r_size = size;
}
/*
 * Read 'length' bytes of target memory at 'address' into 'buffer'
 * via chunked BMI_READ_MEMORY commands. Disallowed after BMI_DONE.
 */
int ath10k_bmi_read_memory(struct ath10k *ar,
			   u32 address, void *buffer, u32 length)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.read_mem);
	u32 chunk;
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_CORE,
		   "%s: (device: 0x%p, address: 0x%x, length: %d)\n",
		   __func__, ar, address, length);

	while (length) {
		chunk = min_t(u32, length, BMI_MAX_DATA_SIZE);

		cmd.id = __cpu_to_le32(BMI_READ_MEMORY);
		cmd.read_mem.addr = __cpu_to_le32(address);
		cmd.read_mem.len = __cpu_to_le32(chunk);

		/* chunk is in/out: updated to the actual response size. */
		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
						  &resp, &chunk);
		if (ret) {
			ath10k_warn("unable to read from the device\n");
			return ret;
		}

		memcpy(buffer, resp.read_mem.payload, chunk);
		address += chunk;
		buffer += chunk;
		length -= chunk;
	}

	return 0;
}
static int base_ext4_image(const void *buf, unsigned long long *bytes, int *test_fs) { const struct ext3_super_block *sb = (const struct ext3_super_block *)buf; if (sb->s_magic != __cpu_to_le16(EXT2_SUPER_MAGIC)) return 0; /* There is at least one feature not supported by ext3 */ if ((sb->s_feature_incompat & __cpu_to_le32(EXT3_FEATURE_INCOMPAT_UNSUPPORTED)) || (sb->s_feature_ro_compat & __cpu_to_le32(EXT3_FEATURE_RO_COMPAT_UNSUPPORTED))) { *bytes = (unsigned long long)__le32_to_cpu(sb->s_blocks_count) << (10 + __le32_to_cpu(sb->s_log_block_size)); *test_fs = (sb->s_flags & __cpu_to_le32(EXT2_FLAGS_TEST_FILESYS)) != 0; return 1; } return 0; }
/*
 * Execute firmware code at 'address' with 'param' via BMI_EXECUTE and
 * return the target's result through *result. Disallowed after BMI_DONE.
 */
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 param, u32 *result)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int status;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute address 0x%x param 0x%x\n",
		   address, param);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(param);

	status = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen,
					     &resp, &resplen);
	if (status) {
		ath10k_warn(ar, "unable to read from the device\n");
		return status;
	}

	/* A short response means the result field is not trustworthy. */
	if (resplen < sizeof(resp.execute)) {
		ath10k_warn(ar, "invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*result = __le32_to_cpu(resp.execute.result);

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi execute result 0x%x\n", *result);

	return 0;
}
/*
 * Begin an LZ-compressed download stream targeting 'address'.
 * Disallowed after BMI_DONE has been sent.
 */
int ath10k_bmi_lz_stream_start(struct ath10k *ar, u32 address)
{
	struct bmi_cmd cmd;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.lz_start);
	int status;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	cmd.id = __cpu_to_le32(BMI_LZ_STREAM_START);
	cmd.lz_start.addr = __cpu_to_le32(address);

	status = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL);
	if (status)
		ath10k_warn("unable to Start LZ Stream to the device\n");

	return status;
}
/*
 * Fill the iBox command packet header in pdubuf: service/packet type,
 * little-endian opcode, a fresh transaction id, the 6-byte MAC address
 * and the 32-byte password. Returns the (LE-encoded) transaction id as
 * stored in the header.
 */
DWORD PackCmdHdr(char *pdubuf, WORD cmd, char *mac, char *password)
{
	IBOX_COMM_PKT_HDR_EX *pkt = (IBOX_COMM_PKT_HDR_EX *)pdubuf;

	pkt->ServiceID = NET_SERVICE_ID_IBOX_INFO;
	pkt->PacketType = NET_PACKET_TYPE_CMD;
	pkt->OpCode = __cpu_to_le16(cmd);
	pkt->Info = __cpu_to_le32(GetTransactionID());
	memcpy(pkt->MacAddress, mac, 6);
	memcpy(pkt->Password, password, 32);

	return pkt->Info;
}
/*
 * Send 'size' bytes from 'buf' over 'fd' as one or more fio net
 * commands, fragmenting at FIO_SERVER_MAX_FRAGMENT_PDU. If 'list' is
 * given, a reply slot is allocated for the tag and attached on success
 * (freed on failure). Returns 0 on success or the fio_send_data error.
 */
int fio_net_send_cmd(int fd, uint16_t opcode, const void *buf, off_t size,
		     uint64_t *tagptr, struct flist_head *list)
{
	struct fio_net_cmd *cmd = NULL;
	size_t this_len, cur_len = 0;
	uint64_t tag;
	int ret;

	if (list) {
		assert(tagptr);
		tag = *tagptr = alloc_reply(*tagptr, opcode);
	} else
		tag = tagptr ? *tagptr : 0;

	do {
		this_len = size;
		if (this_len > FIO_SERVER_MAX_FRAGMENT_PDU)
			this_len = FIO_SERVER_MAX_FRAGMENT_PDU;

		/* Reuse the command buffer across fragments; grow it only
		 * when the current fragment does not fit.
		 * NOTE(review): malloc() result is not checked here.
		 */
		if (!cmd || cur_len < sizeof(*cmd) + this_len) {
			if (cmd)
				free(cmd);

			cur_len = sizeof(*cmd) + this_len;
			cmd = malloc(cur_len);
		}

		fio_init_net_cmd(cmd, opcode, buf, this_len, tag);

		/* Mark every fragment except the last as "more to come". */
		if (this_len < size)
			cmd->flags = __cpu_to_le32(FIO_NET_CMD_F_MORE);

		fio_net_cmd_crc(cmd);

		ret = fio_send_data(fd, cmd, sizeof(*cmd) + this_len);
		size -= this_len;
		buf += this_len;
	} while (!ret && size);

	if (list) {
		if (ret)
			free_reply(tag);
		else
			add_reply(tag, list);
	}

	if (cmd)
		free(cmd);

	return ret;
}
/*
 * Push 'length' bytes of LZ-compressed payload to the target via
 * BMI_LZ_DATA, chunked so header + payload fits in BMI_MAX_DATA_SIZE.
 * Disallowed after BMI_DONE has been sent.
 */
int ath10k_bmi_lz_data(struct ath10k *ar, const char *buffer, u32 length)
{
	struct bmi_cmd cmd;
	u32 hdrlen = sizeof(cmd.id) + sizeof(cmd.lz_data);
	u32 chunk;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi lz data buffer 0x%p length %d\n",
		   buffer, length);

	if (ar->bmi.done_sent) {
		ath10k_warn(ar, "command disallowed\n");
		return -EBUSY;
	}

	while (length) {
		chunk = min(length, BMI_MAX_DATA_SIZE - hdrlen);

		/* LZ payload chunks are expected to be 4-byte multiples. */
		WARN_ON(chunk & 3);

		cmd.id = __cpu_to_le32(BMI_LZ_DATA);
		cmd.lz_data.len = __cpu_to_le32(chunk);
		memcpy(cmd.lz_data.payload, buffer, chunk);

		ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, hdrlen + chunk,
						  NULL, NULL);
		if (ret) {
			ath10k_warn(ar, "unable to write to the device\n");
			return ret;
		}

		buffer += chunk;
		length -= chunk;
	}

	return 0;
}
/*
 * Probe for an ext3 filesystem: ext2 magic plus the has-journal compat
 * feature. On a match, reports the size (blocks << log-block-size, in
 * 1 KiB units) and returns 1; otherwise 0.
 */
static int ext3_image(const void *buf, unsigned long long *bytes)
{
	const struct ext3_super_block *sb = buf;

	if (sb->s_magic != __cpu_to_le16(EXT2_SUPER_MAGIC))
		return 0;
	if (!(sb->s_feature_compat &
	      __cpu_to_le32(EXT3_FEATURE_COMPAT_HAS_JOURNAL)))
		return 0;

	*bytes = (unsigned long long)__le32_to_cpu(sb->s_blocks_count)
		<< (10 + __le32_to_cpu(sb->s_log_block_size));
	return 1;
}
/*
 * Store 'val' at *adr in the byte order selected by the global
 * 'endian' setting. An unrecognized setting leaves *adr untouched
 * (same as the original switch with no default).
 */
static void write_val32(__u32 *adr, __u32 val)
{
	if (endian == ENDIAN_HOST)
		*adr = val;
	else if (endian == ENDIAN_LITTLE)
		*adr = __cpu_to_le32(val);
	else if (endian == ENDIAN_BIG)
		*adr = __cpu_to_be32(val);
}
/*
 * Execute firmware code at 'address' with *param via BMI_EXECUTE; the
 * target's result is written back through *param. Disallowed after
 * BMI_DONE has been sent.
 *
 * Fix vs. original: on a short response the function used to
 * `return ret` — but ret is 0 at that point, so a truncated response
 * was silently reported as success with *param left unset. It now
 * returns -EIO (matching the other BMI helpers).
 */
int ath10k_bmi_execute(struct ath10k *ar, u32 address, u32 *param)
{
	struct bmi_cmd cmd;
	union bmi_resp resp;
	u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.execute);
	u32 resplen = sizeof(resp.execute);
	int ret;

	if (ar->bmi.done_sent) {
		ath10k_warn("command disallowed\n");
		return -EBUSY;
	}

	ath10k_dbg(ATH10K_DBG_BMI,
		   "%s: (device: 0x%p, address: 0x%x, param: %d)\n",
		   __func__, ar, address, *param);

	cmd.id = __cpu_to_le32(BMI_EXECUTE);
	cmd.execute.addr = __cpu_to_le32(address);
	cmd.execute.param = __cpu_to_le32(*param);

	ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen);
	if (ret) {
		ath10k_warn("unable to read from the device\n");
		return ret;
	}

	if (resplen < sizeof(resp.execute)) {
		ath10k_warn("invalid execute response length (%d)\n",
			    resplen);
		return -EIO;
	}

	*param = __le32_to_cpu(resp.execute.result);

	return 0;
}
/*
 * Allocate a code-swap segment descriptor plus a DMA-coherent buffer
 * of (rounded-up) swap_bin_len bytes, and pre-fill the hardware info
 * record. Returns NULL on oversized input or allocation failure; the
 * descriptor itself is devm-managed and freed with the device.
 */
static struct ath10k_swap_code_seg_info *
ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
{
	struct ath10k_swap_code_seg_info *seg_info;
	dma_addr_t bus_addr;
	void *cpu_addr;

	/* Keep the DMA buffer size 2-byte aligned. */
	swap_bin_len = roundup(swap_bin_len, 2);

	if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
		ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
			   swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
		return NULL;
	}

	seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
	if (!seg_info)
		return NULL;

	cpu_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &bus_addr,
				      GFP_KERNEL);
	if (!cpu_addr) {
		ath10k_err(ar, "failed to allocate dma coherent memory\n");
		return NULL;
	}

	seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(bus_addr);
	seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
	seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
	seg_info->seg_hw_info.num_segs =
		__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
	seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
	seg_info->virt_address[0] = cpu_addr;
	seg_info->paddr[0] = bus_addr;

	return seg_info;
}
/*
 * Post a receive buffer on a copy-engine destination ring. Takes the
 * CE lock, wakes the device, fills the next descriptor and advances
 * the hardware write index. Returns 0 on success, -EIO when the ring
 * is full, or the ath10k_pci_wake() error.
 */
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	/* Snapshot ring indices under the lock. */
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	/* Device must be awake before touching CE registers. */
	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0; /* hardware fills in the received length */

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		/* Ring full. */
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}
/*
 * Fill in the microfs superblock at its padded offset inside the image
 * buffer and stamp the image CRC. All multi-byte fields are stored
 * little-endian. The CRC is computed over the image with s_crc zeroed,
 * then written last — do not reorder these steps.
 */
static void write_superblock(struct imgspec* const spec, char* base,
			     const __u64 sz)
{
	__u64 padding = superblock_offset(spec);
	/* Offset of the first child of the root directory: superblock
	 * plus the compression library's dentry-data area.
	 */
	__u64 offset = padding + sizeof(struct microfs_sb)
		+ spec->sp_lib->hl_info->li_dd_sz;

	struct microfs_sb* sb = (struct microfs_sb*)(base + padding);
	sb->s_magic = __cpu_to_le32(MICROFS_MAGIC);
	/* A size equal to the maximum image size is encoded as 0. */
	sb->s_size = sz == MICROFS_MAXIMGSIZE ? 0 : __cpu_to_le32(sz);
	sb->s_crc = 0; /* must be zero while the CRC is computed below */
	sb->s_blocks = __cpu_to_le32((sz - 1) / spec->sp_blksz + 1);
	sb->s_files = __cpu_to_le16(spec->sp_files);
	sb->s_blkshift = __cpu_to_le16(spec->sp_blkshift);
	if (sb->s_size == 0) {
		warning("this image is exactly %llu bytes (as big as is possible),"
			" this special case is not well tested", MICROFS_MAXIMGSIZE);
	}
	struct timespec nowish;
	if (clock_gettime(CLOCK_REALTIME, &nowish) < 0) {
		error("failed to get the current time: %s", strerror(errno));
	}
	sb->s_ctime = __cpu_to_le32(nowish.tv_sec);
	__u32 flags = spec->sp_lib->hl_info->li_id;
	sb->s_flags = __cpu_to_le32(flags);
	memcpy(sb->s_signature, MICROFS_SIGNATURE, sizeof(sb->s_signature));
	memcpy(sb->s_name, spec->sp_name, sizeof(sb->s_name));
	sb->s_root.i_mode = __cpu_to_le16(spec->sp_root->e_mode);
	sb->s_root.i_uid = __cpu_to_le16(spec->sp_root->e_uid);
	sb->s_root.i_gid = __cpu_to_le16(spec->sp_root->e_gid);
	i_setsize(&sb->s_root, spec->sp_root->e_size);
	/* An empty root directory is encoded as offset 0. */
	sb->s_root.i_offset = spec->sp_root->e_firstchild ?
		__cpu_to_le32(offset) : 0;

	/* With everything in place it is possible to calculate the
	 * crc32 checksum for the image.
	 */
	__u32 crc = hostprog_lib_zlib_crc32(base + padding, sz - padding);
	sb->s_crc = __cpu_to_le32(crc);

	message(VERBOSITY_0, "CRC: %x", crc);
}
int ath10k_htc_start(struct ath10k_htc *htc) { struct ath10k *ar = htc->ar; struct sk_buff *skb; int status = 0; struct ath10k_htc_msg *msg; skb = ath10k_htc_build_tx_ctrl_skb(htc->ar); if (!skb) return -ENOMEM; skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext)); memset(skb->data, 0, skb->len); msg = (struct ath10k_htc_msg *)skb->data; msg->hdr.message_id = __cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID); if (ar->hif.bus == ATH10K_BUS_SDIO) { /* Extra setup params used by SDIO */ msg->setup_complete_ext.flags = __cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN); msg->setup_complete_ext.max_msgs_per_bundled_recv = htc->max_msgs_per_htc_bundle; } ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n"); status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb); if (status) { kfree_skb(skb); return status; } if (ath10k_htc_pktlog_svc_supported(ar)) { status = ath10k_htc_pktlog_connect(ar); if (status) { ath10k_err(ar, "failed to connect to pktlog: %d\n", status); return status; } } return 0; }
static void priv_status_resp(struct usb_serial_port *port) { struct garmin_data *garmin_data_p = usb_get_serial_port_data(port); __le32 *pkt = (__le32 *)garmin_data_p->privpkt; pkt[0] = __cpu_to_le32(GARMIN_LAYERID_PRIVATE); pkt[1] = __cpu_to_le32(PRIV_PKTID_INFO_RESP); pkt[2] = __cpu_to_le32(12); pkt[3] = __cpu_to_le32(VERSION_MAJOR << 16 | VERSION_MINOR); pkt[4] = __cpu_to_le32(garmin_data_p->mode); pkt[5] = __cpu_to_le32(garmin_data_p->serial_num); send_to_tty(port, (__u8 *)pkt, 6 * 4); }
/*
 * Validate an on-disk sparse extent header and decode it into host
 * byte order. Returns false on a bad magic, an unsupported version,
 * unknown incompatible flags, a failed newline-detector check, or an
 * embedded-LBA flag without compression.
 */
static bool getSparseExtentHeader(SparseExtentHeader *dst,
				  const SparseExtentHeaderOnDisk *src)
{
	if (src->magicNumber != __cpu_to_le32(SPARSE_MAGICNUMBER)) {
		return false;
	}
	dst->version = __le32_to_cpu(src->version);
	if (dst->version > SPARSE_VERSION_INCOMPAT_FLAGS) {
		return false;
	}
	dst->flags = __le32_to_cpu(src->flags);
	/* Reject any incompatible flag except the two we understand
	 * (COMPRESSED and EMBEDDED_LBA are masked out of the check).
	 */
	if (dst->flags & (SPARSEFLAG_INCOMPAT_FLAGS &
			  ~SPARSEFLAG_COMPRESSED &
			  ~SPARSEFLAG_EMBEDDED_LBA)) {
		return false;
	}
	/* The newline-detector bytes guard against the file having been
	 * corrupted by text-mode (CR/LF) transfers.
	 */
	if (dst->flags & SPARSEFLAG_VALID_NEWLINE_DETECTOR) {
		if (src->singleEndLineChar != SPARSE_SINGLE_END_LINE_CHAR ||
		    src->nonEndLineChar != SPARSE_NON_END_LINE_CHAR ||
		    src->doubleEndLineChar1 != SPARSE_DOUBLE_END_LINE_CHAR1 ||
		    src->doubleEndLineChar2 != SPARSE_DOUBLE_END_LINE_CHAR2) {
			return false;
		}
	}
	/* Embedded LBA is allowed with compressed flag only. */
	if (dst->flags & SPARSEFLAG_EMBEDDED_LBA) {
		if (!(dst->flags & SPARSEFLAG_COMPRESSED)) {
			return false;
		}
	}
	/* These fields are not naturally aligned on disk, hence the
	 * unaligned-access helpers.
	 */
	dst->compressAlgorithm = getUnalignedLE16(&src->compressAlgorithm);
	dst->uncleanShutdown = src->uncleanShutdown;
	dst->reserved = 0;
	dst->capacity = getUnalignedLE64(&src->capacity);
	dst->grainSize = getUnalignedLE64(&src->grainSize);
	dst->descriptorOffset = getUnalignedLE64(&src->descriptorOffset);
	dst->descriptorSize = getUnalignedLE64(&src->descriptorSize);
	dst->numGTEsPerGT = __le32_to_cpu(src->numGTEsPerGT);
	dst->rgdOffset = __le64_to_cpu(src->rgdOffset);
	dst->gdOffset = __le64_to_cpu(src->gdOffset);
	dst->overHead = __le64_to_cpu(src->overHead);
	return true;
}
int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct athp_buf *skb; struct htt_cmd *cmd; int ret, size; if (!ar->hw_params.continuous_frag_desc) return 0; if (!htt->frag_desc.paddr) { ath10k_warn(ar, "invalid frag desc memory\n"); return -EINVAL; } size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg); skb = ath10k_htc_alloc_skb(ar, size); if (!skb) return -ENOMEM; mbuf_skb_put(skb->m, size); cmd = (struct htt_cmd *)mbuf_skb_data(skb->m); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG; cmd->frag_desc_bank_cfg.info = 0; cmd->frag_desc_bank_cfg.num_banks = 1; cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc); cmd->frag_desc_bank_cfg.bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr); cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0; cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx - 1); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n", ret); athp_freebuf(ar, &ar->buf_tx, skb); return ret; } return 0; }
/*
 * Write a 16-bit value into a 32-bit PLA register using a
 * read-modify-write over USB control transfers. 'index' may be
 * word-aligned only to 2 bytes; the shift/mask logic moves the data
 * and byte-enables into the correct half of the aligned 32-bit
 * register. Returns 0 on success or a negative usb_control_msg error.
 */
static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
{
	__le32 *tmp;
	u32 mask = 0xffff;
	u16 byen = BYTE_EN_WORD;
	u8 shift = index & 2; /* 2 when the word sits in the upper half */
	int ret;

	/* DMA-safe buffer: USB transfers must not use stack memory. */
	tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return -ENOMEM;

	data &= mask;

	if (shift) {
		/* Move the byte-enables, mask and data up to the high
		 * 16 bits and align the register index down to 4 bytes.
		 */
		byen <<= shift;
		mask <<= (shift * 8);
		data <<= (shift * 8);
		index &= ~3;
	}

	/* Read the current 32-bit register contents. */
	ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
			      RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
			      index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
	if (ret < 0)
		goto out3;

	/* Merge: keep the untouched half, insert the new word. */
	data |= __le32_to_cpu(*tmp) & ~mask;
	*tmp = __cpu_to_le32(data);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
			      index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
			      500);

out3:
	kfree(tmp);

	return ret;
}
int ath10k_bmi_done(struct ath10k *ar) { struct bmi_cmd cmd; u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.done); int ret; if (ar->bmi.done_sent) { ath10k_dbg(ATH10K_DBG_BMI, "%s skipped\n", __func__); return 0; } ar->bmi.done_sent = true; cmd.id = __cpu_to_le32(BMI_DONE); ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, NULL, NULL); if (ret) { ath10k_warn("unable to write to the device: %d\n", ret); return ret; } ath10k_dbg(ATH10K_DBG_CORE, "BMI done\n"); return 0; }
int ath10k_bmi_get_target_info(struct ath10k *ar, struct bmi_target_info *target_info) { struct bmi_cmd cmd; union bmi_resp resp; u32 cmdlen = sizeof(cmd.id) + sizeof(cmd.get_target_info); u32 resplen = sizeof(resp.get_target_info); int ret; ath10k_dbg(ar, ATH10K_DBG_BMI, "bmi get target info\n"); if (ar->bmi.done_sent) { ath10k_warn(ar, "BMI Get Target Info Command disallowed\n"); return -EBUSY; } cmd.id = __cpu_to_le32(BMI_GET_TARGET_INFO); ret = ath10k_hif_exchange_bmi_msg(ar, &cmd, cmdlen, &resp, &resplen); if (ret) { ath10k_warn(ar, "unable to get target info from device\n"); return ret; } if (resplen < sizeof(resp.get_target_info)) { ath10k_warn(ar, "invalid get_target_info response length (%d)\n", resplen); return -EIO; } target_info->version = __le32_to_cpu(resp.get_target_info.version); target_info->type = __le32_to_cpu(resp.get_target_info.type); return 0; }