/*
 * Decode an MCU multi-register read reply into the pending request
 * array usb->mcu.rp[].
 *
 * @data: reply payload
 * @len:  reply payload length in bytes
 *
 * Two reply layouts exist:
 *  - burst: a run of consecutive 32-bit values; register addresses are
 *    implicit, starting at the first requested register.
 *  - non-burst: explicit (register, value) pairs, 8 bytes each, which
 *    must come back in the same order as the requests.
 */
static void mt76x02u_multiple_mcu_reads(struct mt76_dev *dev, u8 *data,
					int len)
{
	struct mt76_usb *usb = &dev->usb;
	u32 reg, val;
	int i;

	if (usb->mcu.burst) {
		/* one 32-bit value per pending request */
		WARN_ON_ONCE(len / 4 != usb->mcu.rp_len);

		/* addresses are implicit: consecutive from the first request */
		reg = usb->mcu.rp[0].reg - usb->mcu.base;
		for (i = 0; i < usb->mcu.rp_len; i++) {
			val = get_unaligned_le32(data + 4 * i);
			usb->mcu.rp[i].reg = reg++;
			usb->mcu.rp[i].value = val;
		}
	} else {
		/* one (reg, value) pair per pending request */
		WARN_ON_ONCE(len / 8 != usb->mcu.rp_len);

		for (i = 0; i < usb->mcu.rp_len; i++) {
			reg = get_unaligned_le32(data + 8 * i) - usb->mcu.base;
			val = get_unaligned_le32(data + 8 * i + 4);

			/* replies must mirror the request order */
			WARN_ON_ONCE(usb->mcu.rp[i].reg != reg);
			usb->mcu.rp[i].value = val;
		}
	}
}
enum filetype is_fat_or_mbr(const unsigned char *sector, unsigned long *bootsec) { /* * bootsec can be used to return index of the first sector in the * first partition */ if (bootsec) *bootsec = 0; /* * Check record signature (always placed at offset 510 even if the * sector size is > 512) */ if (get_unaligned_le16(§or[BS_55AA]) != 0xAA55) return filetype_unknown; /* Check "FAT" string */ if ((get_unaligned_le32(§or[BS_FilSysType]) & 0xFFFFFF) == 0x544146) return filetype_fat; if ((get_unaligned_le32(§or[BS_FilSysType32]) & 0xFFFFFF) == 0x544146) return filetype_fat; if (bootsec) /* * This must be an MBR, so return the starting sector of the * first partition so we could check if there is a FAT boot * sector there */ *bootsec = get_unaligned_le16(§or[MBR_Table + MBR_StartSector]); return filetype_mbr; }
/*
 * nh_generic - portable implementation of the NH hash function
 *
 * Consumes the message in NH_MESSAGE_UNIT-byte chunks (four little-endian
 * 32-bit words per chunk) and maintains one 64-bit accumulator per pass.
 * Addition modulo 2^64 is commutative, so accumulating both products per
 * pass inside one loop yields the same sums as the fully unrolled form.
 */
static void nh_generic(const u32 *key, const u8 *message, size_t message_len,
		       __le64 hash[NH_NUM_PASSES])
{
	u64 acc[4] = { 0, 0, 0, 0 };
	int pass;

	BUILD_BUG_ON(NH_PAIR_STRIDE != 2);
	BUILD_BUG_ON(NH_NUM_PASSES != 4);

	while (message_len) {
		u32 w0 = get_unaligned_le32(message + 0);
		u32 w1 = get_unaligned_le32(message + 4);
		u32 w2 = get_unaligned_le32(message + 8);
		u32 w3 = get_unaligned_le32(message + 12);

		/* each pass uses its own 4-word slice of the key */
		for (pass = 0; pass < 4; pass++) {
			acc[pass] += (u64)(u32)(w0 + key[4 * pass]) *
				     (u32)(w2 + key[4 * pass + 2]);
			acc[pass] += (u64)(u32)(w1 + key[4 * pass + 1]) *
				     (u32)(w3 + key[4 * pass + 3]);
		}

		key += NH_MESSAGE_UNIT / sizeof(key[0]);
		message += NH_MESSAGE_UNIT;
		message_len -= NH_MESSAGE_UNIT;
	}

	for (pass = 0; pass < 4; pass++)
		hash[pass] = cpu_to_le64(acc[pass]);
}
int ieee80211_radiotap_iterator_init( struct ieee80211_radiotap_iterator *iterator, struct ieee80211_radiotap_header *radiotap_header, int max_length, const struct ieee80211_radiotap_vendor_namespaces *vns) { /* Linux only supports version 0 radiotap format */ if (radiotap_header->it_version) return -EINVAL; /* sanity check for allowed length and radiotap length field */ if (max_length < get_unaligned_le16(&radiotap_header->it_len)) return -EINVAL; iterator->_rtheader = radiotap_header; iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len); iterator->_arg_index = 0; iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present); iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header); iterator->_reset_on_ext = 0; iterator->_next_bitmap = &radiotap_header->it_present; iterator->_next_bitmap++; iterator->_vns = vns; iterator->current_namespace = &radiotap_ns; iterator->is_radiotap_ns = 1; /* find payload start allowing for extended bitmap(s) */ if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { while (get_unaligned_le32(iterator->_arg) & (1 << IEEE80211_RADIOTAP_EXT)) { iterator->_arg += sizeof(uint32_t); /* * check for insanity where the present bitmaps * keep claiming to extend up to or even beyond the * stated radiotap header length */ if ((unsigned long)iterator->_arg - (unsigned long)iterator->_rtheader > (unsigned long)iterator->_max_length) return -EINVAL; } iterator->_arg += sizeof(uint32_t); /* * no need to check again for blowing past stated radiotap * header length, because ieee80211_radiotap_iterator_next * checks it before it is dereferenced */ } iterator->this_arg = iterator->_arg; /* we are all initialized happily */ return 0; }
/*
 * Walk the chain of Extended Boot Records (EBRs) of a DOS extended
 * partition and append one entry per logical partition to @pd.
 *
 * @blk:       block device to read EBR sectors from
 * @pd:        partition list being filled, starting at pd->used_entries
 * @partition: primary-table entry describing the extended partition;
 *             its first_sec is the base for EBR link offsets
 * @signature: MBR disk signature; if non-zero a "SSSSSSSS-PP" PARTUUID
 *             is synthesized for each logical partition
 */
static void dos_extended_partition(struct block_device *blk, struct partition_desc *pd,
		struct partition *partition, uint32_t signature)
{
	uint8_t *buf = dma_alloc(SECTOR_SIZE);
	uint32_t ebr_sector = partition->first_sec;
	/* partition table lives at a fixed offset inside the EBR sector */
	struct partition_entry *table = (struct partition_entry *)&buf[0x1be];
	/* logical partitions are conventionally numbered from 5 */
	unsigned partno = 5;

	while (pd->used_entries < ARRAY_SIZE(pd->parts)) {
		int rc, i;
		int n = pd->used_entries;

		dev_dbg(blk->dev, "expect EBR in sector %x\n", ebr_sector);

		rc = block_read(blk, buf, ebr_sector, 1);
		if (rc != 0) {
			dev_err(blk->dev, "Cannot read EBR partition table\n");
			goto out;
		}

		/* sanity checks */
		if (buf[0x1fe] != 0x55 || buf[0x1ff] != 0xaa) {
			dev_err(blk->dev, "sector %x doesn't contain an EBR signature\n", ebr_sector);
			goto out;
		}
		/* an EBR may only use the first two of the four table slots */
		for (i = 0x1de; i < 0x1fe; ++i)
			if (buf[i]) {
				dev_err(blk->dev, "EBR's third or fourth partition non-empty\n");
				goto out;
			}
		/* /sanity checks */

		/* the first entry defines the extended partition */
		pd->parts[n].first_sec = ebr_sector +
			get_unaligned_le32(&table[0].partition_start);
		pd->parts[n].size = get_unaligned_le32(&table[0].partition_size);
		pd->parts[n].dos_partition_type = table[0].type;
		if (signature)
			sprintf(pd->parts[n].partuuid, "%08x-%02u",
				signature, partno);
		pd->used_entries++;
		partno++;

		/* the second entry defines the start of the next ebr if != 0;
		 * note the link is relative to the extended partition start,
		 * not to the current EBR */
		if (get_unaligned_le32(&table[1].partition_start))
			ebr_sector = partition->first_sec +
				get_unaligned_le32(&table[1].partition_start);
		else
			break;
	}
out:
	dma_free(buf);
	return;
}
/*
 * Format the DSM command dwords (range count and attributes) into the
 * trace sequence and return a pointer to the formatted string.
 */
static const char *nvme_trace_dsm(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u32 nr = get_unaligned_le32(cdw10);
	u32 attributes = get_unaligned_le32(cdw10 + 4);

	trace_seq_printf(p, "nr=%u, attributes=%u", nr, attributes);
	trace_seq_putc(p, 0);

	return ret;
}
static int mtdsplit_parse_lzma(struct mtd_info *master, const struct mtd_partition **pparts, struct mtd_part_parser_data *data) { struct lzma_header hdr; size_t hdr_len, retlen; size_t rootfs_offset; u32 t; struct mtd_partition *parts; int err; hdr_len = sizeof(hdr); err = mtd_read(master, 0, hdr_len, &retlen, (void *) &hdr); if (err) return err; if (retlen != hdr_len) return -EIO; /* verify LZMA properties */ if (hdr.props[0] >= (9 * 5 * 5)) return -EINVAL; t = get_unaligned_le32(&hdr.props[1]); if (!is_power_of_2(t)) return -EINVAL; t = get_unaligned_le32(&hdr.size_high); if (t) return -EINVAL; err = mtd_find_rootfs_from(master, master->erasesize, master->size, &rootfs_offset, NULL); if (err) return err; parts = kzalloc(LZMA_NR_PARTS * sizeof(*parts), GFP_KERNEL); if (!parts) return -ENOMEM; parts[0].name = KERNEL_PART_NAME; parts[0].offset = 0; parts[0].size = rootfs_offset; parts[1].name = ROOTFS_PART_NAME; parts[1].offset = rootfs_offset; parts[1].size = master->size - rootfs_offset; *pparts = parts; return LZMA_NR_PARTS; }
/*
 * ieee80211_radiotap_iterator_init - set up a radiotap argument iterator
 *
 * @iterator:        iterator state to initialize
 * @radiotap_header: start of the (possibly unaligned) radiotap header
 * @max_length:      buffer length available from @radiotap_header
 * @vns:             optional vendor namespace table
 *
 * Returns 0 on success or -EINVAL for a malformed header.
 */
int ieee80211_radiotap_iterator_init(
	struct ieee80211_radiotap_iterator *iterator,
	struct ieee80211_radiotap_header *radiotap_header,
	int max_length,
	const struct ieee80211_radiotap_vendor_namespaces *vns)
{
	/* only the version-0 radiotap format is handled */
	if (radiotap_header->it_version)
		return -EINVAL;

	/* the stated header length must fit inside the buffer */
	if (max_length < get_unaligned_le16(&radiotap_header->it_len))
		return -EINVAL;

	iterator->_rtheader = radiotap_header;
	iterator->_max_length = get_unaligned_le16(&radiotap_header->it_len);
	iterator->_arg_index = 0;
	iterator->_bitmap_shifter = get_unaligned_le32(&radiotap_header->it_present);
	iterator->_arg = (uint8_t *)radiotap_header + sizeof(*radiotap_header);
	iterator->_reset_on_ext = 0;
	iterator->_next_bitmap = &radiotap_header->it_present;
	iterator->_next_bitmap++;
	iterator->_vns = vns;
	iterator->current_namespace = &radiotap_ns;
	iterator->is_radiotap_ns = 1;

	/* find the payload start, skipping over any extended bitmaps */
	if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
		while (get_unaligned_le32(iterator->_arg) &
		       (1 << IEEE80211_RADIOTAP_EXT)) {
			iterator->_arg += sizeof(uint32_t);
			/*
			 * guard against bitmaps claiming to extend up to or
			 * beyond the stated radiotap header length
			 */
			if ((unsigned long)iterator->_arg -
			    (unsigned long)iterator->_rtheader >
			    (unsigned long)iterator->_max_length)
				return -EINVAL;
		}
		/* step past the final (non-extending) bitmap word */
		iterator->_arg += sizeof(uint32_t);
	}

	iterator->this_arg = iterator->_arg;

	return 0;
}
/*
 * mac80211 bss_info_changed callback: push the updated BSS configuration
 * (BSSID, slot time, beaconing) to the MT7603 hardware.  All hardware
 * access is serialized under dev->mutex.
 */
static void mt7603_bss_info_changed(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_bss_conf *info,
				    u32 changed)
{
	struct mt7603_dev *dev = hw->priv;
	struct mt7603_vif *mvif = (struct mt7603_vif *) vif->drv_priv;

	mutex_lock(&dev->mutex);

	if (changed & BSS_CHANGED_ASSOC) {
		/* BSSID is split into a 32-bit low and 16-bit high register */
		mt76_wr(dev, MT_BSSID0(mvif->idx),
			get_unaligned_le32(info->bssid));
		mt76_wr(dev, MT_BSSID1(mvif->idx),
			get_unaligned_le16(info->bssid + 4));
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		/* 9 us short slot, 20 us long slot */
		dev->slottime = info->use_short_slot ? 9 : 20;
		mt7603_mac_set_timing(dev);
	}

	if (changed & (BSS_CHANGED_BEACON_ENABLED | BSS_CHANGED_BEACON_INT)) {
		/* interval 0 when beaconing is disabled */
		int beacon_int = !!info->enable_beacon * info->beacon_int;

		/* keep the pre-TBTT tasklet from racing the timer update */
		tasklet_disable(&dev->pre_tbtt_tasklet);
		mt7603_beacon_set_timer(dev, mvif->idx, beacon_int);
		tasklet_enable(&dev->pre_tbtt_tasklet);
	}

	mutex_unlock(&dev->mutex);
}
/*
 * Program BSSID slot @idx (0-7) with the 6-byte address @addr: the low
 * 32 bits go to the BSSID_L register, the high 16 bits into the address
 * field of BSSID_H.
 */
void mt76x2_mac_set_bssid(struct mt76x2_dev *dev, u8 idx, const u8 *addr)
{
	u32 addr_lo = get_unaligned_le32(addr);
	u16 addr_hi = get_unaligned_le16(addr + 4);

	idx &= 7;

	mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), addr_lo);
	mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR,
		       addr_hi);
}
/*
 * Verify the firmware image checksum.
 *
 * Sums the file as little-endian 32-bit words, excluding the trailing
 * CHECKSUM_BYTES_NUM bytes where the expected checksum is stored, and
 * compares the two.  Returns true on a match.
 */
static bool iwmct_checksum(struct iwmct_priv *priv)
{
	struct iwmct_parser *parser = &priv->parser;
	__le32 *file = (__le32 *)parser->file;
	int i, pad, steps;
	u32 accum = 0;
	u32 checksum;
	u32 mask = 0xffffffff;

	/* whole 32-bit words (steps) and leftover bytes (pad) in the
	 * checksummed region */
	pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
	steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;

	LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);

	for (i = 0; i < steps; i++)
		accum += le32_to_cpu(file[i]);

	if (pad) {
		/*
		 * Mask the partial trailing word.  NOTE(review): the mask
		 * keeps the HIGH-order bytes of the CPU value, i.e. the
		 * later bytes of the little-endian word on disk — confirm
		 * this matches the image format's tail-byte convention.
		 */
		mask <<= 8 * (4 - pad);
		accum += le32_to_cpu(file[steps]) & mask;
	}

	/* expected checksum stored in the trailing bytes of the file */
	checksum = get_unaligned_le32((__le32 *)(parser->file +
			parser->file_size - CHECKSUM_BYTES_NUM));

	LOG_INFO(priv, FW_DOWNLOAD,
		 "compare checksum accum=0x%x to checksum=0x%x\n",
		 accum, checksum);

	return checksum == accum;
}
/*
 * Process one RX DMA segment: strip the DMA header, the RXWI descriptor
 * and the trailing FCE info word, then hand the 802.11 frame to mac80211.
 *
 * @data:    start of the segment (DMA header first)
 * @seg_len: total segment length in bytes
 * @p:       page backing the segment, consumed by the skb on success
 */
static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data,
				   u32 seg_len, struct page *p)
{
	struct sk_buff *skb;
	struct mt7601u_rxwi *rxwi;
	u32 fce_info, truesize = seg_len;

	/* DMA_INFO field at the beginning of the segment contains only some of
	 * the information, we need to read the FCE descriptor from the end.
	 */
	fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN);
	seg_len -= MT_FCE_INFO_LEN;

	/* skip the DMA header... */
	data += MT_DMA_HDR_LEN;
	seg_len -= MT_DMA_HDR_LEN;

	/* ...and the RXWI descriptor that precedes the frame itself */
	rxwi = (struct mt7601u_rxwi *) data;
	data += sizeof(struct mt7601u_rxwi);
	seg_len -= sizeof(struct mt7601u_rxwi);

	/* fields the hardware is expected to leave zero */
	if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2]))
		dev_err_once(dev->dev, "Error: RXWI zero fields are set\n");
	if (unlikely(MT76_GET(MT_RXD_INFO_TYPE, fce_info)))
		dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n");

	trace_mt_rx(dev, rxwi, fce_info);

	skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p);
	if (!skb)
		return;

	ieee80211_rx_ni(dev->hw, skb);
}
/*
 * Format the fields of an NVMe read/write command (starting LBA, block
 * count, control flags, DSM hints and reference tag) into the trace
 * sequence and return a pointer to the formatted string.
 */
static const char *nvme_trace_read_write(struct trace_seq *p, u8 *cdw10)
{
	const char *ret = trace_seq_buffer_ptr(p);
	u64 start_lba = get_unaligned_le64(cdw10);
	u16 nlb = get_unaligned_le16(cdw10 + 8);
	u16 ctrl = get_unaligned_le16(cdw10 + 10);
	u32 dsm = get_unaligned_le32(cdw10 + 12);
	u32 ref = get_unaligned_le32(cdw10 + 16);

	trace_seq_printf(p, "slba=%llu, len=%u, ctrl=0x%x, dsmgmt=%u, reftag=%u",
			 start_lba, nlb, ctrl, dsm, ref);
	trace_seq_putc(p, 0);

	return ret;
}
/*
 * Write the firmware image word-by-word with read-back verification.
 *
 * Each 32-bit word is written and read back; on mismatch the bootloader
 * is rebooted and the write retried up to 3 times.  Returns 0 on success
 * or -ENXIO once the retries for a word are exhausted.
 *
 * Fix: removed the redundant `continue` that was the last statement of
 * the retry loop (a no-op).
 *
 * NOTE(review): @addr is u16 while the loop bound is len / 4 — assumes
 * images never exceed 4 * 65536 bytes; confirm against the device spec.
 */
static int fw_write_image(const u8 *data, size_t len)
{
	u16 addr = 0;

	for (addr = 0; addr < (len / 4); addr++, data += 4) {
		u32 val = get_unaligned_le32(data);
		u32 verify_val;
		int retries = 3;

		while (retries--) {
			flash_writel(addr, val);
			verify_val = flash_readl(addr);
			if (val == verify_val)
				break;

			pr_err("tsp fw.: mismatch @ addr 0x%x: 0x%x != 0x%x\n",
			       addr, verify_val, val);
			hw_reboot_bootloader();
		}
		/* retries underflows to -1 only when all attempts failed */
		if (retries < 0)
			return -ENXIO;
	}

	return 0;
}
/*
 * Compute the TKIP Michael MIC over @data using @key and the header
 * fields of @hdr, writing the 8-byte MIC to @mic.
 *
 * The payload is consumed as little-endian 32-bit words; the trailing
 * 0..3 bytes are padded with 0x5a and a final all-zero block as the
 * Michael algorithm requires.
 */
void michael_mic(const u8 *key, struct ieee80211_hdr *hdr,
		 const u8 *data, size_t data_len, u8 *mic)
{
	struct michael_mic_ctx mctx;
	size_t i, nblocks, rem;
	u32 tail;

	michael_mic_hdr(&mctx, key, hdr);

	nblocks = data_len / 4;
	rem = data_len % 4;

	for (i = 0; i < nblocks; i++)
		michael_block(&mctx, get_unaligned_le32(data + i * 4));

	/* pad: 0x5a marker followed by the remaining bytes, high to low */
	tail = 0x5a;
	while (rem > 0) {
		rem--;
		tail = (tail << 8) | data[nblocks * 4 + rem];
	}

	michael_block(&mctx, tail);
	michael_block(&mctx, 0);

	put_unaligned_le32(mctx.l, mic);
	put_unaligned_le32(mctx.r, mic + 4);
}
/*
 * Read a 32-bit register over the USB control pipe.
 * Returns ~0 (all-ones) if the vendor request fails.
 *
 * should be called with usb_ctrl_mtx locked
 */
static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr)
{
	struct mt76_usb *usb = &dev->usb;
	u32 data = ~0;		/* failure sentinel, returned on short transfer */
	u16 offset;
	int ret;
	u8 req;

	/* the type bits of the address select the vendor request to use */
	switch (addr & MT_VEND_TYPE_MASK) {
	case MT_VEND_TYPE_EEPROM:
		req = MT_VEND_READ_EEPROM;
		break;
	case MT_VEND_TYPE_CFG:
		req = MT_VEND_READ_CFG;
		break;
	default:
		req = MT_VEND_MULTI_READ;
		break;
	}
	/* the remaining bits form the register offset */
	offset = addr & ~MT_VEND_TYPE_MASK;

	ret = __mt76u_vendor_request(dev, req,
				     USB_DIR_IN | USB_TYPE_VENDOR,
				     0, offset, usb->data, sizeof(__le32));
	if (ret == sizeof(__le32))
		data = get_unaligned_le32(usb->data);
	trace_usb_reg_rr(dev, addr, data);

	return data;
}
/**
 * ath_hw_set_bssid_mask - filter out bssids we listen to
 *
 * @common: the ath_common struct for the device.
 *
 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
 * which bits of the interface's MAC address should be looked at when trying
 * to decide which packets to ACK. In station mode and AP mode with a single
 * BSS every bit matters since we lock to only one BSS. In AP mode with
 * multiple BSSes (virtual interfaces) not every bit matters because hw must
 * accept frames for all BSSes and so we tweak some bits of our mac address
 * in order to have multiple BSSes.
 *
 * NOTE: This is a simple filter and does *not* filter out all
 * relevant frames. Some frames that are not for us might get ACKed from us
 * by PCU because they just match the mask.
 *
 * When handling multiple BSSes you can get the BSSID mask by computing the
 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
 *
 * When you do this you are essentially computing the common bits of all your
 * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
 * the MAC address to obtain the relevant bits and compare the result with
 * (frame's BSSID & mask) to see if they match.
 *
 * Simple example: on your card you have two BSSes you have created with
 * BSSID-01 and BSSID-02. Let's assume BSSID-01 will not use the MAC address.
 * There is another BSSID-03 but you are not part of it.
 * For simplicity's sake, assuming only 4 bits for a mac address and for
 * BSSIDs you can then have:
 *
 *                  \
 * MAC:        0001 |
 * BSSID-01:   0100 | --> Belongs to us
 * BSSID-02:   1001 |
 *                  /
 * -------------------
 * BSSID-03:   0110  | --> External
 * -------------------
 *
 * Our bssid_mask would then be:
 *
 *             On loop iteration for BSSID-01:
 *             ~(0001 ^ 0100)  -> ~(0101)
 *                             ->   1010
 *             bssid_mask      =    1010
 *
 *             On loop iteration for BSSID-02:
 *             bssid_mask &= ~(0001 ^ 1001)
 *             bssid_mask =   (1010) & ~(0001 ^ 1001)
 *             bssid_mask =   (1010) & ~(1000)
 *             bssid_mask =   (1010) &  (0111)
 *             bssid_mask =    0010
 *
 * A bssid_mask of 0010 means "only pay attention to the second least
 * significant bit". This is because it is the only bit common
 * amongst the MAC and all BSSIDs we support. To find out what the real
 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
 * or our MAC address (we assume the hardware uses the MAC address).
 *
 * Now, suppose there's an incoming frame for BSSID-03:
 *
 * IFRAME-01:  0110
 *
 * An easy eye-inspection of this already should tell you that this frame
 * will not pass our check. This is because the bssid_mask tells the
 * hardware to only look at the second least significant bit and the
 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
 * as 1, which does not match 0.
 *
 * So with IFRAME-01 we *assume* the hardware will do:
 *
 *     allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
 *  --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
 *  --> allow = (0010) == 0000 ? 1 : 0;
 *  --> allow = 0
 *
 * Let's now test a frame that should work:
 *
 * IFRAME-02:  0001 (we should allow)
 *
 *     allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
 *  --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
 *  --> allow = (0000) == (0000)
 *  --> allow = 1
 *
 * Other examples:
 *
 * IFRAME-03:  0100 --> allowed
 * IFRAME-04:  1001 --> allowed
 * IFRAME-05:  1101 --> allowed but it's not for us!!!
* */ void ath_hw_setbssidmask(struct ath_common *common) { void *ah = common->ah; REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL); REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU); }
/*
 * Program the MAC address associated with keycache slot @entry.
 * Returns false if @entry is out of range.
 *
 * (definition continues beyond this excerpt)
 */
static bool ath_hw_keysetmac(struct ath_common *common,
			     u16 entry, const u8 *mac)
{
	u32 macHi, macLo;
	u32 unicast_flag = AR_KEYTABLE_VALID;
	void *ah = common->ah;

	if (entry >= common->keymax) {
		ath_err(common, "keycache entry %u out of range\n", entry);
		return false;
	}

	if (mac != NULL) {
		/*
		 * AR_KEYTABLE_VALID indicates that the address is a unicast
		 * address, which must match the transmitter address for
		 * decrypting frames.
		 * Not setting this bit allows the hardware to use the key
		 * for multicast frame decryption.
		 */
		if (mac[0] & 0x01)
			unicast_flag = 0;

		/* pack the 48-bit address, shifted right by one bit, into
		 * the 32-bit low / 16-bit high register pair */
		macLo = get_unaligned_le32(mac);
		macHi = get_unaligned_le16(mac + 4);
		macLo >>= 1;
		macLo |= (macHi & 1) << 31;
		macHi >>= 1;
	} else {
/**
 * ath5k_hw_set_associd - Set BSSID for association
 *
 * @ah: The &struct ath5k_hw
 *
 * Programs the current BSSID and association id (taken from the attached
 * ath_common state) which triggers the "SME Join" operation
 */
void ath5k_hw_set_associd(struct ath5k_hw *ah)
{
	struct ath_common *common = ath5k_hw_common(ah);
	u16 tim_offset = 0;

	/*
	 * Set simple BSSID mask on 5212
	 */
	if (ah->ah_version == AR5K_AR5212)
		ath_hw_setbssidmask(common);

	/*
	 * Set BSSID which triggers the "SME Join" operation
	 */
	ath5k_hw_reg_write(ah,
			   get_unaligned_le32(common->curbssid),
			   AR5K_BSS_ID0);
	ath5k_hw_reg_write(ah,
			   get_unaligned_le16(common->curbssid + 4) |
			   ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
			   AR5K_BSS_ID1);

	if (common->curaid == 0) {
		/* not associated: no PS-Poll processing needed */
		ath5k_hw_disable_pspoll(ah);
		return;
	}

	AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
			    tim_offset ? tim_offset + 4 : 0);

	ath5k_hw_enable_pspoll(ah, NULL, 0);
}
/*
 * mkpiggy-style helper: read the trailing 32-bit uncompressed length of a
 * compressed kernel image and emit an assembly stub that embeds the file.
 *
 * Fixes:
 *  - a failed fseek() was only reported and execution continued, so the
 *    subsequent fread() consumed data from the wrong position; it is now
 *    fatal.
 *  - the FILE handle leaked on the fseek/fread error paths.
 *  - ilen is a signed long; cast for the "%lu" format.
 */
int main(int argc, char *argv[])
{
	uint32_t olen;
	long ilen;
	unsigned long offs;
	FILE *f;

	if (argc < 2) {
		fprintf(stderr, "Usage: %s compressed_file\n", argv[0]);
		return 1;
	}

	/* Get the information for the compressed kernel image first */
	f = fopen(argv[1], "r");
	if (!f) {
		perror(argv[1]);
		return 1;
	}

	/* the uncompressed length lives in the last 4 bytes of the file */
	if (fseek(f, -4L, SEEK_END)) {
		perror(argv[1]);
		fclose(f);
		return 1;
	}

	if (fread(&olen, sizeof(olen), 1, f) != 1) {
		perror(argv[1]);
		fclose(f);
		return 1;
	}

	ilen = ftell(f);
	olen = get_unaligned_le32(&olen);
	fclose(f);

	/*
	 * Estimate a safe extraction offset: worst-case expansion plus
	 * decompressor slack, rounded up to a page.
	 */
	offs = (olen > ilen) ? olen - ilen : 0;
	offs += olen >> 12;		/* additional 1/4096 slack */
	offs += 64 * 1024 + 128;	/* decompressor scratch + alignment */
	offs = (offs + 4095) & ~4095;	/* round to a 4K boundary */

	printf(".section \".rodata..compressed\",\"a\",@progbits\n");
	printf(".globl z_input_len\n");
	printf("z_input_len = %lu\n", (unsigned long)ilen);
	printf(".globl z_output_len\n");
	printf("z_output_len = %lu\n", (unsigned long)olen);
	printf(".globl z_extract_offset\n");
	printf("z_extract_offset = 0x%lx\n", offs);
	printf(".globl z_extract_offset_negative\n");
	printf("z_extract_offset_negative = -0x%lx\n", offs);

	printf(".globl input_data, input_data_end\n");
	printf("input_data:\n");
	printf(".incbin \"%s\"\n", argv[1]);
	printf("input_data_end:\n");

	return 0;
}
/**
 * Guess the size of the disk from the four primary partition table
 * entries: the largest "start + size" end sector wins.
 * @param dev device to create partitions for
 * @param table partition table
 * @return sector count (0 if no entry is populated)
 */
static uint64_t disk_guess_size(struct device_d *dev,
				struct partition_entry *table)
{
	uint64_t max_end = 0;
	int n;

	for (n = 0; n < 4; n++) {
		uint64_t start = get_unaligned_le32(&table[n].partition_start);
		uint64_t end;

		if (!start)
			continue;	/* slot unused */

		end = start + get_unaligned_le32(&table[n].partition_size);
		if (end > max_end)
			max_end = end;
	}

	return max_end;
}
/*
 * pmbr_part_valid(): Check for EFI partition signature
 *
 * Returns: 1 if EFI GPT partition type is found.
 */
static int pmbr_part_valid(struct partition *part)
{
	if (part->sys_ind != EFI_PMBR_OSTYPE_EFI_GPT)
		return 0;

	/* a protective MBR entry must start at LBA 1 */
	return get_unaligned_le32(&part->start_sect) == 1UL ? 1 : 0;
}
/*
 * Trace an outgoing MCU message together with a simple XOR checksum of
 * its payload, folded over complete little-endian 32-bit words.
 */
static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev,
					    struct sk_buff *skb,
					    bool need_resp)
{
	u32 csum = 0;
	u32 off;

	/* XOR whole 32-bit words only; trailing bytes are ignored */
	for (off = 0; off + 4 <= skb->len; off += 4)
		csum ^= get_unaligned_le32(skb->data + off);

	trace_mt_mcu_msg_send(dev, skb, csum, need_resp);
}
/*
 * Fill the 16-word ChaCha state: the "expand 32-byte k" constant words,
 * the eight key words from @ctx, and four little-endian words from @iv.
 */
void crypto_chacha_init(u32 *state, struct chacha_ctx *ctx, u8 *iv)
{
	static const u32 sigma[4] = {
		0x61707865, 0x3320646e, 0x79622d32, 0x6b206574,
	};
	int i;

	for (i = 0; i < 4; i++)
		state[i] = sigma[i];
	for (i = 0; i < 8; i++)
		state[4 + i] = ctx->key[i];
	for (i = 0; i < 4; i++)
		state[12 + i] = get_unaligned_le32(iv + 4 * i);
}
/*
 * Return the DMA address of transmit buffer @idx within @tfd.
 *
 * The TB stores a 32-bit low address in tb->lo plus 4 extra address bits
 * in the low nibble of hi_n_len; the extra bits are merged in only when
 * dma_addr_t is wider than 32 bits.
 */
static inline dma_addr_t iwl_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
	dma_addr_t addr = get_unaligned_le32(&tb->lo);

	if (sizeof(dma_addr_t) > sizeof(u32))
		/*
		 * shift by 16 twice so the expression never shifts by the
		 * full type width, which would be undefined when
		 * dma_addr_t is only 32 bits wide
		 */
		addr |=
		    ((dma_addr_t)(le16_to_cpu(tb->hi_n_len) & 0xF) << 16) << 16;

	return addr;
}
/*
 * Decompress one squashfs data block with the single-threaded LZMA
 * decoder.  The compressed data, possibly spread over @b buffer_heads,
 * is gathered into stream->input, decompressed into stream->output and
 * scattered into the destination @buffer pages.
 *
 * Returns the uncompressed size on success or -EIO on corrupt data.
 */
static int lzma_uncompress(struct squashfs_sb_info *msblk, void **buffer,
	struct buffer_head **bh, int b, int offset, int length, int srclength,
	int pages)
{
	struct squashfs_lzma *stream = msblk->stream;
	void *buff = stream->input;
	int avail, i, bytes = length, res;

	/* the decoder buffers are shared state; serialize all users */
	mutex_lock(&lzma_mutex);

	/* gather the fragmented compressed data into one contiguous buffer */
	for (i = 0; i < b; i++) {
		wait_on_buffer(bh[i]);
		if (!buffer_uptodate(bh[i]))
			goto block_release;

		avail = min(bytes, msblk->devblksize - offset);
		memcpy(buff, bh[i]->b_data + offset, avail);
		buff += avail;
		bytes -= avail;
		offset = 0;	/* only the first block has an offset */
		put_bh(bh[i]);
	}

	lzma_error = 0;
	res = unlzma(stream->input, length, NULL, NULL, stream->output, NULL,
		error);
	if (res || lzma_error)
		goto failed;

	/* uncompressed size is stored in the LZMA header (5 byte offset) */
	res = bytes = get_unaligned_le32(stream->input + 5);

	/* scatter the decompressed data into the destination pages */
	for (i = 0, buff = stream->output; bytes && i < pages; i++) {
		avail = min_t(int, bytes, PAGE_CACHE_SIZE);
		memcpy(buffer[i], buff, avail);
		buff += avail;
		bytes -= avail;
	}

	/* leftover bytes mean the output did not fit: corrupt data */
	if (bytes)
		goto failed;

	mutex_unlock(&lzma_mutex);
	return res;

block_release:
	/* release the buffer_heads not yet consumed by the gather loop */
	for (; i < b; i++)
		put_bh(bh[i]);

failed:
	mutex_unlock(&lzma_mutex);

	ERROR("lzma decompression failed, data probably corrupt\n");
	return -EIO;
}
/*
 * Setting the seed allows arbitrary accumulators and flexible XOR policy
 * If your algorithm starts with ~0, then XOR with ~0 before you set
 * the seed.
 *
 * The key must be exactly sizeof(mctx->key) bytes and is read as an
 * unaligned little-endian 32-bit value.
 */
static int chksum_setkey(struct crypto_shash *tfm, const u8 *key,
			 unsigned int keylen)
{
	struct chksum_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen == sizeof(mctx->key)) {
		mctx->key = get_unaligned_le32(key);
		return 0;
	}

	crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
	return -EINVAL;
}
/*
 * Validate the hwinfo table: bounds-check its self-declared size against
 * the available buffer and verify the trailing CRC before walking it.
 *
 * Returns 0/positive from hwinfo_db_walk() on success, -EINVAL on a
 * malformed or corrupt table.
 *
 * Fixes:
 *  - guard against size < sizeof(u32), which previously underflowed in
 *    "size -= sizeof(u32)".
 *  - the stored CRC was fetched twice with get_unaligned_le32(); it is
 *    now read once into a local.
 */
static int hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db,
			      u32 len)
{
	u32 size, crc, stored;

	size = le32_to_cpu(db->size);
	if (size > len) {
		nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len);
		return -EINVAL;
	}
	if (size < sizeof(u32)) {
		nfp_err(cpp, "Truncated hwinfo table (%u bytes)\n", size);
		return -EINVAL;
	}

	/* the last 32-bit word of the table holds the CRC of the rest */
	size -= sizeof(u32);
	crc = crc32_posix(db, size);
	stored = get_unaligned_le32(db->start + size);
	if (crc != stored) {
		nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n",
			crc, stored);
		return -EINVAL;
	}

	return hwinfo_db_walk(cpp, db, size);
}
/*
 * Initialize the Michael MIC state (l, r) from the 8-byte TKIP key and
 * mix in the pseudo-header built from the frame's DA, SA and priority.
 */
static void michael_mic_hdr(struct michael_mic_ctx *mctx, const u8 *key,
			    struct ieee80211_hdr *hdr)
{
	u8 *da, *sa, tid;

	da = ieee80211_get_DA(hdr);
	sa = ieee80211_get_SA(hdr);

	/* QoS data frames mix in their TID; all others use priority 0 */
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = *ieee80211_get_qos_ctl(hdr) &
			IEEE80211_QOS_CTL_TID_MASK;
	else
		tid = 0;

	/* seed the two 32-bit halves of the state from the key */
	mctx->l = get_unaligned_le32(key);
	mctx->r = get_unaligned_le32(key + 4);

	/*
	 * Feed DA (6 bytes), SA (6 bytes) and the priority as little-endian
	 * 32-bit blocks: DA[0..3], DA[4..5]|SA[0..1], SA[2..5], then tid.
	 */
	michael_block(mctx, get_unaligned_le32(da));
	michael_block(mctx, get_unaligned_le16(&da[4]) |
			    (get_unaligned_le16(sa) << 16));
	michael_block(mctx, get_unaligned_le32(&sa[2]));
	michael_block(mctx, tid);
}
/*
 * Allocate the interpreter's scratch buffer, sized from the
 * firmware_vram_usage entry of the ROM's master data table.
 * Returns 0 on success or -ATB_ERR if the allocation fails.
 */
static int scratch_mem_setup(struct atombios *atb)
{
	struct master_data_tbl *data_tbl;
	struct firmware_vram_usage *usage;
	/* use rev 1.1 table layout */
	u16 of;
	size_t bytes;
	u64 start_addr;
	u16 sz;

	/* locate the firmware_vram_usage entry via the master data table */
	of = get_unaligned_le16(&atb->hdr->master_data_tbl_of);
	data_tbl = atb->adev.rom + of;
	of = get_unaligned_le16(&data_tbl->list.firmware_vram_usage);
	usage = atb->adev.rom + of;

	start_addr = (u64)get_unaligned_le32(&usage->info.start_addr);
	/* NOTE(review): content rev >= 4 appears to store the address in
	 * KB units — confirm against the table format documentation */
	if (usage->hdr.tbl_content_rev >= 4)
		start_addr = start_addr * 1024;
	sz = get_unaligned_le16(&usage->info.sz);

	/*
	 * This table defines a large data buffer for the interpreter.
	 * It's actually in kernel RAM. The "vram" name comes from the
	 * fact that this large data buffer is in vram when running
	 * in POST real mode.
	 */
	dev_info(atb->adev.dev, "atombios: firmware_(v)ram_usage (0x%04x) "
		 "revision %u.%u\n", of, usage->hdr.tbl_fmt_rev,
		 usage->hdr.tbl_content_rev);
	dev_info(atb->adev.dev, "atombios: firmware_(v)ram_usage address is"
		 " 0x%016llx\n", start_addr);
	dev_info(atb->adev.dev, "atombios: firmware_(v)ram_usage size is "
		 "%zukB\n", (size_t)sz);

	if (sz != 0)
		bytes = sz * 1024;
	else
		bytes = 20 * 1024; /* quirk: get 20kB if zero or not defined */

	atb->scratch = kzalloc(bytes, GFP_KERNEL);
	if (!atb->scratch) {
		dev_err(atb->adev.dev, "atombios: unable to allocate scratch "
			"kernel memory\n");
		return -ATB_ERR;
	}
	atb->scratch_sz = bytes;

	dev_info(atb->adev.dev, "atombios: %zuB allocated for scratch "
		 "memory\n", bytes);

	return 0;
}