/*
 * crc32_pclmul_le - CRC32 (little-endian polynomial) accelerated with
 * PCLMULQDQ.  Falls back to the generic crc32_le() when the buffer is too
 * short to amortize the FPU setup cost, or when the FPU cannot be used in
 * the current context.
 */
crc32_pclmul_le(u32 crc, unsigned char const *p, size_t len)
{
	unsigned int iquotient;
	unsigned int iremainder;
	unsigned int prealign;

	/* Too small or FPU unusable (e.g. in IRQ): plain software CRC. */
	if (len < PCLMUL_MIN_LEN + SCALE_F_MASK || !irq_fpu_usable())
		return crc32_le(crc, p, len);

	if ((long)p & SCALE_F_MASK) {
		/* align p to 16 byte */
		prealign = SCALE_F - ((long)p & SCALE_F_MASK);
		crc = crc32_le(crc, p, prealign);
		len -= prealign;
		p = (unsigned char *)(((unsigned long)p + SCALE_F_MASK) &
				      ~SCALE_F_MASK);
	}

	/* Split into a 16-byte-multiple body and a small tail. */
	iquotient = len & (~SCALE_F_MASK);
	iremainder = len & SCALE_F_MASK;

	/* The SIMD kernel must run between kernel_fpu_begin()/end(). */
	kernel_fpu_begin();
	crc = crc32_pclmul_le_16(p, iquotient, crc);
	kernel_fpu_end();

	/* Tail bytes are handled by the generic implementation. */
	if (iremainder)
		crc = crc32_le(crc, p + iquotient, iremainder);

	return crc;
}
/**
 * calc_crc_cont - check CRC of blocks continuously
 * @sbi: nilfs_sb_info
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 *
 * Return: 0 on success, -EIO if a follow-on block cannot be read.
 */
static int calc_crc_cont(struct nilfs_sb_info *sbi, struct buffer_head *bhs,
			 u32 *sum, unsigned long offset, u64 check_bytes,
			 sector_t start, unsigned long nblock)
{
	unsigned long blocksize = sbi->s_super->s_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	/* First block: only the bytes from @offset onward are summed. */
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(sbi->s_nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			/* Remaining blocks are read from disk one by one;
			 * the last block may be summed only partially. */
			struct buffer_head *bh =
				sb_bread(sbi->s_super, ++start);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}
/**
 * nilfs_compute_checksum - compute checksum of blocks continuously
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 *
 * Return: 0 on success, -EIO if a follow-on block cannot be read.
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long chunk;
	u32 crc;

	BUG_ON(offset >= blocksize);

	/* First block: checksum the bytes from @offset onward. */
	check_bytes -= offset;
	chunk = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, chunk);

	/* Follow-on blocks are read one at a time from the device. */
	while (--nblock > 0) {
		struct buffer_head *bh;

		bh = __bread(nilfs->ns_bdev, ++start, blocksize);
		if (!bh)
			return -EIO;
		check_bytes -= chunk;
		chunk = min_t(u64, check_bytes, blocksize);
		crc = crc32_le(crc, bh->b_data, chunk);
		brelse(bh);
	}

	*sum = crc;
	return 0;
}
int nilfs_commit_super(struct super_block *sb, int flag) { struct the_nilfs *nilfs = sb->s_fs_info; struct nilfs_super_block **sbp = nilfs->ns_sbp; time_t t; /* nilfs->ns_sem must be locked by the caller. */ t = get_seconds(); nilfs->ns_sbwtime = t; sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[0], nilfs->ns_sbsize)); if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { sbp[1]->s_wtime = sbp[0]->s_wtime; sbp[1]->s_sum = 0; sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[1], nilfs->ns_sbsize)); } clear_nilfs_sb_dirty(nilfs); nilfs->ns_flushed_device = 1; /* make sure store to ns_flushed_device cannot be reordered */ smp_wmb(); return nilfs_sync_super(sb, flag); }
/* Compute a partial ICRC for all the IB transport headers. */
u32 rxe_icrc_hdr(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	unsigned int bth_offset = 0;
	struct iphdr *ip4h = NULL;
	struct ipv6hdr *ip6h = NULL;
	struct udphdr *udph;
	struct rxe_bth *bth;
	int crc;
	int length;
	/* IP+UDP header size depends on the packet's L3 protocol. */
	int hdr_size = sizeof(struct udphdr) +
		(skb->protocol == htons(ETH_P_IP) ?
		 sizeof(struct iphdr) : sizeof(struct ipv6hdr));
	/* pseudo header buffer size is calculate using ipv6 header size since
	 * it is bigger than ipv4
	 */
	u8 pshdr[sizeof(struct udphdr) + sizeof(struct ipv6hdr) +
		 RXE_BTH_BYTES];

	/* This seed is the result of computing a CRC with a seed of
	 * 0xfffffff and 8 bytes of 0xff representing a masked LRH.
	 */
	crc = 0xdebb20e3;

	/* NOTE(review): mutable header fields are overwritten with all-ones
	 * in the scratch copy before hashing — presumably the fields the
	 * RoCE ICRC definition excludes; confirm against the spec. */
	if (skb->protocol == htons(ETH_P_IP)) { /* IPv4 */
		memcpy(pshdr, ip_hdr(skb), hdr_size);
		ip4h = (struct iphdr *)pshdr;
		udph = (struct udphdr *)(ip4h + 1);

		ip4h->ttl = 0xff;
		ip4h->check = CSUM_MANGLED_0;
		ip4h->tos = 0xff;
	} else {				/* IPv6 */
		memcpy(pshdr, ipv6_hdr(skb), hdr_size);
		ip6h = (struct ipv6hdr *)pshdr;
		udph = (struct udphdr *)(ip6h + 1);

		memset(ip6h->flow_lbl, 0xff, sizeof(ip6h->flow_lbl));
		ip6h->priority = 0xf;
		ip6h->hop_limit = 0xff;
	}
	udph->check = CSUM_MANGLED_0;

	bth_offset += hdr_size;

	/* Append a copy of the BTH right after the pseudo IP/UDP headers. */
	memcpy(&pshdr[bth_offset], pkt->hdr, RXE_BTH_BYTES);
	bth = (struct rxe_bth *)&pshdr[bth_offset];

	/* exclude bth.resv8a */
	bth->qpn |= cpu_to_be32(~BTH_QPN_MASK);

	length = hdr_size + RXE_BTH_BYTES;
	crc = crc32_le(crc, pshdr, length);

	/* And finish to compute the CRC on the remainder of the headers. */
	crc = crc32_le(crc, pkt->hdr + RXE_BTH_BYTES,
		       rxe_opcode[pkt->opcode].length - RXE_BTH_BYTES);

	return crc;
}
// CRC32 over two discontiguous spans of this Item: the header fields from
// nsIndex up to (but excluding) datatype, followed by the key bytes.
uint32_t Item::calculateCrc32WithoutValue() const
{
    const uint8_t* base = reinterpret_cast<const uint8_t*>(this);
    uint32_t crc = 0xffffffff;
    crc = crc32_le(crc, base + offsetof(Item, nsIndex),
                   offsetof(Item, datatype) - offsetof(Item, nsIndex));
    crc = crc32_le(crc, base + offsetof(Item, key), sizeof(key));
    return crc;
}
/*
 * This function validates existing check information. Like _compute,
 * the function will take care of zeroing bc before calculating check codes.
 * If bc is not a pointer inside data, the caller must have zeroed any
 * inline ocfs2_block_check structures.
 *
 * Again, the data passed in should be the on-disk endian.
 */
int ocfs2_block_check_validate(void *data, size_t blocksize,
			       struct ocfs2_block_check *bc,
			       struct ocfs2_blockcheck_stats *stats)
{
	int rc = 0;
	u32 bc_crc32e;
	u16 bc_ecc;
	u32 crc, ecc;

	ocfs2_blockcheck_inc_check(stats);

	/* Save the stored codes, then zero bc so the block hashes the same
	 * way it did when the codes were computed. */
	bc_crc32e = le32_to_cpu(bc->bc_crc32e);
	bc_ecc = le16_to_cpu(bc->bc_ecc);

	memset(bc, 0, sizeof(struct ocfs2_block_check));

	/* Fast path - if the crc32 validates, we're good to go */
	crc = crc32_le(~0, data, blocksize);
	if (crc == bc_crc32e)
		goto out;

	ocfs2_blockcheck_inc_failure(stats);
	mlog(ML_ERROR,
	     "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
	     (unsigned int)bc_crc32e, (unsigned int)crc);

	/* Ok, try ECC fixups */
	ecc = ocfs2_hamming_encode_block(data, blocksize);
	/* XOR of stored and recomputed parity locates the flipped bit. */
	ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);

	/* And check the crc32 again */
	crc = crc32_le(~0, data, blocksize);
	if (crc == bc_crc32e) {
		ocfs2_blockcheck_inc_recover(stats);
		goto out;
	}

	mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
	     (unsigned int)bc_crc32e, (unsigned int)crc);

	rc = -EIO;
out:
	/* Restore the on-disk codes whatever the outcome. */
	bc->bc_crc32e = cpu_to_le32(bc_crc32e);
	bc->bc_ecc = cpu_to_le16(bc_ecc);

	return rc;
}
/* * This function generates check information for a list of buffer_heads. * bhs is the blocks to be checked. bc is a pointer to the * ocfs2_block_check structure describing the crc32 and the ecc. * * bc should be a pointer inside data, as the function will * take care of zeroing it before calculating the check information. If * bc does not point inside data, the caller must make sure any inline * ocfs2_block_check structures are zeroed. * * The data buffer must be in on-disk endian (little endian for ocfs2). * bc will be filled with little-endian values and will be ready to go to * disk. */ void ocfs2_block_check_compute_bhs(struct buffer_head **bhs, int nr, struct ocfs2_block_check *bc) { int i; u32 crc, ecc; BUG_ON(nr < 0); if (!nr) return; memset(bc, 0, sizeof(struct ocfs2_block_check)); for (i = 0, crc = ~0, ecc = 0; i < nr; i++) { crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); /* * The number of bits in a buffer is obviously b_size*8. * The offset of this buffer is b_size*i, so the bit offset * of this buffer is b_size*8*i. */ ecc = (u16)ocfs2_hamming_encode(ecc, bhs[i]->b_data, bhs[i]->b_size * 8, bhs[i]->b_size * 8 * i); } /* * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no * larger than 16 bits. */ BUG_ON(ecc > USHRT_MAX); bc->bc_crc32e = cpu_to_le32(crc); bc->bc_ecc = cpu_to_le16((u16)ecc); }
/*
 * opa_vnic_mac_send_event - post event on possible mac list exchange
 *  Send trap when digest from uc/mc mac list differs from previous run.
 *  Digest is evaluated similar to how cksum does.
 */
static void opa_vnic_mac_send_event(struct net_device *netdev, u8 event)
{
	struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);
	struct netdev_hw_addr *ha;
	struct netdev_hw_addr_list *hw_list;
	u32 *ref_crc;
	u32 l, crc = 0;	/* NOTE(review): 'l' unused in this chunk; likely used below */

	/* Pick the unicast or multicast list and its stored digest. */
	switch (event) {
	case OPA_VESWPORT_TRAP_IFACE_UCAST_MAC_CHANGE:
		hw_list = &netdev->uc;
		adapter->info.vport.uc_macs_gen_count++;
		ref_crc = &adapter->umac_hash;
		break;
	case OPA_VESWPORT_TRAP_IFACE_MCAST_MAC_CHANGE:
		hw_list = &netdev->mc;
		adapter->info.vport.mc_macs_gen_count++;
		ref_crc = &adapter->mmac_hash;
		break;
	default:
		return;
	}
	/* Fold every MAC address in the list into one CRC32 digest. */
	netdev_hw_addr_list_for_each(ha, hw_list) {
		crc = crc32_le(crc, ha->addr, ETH_ALEN);
	}
/*
 * Host "write" test: fill an aligned DMA buffer with random data, program
 * the buffer's bus address, size and CRC32 into the endpoint's registers,
 * and trigger a transfer.  NOTE(review): the command is COMMAND_READ
 * because the direction is named from the endpoint's point of view, and
 * the endpoint presumably compares what it reads against the CRC written
 * to PCI_ENDPOINT_TEST_CHECKSUM — confirm against the endpoint function
 * driver.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	/* Over-allocate by @alignment so the buffer can be aligned by hand. */
	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "failed to allocate address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	/* CRC32 of the buffer, written for the endpoint to check against. */
	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM, crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_READ);

	/* The endpoint raises an MSI when the transfer completes. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
	return ret;
}
static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) { int rc = 0; u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); BUG_ON(conn->cc_proto == NULL); /* for now we only have one cluster/node, make sure we see it * in the heartbeat universe */ if (!o2hb_check_local_node_heartbeating()) { if (o2hb_global_heartbeat_active()) mlog(ML_ERROR, "Global heartbeat not started\n"); rc = -EINVAL; goto out; } priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto out_free; } /* This just fills the structure in. It is safe to pass conn. */ dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb, conn); conn->cc_private = priv; /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. */ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); fs_version.pv_major = conn->cc_version.pv_major; fs_version.pv_minor = conn->cc_version.pv_minor; dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } conn->cc_version.pv_major = fs_version.pv_major; conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); out_free: if (rc && conn->cc_private) kfree(conn->cc_private); out: return rc; }
static int o2cb_cluster_connect(struct ocfs2_cluster_connection *conn) { int rc = 0; u32 dlm_key; struct dlm_ctxt *dlm; struct o2dlm_private *priv; struct dlm_protocol_version fs_version; BUG_ON(conn == NULL); BUG_ON(conn->cc_proto == NULL); /* Ensure cluster stack is up and all nodes are connected */ rc = o2cb_cluster_check(); if (rc) { printk(KERN_ERR "o2cb: Cluster check failed. Fix errors " "before retrying.\n"); goto out; } priv = kzalloc(sizeof(struct o2dlm_private), GFP_KERNEL); if (!priv) { rc = -ENOMEM; goto out_free; } /* This just fills the structure in. It is safe to pass conn. */ dlm_setup_eviction_cb(&priv->op_eviction_cb, o2dlm_eviction_cb, conn); conn->cc_private = priv; /* used by the dlm code to make message headers unique, each * node in this domain must agree on this. */ dlm_key = crc32_le(0, conn->cc_name, conn->cc_namelen); fs_version.pv_major = conn->cc_version.pv_major; fs_version.pv_minor = conn->cc_version.pv_minor; dlm = dlm_register_domain(conn->cc_name, dlm_key, &fs_version); if (IS_ERR(dlm)) { rc = PTR_ERR(dlm); mlog_errno(rc); goto out_free; } conn->cc_version.pv_major = fs_version.pv_major; conn->cc_version.pv_minor = fs_version.pv_minor; conn->cc_lockspace = dlm; dlm_register_eviction_cb(dlm, &priv->op_eviction_cb); out_free: if (rc && conn->cc_private) kfree(conn->cc_private); out: return rc; }
/*
 * nilfs_sb_check_sum - compute the superblock CRC with its stored s_sum
 * field treated as zero.  The field is temporarily cleared and restored,
 * so @sbp is left unmodified on return.
 */
static uint32_t nilfs_sb_check_sum(struct nilfs_super_block *sbp)
{
	__le32 saved_sum = sbp->s_sum;
	uint32_t crc;

	sbp->s_sum = 0;
	crc = crc32_le(le32_to_cpu(sbp->s_crc_seed), (unsigned char *)sbp,
		       le16_to_cpu(sbp->s_bytes));
	sbp->s_sum = saved_sum;
	return crc;
}
/**
 * s3c_pm_runcheck() - helper to check a resource on restore.
 * @res: The resource to check
 * @val: Pointer to list of CRC32 values to check.
 *
 * Called from the s3c_pm_check_restore() via s3c_pm_run_sysram(), this
 * function runs the given memory resource checking it against the stored
 * CRC to ensure that memory is restored. The function tries to skip as
 * many of the areas used during the suspend process.
 *
 * Returns the advanced @val pointer (one entry consumed per chunk).
 */
static u32 *s3c_pm_runcheck(struct resource *res, u32 *val)
{
	void *save_at = phys_to_virt(s3c_sleep_save_phys);
	unsigned long addr;
	unsigned long left;
	void *stkpage;
	void *ptr;
	u32 calc;

	/* Page holding the current stack — its contents change while we
	 * run, so any chunk overlapping it cannot be verified. */
	stkpage = (void *)((u32)&calc & ~PAGE_MASK);

	for (addr = res->start; addr < res->end; addr += CHECK_CHUNKSIZE) {
		left = res->end - addr;

		if (left > CHECK_CHUNKSIZE)
			left = CHECK_CHUNKSIZE;

		ptr = phys_to_virt(addr);

		/* Skip chunks overlapping memory that suspend itself
		 * modified: the active stack, the CRC table, and the
		 * register save block. */
		if (in_region(ptr, left, stkpage, 4096)) {
			S3C_PMDBG("skipping %08lx, has stack in\n", addr);
			goto skip_check;
		}

		if (in_region(ptr, left, crcs, crc_size)) {
			S3C_PMDBG("skipping %08lx, has crc block in\n", addr);
			goto skip_check;
		}

		if (in_region(ptr, left, save_at, 32*4 )) {
			S3C_PMDBG("skipping %08lx, has save block in\n", addr);
			goto skip_check;
		}

		/* calculate and check the checksum */

		calc = crc32_le(~0, ptr, left);
		if (calc != *val) {
			printk(KERN_ERR "Restore CRC error at "
			       "%08lx (%08x vs %08x)\n", addr, calc, *val);

			S3C_PMDBG("Restore CRC error at %08lx (%08x vs %08x)\n",
				  addr, calc, *val);
		}

	skip_check:
		/* One stored CRC entry is consumed even for skipped chunks. */
		val++;
	}

	return val;
}
// Reassemble split Cryptoworks EMM packets.
// Returns 1 when an assembled packet was produced (ownership of the buffer
// is handed to ad via SetAssembled), 0 when the packet can be used as-is,
// and -1 when it should be ignored.
int cCardCryptoworks::Assemble(cAssembleData *ad)
{
  const unsigned char *data=ad->Data();
  int len=SCT_LEN(data);
  switch(data[0]) {
    case 0x82:
      return 0; // no assemble needed
    case 0x84:
      // Remember the shared part; merged with a following 0x86 packet.
      free(sharedEmm);
      sharedEmm=(unsigned char *)malloc(len);
      if(sharedEmm) {
        memcpy(sharedEmm,data,len);
        sharedLen=len;
        }
      break;
    case 0x86:
      if(sharedEmm) {
        int alen=len-5 + sharedLen-12;
        unsigned char *tmp=AUTOMEM(alen);
        memcpy(tmp,&data[5],len-5);
        memcpy(tmp+len-5,&sharedEmm[12],sharedLen-12);
        unsigned char *ass=(unsigned char *)malloc(alen+12);
        if(!ass) return -1; // ignore
        memcpy(ass,sharedEmm,12);
        SortNanos(ass+12,tmp,alen);
        SetSctLen(ass,alen+9);
        free(sharedEmm); sharedEmm=0;
        if(ass[11]==alen) { // sanity check
          ad->SetAssembled(ass);
          return 1; // assembled
          }
        free(ass); // BUGFIX: 'ass' was leaked when the sanity check failed
        }
      break;
    case 0x88:
    case 0x89:
      // Global EMMs toggle between table ids 0x88/0x89; only pass one
      // through when the payload CRC actually changed.
      if(data[0]!=globalToggle) {
        globalToggle=data[0];
        unsigned int crc=crc32_le(0,data+1,len-1);
        if(crc!=globalCrc) {
          globalCrc=crc;
          return 0; // no assemble needed
          }
        }
      break;
    }
  return -1; // ignore
}
int nilfs_commit_super(struct nilfs_sb_info *sbi, int flag) { struct the_nilfs *nilfs = sbi->s_nilfs; struct nilfs_super_block **sbp = nilfs->ns_sbp; time_t t; /* nilfs->ns_sem must be locked by the caller. */ t = get_seconds(); nilfs->ns_sbwtime = t; sbp[0]->s_wtime = cpu_to_le64(t); sbp[0]->s_sum = 0; sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[0], nilfs->ns_sbsize)); if (flag == NILFS_SB_COMMIT_ALL && sbp[1]) { sbp[1]->s_wtime = sbp[0]->s_wtime; sbp[1]->s_sum = 0; sbp[1]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed, (unsigned char *)sbp[1], nilfs->ns_sbsize)); } clear_nilfs_sb_dirty(nilfs); return nilfs_sync_super(sbi, flag); }
/*
 * When page is being changed by the kernel, we must update the dedup structure:
 * 1. unlink changed block from equal blocks
 * 2. calculate new hash and crc
 * 3. link to new equal blocks (if exists)
 *
 * Returns 0 (the block being outside the dedup range is not an error).
 */
int dedup_update_page_changed(sector_t block, char* block_data)
{
	size_t block_size = dedup_get_block_size();
	sector_t currblock, equal_block;

	// Todo: add support if there is more than 1 block in page - check them all

	if (!dedup_is_in_range(block)) {
		/* BUGFIX: sector_t may be 64-bit; "%ld" mismatched it (UB).
		 * Cast to unsigned long long and print with %llu. */
		trace_printk("block not in range %llu",
			     (unsigned long long)block);
		return 0;
	}

	/* Work with the block index relative to the dedup range start. */
	block = block - start_block;
	equal_block = block;

	trace_printk("page is being updated : block = %llu\n",
		     (unsigned long long)block);

	// Remove from dedup structure
	dedup_remove_block_duplication(block);

	// Calc hash
	calc_hash(block_data, block_size, blocksArray.hashes[block]);

	// Calc crc32 of the hash (cheap first-stage comparison key)
	blocksArray.hash_crc[block] = crc32_le(0, blocksArray.hashes[block],
					       SHA256_DIGEST_SIZE);

	// Scan all other blocks for a duplicate of the new contents
	for (currblock = 0; currblock < blocks_count; ++currblock) {
		if (currblock == block)
			continue;
		// first, compare crc - should be faster
		if (blocksArray.hash_crc[currblock] !=
		    blocksArray.hash_crc[block])
			continue;
		// If hash array is NULL then there is a block at lower index
		// that is equal to this block and it was already compared to.
		if (blocksArray.hashes[currblock] &&
		    blocksArray.hashes[block] &&
		    memcmp(blocksArray.hashes[currblock],
			   blocksArray.hashes[block],
			   SHA256_DIGEST_SIZE) == 0) {
			equal_block = currblock;
			break;
		}
	}

	if (block != equal_block) {
		trace_printk("found new duplicated block ! %llu = %llu\n",
			     (unsigned long long)(block + start_block),
			     (unsigned long long)(equal_block + start_block));
		dedup_set_block_duplication(equal_block, block);
	}

	return 0;
}
/**
 * msm_eeprom_verify_sum - verify crc32 checksum
 * @mem:	data buffer
 * @size:	size of data buffer
 * @sum:	expected checksum
 *
 * Returns 0 if checksum match, -EINVAL otherwise.
 */
static int msm_eeprom_verify_sum(const char *mem, uint32_t size, uint32_t sum)
{
	uint32_t crc = ~0UL;

	/* check overflow */
	if (size > crc - sizeof(uint32_t))
		return -EINVAL;

	crc = crc32_le(crc, mem, size);
	/* The stored checksum is the bitwise complement of the raw CRC. */
	if (~crc == sum) {
		CDBG("%s: checksum pass 0x%x\n", __func__, sum);
		return 0;
	}
	CDBG("%s: expect 0x%x, result 0x%x\n", __func__, sum, ~crc);
	return -EINVAL;
}
/*
 * s3c_pm_makecheck - record CRC32s for each CHECK_CHUNKSIZE chunk of @res.
 * @res: memory resource to checksum
 * @val: output array cursor; one u32 written per chunk
 *
 * Returns the advanced @val pointer.
 */
static u32 *s3c_pm_makecheck(struct resource *res, u32 *val)
{
	unsigned long addr;

	for (addr = res->start; addr < res->end; addr += CHECK_CHUNKSIZE) {
		unsigned long left = res->end - addr;

		if (left > CHECK_CHUNKSIZE)
			left = CHECK_CHUNKSIZE;
		*val++ = crc32_le(~0, phys_to_virt(addr), left);
	}
	return val;
}
/*
 * tegra_nct_read_item - copy NCT entry @index into @buf after validation.
 * @index: NCT entry index
 * @buf: destination for the entry's data payload
 *
 * The entry is snapshotted into a heap buffer, its CRC32 (when enabled)
 * and embedded index are verified, then the payload is copied out.
 * Returns 0, -EPERM if the NCT is not initialized, -ENOMEM on allocation
 * failure, or -EINVAL on a checksum/index mismatch.
 *
 * IMPROVEMENT: the per-error-path kfree(entry) duplication was replaced
 * with a single goto-based cleanup (CERT MEM12-C).
 */
int tegra_nct_read_item(u32 index, union nct_item_type *buf)
{
	struct nct_entry_type *entry;
	u8 *nct;
	int err = 0;
#if USE_CRC32_IN_NCT
	u32 crc = 0;
#endif

	if (!tegra_nct_initialized)
		return -EPERM;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		pr_err("%s: failed to allocate buffer\n", __func__);
		return -ENOMEM;
	}

	/* Snapshot the raw entry out of the NCT region. */
	nct = (u8 *)(nct_ptr + NCT_ENTRY_OFFSET + (index * sizeof(*entry)));
	memcpy((u8 *)entry, nct, sizeof(*entry));

	/* check CRC integrity */
#if USE_CRC32_IN_NCT
	/* last checksum field of entry is not included in CRC calculation */
	crc = crc32_le(~0, (u8 *)entry,
		       sizeof(*entry) - sizeof(entry->checksum)) ^ ~0;
	if (crc != entry->checksum) {
		pr_err("%s: checksum err(0x%x/0x%x)\n", __func__, crc,
		       entry->checksum);
		err = -EINVAL;
		goto out;
	}
#endif

	/* check index integrity */
	if (index != entry->index) {
		pr_err("%s: index err(0x%x/0x%x)\n", __func__, index,
		       entry->index);
		err = -EINVAL;
		goto out;
	}

	memcpy(buf, &entry->data, sizeof(*buf));
out:
	kfree(entry);
	return err;
}
/*
 * nilfs_commit_super - refresh the on-memory super block and write it out.
 * @sbi: nilfs_sb_info
 * @dupsb: nonzero to also duplicate the result into the backup super block
 *
 * Returns 0 on success or a negative errno.
 */
int nilfs_commit_super(struct nilfs_sb_info *sbi, int dupsb)
{
	struct the_nilfs *nilfs = sbi->s_nilfs;
	struct nilfs_super_block **sbp = nilfs->ns_sbp;
	sector_t nfreeblocks;
	time_t t;
	int err;

	/* nilfs->sem must be locked by the caller. */
	nilfs_debug(2, "called\n");

	/* If the primary super block is broken, fall back to the backup. */
	if (sbp[0]->s_magic != NILFS_SUPER_MAGIC) {
		if (sbp[1] && sbp[1]->s_magic == NILFS_SUPER_MAGIC)
			nilfs_swap_super_block(nilfs);
		else {
			printk(KERN_CRIT "NILFS: superblock broke on dev %s\n",
			       sbi->s_super->s_id);
			return -EIO;
		}
	}
	err = nilfs_count_free_blocks(nilfs, &nfreeblocks);
	if (unlikely(err)) {
		printk(KERN_ERR "NILFS: failed to count free blocks\n");
		return err;
	}
	/* Snapshot the latest segment pointers under their lock. */
	spin_lock(&nilfs->ns_last_segment_lock);
	sbp[0]->s_last_seq = cpu_to_le64(nilfs->ns_last_seq);
	sbp[0]->s_last_pseg = cpu_to_le64(nilfs->ns_last_pseg);
	sbp[0]->s_last_cno = cpu_to_le64(nilfs->ns_last_cno);
	spin_unlock(&nilfs->ns_last_segment_lock);

	t = get_seconds();
	nilfs->ns_sbwtime[0] = t;
	sbp[0]->s_free_blocks_count = cpu_to_le64(nfreeblocks);
	sbp[0]->s_wtime = cpu_to_le64(t);
	/* s_sum is zeroed first so the checksum covers it as zero. */
	sbp[0]->s_sum = 0;
	sbp[0]->s_sum = cpu_to_le32(crc32_le(nilfs->ns_crc_seed,
					     (unsigned char *)sbp[0],
					     nilfs->ns_sbsize));
	if (dupsb && sbp[1]) {
		/* The memcpy carries the checksum over to the backup too. */
		memcpy(sbp[1], sbp[0], nilfs->ns_sbsize);
		nilfs->ns_sbwtime[1] = t;
	}
	sbi->s_super->s_dirt = 0;
	return nilfs_sync_super(sbi, dupsb);
}
/*
 * This function generates check information for a block.
 * data is the block to be checked.  bc is a pointer to the
 * ocfs2_block_check structure describing the crc32 and the ecc.
 *
 * bc should be a pointer inside data, as the function will
 * take care of zeroing it before calculating the check information.  If
 * bc does not point inside data, the caller must make sure any inline
 * ocfs2_block_check structures are zeroed.
 *
 * The data buffer must be in on-disk endian (little endian for ocfs2).
 * bc will be filled with little-endian values and will be ready to go to
 * disk.
 */
void ocfs2_block_check_compute(void *data, size_t blocksize,
			       struct ocfs2_block_check *bc)
{
	u32 crc, ecc;

	/* The check structure must read as zero while the codes are made. */
	memset(bc, 0, sizeof(struct ocfs2_block_check));

	crc = crc32_le(~0, data, blocksize);
	ecc = ocfs2_hamming_encode_block(data, blocksize);

	/*
	 * No ecc'd ocfs2 structure is larger than 4K, so ecc will be no
	 * larger than 16 bits.
	 */
	BUG_ON(ecc > USHRT_MAX);

	bc->bc_crc32e = cpu_to_le32(crc);
	bc->bc_ecc = cpu_to_le16((u16)ecc);
}
/*
 * dtsec_add_hash_mac_address - add a MAC address to the dTSEC hash filter.
 * Computes a bit-reversed CRC32 of the address to select the hash
 * register/bit pair programmed into the hardware.
 */
int dtsec_add_hash_mac_address(struct fman_mac *dtsec, enet_addr_t *eth_addr)
{
	struct dtsec_regs __iomem *regs = dtsec->regs;
	struct eth_hash_entry *hash_entry;
	u64 addr;
	s32 bucket;
	u32 crc = 0xFFFFFFFF;
	bool mcast, ghtx;

	if (!is_init_done(dtsec->dtsec_drv_param))
		return -EINVAL;

	addr = ENET_ADDR_TO_UINT64(*eth_addr);

	/* NOTE(review): "®s" below is a mojibake of "&regs" — the file's
	 * encoding needs fixing before this compiles. */
	ghtx = (bool)((ioread32be(®s->rctrl) & RCTRL_GHTX) ? true : false);
	mcast = (bool)((addr & MAC_GROUP_ADDRESS) ? true : false);

	/* Cannot handle unicast mac addr when GHTX is on */
	if (ghtx && !mcast) {
		pr_err("Could not compute hash bucket\n");
		return -EINVAL;
	}
	/* CRC32 of the MAC, bit-reversed to match the hardware ordering. */
	crc = crc32_le(crc, (u8 *)eth_addr, ETH_ALEN);
	crc = bitrev32(crc);

	/* considering the 9 highest order bits in crc H[8:0]:
	 *if ghtx = 0 H[8:6] (highest order 3 bits) identify the hash register
	 *and H[5:1] (next 5 bits) identify the hash bit
	 *if ghts = 1 H[8:5] (highest order 4 bits) identify the hash register
	 *and H[4:0] (next 5 bits) identify the hash bit.
	 *
	 *In bucket index output the low 5 bits identify the hash register
	 *bit, while the higher 4 bits identify the hash register
	 */
	if (ghtx) {
		bucket = (s32)((crc >> 23) & 0x1ff);
	} else {
/*
 * do_bulk_checksum_crc32 - CRC32 over every kiov segment of a bulk
 * descriptor; the little-endian result is copied into @buf.
 * Always returns 0.
 */
static int do_bulk_checksum_crc32(struct ptlrpc_bulk_desc *desc, void *buf)
{
	__u32 crc32 = ~0;
	int i;

	for (i = 0; i < desc->bd_iov_count; i++) {
		struct page *page = desc->bd_iov[i].kiov_page;
		int off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
		int len = desc->bd_iov[i].kiov_len;
		char *ptr = cfs_kmap(page) + off;

		crc32 = crc32_le(crc32, ptr, len);
		cfs_kunmap(page);
	}

	crc32 = cpu_to_le32(crc32);
	memcpy(buf, &crc32, sizeof(crc32));
	return 0;
}
/*
 * Update dedup structure with block's hash and crc
 */
void dedup_calc_block_hash_crc(sector_t block)
{
	size_t block_size = dedup_get_block_size();
	char *data;

	if (block >= blocks_count) /* outside dedup range */
		return;

	data = kmalloc(block_size, GFP_KERNEL);
	if (!data) {
		printk(KERN_ERR "failed allocating block data buffer.\n");
		return;
	}

	/* Read block */
	read_block(data, block_size, start_block + block);

	/* Calc hash */
	calc_hash(data, block_size, blocksArray.hashes[block]);

	/* Calc crc32 of the hash (cheap comparison key) */
	blocksArray.hash_crc[block] = crc32_le(0, blocksArray.hashes[block],
					       SHA256_DIGEST_SIZE);

	kfree(data);
}
/*
 * sptlrpc_get_bulk_checksum - compute a 32-bit checksum (Adler32 or CRC32)
 * over all iov segments of @desc and store it little-endian in @buf.
 * @buflen: size of @buf in bytes.
 *
 * Returns 0 on success, -EINVAL if the algorithm is unavailable or the
 * output buffer cannot hold the 4-byte result.
 */
int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
			      void *buf, int buflen)
{
	__u32 csum32;
	int i;

	LASSERT(alg == BULK_HASH_ALG_ADLER32 || alg == BULK_HASH_ALG_CRC32);

	/*
	 * BUGFIX: @buflen was accepted but never validated; the final
	 * memcpy always writes 4 bytes and could overrun a smaller buffer.
	 */
	if (buflen < (int)sizeof(csum32))
		return -EINVAL;

	/* Adler32 starts at 1, CRC32 at all-ones. */
	if (alg == BULK_HASH_ALG_ADLER32)
		csum32 = 1;
	else
		csum32 = ~0;

	for (i = 0; i < desc->bd_iov_count; i++) {
		unsigned char *ptr = desc->bd_iov[i].iov_base;
		int len = desc->bd_iov[i].iov_len;

		switch (alg) {
		case BULK_HASH_ALG_ADLER32:
#ifdef HAVE_ADLER
			csum32 = adler32(csum32, ptr, len);
#else
			CERROR("Adler32 not supported\n");
			return -EINVAL;
#endif
			break;
		case BULK_HASH_ALG_CRC32:
			csum32 = crc32_le(csum32, ptr, len);
			break;
		}
	}

	csum32 = cpu_to_le32(csum32);
	memcpy(buf, &csum32, sizeof(csum32));
	return 0;
}
/*
 * user_dlm_register_context - register a dlm domain named after @name.
 * The domain key is the CRC32 of the name.  Returns the dlm context or an
 * ERR_PTR on failure.
 */
struct dlm_ctxt *user_dlm_register_context(struct qstr *name,
					   struct dlm_protocol_version *proto)
{
	struct dlm_ctxt *dlm;
	char *domain;
	u32 dlm_key;

	/* NUL-terminated copy of the (not necessarily terminated) qstr. */
	domain = kmalloc(name->len + 1, GFP_NOFS);
	if (!domain) {
		mlog_errno(-ENOMEM);
		return ERR_PTR(-ENOMEM);
	}

	snprintf(domain, name->len + 1, "%.*s", name->len, name->name);
	dlm_key = crc32_le(0, name->name, name->len);

	dlm = dlm_register_domain(domain, dlm_key, proto);
	if (IS_ERR(dlm))
		mlog_errno(PTR_ERR(dlm));

	kfree(domain);
	return dlm;
}
/*
 * Host "copy" test: the endpoint is asked to copy a random-filled source
 * DMA buffer to a destination DMA buffer; the host then compares the
 * CRC32 of both buffers to decide success.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	/* Over-allocate by @alignment so buffers can be aligned by hand. */
	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 1 << MSI_NUMBER_SHIFT | COMMAND_COPY);

	/* The endpoint raises an MSI when the copy completes. */
	wait_for_completion(&test->irq_raised);

	/* Success iff both buffers now checksum identically. */
	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);

err:
	return ret;
}
/*
 * bootloader_common_ota_select_crc - CRC32 (all-ones seed) over the
 * 4-byte ota_seq field of an OTA select entry.
 *
 * IMPROVEMENT: the cast no longer discards the pointer's const qualifier,
 * and the magic length 4 is expressed as sizeof the field it covers.
 */
uint32_t bootloader_common_ota_select_crc(const esp_ota_select_entry_t *s)
{
    return crc32_le(UINT32_MAX, (const uint8_t *)&s->ota_seq,
                    sizeof(s->ota_seq));
}
/*
 * Thin adapter with the generic CRC32 signature; simply forwards to
 * crc32_le().
 */
static u32 __crc32_le(u32 crc, unsigned char const *p, size_t len)
{
	return crc32_le(crc, p, len);
}