static size_t s_WriteGZipFooter(void* buf, size_t buf_size, unsigned long total, unsigned long crc) { // .gz footer is 8 bytes length if (buf_size < 8) { return 0; } CCompressionUtil::StoreUI4(buf, crc); CCompressionUtil::StoreUI4((unsigned char*)buf+4, LOW32(total)); return 8; }
/// Build the per-slot file list from repo #0's metadata and send it over the
/// CMI interface.
/// @param cmd  Originating command; if non-NULL its fragment id is echoed
///             back, otherwise (uint32_t)-1 is used as the fragment id.
/// @return Result of cmi_intf_write_flist().
int sys_cmd_send_filelist(cmi_cmd_t* cmd)
{
    cmi_data_filelist_t dfl;
    memset(&dfl, 0, sizeof(cmi_data_filelist_t));

    /* Repo #0's metadata serves as the reference copy for the whole vault. */
    struct dfv_repo* repo = sys_ctx.vault->repo_tbl[0];
    assert(repo);

    int used_slots = 0;
    for (int slot = 0; slot < CMI_MAX_SLOTS; slot++) {
        /* Every entry gets the marker tag and its slot id, even empty ones. */
        dfl.work_list[slot].tag = 0x18efdc0a;
        dfl.work_list[slot].slot_id = slot;

        dfv_fmeta_t* fmeta = dfv_repo_get_fmeta(repo, slot);
        if (!fmeta)
            continue;   /* no file in this slot */

        ssize_t sz = dfv_vault_get_slotsize(sys_ctx.vault, slot);
        if (sz < 0)
            continue;   /* size unavailable: leave size/time zeroed, don't count it */

        sz /= 1024;     /* report size in KiB, split across two 32-bit fields */
        dfl.work_list[slot].file_sz_h = HGH32(sz);
        dfl.work_list[slot].file_sz_l = LOW32(sz);
        dfl.work_list[slot].begin_tm = sys_systm_to_lktm(fmeta->file_time);
        used_slots++;
    }

    dfl.sys_info.tag = 0x5a5a;
    dfl.sys_info.slot_num = used_slots;

    /* Convert the list to the peer's byte order before transmission. */
    cmi_msg_reform_flist(&dfl, cmi_intf_get_endian(sys_ctx.cmi_intf));

    uint32_t frag_id = cmd ? cmd->u.file.frag_id : (uint32_t)-1;
    return cmi_intf_write_flist(sys_ctx.cmi_intf, &dfl, frag_id);
}
/* Exact-division style remainder of {src,size} by the odd limb d, with an
   initial carry-in c.  Used by GMP's divisibility and exact-division code.
   NOTE(review): per the surrounding mpn convention this returns an r with
   r*b^size == src - c (mod d) -- confirm against the gmp-impl.h description.
   Requires size >= 1 and d odd (asserted below).  The comment on the size==1
   special case refers to sparc udivx, so an ordinary "%" is used there
   instead of the multiply-by-inverse loop. */
mp_limb_t mpn_modexact_1c_odd (mp_srcptr src, mp_size_t size, mp_limb_t d, mp_limb_t orig_c)
{
  mp_limb_t  c = orig_c;
  mp_limb_t  s, l, q, h, inverse;

  ASSERT (size >= 1);
  ASSERT (d & 1);              /* the modular inverse below only exists for odd d */
  ASSERT_MPN (src, size);
  ASSERT_LIMB (d);
  ASSERT_LIMB (c);

  /* udivx is faster than 10 or 12 mulx's for one limb via an inverse */
  if (size == 1)
    {
      s = src[0];
      if (s > c)
        {
          /* plain subtract, then fold the remainder back into [0,d) */
          l = s-c;
          h = l % d;
          if (h != 0)
            h = d - h;
        }
      else
        {
          l = c-s;
          h = l % d;
        }
      return h;
    }

  /* inverse satisfies d*inverse == 1 (mod 2^GMP_LIMB_BITS) */
  binvert_limb (inverse, d);

  if (d <= 0xFFFFFFFF)
    {
      /* d fits in 32 bits: the cheaper half-limb umul variant suffices. */
      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);          /* l = s - c, c = borrow out */
          s = *src++;
          q = l * inverse;                 /* q*d == l (mod 2^GMP_LIMB_BITS) */
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;                          /* accumulate high part into the carry */

          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition. */

          l = c - s;
          l += (l > c ? d : 0);            /* addback on borrow */

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_half_lowequal (h, q, d, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
  else
    {
      /* Full-width d: split it for the generic umul_ppmm_lowequal. */
      mp_limb_t  dl = LOW32 (d);
      mp_limb_t  dh = HIGH32 (d);
      long  i;

      s = *src++;
      size--;
      do
        {
          SUBC_LIMB (c, l, s, c);          /* l = s - c, c = borrow out */
          s = *src++;
          q = l * inverse;                 /* q*d == l (mod 2^GMP_LIMB_BITS) */
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;                          /* accumulate high part into the carry */

          size--;
        }
      while (size != 0);

      if (s <= d)
        {
          /* With high s <= d the final step can be a subtract and addback.
             If c==0 then the addback will restore to l>=0.  If c==d then
             will get l==d if s==0, but that's ok per the function
             definition. */

          l = c - s;
          l += (l > c ? d : 0);            /* addback on borrow */

          ASSERT_RETVAL (l);
          return l;
        }
      else
        {
          /* Can't skip a divide, just do the loop code once more. */
          SUBC_LIMB (c, l, s, c);
          q = l * inverse;
          umul_ppmm_lowequal (h, q, d, dh, dl, l);
          c += h;

          ASSERT_RETVAL (c);
          return c;
        }
    }
}