/**
 * store_chunk(buf, buflen, ch, C):
 * Write the chunk ${buf} of length ${buflen} using the chunk layer cookie
 * ${C}, and populate the chunkheader structure ${ch}.
 */
static int
store_chunk(uint8_t * buf, size_t buflen, struct chunkheader * ch,
    CHUNKS_W * C)
{
	ssize_t storedlen;

	/* Record the HMAC of the chunk in the header. */
	if (crypto_hash_data(CRYPTO_KEY_HMAC_CHUNK, buf, buflen, ch->hash))
		goto err0;

	/* Record the (uncompressed) chunk length. */
	le32enc(ch->len, (uint32_t)(buflen));

	/* Hand the chunk to the storage layer; it reports the stored size. */
	if ((storedlen = chunks_write_chunk(C, ch->hash, buf, buflen)) == -1) {
		warnp("Error in chunk storage layer");
		goto err0;
	}

	/* Record the compressed chunk length. */
	le32enc(ch->zlen, (uint32_t)(storedlen));

	/* Success! */
	return (0);

err0:
	/* Failure! */
	return (-1);
}
/** * callback_write(rec, cookie): * Convert chunkdata record ${rec} into a struct chunkdata_external and * write it to the FILE * ${cookie}; but don't write entries with nrefs == 0. */ static int callback_write(void * rec, void * cookie) { struct chunkdata_external che; struct chunkdata * ch = rec; FILE * f = cookie; /* If nrefs == 0, return without writing anything. */ if (ch->nrefs == 0) return (0); /* Convert to on-disk format. */ memcpy(che.hash, ch->hash, 32); le32enc(che.len, ch->len); le32enc(che.zlen, ch->zlen_flags & CHDATA_ZLEN); le32enc(che.nrefs, ch->nrefs); le32enc(che.ncopies, ch->ncopies); /* Write. */ if (fwrite(&che, sizeof(che), 1, f) != 1) { warnp("Error writing to chunk directory"); return (-1); } /* Success! */ return (0); }
void
bsd_partition_le_enc(u_char *ptr, struct partition *d)
{

	/* 32-bit little-endian fields. */
	le32enc(&ptr[0], d->p_size);
	le32enc(&ptr[4], d->p_offset);
	le32enc(&ptr[8], d->p_fsize);
	/* Single-byte fields need no byte swapping. */
	ptr[12] = d->p_fstype;
	ptr[13] = d->p_frag;
	/* 16-bit little-endian field. */
	le16enc(&ptr[14], d->p_cpg);
}
/*
 * tencode(t, val):
 * Encode ${val} with the |t|-byte encoder; positive ${t} selects
 * big-endian, negative ${t} little-endian.  Return a static hex dump of
 * the encoded bytes, or "OVER" if the encoder wrote past its |t| bytes.
 */
static const char *
tencode(int t, uint64_t val)
{
	static char res[64];
	uint8_t buf[16];
	bool be = t > 0;
	int width, i;

	width = (t < 0) ? -t : t;

	/* Poison the whole buffer so overruns are detectable. */
	memset(buf, 0xFC, sizeof(buf));

	if (be) {
		if (width == 2)
			be16enc(buf, val);
		else if (width == 4)
			be32enc(buf, val);
		else if (width == 8)
			be64enc(buf, val);
	} else {
		if (width == 2)
			le16enc(buf, val);
		else if (width == 4)
			le32enc(buf, val);
		else if (width == 8)
			le64enc(buf, val);
	}

	/* Any disturbed poison byte past the encoded width is an overrun. */
	for (i = width; i < (int)sizeof(buf); i++) {
		if (buf[i] != 0xFC)
			return "OVER";
	}

	snprintf(res, sizeof(res), "%02X %02X %02X %02X %02X %02X %02X %02X ",
	    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	/* Trim the dump to the encoded width (drop trailing space too). */
	res[width * 3 - 1] = 0;
	return res;
}
/*
 * Encryption: generate a block of keystream, xor it with the plaintext to
 * produce the ciphertext, and increment the stream position. If vpt is
 * NULL, simply copy the requested keystream to the output buffer.
 */
size_t
chacha20_encrypt(chacha20_ctx *ctx, const void *vpt, uint8_t *ct, size_t len)
{
	const uint8_t *pt = vpt;
	uint32_t mix[16];
	uint8_t ks[64];
	unsigned int b, i;

	/*
	 * Only whole 64-byte keystream blocks are processed: round len down
	 * to a multiple of 64 and return the number of bytes handled.
	 */
	len -= len % sizeof ks;
	for (b = 0; b < len; b += sizeof ks) {
		/* 20 rounds (10 double rounds) over a copy of the state. */
		memcpy(mix, ctx->state, sizeof mix);
		for (i = 0; i < 20; i += 2) {
			/* Column round. */
			CHACHA_QR(mix, 0, 4, 8, 12);
			CHACHA_QR(mix, 1, 5, 9, 13);
			CHACHA_QR(mix, 2, 6, 10, 14);
			CHACHA_QR(mix, 3, 7, 11, 15);
			/* Diagonal round. */
			CHACHA_QR(mix, 0, 5, 10, 15);
			CHACHA_QR(mix, 1, 6, 11, 12);
			CHACHA_QR(mix, 2, 7, 8, 13);
			CHACHA_QR(mix, 3, 4, 9, 14);
		}
		/* Keystream block = little-endian(state + mixed state). */
		for (i = 0; i < 16; ++i)
			le32enc(ks + i * 4, ctx->state[i] + mix[i]);
		if (pt == NULL) {
			/* No plaintext: emit the raw keystream. */
			memcpy(ct, ks, sizeof ks);
			ct += sizeof ks;
		} else {
			/*
			 * XOR one block.  The "i < len" clause is redundant
			 * (len is a multiple of 64 and b < len, so at least
			 * 64 bytes remain) but harmless.
			 */
			for (i = 0; i < 64 && i < len; ++i)
				*ct++ = *pt++ ^ ks[i];
		}
		/* Block counter is word 12, with carry into word 13. */
		if (++ctx->state[12] == 0)
			++ctx->state[13];
	}
	return (len);
}
void
dos_partition_enc(void *pp, struct dos_partition *d)
{
	unsigned char *dst = pp;

	/* Flag, CHS, and type fields are single bytes: copy directly. */
	dst[0] = d->dp_flag;
	dst[1] = d->dp_shd;
	dst[2] = d->dp_ssect;
	dst[3] = d->dp_scyl;
	dst[4] = d->dp_typ;
	dst[5] = d->dp_ehd;
	dst[6] = d->dp_esect;
	dst[7] = d->dp_ecyl;
	/* LBA start and size are 32-bit little-endian. */
	le32enc(&dst[8], d->dp_start);
	le32enc(&dst[12], d->dp_size);
}
/**
 * smix(B, r, N, V, XY):
 * Compute B = SMix_r(B, N).  The input B must be 128r bytes in length;
 * the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The arrays B, V, and XY must be aligned to a
 * multiple of 64 bytes.
 */
static void
smix(uint8_t * B, size_t r, uint64_t N, void * V, void * XY)
{
	__m128i * X = XY;
	__m128i * Y = (void *)((uintptr_t)(XY) + 128 * r);
	/* Z is the trailing 64 bytes of XY, used as blockmix scratch. */
	__m128i * Z = (void *)((uintptr_t)(XY) + 256 * r);
	uint32_t * X32 = (void *)X;
	uint64_t i, j;
	size_t k;

	/* 1: X <-- B */
	/* Words are loaded permuted by (i * 5) mod 16 within each block. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			X32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	/* 2: for i = 0 to N - 1 do */
	/* Two iterations are unrolled so X and Y ping-pong as src/dst. */
	for (i = 0; i < N; i += 2) {
		/* 3: V_i <-- X */
		blkcpy((void *)((uintptr_t)(V) + i * 128 * r), X, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(X, Y, Z, r);

		/* 3: V_i <-- X */
		blkcpy((void *)((uintptr_t)(V) + (i + 1) * 128 * r),
		    Y, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 6: for i = 0 to N - 1 do */
	for (i = 0; i < N; i += 2) {
		/* 7: j <-- Integerify(X) mod N */
		/* N is a power of 2, so the mask implements the modulus. */
		j = integerify(X, r) & (N - 1);

		/* 8: X <-- H(X \xor V_j) */
		blkxor(X, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
		blockmix_salsa8(X, Y, Z, r);

		/* 7: j <-- Integerify(X) mod N */
		j = integerify(Y, r) & (N - 1);

		/* 8: X <-- H(X \xor V_j) */
		blkxor(Y, (void *)((uintptr_t)(V) + j * 128 * r), 128 * r);
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 10: B' <-- X */
	/* Store with the inverse of the load permutation. */
	for (k = 0; k < 2 * r; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4],
			    X32[k * 16 + i]);
		}
	}
}
/**
 * endentry(d):
 * An archive entry or trailer is ending; flush buffers into the stream.
 */
static int
endentry(TAPE_W * d)
{
	struct entryheader eh;
	uint8_t * hdrbuf;
	size_t hdrlen;

	/* Freeze the elastic header buffer into a flat buffer. */
	if (bytebuf_export(d->hbuf, &hdrbuf, &hdrlen))
		goto err0;

	/* Start a fresh elastic buffer for the next entry's header. */
	if ((d->hbuf = bytebuf_init(0)) == NULL)
		goto err1;

	/* Fill in the fixed-size entry header. */
	le32enc(eh.hlen, hdrlen);
	le64enc(eh.clen, d->clen);
	le32enc(eh.tlen, d->tlen);

	/* The entry header goes into the header stream first... */
	if (chunkify_write(d->h.c, (uint8_t *)(&eh),
	    sizeof(struct entryheader)))
		goto err1;

	/* ... followed by the archive header itself. */
	if (chunkify_write(d->h.c, hdrbuf, hdrlen))
		goto err1;

	/* The flat header buffer is no longer needed. */
	free(hdrbuf);

	/* Nothing is pending for this entry any more. */
	d->clen = d->tlen = 0;

	/* Success! */
	return (0);

err1:
	free(hdrbuf);
err0:
	/* Failure! */
	return (-1);
}
static int write_ioreq(int fd, struct ioreq *iorq) { struct iorec iorc; le64enc(&iorc.iorc_offset, iorq->iorq_offset); le32enc(&iorc.iorc_length, iorq->iorq_length); iorc.iorc_type = iorq->iorq_type; return (write(fd, &iorc, sizeof(iorc)) != sizeof(iorc)); }
void hodl_le_build_stratum_request( char* req, struct work* work, struct stratum_ctx *sctx ) { uint32_t ntime, nonce, nstartloc, nfinalcalc; char ntimestr[9], noncestr[9], nstartlocstr[9], nfinalcalcstr[9]; unsigned char *xnonce2str; le32enc( &ntime, work->data[ algo_gate.ntime_index ] ); le32enc( &nonce, work->data[ algo_gate.nonce_index ] ); bin2hex( ntimestr, (char*)(&ntime), sizeof(uint32_t) ); bin2hex( noncestr, (char*)(&nonce), sizeof(uint32_t) ); xnonce2str = abin2hex(work->xnonce2, work->xnonce2_len ); le32enc( &nstartloc, work->data[ HODL_NSTARTLOC_INDEX ] ); le32enc( &nfinalcalc, work->data[ HODL_NFINALCALC_INDEX ] ); bin2hex( nstartlocstr, (char*)(&nstartloc), sizeof(uint32_t) ); bin2hex( nfinalcalcstr, (char*)(&nfinalcalc), sizeof(uint32_t) ); sprintf( req, "{\"method\": \"mining.submit\", \"params\": [\"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\", \"%s\"], \"id\":4}", rpc_user, work->job_id, xnonce2str, ntimestr, noncestr, nstartlocstr, nfinalcalcstr ); free( xnonce2str ); }
/**
 * salsa20_8(B):
 * Apply the salsa20/8 core to the provided block.
 */
static void
salsa20_8(uint8_t B[64])
{
	uint32_t B32[16];
	uint32_t x[16];
	size_t i;

	/* Convert little-endian values in. */
	for (i = 0; i < 16; i++) {
		B32[i] = le32dec(&B[i * 4]);
	}

	/* Compute x = doubleround^4(B32). */
	for (i = 0; i < 16; i++) {
		x[i] = B32[i];
	}
	/* 8 rounds total: each pass of the loop is one double round. */
	for (i = 0; i < 8; i += 2) {
#define R(a,b) (((a) << (b)) | ((a) >> (32 - (b))))
		/* Operate on columns. */
		x[ 4] ^= R(x[ 0]+x[12], 7);  x[ 8] ^= R(x[ 4]+x[ 0], 9);
		x[12] ^= R(x[ 8]+x[ 4],13);  x[ 0] ^= R(x[12]+x[ 8],18);

		x[ 9] ^= R(x[ 5]+x[ 1], 7);  x[13] ^= R(x[ 9]+x[ 5], 9);
		x[ 1] ^= R(x[13]+x[ 9],13);  x[ 5] ^= R(x[ 1]+x[13],18);

		x[14] ^= R(x[10]+x[ 6], 7);  x[ 2] ^= R(x[14]+x[10], 9);
		x[ 6] ^= R(x[ 2]+x[14],13);  x[10] ^= R(x[ 6]+x[ 2],18);

		x[ 3] ^= R(x[15]+x[11], 7);  x[ 7] ^= R(x[ 3]+x[15], 9);
		x[11] ^= R(x[ 7]+x[ 3],13);  x[15] ^= R(x[11]+x[ 7],18);

		/* Operate on rows. */
		x[ 1] ^= R(x[ 0]+x[ 3], 7);  x[ 2] ^= R(x[ 1]+x[ 0], 9);
		x[ 3] ^= R(x[ 2]+x[ 1],13);  x[ 0] ^= R(x[ 3]+x[ 2],18);

		x[ 6] ^= R(x[ 5]+x[ 4], 7);  x[ 7] ^= R(x[ 6]+x[ 5], 9);
		x[ 4] ^= R(x[ 7]+x[ 6],13);  x[ 5] ^= R(x[ 4]+x[ 7],18);

		x[11] ^= R(x[10]+x[ 9], 7);  x[ 8] ^= R(x[11]+x[10], 9);
		x[ 9] ^= R(x[ 8]+x[11],13);  x[10] ^= R(x[ 9]+x[ 8],18);

		x[12] ^= R(x[15]+x[14], 7);  x[13] ^= R(x[12]+x[15], 9);
		x[14] ^= R(x[13]+x[12],13);  x[15] ^= R(x[14]+x[13],18);
#undef R
	}

	/* Compute B32 = B32 + x (the feed-forward addition). */
	for (i = 0; i < 16; i++) {
		B32[i] += x[i];
	}

	/* Convert little-endian values out. */
	for (i = 0; i < 16; i++) {
		le32enc(&B[4 * i], B32[i]);
	}
}
/**
 * crypto_scrypt_smix(B, r, N, V, XY):
 * Compute B = SMix_r(B, N).  The input B must be 128r bytes in length;
 * the temporary storage V must be 128rN bytes in length; the temporary
 * storage XY must be 256r + 64 bytes in length.  The value N must be a
 * power of 2 greater than 1.  The arrays B, V, and XY must be aligned to a
 * multiple of 64 bytes.
 */
void
crypto_scrypt_smix(uint8_t * B, size_t r, uint64_t N, void * _V, void * XY)
{
	uint32_t * X = XY;
	uint32_t * Y = (void *)((uint8_t *)(XY) + 128 * r);
	/* Z is the trailing 64 bytes of XY, used as blockmix scratch. */
	uint32_t * Z = (void *)((uint8_t *)(XY) + 256 * r);
	uint32_t * V = _V;
	uint64_t i;
	uint64_t j;
	size_t k;

	/* 1: X <-- B */
	for (k = 0; k < 32 * r; k++)
		X[k] = le32dec(&B[4 * k]);

	/* 2: for i = 0 to N - 1 do */
	/* Two iterations are unrolled so X and Y ping-pong as src/dst. */
	for (i = 0; i < N; i += 2) {
		/* 3: V_i <-- X */
		blkcpy(&V[i * (32 * r)], X, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(X, Y, Z, r);

		/* 3: V_i <-- X */
		blkcpy(&V[(i + 1) * (32 * r)], Y, 128 * r);

		/* 4: X <-- H(X) */
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 6: for i = 0 to N - 1 do */
	for (i = 0; i < N; i += 2) {
		/* 7: j <-- Integerify(X) mod N */
		/* N is a power of 2, so the mask implements the modulus. */
		j = integerify(X, r) & (N - 1);

		/* 8: X <-- H(X \xor V_j) */
		blkxor(X, &V[j * (32 * r)], 128 * r);
		blockmix_salsa8(X, Y, Z, r);

		/* 7: j <-- Integerify(X) mod N */
		j = integerify(Y, r) & (N - 1);

		/* 8: X <-- H(X \xor V_j) */
		blkxor(Y, &V[j * (32 * r)], 128 * r);
		blockmix_salsa8(Y, X, Z, r);
	}

	/* 10: B' <-- X */
	for (k = 0; k < 32 * r; k++)
		le32enc(&B[4 * k], X[k]);
}
void
uuid_enc_le(void *buf, const struct uuid *uuid)
{
	uint8_t *out = buf;
	int i;

	/* Time fields are stored little-endian. */
	le32enc(out, uuid->time_low);
	le16enc(out + 4, uuid->time_mid);
	le16enc(out + 6, uuid->time_hi_and_version);
	/* Clock sequence and node are raw byte sequences. */
	out[8] = uuid->clock_seq_hi_and_reserved;
	out[9] = uuid->clock_seq_low;
	for (i = 0; i < _UUID_NODE_LEN; i++)
		out[10 + i] = uuid->node[i];
}
/* Finish the MD5 computation and write the 16-byte digest to ${dst}. */
void
md5_final(uint8_t *dst, struct md5_ctx *ctx)
{
	static const uint8_t padding[MD5_BLOCK_LENGTH] = { 0x80 };
	uint64_t bitlen = ctx->nbytes * 8;
	int padbytes, pos = bufpos(ctx);

	/* Pad so that exactly 8 bytes remain in the final block. */
	padbytes = MD5_BLOCK_LENGTH - 8 - pos;
	if (padbytes <= 0)
		padbytes += MD5_BLOCK_LENGTH;
	md5_update(ctx, padding, padbytes);

	/* Store the message bit length directly in the last two words. */
	swap_words(ctx->buf, 14);
	ctx->buf[14] = bitlen;
	ctx->buf[15] = bitlen >> 32;

	/* Run the final compression and emit the state little-endian. */
	md5_mix(ctx, ctx->buf);
	le32enc(dst + 0, ctx->a);
	le32enc(dst + 4, ctx->b);
	le32enc(dst + 8, ctx->c);
	le32enc(dst + 12, ctx->d);
}
/* Emulated 32-bit store into BIOS address space. */
static void
x86bios_emu_wrl(struct x86emu *emu, uint32_t addr, uint32_t val)
{
	uint32_t *va;

	/* Translate the emulated physical address to a mapped page. */
	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);
	/*
	 * NOTE(review): assumes x86bios_set_fault() does not return
	 * (otherwise va == NULL would be dereferenced below) — confirm.
	 */

#ifndef __NO_STRICT_ALIGNMENT
	/* Unaligned stores go through the byte-wise encoder. */
	if ((addr & 3) != 0)
		le32enc(va, val);
	else
#endif
		*va = htole32(val);
}
/*
 * scrypt(p=1, r=1) with N = 2^(Nfactor+1), SSE2 variant.
 * ${scratchpad} must provide room for N 128-byte blocks (plus 63 bytes
 * of alignment slack); the 80-byte ${input} is both password and salt,
 * and 32 bytes of output are written to ${output}.
 */
void scrypt_N_1_1_256_sp_sse2(const char *input, char *output,
    char *scratchpad, unsigned char Nfactor)
{
	uint8_t B[128];
	union {
		__m128i i128[8];
		uint32_t u32[32];
	} X;
	__m128i *V;
	uint32_t i, j, k, N;

	/* Round the scratchpad up to a 64-byte boundary. */
	V = (__m128i *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* B <-- PBKDF2(input, input, 1, 128). */
	PBKDF2_SHA256((const uint8_t *)input, 80, (const uint8_t *)input, 80,
	    1, B, 128);

	/* Load B into X, permuting words by (i * 5) mod 16 per block. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			X.u32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	N = (1 << (Nfactor + 1));

	/* Fill V with the iterates of the salsa8 mix. */
	for (i = 0; i < N; i++) {
		for (k = 0; k < 8; k++)
			V[i * 8 + k] = X.i128[k];
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}
	/* Second loop: data-dependent lookups into V. */
	for (i = 0; i < N; i++) {
		/* j <-- Integerify(X) mod N (N is a power of 2). */
		j = 8 * (X.u32[16] & (N-1));
		for (k = 0; k < 8; k++)
			X.i128[k] = _mm_xor_si128(X.i128[k], V[j + k]);
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* Store X back into B with the inverse permutation. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]);
		}
	}

	/* output <-- PBKDF2(input, B, 1, 32). */
	PBKDF2_SHA256((const uint8_t *)input, 80, B, 128, 1,
	    (uint8_t *)output, 32);
}
/*
 * scrypt-like mix with a fixed, non-power-of-two N = 123 (SSE2 variant).
 * The 80-byte ${input} is both password and salt; 32 bytes of output are
 * written to ${output}.  ${scratchpad} must provide room for N 128-byte
 * blocks plus 63 bytes of alignment slack.
 *
 * Fix: the second loop header was missing a semicolon
 * ("for (i = 0; i < N i++)"), which did not compile.
 */
void scrypt_8_4_1_256_sp_sse2(const char *input, char *output,
    char *scratchpad)
{
	const int N = 123;
	uint8_t B[128];
	union {
		__m128i i128[8];
		uint32_t u32[32];
	} X;
	__m128i *V;
	uint32_t i, j, k;

	/* Round the scratchpad up to a 64-byte boundary. */
	V = (__m128i *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* B <-- PBKDF2(input, input, 1, 128). */
	PBKDF2_SHA256((const uint8_t *)input, 80, (const uint8_t *)input, 80,
	    1, B, 128);

	/* Load B into X, permuting words by (i * 5) mod 16 per block. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			X.u32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	/* Fill V with the iterates of the salsa8 mix. */
	for (i = 0; i < N; i++) {
		for (k = 0; k < 8; k++)
			V[i * 8 + k] = X.i128[k];
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}
	/* Second loop: data-dependent lookups into V. */
	for (i = 0; i < N; i++) {
		/* N is not a power of two here, so use a true modulus. */
		j = 8 * (X.u32[16] % (N));
		for (k = 0; k < 8; k++)
			X.i128[k] = _mm_xor_si128(X.i128[k], V[j + k]);
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* Store X back into B with the inverse permutation. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]);
		}
	}

	/* output <-- PBKDF2(input, B, 1, 32). */
	PBKDF2_SHA256((const uint8_t *)input, 80, B, 128, 1,
	    (uint8_t *)output, 32);
}
// default void encode_little_endian_17_19 ( uint32_t* ntime, uint32_t* nonce, struct work* work ) { le32enc( ntime, work->data[17] ); le32enc( nonce, work->data[19] ); }
/**
 * ccache_write(cache, path):
 * Write the given chunkification cache into the directory ${path}.
 */
int
ccache_write(CCACHE * cache, const char * path)
{
	struct ccache_internal * C = cache;
	struct ccache_write_internal W;
	uint8_t N[4];
	char * s_old;

	/* Construct name of temporary cache file. */
	if (asprintf(&W.s, "%s/cache.new", path) == -1) {
		warnp("asprintf");
		goto err0;
	}

	/* Open the cache file for writing. */
	if ((W.f = fopen(W.s, "w")) == NULL) {
		warnp("fopen(%s)", W.s);
		goto err1;
	}

	/**
	 * We make three passes through the cache tree:
	 * 1. Counting the number of records which will be written to disk.
	 *    This is necessary since records in the cache which are too old
	 *    will not be written, but the on-disk cache format starts with
	 *    the number of records.
	 * 2. Writing the records and suffixes.
	 * 3. Writing the cached chunk headers and compressed entry trailers.
	 */

	/* Count the number of records which need to be written. */
	W.N = 0;
	if (patricia_foreach(C->tree, callback_count, &W)) {
		warnp("patricia_foreach");
		goto err2;
	};

	/* Write the record count as a 32-bit little-endian prefix. */
	le32enc(N, W.N);
	if (fwrite(N, 4, 1, W.f) != 1) {
		warnp("fwrite(%s)", W.s);
		goto err2;
	}

	/*
	 * Write the records and suffixes.  sbuf holds the previous path so
	 * callback_write_rec can prefix-compress consecutive entries.
	 */
	W.sbuf = NULL;
	W.sbuflen = 0;
	if (patricia_foreach(C->tree, callback_write_rec, &W)) {
		warnp("Error writing cache to %s", W.s);
		goto err2;
	}
	free(W.sbuf);

	/* Write the chunk headers and compressed entry trailers. */
	if (patricia_foreach(C->tree, callback_write_data, &W)) {
		warnp("Error writing cache to %s", W.s);
		goto err2;
	}

	/*
	 * Close the file.  Failures below this point jump to err1, not
	 * err2, since the file is already closed.
	 */
	fclose(W.f);

	/* Construct the name of the old cache file. */
	if (asprintf(&s_old, "%s/cache", path) == -1) {
		warnp("asprintf");
		goto err1;
	}

	/* Delete the old file, if it exists (ENOENT is not an error). */
	if (unlink(s_old)) {
		if (errno != ENOENT) {
			warnp("unlink(%s)", s_old);
			free(s_old);
			goto err1;
		}
	}

	/* Move the new cache file into place. */
	if (rename(W.s, s_old)) {
		warnp("rename(%s, %s)", W.s, s_old);
		free(s_old);
		goto err1;
	}

	/* Free strings allocated by asprintf. */
	free(s_old);
	free(W.s);

	/* Success! */
	return (0);

err2:
	fclose(W.f);
err1:
	free(W.s);
err0:
	/* Failure! */
	return (-1);
}
/**
 * multitape_metadata_enc(mdat, bufp, buflenp):
 * Encode a struct tapemetadata into a buffer.  Return the buffer and its
 * length via ${bufp} and ${buflenp} respectively.
 */
static int
multitape_metadata_enc(const struct tapemetadata * mdat, uint8_t ** bufp,
    size_t * buflenp)
{
	uint8_t * buf;		/* Encoded metadata. */
	size_t buflen;		/* Encoded metadata size. */
	uint8_t * p;		/* Write cursor into buf. */
	int i;

	/* Add up the lengths of various pieces of metadata. */
	buflen = strlen(mdat->name) + 1;	/* name (NUL-terminated) */
	buflen += 8;				/* ctime */
	buflen += 4;				/* argc */
	for (i = 0; i < mdat->argc; i++)	/* argv (each NUL-terminated) */
		buflen += strlen(mdat->argv[i]) + 1;
	buflen += 32;				/* indexhash */
	buflen += 8;				/* index length */
	buflen += 256;				/* 2048-bit RSA signature */

	/* Allocate memory. */
	if ((p = buf = malloc(buflen)) == NULL)
		goto err0;

	/* Copy name, including its NUL terminator. */
	memcpy(p, mdat->name, strlen(mdat->name) + 1);
	p += strlen(mdat->name) + 1;

	/* Encode ctime and argc (little-endian). */
	le64enc(p, mdat->ctime);
	p += 8;
	le32enc(p, mdat->argc);
	p += 4;

	/* Copy argv. */
	for (i = 0; i < mdat->argc; i++) {
		memcpy(p, mdat->argv[i], strlen(mdat->argv[i]) + 1);
		p += strlen(mdat->argv[i]) + 1;
	}

	/* Copy index hash. */
	memcpy(p, mdat->indexhash, 32);
	p += 32;

	/* Encode index length. */
	le64enc(p, mdat->indexlen);
	p += 8;

	/*
	 * Generate signature.  At this point p - buf == buflen - 256, so the
	 * signature covers everything above and fills the last 256 bytes.
	 */
	if (crypto_rsa_sign(CRYPTO_KEY_SIGN_PRIV, buf, p - buf, p, 256))
		goto err1;

	/* Return buffer and length. */
	*bufp = buf;
	*buflenp = buflen;

	/* Success! */
	return (0);

err1:
	free(buf);
err0:
	/* Failure! */
	return (-1);
}
static int vmdk_write(int fd) { struct vmdk_header hdr; uint32_t *gt, *gd, *rgd; char *buf, *desc; off_t cur, lim; uint64_t imagesz; lba_t blkofs, blkcnt; size_t gdsz, gtsz; uint32_t sec, cursec; int error, desc_len, n, ngrains, ngts; imagesz = (image_get_size() * secsz) / VMDK_SECTOR_SIZE; memset(&hdr, 0, sizeof(hdr)); le32enc(&hdr.magic, VMDK_MAGIC); le32enc(&hdr.version, VMDK_VERSION); le32enc(&hdr.flags, VMDK_FLAGS_NL_TEST | VMDK_FLAGS_RGT_USED); le64enc(&hdr.capacity, imagesz); le64enc(&hdr.grain_size, grainsz); n = asprintf(&desc, desc_fmt, 1 /*version*/, 0 /*CID*/, (uintmax_t)imagesz /*size*/, "" /*name*/, ncyls /*cylinders*/, nheads /*heads*/, nsecs /*sectors*/); if (n == -1) return (ENOMEM); desc_len = (n + VMDK_SECTOR_SIZE - 1) & ~(VMDK_SECTOR_SIZE - 1); desc = realloc(desc, desc_len); memset(desc + n, 0, desc_len - n); le64enc(&hdr.desc_offset, 1); le64enc(&hdr.desc_size, desc_len / VMDK_SECTOR_SIZE); le32enc(&hdr.ngtes, VMDK_NGTES); sec = desc_len / VMDK_SECTOR_SIZE + 1; ngrains = imagesz / grainsz; ngts = (ngrains + VMDK_NGTES - 1) / VMDK_NGTES; gdsz = (ngts * sizeof(uint32_t) + VMDK_SECTOR_SIZE - 1) & ~(VMDK_SECTOR_SIZE - 1); gd = calloc(1, gdsz); if (gd == NULL) { free(desc); return (ENOMEM); } le64enc(&hdr.gd_offset, sec); sec += gdsz / VMDK_SECTOR_SIZE; for (n = 0; n < ngts; n++) { le32enc(gd + n, sec); sec += VMDK_NGTES * sizeof(uint32_t) / VMDK_SECTOR_SIZE; } rgd = calloc(1, gdsz); if (rgd == NULL) { free(gd); free(desc); return (ENOMEM); } le64enc(&hdr.rgd_offset, sec); sec += gdsz / VMDK_SECTOR_SIZE; for (n = 0; n < ngts; n++) { le32enc(rgd + n, sec); sec += VMDK_NGTES * sizeof(uint32_t) / VMDK_SECTOR_SIZE; } sec = (sec + grainsz - 1) & ~(grainsz - 1); if (verbose) fprintf(stderr, "VMDK: overhead = %ju\n", (uintmax_t)(sec * VMDK_SECTOR_SIZE)); le64enc(&hdr.overhead, sec); be32enc(&hdr.nl_test, VMDK_NL_TEST); gt = calloc(ngts, VMDK_NGTES * sizeof(uint32_t)); if (gt == NULL) { free(rgd); free(gd); free(desc); return (ENOMEM); } gtsz = ngts * 
VMDK_NGTES * sizeof(uint32_t); cursec = sec; blkcnt = (grainsz * VMDK_SECTOR_SIZE) / secsz; for (n = 0; n < ngrains; n++) { blkofs = n * blkcnt; if (image_data(blkofs, blkcnt)) { le32enc(gt + n, cursec); cursec += grainsz; } } error = 0; if (!error && sparse_write(fd, &hdr, VMDK_SECTOR_SIZE) < 0) error = errno; if (!error && sparse_write(fd, desc, desc_len) < 0) error = errno; if (!error && sparse_write(fd, gd, gdsz) < 0) error = errno; if (!error && sparse_write(fd, gt, gtsz) < 0) error = errno; if (!error && sparse_write(fd, rgd, gdsz) < 0) error = errno; if (!error && sparse_write(fd, gt, gtsz) < 0) error = errno; free(gt); free(rgd); free(gd); free(desc); if (error) return (error); cur = VMDK_SECTOR_SIZE + desc_len + (gdsz + gtsz) * 2; lim = sec * VMDK_SECTOR_SIZE; if (cur < lim) { buf = calloc(1, VMDK_SECTOR_SIZE); if (buf == NULL) error = ENOMEM; while (!error && cur < lim) { if (sparse_write(fd, buf, VMDK_SECTOR_SIZE) < 0) error = errno; cur += VMDK_SECTOR_SIZE; } if (buf != NULL) free(buf); } if (error) return (error); blkcnt = (grainsz * VMDK_SECTOR_SIZE) / secsz; for (n = 0; n < ngrains; n++) { blkofs = n * blkcnt; if (image_data(blkofs, blkcnt)) { error = image_copyout_region(fd, blkofs, blkcnt); if (error) return (error); } } return (image_copyout_done(fd)); }
/* Encode a struct disklabel into its little-endian on-disk layout. */
void
bsd_disklabel_le_enc(u_char *ptr, struct disklabel *d)
{
	u_char *q, *end;
	uint16_t cksum;
	int n;

	le32enc(ptr + 0, d->d_magic);
	le16enc(ptr + 4, d->d_type);
	le16enc(ptr + 6, d->d_subtype);
	bcopy(d->d_typename, ptr + 8, 16);
	bcopy(d->d_packname, ptr + 24, 16);
	le32enc(ptr + 40, d->d_secsize);
	le32enc(ptr + 44, d->d_nsectors);
	le32enc(ptr + 48, d->d_ntracks);
	le32enc(ptr + 52, d->d_ncylinders);
	le32enc(ptr + 56, d->d_secpercyl);
	le32enc(ptr + 60, d->d_secperunit);
	le16enc(ptr + 64, d->d_sparespertrack);
	le16enc(ptr + 66, d->d_sparespercyl);
	le32enc(ptr + 68, d->d_acylinders);
	le16enc(ptr + 72, d->d_rpm);
	le16enc(ptr + 74, d->d_interleave);
	le16enc(ptr + 76, d->d_trackskew);
	le16enc(ptr + 78, d->d_cylskew);
	le32enc(ptr + 80, d->d_headswitch);
	le32enc(ptr + 84, d->d_trkseek);
	le32enc(ptr + 88, d->d_flags);
	/* Five drive-data words at offsets 92..108. */
	for (n = 0; n < 5; n++)
		le32enc(ptr + 92 + 4 * n, d->d_drivedata[n]);
	/* Five spare words at offsets 112..128. */
	for (n = 0; n < 5; n++)
		le32enc(ptr + 112 + 4 * n, d->d_spare[n]);
	le32enc(ptr + 132, d->d_magic2);
	/* Checksum slot is zero while the checksum is being computed. */
	le16enc(ptr + 136, 0);
	le16enc(ptr + 138, d->d_npartitions);
	le32enc(ptr + 140, d->d_bbsize);
	le32enc(ptr + 144, d->d_sbsize);
	for (n = 0; n < d->d_npartitions; n++)
		bsd_partition_le_enc(ptr + 148 + 16 * n, &d->d_partitions[n]);

	/* 16-bit XOR checksum over everything encoded so far. */
	end = ptr + 148 + 16 * d->d_npartitions;
	cksum = 0;
	for (q = ptr; q < end; q += 2)
		cksum ^= le16dec(q);
	le16enc(ptr + 136, cksum);
}
/**
 * export_BN(bn, buf, buflen, len):
 * If ${*buf} != NULL, export the provided large integer into the buffer,
 * and adjust the buffer pointer and remaining buffer length appropriately.
 * Add the required storage length to ${len}.
 *
 * Fix: the byte-reversal loop condition "i < bnlen - 1 - i" underflowed
 * when bnlen == 0 (BN_num_bytes of a zero BIGNUM), producing an
 * out-of-bounds access; iterate over bnlen / 2 instead.
 */
static int
export_BN(BIGNUM * bn, uint8_t ** buf, size_t * buflen, uint32_t * len)
{
	size_t i;
	unsigned int bnlen;
	uint8_t t;

	/* Figure out how much space we need. */
	bnlen = BN_num_bytes(bn);

	/* Add the required storage length to ${len}, checking for overflow. */
	if (*len + sizeof(uint32_t) < *len) {
		errno = ENOMEM;
		goto err0;
	}
	*len += sizeof(uint32_t);
	if (*len + bnlen < *len) {
		errno = ENOMEM;
		goto err0;
	}
	*len += bnlen;

	/* If ${*buf} == NULL, we're only computing the length. */
	if (*buf == NULL)
		goto done;

	/* Export the length of the integer. */
	if (*buflen < sizeof(uint32_t)) {
		warn0("Unexpected end of key buffer");
		goto err0;
	}
	le32enc(*buf, bnlen);
	*buf += sizeof(uint32_t);
	*buflen -= sizeof(uint32_t);

	/* Export the key as a big-endian integer. */
	if (*buflen < bnlen) {
		warn0("Unexpected end of key buffer");
		goto err0;
	}
	BN_bn2bin(bn, *buf);

	/*
	 * Convert to little-endian format by reversing the bytes in
	 * place.  Iterating to bnlen / 2 pairs each byte with its mirror
	 * and is safe for bnlen == 0 and 1.
	 */
	for (i = 0; i < bnlen / 2; i++) {
		t = (*buf)[i];
		(*buf)[i] = (*buf)[bnlen - 1 - i];
		(*buf)[bnlen - 1 - i] = t;
	}

	/* Adjust buffer pointer and remaining buffer length. */
	*buf += bnlen;
	*buflen -= bnlen;

done:
	/* Success! */
	return (0);

err0:
	/* Failure! */
	return (-1);
}
/* Store each of the 20 work-data words in little-endian byte order,
 * in place; return the number of bytes covered (80). */
int suw_build_hex_string_80( struct work *work )
{
   const int nwords = 80 / sizeof(uint32_t);

   for ( int w = 0; w < nwords; w++ )
      le32enc( &work->data[w], work->data[w] );
   return 80;
}
/*
 * Serialize a GBDE lock sector into ${ptr}.  The field order is keyed
 * off ${sha2} via g_bde_shuffle_lock(), so the on-disk layout differs
 * per lock; an MD5 over the whole sector is stored in the 16-byte slot
 * reserved by case 12.  Returns 0 on success, -1 on a layout error.
 */
int
g_bde_encode_lock(u_char *sha2, struct g_bde_key *gl, u_char *ptr)
{
	int shuffle[NLOCK_FIELDS];
	u_char *hash, *p;
	int i;
	MD5_CTX c;

	p = ptr;
	hash = NULL;
	/* Derive the per-lock field permutation from the key material. */
	g_bde_shuffle_lock(sha2, shuffle);
	for (i = 0; i < NLOCK_FIELDS; i++) {
		switch(shuffle[i]) {
		case 0:
			le64enc(p, gl->sector0);
			p += 8;
			break;
		case 1:
			le64enc(p, gl->sectorN);
			p += 8;
			break;
		case 2:
			le64enc(p, gl->keyoffset);
			p += 8;
			break;
		case 3:
			le32enc(p, gl->sectorsize);
			p += 4;
			break;
		case 4:
			le32enc(p, gl->flags);
			p += 4;
			break;
		case 5:
		case 6:
		case 7:
		case 8:
			/* Cases 5..8 map to lsector[0..3]. */
			le64enc(p, gl->lsector[shuffle[i] - 5]);
			p += 8;
			break;
		case 9:
			bcopy(gl->spare, p, sizeof gl->spare);
			p += sizeof gl->spare;
			break;
		case 10:
			bcopy(gl->salt, p, sizeof gl->salt);
			p += sizeof gl->salt;
			break;
		case 11:
			bcopy(gl->mkey, p, sizeof gl->mkey);
			p += sizeof gl->mkey;
			break;
		case 12:
			/* Reserve (zeroed) space for the MD5 hash below. */
			bzero(p, 16);
			hash = p;
			p += 16;
			break;
		}
	}
	/* Sanity: the shuffle must have produced exactly one full sector. */
	if(ptr + G_BDE_LOCKSIZE != p)
		return(-1);
	if (hash == NULL)
		return(-1);
	/* Hash covers the whole sector, with the hash slot still zeroed. */
	MD5Init(&c);
	MD5Update(&c, "0000", 4);	/* Versioning */
	MD5Update(&c, ptr, G_BDE_LOCKSIZE);
	MD5Final(hash, &c);
	return(0);
}
/* Write a 32bit uint; return next position. */
unsigned
bs_write_u32(bin_stream_t * bs, uint32_t data)
{
	/* Encode little-endian at the cursor, then advance it. */
	le32enc(&bs->data[bs->pos], data);
	bs->pos += 4;
	return (bs->pos);
}
/* Callback to write a record and path suffix to disk. */
static int
callback_write_rec(void * cookie, uint8_t * s, size_t slen, void * rec)
{
	struct ccache_record_external ccre;
	struct ccache_write_internal * W = cookie;
	struct ccache_record * ccr = rec;
	size_t plen;

	/* Don't write an entry if there are no chunks and no trailer. */
	if ((ccr->nch == 0) && (ccr->tlen == 0))
		goto done;

	/* Don't write an entry if it hasn't been used recently. */
	if (ccr->age > MAXAGE)
		goto done;

	/*
	 * Figure out how much prefix is shared with the previously written
	 * path (W->sbuf); only the differing suffix is stored.
	 */
	for (plen = 0; plen < slen && plen < W->sbuflen; plen++) {
		if (s[plen] != W->sbuf[plen])
			break;
	}

	/* Convert integers to portable (little-endian) format. */
	le64enc(ccre.ino, ccr->ino);
	le64enc(ccre.size, ccr->size);
	le64enc(ccre.mtime, ccr->mtime);
	le64enc(ccre.nch, ccr->nch);
	le32enc(ccre.tlen, ccr->tlen);
	le32enc(ccre.tzlen, ccr->tzlen);
	le32enc(ccre.prefixlen, plen);
	le32enc(ccre.suffixlen, slen - plen);
	/* Entries age by one each time the cache is written. */
	le32enc(ccre.age, ccr->age + 1);

	/* Write cache entry header to disk. */
	if (fwrite(&ccre, sizeof(ccre), 1, W->f) != 1)
		goto err0;

	/* Write path suffix to disk. */
	if (fwrite(s + plen, slen - plen, 1, W->f) != 1)
		goto err0;

	/*
	 * Remember this path for the next record's prefix computation,
	 * enlarging the last-path buffer if needed.
	 */
	if (W->sbuflen < slen + 1) {
		free(W->sbuf);
		W->sbuflen = slen + 1;
		if ((W->sbuf = malloc(W->sbuflen)) == NULL) {
			W->sbuflen = 0;
			goto err0;
		}
		memcpy(W->sbuf, s, slen);
	} else
		/* Buffer is big enough; only the suffix changed. */
		memcpy(W->sbuf + plen, s + plen, slen - plen);
	W->sbuf[slen] = 0;

done:
	/* Success! */
	return (0);

err0:
	/* Failure! */
	return (-1);
}
// default int suw_build_hex_string_128( struct work *work ) { for ( int i = 0; i < 128 / sizeof(uint32_t); i++ ) le32enc( &work->data[i], work->data[i] ); return 128; }