static void
sha3_final(uint8_t *h, unsigned d, struct sha3 *C, unsigned rw)
{
	unsigned nw, iw;

	assert(d <= 8*25);
	assert(0 < C->nb);

	/* Append 01, pad with 10*1 up to buffer boundary, LSB first. */
	nw = (C->nb + 7)/8;
	assert(0 < nw);
	assert(nw <= rw);
	C->A[rw - nw] ^= (uint64_t)0x06 << (8*(8*nw - C->nb));
	C->A[rw - 1] ^= 0x8000000000000000ULL;

	/* Permute one last time. */
	keccakf1600(C->A);

	/* Reveal the first 8d bits of state, forget 1600-8d of them. */
	for (iw = 0; iw < d/8; iw++)
		le64enc(h + 8*iw, C->A[iw]);
	h += 8*iw;
	d -= 8*iw;
	if (0 < d) {
		/* For SHA3-224, we need to expose a partial word. */
		uint64_t T = C->A[iw];

		do {
			*h++ = T & 0xff;
			T >>= 8;
		} while (--d);
	}
}
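/*
 * Every snippet in this collection leans on the le64enc()/le64dec()
 * family from BSD's <sys/endian.h>.  As a reference point, here is a
 * minimal portable sketch of the 64-bit pair (not the system version,
 * which is typically optimized): byte-at-a-time, so it is independent
 * of host endianness and makes no alignment assumptions.
 */
static inline void
le64enc(void *pp, uint64_t u)
{
	uint8_t *p = pp;
	unsigned i;

	for (i = 0; i < 8; i++)
		p[i] = (uint8_t)(u >> (8 * i));
}

static inline uint64_t
le64dec(const void *pp)
{
	const uint8_t *p = pp;
	uint64_t u = 0;
	unsigned i;

	for (i = 0; i < 8; i++)
		u |= (uint64_t)p[i] << (8 * i);
	return (u);
}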
static void
g_bde_kkey(struct g_bde_softc *sc, keyInstance *ki, int dir, off_t sector)
{
	u_int t;
	MD5_CTX ct;
	u_char buf[16];
	u_char buf2[8];

	/* We have to be architecture neutral */
	le64enc(buf2, sector);

	MD5Init(&ct);
	MD5Update(&ct, sc->key.salt, 8);
	MD5Update(&ct, buf2, sizeof buf2);
	MD5Update(&ct, sc->key.salt + 8, 8);
	MD5Final(buf, &ct);

	MD5Init(&ct);
	for (t = 0; t < 16; t++) {
		MD5Update(&ct, &sc->key.mkey[buf[t]], 1);
		if (t == 8)
			MD5Update(&ct, buf2, sizeof buf2);
	}
	bzero(buf2, sizeof buf2);
	MD5Final(buf, &ct);
	bzero(&ct, sizeof ct);
	AES_makekey(ki, dir, G_BDE_KKEYBITS, buf);
	bzero(buf, sizeof buf);
}
static const char *
tencode(int t, uint64_t val)
{
	static char res[64];
	uint8_t buf[16];
	bool be = t > 0;
	int i;

	/* Positive t means big-endian; negative means little-endian. */
	if (t < 0)
		t = -t;

	/* Fill with a canary value so overruns can be detected. */
	memset(buf, 0xFC, sizeof(buf));
	if (be) {
		switch (t) {
		case 2: be16enc(buf, val); break;
		case 4: be32enc(buf, val); break;
		case 8: be64enc(buf, val); break;
		}
	} else {
		switch (t) {
		case 2: le16enc(buf, val); break;
		case 4: le32enc(buf, val); break;
		case 8: le64enc(buf, val); break;
		}
	}

	/* Check that nothing was written past the end of the value. */
	for (i = t; i < (int)sizeof(buf); i++) {
		if (buf[i] != 0xFC)
			return "OVER";
	}

	snprintf(res, sizeof(res), "%02X %02X %02X %02X %02X %02X %02X %02X ",
	    buf[0], buf[1], buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	res[t*3 - 1] = 0;
	return res;
}
void
blake2b_final(struct blake2b *B, void *digest)
{
	uint8_t *d = digest;
	unsigned dlen = B->dlen;
	unsigned i;

	/* Pad with zeros, and do the last compression. */
	B->c += B->nb;
	for (i = B->nb; i < 128; i++)
		B->b[i] = 0;
	blake2b_compress(B->h, B->c, ~(uint64_t)0, B->b);

	/* Reveal the first dlen/8 words of the state. */
	for (i = 0; i < dlen/8; i++)
		le64enc(d + 8*i, B->h[i]);
	d += 8*i;
	dlen -= 8*i;

	/* If the caller wants a partial word, reveal that too. */
	if (dlen) {
		uint64_t hi = B->h[i];

		do {
			*d++ = hi & 0xff;
			hi >>= 8;
		} while (--dlen);
	}

	/* Erase the state.  (sizeof B would only measure the pointer.) */
	(void)blake2b_explicit_memset(B, 0, sizeof(*B));
}
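/*
 * blake2b_explicit_memset() is not shown above; presumably it behaves
 * like NetBSD's explicit_memset(), i.e. a memset that the compiler may
 * not elide even though the buffer is dead afterwards.  A hypothetical
 * sketch using the volatile-function-pointer idiom:
 */
#include <string.h>

static void *(*volatile blake2b_memset_impl)(void *, int, size_t) = memset;

static void *
blake2b_explicit_memset(void *b, int c, size_t len)
{

	/* The volatile pointer keeps dead-store elimination at bay. */
	return (blake2b_memset_impl(b, c, len));
}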
int
g_bde_keyloc_encrypt(u_char *sha2, uint64_t v0, uint64_t v1, void *output)
{
	u_char buf[16];
	keyInstance ki;
	cipherInstance ci;

	le64enc(buf, v0);
	le64enc(buf + 8, v1);
	AES_init(&ci);
	AES_makekey(&ki, DIR_ENCRYPT, G_BDE_KKEYBITS, sha2 + 0);
	AES_encrypt(&ci, &ki, buf, output, sizeof buf);
	bzero(buf, sizeof buf);
	bzero(&ci, sizeof ci);
	bzero(&ki, sizeof ki);
	return (0);
}
static int
write_ioreq(int fd, struct ioreq *iorq)
{
	struct iorec iorc;

	le64enc(&iorc.iorc_offset, iorq->iorq_offset);
	le32enc(&iorc.iorc_length, iorq->iorq_length);
	iorc.iorc_type = iorq->iorq_type;

	return (write(fd, &iorc, sizeof(iorc)) != sizeof(iorc));
}
/**
 * endentry(d):
 * An archive entry or trailer is ending; flush buffers into the stream.
 */
static int
endentry(TAPE_W * d)
{
	struct entryheader eh;
	uint8_t * hbuf;
	size_t hlen;

	/* Export the archive header as a static buffer. */
	if (bytebuf_export(d->hbuf, &hbuf, &hlen))
		goto err0;

	/* Create a new elastic archive header buffer. */
	if ((d->hbuf = bytebuf_init(0)) == NULL)
		goto err1;

	/* Construct entry header. */
	le32enc(eh.hlen, hlen);
	le64enc(eh.clen, d->clen);
	le32enc(eh.tlen, d->tlen);

	/* Write entry header to header stream. */
	if (chunkify_write(d->h.c, (uint8_t *)(&eh), sizeof(struct entryheader)))
		goto err1;

	/* Write archive header to header stream. */
	if (chunkify_write(d->h.c, hbuf, hlen))
		goto err1;

	/* Free header buffer. */
	free(hbuf);

	/* Reset pending write lengths. */
	d->clen = d->tlen = 0;

	/* Success! */
	return (0);

err1:
	free(hbuf);
err0:
	/* Failure! */
	return (-1);
}
static int
g_bsd_writelabel(struct g_geom *gp, u_char *bootcode)
{
	off_t secoff;
	u_int secsize;
	struct g_consumer *cp;
	struct g_slicer *gsp;
	struct g_bsd_softc *ms;
	u_char *buf;
	uint64_t sum;
	int error, i;

	gsp = gp->softc;
	ms = gsp->softc;
	cp = LIST_FIRST(&gp->consumer);
	/* Get sector size, we need it to read data. */
	secsize = cp->provider->sectorsize;
	secoff = ms->labeloffset % secsize;
	if (bootcode == NULL) {
		buf = g_read_data(cp, ms->labeloffset - secoff, secsize, &error);
		if (buf == NULL)
			return (error);
		bcopy(ms->label, buf + secoff, sizeof(ms->label));
	} else {
		buf = bootcode;
		bcopy(ms->label, buf + ms->labeloffset, sizeof(ms->label));
	}
	if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
		/*
		 * Alpha bootblocks carry a 64-bit checksum over the first
		 * 63 quadwords, stored in the 64th (offset 504 = 63*8).
		 */
		sum = 0;
		for (i = 0; i < 63; i++)
			sum += le64dec(buf + i * 8);
		le64enc(buf + 504, sum);
	}
	if (bootcode == NULL) {
		error = g_write_data(cp, ms->labeloffset - secoff, buf, secsize);
		g_free(buf);
	} else {
		error = g_write_data(cp, 0, bootcode, BBSIZE);
	}
	return (error);
}
/* Write a 64-bit uint; return next position. */
unsigned
bs_write_u64(bin_stream_t * bs, uint64_t data)
{

	le64enc(bs->data + bs->pos, data);
	return (bs->pos += 8);
}
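/*
 * A matching reader is not shown in the original; this hypothetical
 * companion assumes only the bin_stream_t fields (data, pos) that
 * bs_write_u64() already uses.
 */

/* Read a 64-bit uint; return next position. */
unsigned
bs_read_u64(bin_stream_t * bs, uint64_t * data)
{

	*data = le64dec(bs->data + bs->pos);
	return (bs->pos += 8);
}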
int
g_bde_encode_lock(u_char *sha2, struct g_bde_key *gl, u_char *ptr)
{
	int shuffle[NLOCK_FIELDS];
	u_char *hash, *p;
	int i;
	MD5_CTX c;

	p = ptr;
	hash = NULL;
	/* Fields are emitted in a key-dependent shuffled order. */
	g_bde_shuffle_lock(sha2, shuffle);
	for (i = 0; i < NLOCK_FIELDS; i++) {
		switch (shuffle[i]) {
		case 0:
			le64enc(p, gl->sector0);
			p += 8;
			break;
		case 1:
			le64enc(p, gl->sectorN);
			p += 8;
			break;
		case 2:
			le64enc(p, gl->keyoffset);
			p += 8;
			break;
		case 3:
			le32enc(p, gl->sectorsize);
			p += 4;
			break;
		case 4:
			le32enc(p, gl->flags);
			p += 4;
			break;
		case 5:
		case 6:
		case 7:
		case 8:
			le64enc(p, gl->lsector[shuffle[i] - 5]);
			p += 8;
			break;
		case 9:
			bcopy(gl->spare, p, sizeof gl->spare);
			p += sizeof gl->spare;
			break;
		case 10:
			bcopy(gl->salt, p, sizeof gl->salt);
			p += sizeof gl->salt;
			break;
		case 11:
			bcopy(gl->mkey, p, sizeof gl->mkey);
			p += sizeof gl->mkey;
			break;
		case 12:
			bzero(p, 16);
			hash = p;
			p += 16;
			break;
		}
	}
	if (ptr + G_BDE_LOCKSIZE != p)
		return (-1);
	if (hash == NULL)
		return (-1);
	MD5Init(&c);
	MD5Update(&c, "0000", 4);	/* Versioning */
	MD5Update(&c, ptr, G_BDE_LOCKSIZE);
	MD5Final(hash, &c);
	return (0);
}
/* Callback to write a record and path suffix to disk. */
static int
callback_write_rec(void * cookie, uint8_t * s, size_t slen, void * rec)
{
	struct ccache_record_external ccre;
	struct ccache_write_internal * W = cookie;
	struct ccache_record * ccr = rec;
	size_t plen;

	/* Don't write an entry if there are no chunks and no trailer. */
	if ((ccr->nch == 0) && (ccr->tlen == 0))
		goto done;

	/* Don't write an entry if it hasn't been used recently. */
	if (ccr->age > MAXAGE)
		goto done;

	/* Figure out how much prefix is shared. */
	for (plen = 0; plen < slen && plen < W->sbuflen; plen++) {
		if (s[plen] != W->sbuf[plen])
			break;
	}

	/* Convert integers to portable format. */
	le64enc(ccre.ino, ccr->ino);
	le64enc(ccre.size, ccr->size);
	le64enc(ccre.mtime, ccr->mtime);
	le64enc(ccre.nch, ccr->nch);
	le32enc(ccre.tlen, ccr->tlen);
	le32enc(ccre.tzlen, ccr->tzlen);
	le32enc(ccre.prefixlen, plen);
	le32enc(ccre.suffixlen, slen - plen);
	le32enc(ccre.age, ccr->age + 1);

	/* Write cache entry header to disk. */
	if (fwrite(&ccre, sizeof(ccre), 1, W->f) != 1)
		goto err0;

	/* Write path suffix to disk. */
	if (fwrite(s + plen, slen - plen, 1, W->f) != 1)
		goto err0;

	/* Enlarge last-path buffer if needed. */
	if (W->sbuflen < slen + 1) {
		free(W->sbuf);
		W->sbuflen = slen + 1;
		if ((W->sbuf = malloc(W->sbuflen)) == NULL) {
			W->sbuflen = 0;
			goto err0;
		}
		memcpy(W->sbuf, s, slen);
	} else
		memcpy(W->sbuf + plen, s + plen, slen - plen);
	W->sbuf[slen] = 0;

done:
	/* Success! */
	return (0);

err0:
	/* Failure! */
	return (-1);
}
/*-
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_bsd_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_geom *gp;
	struct g_bsd_softc *ms;
	struct g_slicer *gsp;
	u_char *label;
	int error;

	gp = pp->geom;
	gsp = gp->softc;
	ms = gsp->softc;
	switch (cmd) {
	case DIOCGDINFO:
		/* Return a copy of the disklabel to userland. */
		bsd_disklabel_le_dec(ms->label, data, MAXPARTITIONS);
		return (0);
	case DIOCBSDBB: {
		struct g_consumer *cp;
		u_char *buf;
		void *p;
		int error, i;
		uint64_t sum;

		if (!(fflag & FWRITE))
			return (EPERM);

		/* The disklabel to set is the ioctl argument. */
		buf = g_malloc(BBSIZE, M_WAITOK);
		p = *(void **)data;
		error = copyin(p, buf, BBSIZE);
		if (!error) {
			/* XXX: Rude, but supposedly safe */
			DROP_GIANT();
			g_topology_lock();
			/* Validate and modify our slice instance to match. */
			error = g_bsd_modify(gp, buf + ms->labeloffset);
			if (!error) {
				cp = LIST_FIRST(&gp->consumer);
				if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
					sum = 0;
					for (i = 0; i < 63; i++)
						sum += le64dec(buf + i * 8);
					le64enc(buf + 504, sum);
				}
				error = g_write_data(cp, 0, buf, BBSIZE);
			}
			g_topology_unlock();
			PICKUP_GIANT();
		}
		g_free(buf);
		return (error);
	}
	case DIOCSDINFO:
	case DIOCWDINFO: {
		if (!(fflag & FWRITE))
			return (EPERM);
		label = g_malloc(LABELSIZE, M_WAITOK);

		/* The disklabel to set is the ioctl argument. */
		bsd_disklabel_le_enc(label, data);

		DROP_GIANT();
		g_topology_lock();

		/* Validate and modify our slice instance to match. */
		error = g_bsd_modify(gp, label);
		if (error == 0 && cmd == DIOCWDINFO)
			error = g_bsd_writelabel(gp, NULL);

		g_topology_unlock();
		PICKUP_GIANT();

		g_free(label);
		return (error);
	}
	default:
		return (ENOIOCTL);
	}
}
void
threefish_encrypt_block(uint64_t ks[9], uint64_t ts[3], uint8_t in[64],
    uint8_t out[64])
{
	/* Load plaintext words, little-endian. */
	uint64_t x0 = le64dec(&in[ 0]);
	uint64_t x1 = le64dec(&in[ 8]);
	uint64_t x2 = le64dec(&in[16]);
	uint64_t x3 = le64dec(&in[24]);
	uint64_t x4 = le64dec(&in[32]);
	uint64_t x5 = le64dec(&in[40]);
	uint64_t x6 = le64dec(&in[48]);
	uint64_t x7 = le64dec(&in[56]);

#define MIX(a, b, rotk)	((a) += (b), (b) = ROTL64((b), (rotk)) ^ (a))

#define ER(n0, n1, n2, n3, n4, n5, n6, n7, r) do {	\
	MIX(x##n0, x##n1, RotK[r][0]);			\
	MIX(x##n2, x##n3, RotK[r][1]);			\
	MIX(x##n4, x##n5, RotK[r][2]);			\
	MIX(x##n6, x##n7, RotK[r][3]);			\
} while (0)

#define EI(r) do {					\
	x0 += ks[((r)+1) % 9];				\
	x1 += ks[((r)+2) % 9];				\
	x2 += ks[((r)+3) % 9];				\
	x3 += ks[((r)+4) % 9];				\
	x4 += ks[((r)+5) % 9];				\
	x5 += ks[((r)+6) % 9] + ts[((r)+1) % 3];	\
	x6 += ks[((r)+7) % 9] + ts[((r)+2) % 3];	\
	x7 += ks[((r)+8) % 9] + (r)+1;			\
} while (0)

#define EROUNDS(r) do {					\
	ER(0, 1, 2, 3, 4, 5, 6, 7, 0);			\
	ER(2, 1, 4, 7, 6, 5, 0, 3, 1);			\
	ER(4, 1, 6, 3, 0, 5, 2, 7, 2);			\
	ER(6, 1, 0, 7, 2, 5, 4, 3, 3);			\
	EI(2*(r));					\
	ER(0, 1, 2, 3, 4, 5, 6, 7, 4);			\
	ER(2, 1, 4, 7, 6, 5, 0, 3, 5);			\
	ER(4, 1, 6, 3, 0, 5, 2, 7, 6);			\
	ER(6, 1, 0, 7, 2, 5, 4, 3, 7);			\
	EI(2*(r)+1);					\
} while (0)

	/* Initial key injection, then 9 x 8 = 72 rounds. */
	EI(-1);
	EROUNDS(0);
	EROUNDS(1);
	EROUNDS(2);
	EROUNDS(3);
	EROUNDS(4);
	EROUNDS(5);
	EROUNDS(6);
	EROUNDS(7);
	EROUNDS(8);

#undef EROUNDS
#undef EI
#undef ER

	/* Store ciphertext words, little-endian. */
	le64enc(&out[ 0], x0);
	le64enc(&out[ 8], x1);
	le64enc(&out[16], x2);
	le64enc(&out[24], x3);
	le64enc(&out[32], x4);
	le64enc(&out[40], x5);
	le64enc(&out[48], x6);
	le64enc(&out[56], x7);
}
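/*
 * The ks[9] and ts[3] arrays are one word longer than the 512-bit key
 * and 128-bit tweak because Threefish extends both with a parity word
 * (per the Skein specification's key schedule).  A sketch of how a
 * caller might prepare them; threefish_schedule() and its parameter
 * names are illustrative, not part of the code above.
 */
#define THREEFISH_C240	0x1BD11BDAA9FC1A22ULL

static void
threefish_schedule(const uint8_t key[64], const uint8_t tweak[16],
    uint64_t ks[9], uint64_t ts[3])
{
	unsigned i;

	ks[8] = THREEFISH_C240;
	for (i = 0; i < 8; i++) {
		ks[i] = le64dec(&key[8 * i]);
		ks[8] ^= ks[i];
	}
	ts[0] = le64dec(&tweak[0]);
	ts[1] = le64dec(&tweak[8]);
	ts[2] = ts[0] ^ ts[1];
}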
void
threefish_decrypt_block(uint64_t ks[9], uint64_t ts[3], uint8_t in[64],
    uint8_t out[64])
{
	/* Load ciphertext words, little-endian. */
	uint64_t x0 = le64dec(&in[ 0]);
	uint64_t x1 = le64dec(&in[ 8]);
	uint64_t x2 = le64dec(&in[16]);
	uint64_t x3 = le64dec(&in[24]);
	uint64_t x4 = le64dec(&in[32]);
	uint64_t x5 = le64dec(&in[40]);
	uint64_t x6 = le64dec(&in[48]);
	uint64_t x7 = le64dec(&in[56]);

#define UNMIX(a, b, rotk)	((b) = ROTR64((b) ^ ((a)), (rotk)), (a) -= (b))

#define DR(n0, n1, n2, n3, n4, n5, n6, n7, r) do {	\
	UNMIX(x##n0, x##n1, RotK[r][0]);		\
	UNMIX(x##n2, x##n3, RotK[r][1]);		\
	UNMIX(x##n4, x##n5, RotK[r][2]);		\
	UNMIX(x##n6, x##n7, RotK[r][3]);		\
} while (0)

#define DI(R) do {					\
	x0 -= ks[((R)+1) % 9];				\
	x1 -= ks[((R)+2) % 9];				\
	x2 -= ks[((R)+3) % 9];				\
	x3 -= ks[((R)+4) % 9];				\
	x4 -= ks[((R)+5) % 9];				\
	x5 -= ks[((R)+6) % 9] + ts[((R)+1) % 3];	\
	x6 -= ks[((R)+7) % 9] + ts[((R)+2) % 3];	\
	x7 -= ks[((R)+8) % 9] + (R)+1;			\
} while (0)

#define DROUNDS(R) do {					\
	DI(2*(R)+1);					\
	DR(6, 1, 0, 7, 2, 5, 4, 3, 7);			\
	DR(4, 1, 6, 3, 0, 5, 2, 7, 6);			\
	DR(2, 1, 4, 7, 6, 5, 0, 3, 5);			\
	DR(0, 1, 2, 3, 4, 5, 6, 7, 4);			\
	DI(2*(R));					\
	DR(6, 1, 0, 7, 2, 5, 4, 3, 3);			\
	DR(4, 1, 6, 3, 0, 5, 2, 7, 2);			\
	DR(2, 1, 4, 7, 6, 5, 0, 3, 1);			\
	DR(0, 1, 2, 3, 4, 5, 6, 7, 0);			\
} while (0)

	/* 72 rounds in reverse, then undo the initial key injection. */
	DROUNDS(8);
	DROUNDS(7);
	DROUNDS(6);
	DROUNDS(5);
	DROUNDS(4);
	DROUNDS(3);
	DROUNDS(2);
	DROUNDS(1);
	DROUNDS(0);
	DI(-1);

#undef DROUNDS
#undef DI
#undef DR

	/* Store plaintext words, little-endian. */
	le64enc(&out[ 0], x0);
	le64enc(&out[ 8], x1);
	le64enc(&out[16], x2);
	le64enc(&out[24], x3);
	le64enc(&out[32], x4);
	le64enc(&out[40], x5);
	le64enc(&out[48], x6);
	le64enc(&out[56], x7);
}
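/*
 * Both Threefish routines above assume a RotK[8][4] table of per-round
 * rotation constants (not shown) and the usual 64-bit rotate macros.
 * The rotates are the standard formulation; the rotation amount must
 * stay within 1..63 for the shifts to be well defined.
 */
#define ROTL64(x, n)	(((x) << (n)) | ((x) >> (64 - (n))))
#define ROTR64(x, n)	(((x) >> (n)) | ((x) << (64 - (n))))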
/**
 * multitape_metadata_enc(mdat, bufp, buflenp):
 * Encode a struct tapemetadata into a buffer.  Return the buffer and its
 * length via ${bufp} and ${buflenp} respectively.
 */
static int
multitape_metadata_enc(const struct tapemetadata * mdat, uint8_t ** bufp,
    size_t * buflenp)
{
	uint8_t * buf;		/* Encoded metadata. */
	size_t buflen;		/* Encoded metadata size. */
	uint8_t * p;
	int i;

	/* Add up the lengths of various pieces of metadata. */
	buflen = strlen(mdat->name) + 1;	/* name */
	buflen += 8;				/* ctime */
	buflen += 4;				/* argc */
	for (i = 0; i < mdat->argc; i++)	/* argv */
		buflen += strlen(mdat->argv[i]) + 1;
	buflen += 32;				/* indexhash */
	buflen += 8;				/* index length */
	buflen += 256;				/* 2048-bit RSA signature */

	/* Allocate memory. */
	if ((p = buf = malloc(buflen)) == NULL)
		goto err0;

	/* Copy name. */
	memcpy(p, mdat->name, strlen(mdat->name) + 1);
	p += strlen(mdat->name) + 1;

	/* Encode ctime and argc. */
	le64enc(p, mdat->ctime);
	p += 8;
	le32enc(p, mdat->argc);
	p += 4;

	/* Copy argv. */
	for (i = 0; i < mdat->argc; i++) {
		memcpy(p, mdat->argv[i], strlen(mdat->argv[i]) + 1);
		p += strlen(mdat->argv[i]) + 1;
	}

	/* Copy index hash. */
	memcpy(p, mdat->indexhash, 32);
	p += 32;

	/* Encode index length. */
	le64enc(p, mdat->indexlen);
	p += 8;

	/* Generate signature. */
	if (crypto_rsa_sign(CRYPTO_KEY_SIGN_PRIV, buf, p - buf, p, 256))
		goto err1;

	/* Return buffer and length. */
	*bufp = buf;
	*buflenp = buflen;

	/* Success! */
	return (0);

err1:
	free(buf);
err0:
	/* Failure! */
	return (-1);
}
/**
 * chunks_directory_write(cachepath, HT, stats_extra, suff):
 * Write stats_extra statistics and the contents of the hash table ${HT} of
 * struct chunkdata records to a new chunk directory in
 * "${cachepath}/directory${suff}".
 */
int
chunks_directory_write(const char * cachepath, RWHASHTAB * HT,
    struct chunkstats * stats_extra, const char * suff)
{
	struct chunkstats_external cse;
	FILE * f;
	char * s;
	int fd;

	/* Construct the path to the new chunk directory. */
	if (asprintf(&s, "%s/directory%s", cachepath, suff) == -1) {
		warnp("asprintf");
		goto err0;
	}

	/* Create the new chunk directory. */
	if ((f = fopen(s, "w")) == NULL) {
		warnp("fopen(%s)", s);
		goto err1;
	}

	/* Write the extra files statistics. */
	le64enc(cse.nchunks, stats_extra->nchunks);
	le64enc(cse.s_len, stats_extra->s_len);
	le64enc(cse.s_zlen, stats_extra->s_zlen);
	if (fwrite(&cse, sizeof(cse), 1, f) != 1) {
		warnp("Error writing to chunk directory");
		goto err2;
	}

	/* Write the hash table entries to the new chunk directory. */
	if (rwhashtab_foreach(HT, callback_write, f))
		goto err2;

	/* Call fsync on the new chunk directory and close it. */
	if (fflush(f)) {
		warnp("fflush(%s)", s);
		goto err2;
	}
	if ((fd = fileno(f)) == -1) {
		warnp("fileno(%s)", s);
		goto err2;
	}
	if (fsync(fd)) {
		warnp("fsync(%s)", s);
		goto err2;
	}
	if (fclose(f)) {
		warnp("fclose(%s)", s);
		goto err1;
	}

	/* Free the path string; without this, the success path leaks it. */
	free(s);

	/* Success! */
	return (0);

err2:
	fclose(f);
err1:
	free(s);
err0:
	/* Failure! */
	return (-1);
}
static int
vmdk_write(int fd)
{
	struct vmdk_header hdr;
	uint32_t *gt, *gd, *rgd;
	char *buf, *desc;
	off_t cur, lim;
	uint64_t imagesz;
	lba_t blkofs, blkcnt;
	size_t gdsz, gtsz;
	uint32_t sec, cursec;
	int error, desc_len, n, ngrains, ngts;

	imagesz = (image_get_size() * secsz) / VMDK_SECTOR_SIZE;

	memset(&hdr, 0, sizeof(hdr));
	le32enc(&hdr.magic, VMDK_MAGIC);
	le32enc(&hdr.version, VMDK_VERSION);
	le32enc(&hdr.flags, VMDK_FLAGS_NL_TEST | VMDK_FLAGS_RGT_USED);
	le64enc(&hdr.capacity, imagesz);
	le64enc(&hdr.grain_size, grainsz);

	n = asprintf(&desc, desc_fmt, 1 /*version*/, 0 /*CID*/,
	    (uintmax_t)imagesz /*size*/, "" /*name*/,
	    ncyls /*cylinders*/, nheads /*heads*/, nsecs /*sectors*/);
	if (n == -1)
		return (ENOMEM);

	desc_len = (n + VMDK_SECTOR_SIZE - 1) & ~(VMDK_SECTOR_SIZE - 1);
	desc = realloc(desc, desc_len);
	memset(desc + n, 0, desc_len - n);

	le64enc(&hdr.desc_offset, 1);
	le64enc(&hdr.desc_size, desc_len / VMDK_SECTOR_SIZE);
	le32enc(&hdr.ngtes, VMDK_NGTES);

	sec = desc_len / VMDK_SECTOR_SIZE + 1;

	ngrains = imagesz / grainsz;
	ngts = (ngrains + VMDK_NGTES - 1) / VMDK_NGTES;
	gdsz = (ngts * sizeof(uint32_t) + VMDK_SECTOR_SIZE - 1) &
	    ~(VMDK_SECTOR_SIZE - 1);

	gd = calloc(1, gdsz);
	if (gd == NULL) {
		free(desc);
		return (ENOMEM);
	}
	le64enc(&hdr.gd_offset, sec);
	sec += gdsz / VMDK_SECTOR_SIZE;
	for (n = 0; n < ngts; n++) {
		le32enc(gd + n, sec);
		sec += VMDK_NGTES * sizeof(uint32_t) / VMDK_SECTOR_SIZE;
	}

	rgd = calloc(1, gdsz);
	if (rgd == NULL) {
		free(gd);
		free(desc);
		return (ENOMEM);
	}
	le64enc(&hdr.rgd_offset, sec);
	sec += gdsz / VMDK_SECTOR_SIZE;
	for (n = 0; n < ngts; n++) {
		le32enc(rgd + n, sec);
		sec += VMDK_NGTES * sizeof(uint32_t) / VMDK_SECTOR_SIZE;
	}

	sec = (sec + grainsz - 1) & ~(grainsz - 1);

	if (verbose)
		fprintf(stderr, "VMDK: overhead = %ju\n",
		    (uintmax_t)(sec * VMDK_SECTOR_SIZE));

	le64enc(&hdr.overhead, sec);
	be32enc(&hdr.nl_test, VMDK_NL_TEST);

	gt = calloc(ngts, VMDK_NGTES * sizeof(uint32_t));
	if (gt == NULL) {
		free(rgd);
		free(gd);
		free(desc);
		return (ENOMEM);
	}
	gtsz = ngts * VMDK_NGTES * sizeof(uint32_t);

	cursec = sec;
	blkcnt = (grainsz * VMDK_SECTOR_SIZE) / secsz;
	for (n = 0; n < ngrains; n++) {
		blkofs = n * blkcnt;
		if (image_data(blkofs, blkcnt)) {
			le32enc(gt + n, cursec);
			cursec += grainsz;
		}
	}

	error = 0;
	if (!error && sparse_write(fd, &hdr, VMDK_SECTOR_SIZE) < 0)
		error = errno;
	if (!error && sparse_write(fd, desc, desc_len) < 0)
		error = errno;
	if (!error && sparse_write(fd, gd, gdsz) < 0)
		error = errno;
	if (!error && sparse_write(fd, gt, gtsz) < 0)
		error = errno;
	if (!error && sparse_write(fd, rgd, gdsz) < 0)
		error = errno;
	if (!error && sparse_write(fd, gt, gtsz) < 0)
		error = errno;

	free(gt);
	free(rgd);
	free(gd);
	free(desc);
	if (error)
		return (error);

	cur = VMDK_SECTOR_SIZE + desc_len + (gdsz + gtsz) * 2;
	lim = sec * VMDK_SECTOR_SIZE;
	if (cur < lim) {
		buf = calloc(1, VMDK_SECTOR_SIZE);
		if (buf == NULL)
			error = ENOMEM;
		while (!error && cur < lim) {
			if (sparse_write(fd, buf, VMDK_SECTOR_SIZE) < 0)
				error = errno;
			cur += VMDK_SECTOR_SIZE;
		}
		if (buf != NULL)
			free(buf);
	}
	if (error)
		return (error);

	blkcnt = (grainsz * VMDK_SECTOR_SIZE) / secsz;
	for (n = 0; n < ngrains; n++) {
		blkofs = n * blkcnt;
		if (image_data(blkofs, blkcnt)) {
			error = image_copyout_region(fd, blkofs, blkcnt);
			if (error)
				return (error);
		}
	}
	return (image_copyout_done(fd));
}