/*
 * bs_read_u64(bs):
 * Decode the 8 bytes at the stream's current position as a little-endian
 * 64-bit integer, advance the position past them, and return the value.
 */
uint64_t
bs_read_u64(bin_stream_t * bs)
{
	uint64_t value;

	value = le64dec(&bs->data[bs->pos]);
	bs->pos += 8;
	return (value);
}
/*
 * siphash24(data, len, k0, k1):
 * Compute the SipHash-2-4 keyed hash of the len bytes at data using the
 * 128-bit key (k0, k1), and return the 64-bit result.
 *
 * NOTE(review): sip_compress()/sip_finalize() are macros that operate on
 * the local v0..v3 (and m) variables by name.
 */
uint64_t
siphash24(const void *data, size_t len, uint64_t k0, uint64_t k1)
{
	const uint8_t *s = data;
	/* One past the last complete 8-byte block. */
	const uint8_t *end = s + len - (len % 8);
	/* Initialize the state: key words XORed with the SipHash constants. */
	uint64_t v0 = k0 ^ UINT64_C(0x736f6d6570736575);
	uint64_t v1 = k1 ^ UINT64_C(0x646f72616e646f6d);
	uint64_t v2 = k0 ^ UINT64_C(0x6c7967656e657261);
	uint64_t v3 = k1 ^ UINT64_C(0x7465646279746573);
	uint64_t m;

	/* Absorb each complete 8-byte little-endian block. */
	for (; s < end; s += 8) {
		m = le64dec(s);
		sip_compress(2);
	}

	/*
	 * Final block: the low 8 bits of the length go in the top byte,
	 * with the 0-7 leftover input bytes packed below it.  The cases
	 * deliberately fall through to accumulate one byte each.
	 */
	m = (uint64_t)len << 56;
	switch (len & 7) {
	case 7:
		m |= (uint64_t)s[6] << 48;
		/* FALLTHROUGH */
	case 6:
		m |= (uint64_t)s[5] << 40;
		/* FALLTHROUGH */
	case 5:
		m |= (uint64_t)s[4] << 32;
		/* FALLTHROUGH */
	case 4:
		m |= (uint64_t)s[3] << 24;
		/* FALLTHROUGH */
	case 3:
		m |= (uint64_t)s[2] << 16;
		/* FALLTHROUGH */
	case 2:
		m |= (uint64_t)s[1] << 8;
		/* FALLTHROUGH */
	case 1:
		m |= (uint64_t)s[0];
		break;
	case 0:
		break;
	}
	/* Absorb the final block, then finalize and collapse the state. */
	sip_compress(2);
	sip_finalize(4);
	return (v0 ^ v1 ^ v2 ^ v3);
}
/*
 * tdecode(t, ...):
 * Decode |t| bytes, supplied as variadic int arguments, as an integer.
 * t > 0 selects big-endian decoding, t < 0 little-endian; |t| must be
 * 2, 4 or 8.  Any other width returns the sentinel value 777.
 */
static uint64_t
tdecode(int t, ...)
{
	uint8_t raw[16];
	va_list args;
	uint64_t result = 777;
	bool bigendian = t > 0;
	int width, i;

	width = (t < 0) ? -t : t;
	va_start(args, t);
	/* Poison the buffer so stray reads are visible. */
	memset(raw, 0xC1, sizeof(raw));
	for (i = 0; i < width; i++)
		raw[i] = va_arg(args, int);
	va_end(args);

	if (bigendian) {
		switch (width) {
		case 2:
			result = be16dec(raw);
			break;
		case 4:
			result = be32dec(raw);
			break;
		case 8:
			result = be64dec(raw);
			break;
		}
	} else {
		switch (width) {
		case 2:
			result = le16dec(raw);
			break;
		case 4:
			result = le32dec(raw);
			break;
		case 8:
			result = le64dec(raw);
			break;
		}
	}
	return (result);
}
/*
 * blake2b_compress(h, c, last, in):
 * The BLAKE2b compression function: fold the 128-byte message block at in
 * into the 8-word chain value h.  c is the block byte counter and last is
 * the final-block flag word (XORed into the working state).
 */
static void
blake2b_compress(uint64_t h[8], uint64_t c, uint64_t last,
    const uint8_t in[128])
{
	uint64_t v0,v1,v2,v3,v4,v5,v6,v7,v8,v9,v10,v11,v12,v13,v14,v15;
	uint64_t m[16];
	unsigned i;

	/* Load the variables: first 8 from state, next 8 from IV. */
	v0 = h[0];
	v1 = h[1];
	v2 = h[2];
	v3 = h[3];
	v4 = h[4];
	v5 = h[5];
	v6 = h[6];
	v7 = h[7];
	v8 = blake2b_iv[0];
	v9 = blake2b_iv[1];
	v10 = blake2b_iv[2];
	v11 = blake2b_iv[3];
	v12 = blake2b_iv[4];
	v13 = blake2b_iv[5];
	v14 = blake2b_iv[6];
	v15 = blake2b_iv[7];

	/* Incorporate the block counter and whether this is last. */
	v12 ^= c;
	v14 ^= last;

	/* Load the message block (sixteen little-endian 64-bit words). */
	for (i = 0; i < 16; i++)
		m[i] = le64dec(in + 8*i);

	/* Transform the variables: 12 rounds of the G mixing function,
	 * message words selected by the per-round sigma permutation. */
	for (i = 0; i < 12; i++) {
		const uint8_t *sigma = blake2b_sigma[i];

		BLAKE2B_G(v0, v4, v8, v12, m[sigma[ 0]], m[sigma[ 1]]);
		BLAKE2B_G(v1, v5, v9, v13, m[sigma[ 2]], m[sigma[ 3]]);
		BLAKE2B_G(v2, v6, v10, v14, m[sigma[ 4]], m[sigma[ 5]]);
		BLAKE2B_G(v3, v7, v11, v15, m[sigma[ 6]], m[sigma[ 7]]);
		BLAKE2B_G(v0, v5, v10, v15, m[sigma[ 8]], m[sigma[ 9]]);
		BLAKE2B_G(v1, v6, v11, v12, m[sigma[10]], m[sigma[11]]);
		BLAKE2B_G(v2, v7, v8, v13, m[sigma[12]], m[sigma[13]]);
		BLAKE2B_G(v3, v4, v9, v14, m[sigma[14]], m[sigma[15]]);
	}

	/* Update the state. */
	h[0] ^= v0 ^ v8;
	h[1] ^= v1 ^ v9;
	h[2] ^= v2 ^ v10;
	h[3] ^= v3 ^ v11;
	h[4] ^= v4 ^ v12;
	h[5] ^= v5 ^ v13;
	h[6] ^= v6 ^ v14;
	h[7] ^= v7 ^ v15;

	/* Zeroize the stack copy of the (possibly secret) message block. */
	(void)blake2b_explicit_memset(m, 0, sizeof m);
}
/*
 * acpi_handle_rsdt(rsdp):
 * Walk the table pointers of the RSDT/XSDT, map each referenced SDT,
 * verify its checksum, and dispatch MADT ("APIC") tables to
 * acpi_handle_apic().  Entries with bad checksums are skipped.
 */
static void
acpi_handle_rsdt(struct ACPIsdt *rsdp)
{
	struct ACPIsdt *sdp;
	vm_offset_t addr;
	int entries, i;

	/* Number of table pointers following the common SDT header. */
	entries = (rsdp->len - SIZEOF_SDT_HDR) / addr_size;
	for (i = 0; i < entries; i++) {
		switch (addr_size) {
		case 4:
			addr = le32dec((char*)rsdp->body + i * addr_size);
			break;
		case 8:
			addr = le64dec((char*)rsdp->body + i * addr_size);
			break;
		default:
			/*
			 * Unsupported pointer width.  The previous code used
			 * assert((addr = 0)), whose side effect vanishes
			 * under NDEBUG and leaves addr uninitialized; assign
			 * addr unconditionally instead.
			 */
			assert(0);
			addr = 0;
			break;
		}
		sdp = (struct ACPIsdt *)acpi_map_sdt(addr);
		if (acpi_checksum(sdp, sdp->len)) {
#if 0
			warnx("RSDT entry %d (sig %.4s) has bad checksum",
			    i, sdp->signature);
#endif
			continue;
		}
		if (!memcmp(sdp->signature, "APIC", 4))
			acpi_handle_apic(sdp);
	}
}
/**
 * integerify(B, r):
 * Return the result of parsing B_{2r-1} as a little-endian integer.
 */
static uint64_t
integerify(uint8_t * B, size_t r)
{
	/* B_{2r-1} is the last 64-byte sub-block; decode its first 8 bytes. */
	return (le64dec(B + (2 * r - 1) * 64));
}
static int read_ioreq(int fd, struct ioreq *iorq) { struct iorec iorc; if (read(fd, &iorc, sizeof(iorc)) != sizeof(iorc)) return (1); iorq->iorq_offset = le64dec(&iorc.iorc_offset); iorq->iorq_length = le32dec(&iorc.iorc_length); iorq->iorq_type = iorc.iorc_type; return (0); }
/*
 * threefish_expand_key(ks, k):
 * Expand the 64-byte Threefish-512 key k into the 9-word schedule ks:
 * eight little-endian key words plus the parity word
 * KEYSCHEDULE_PARITY ^ ks[0] ^ ... ^ ks[7].
 */
void
threefish_expand_key(uint64_t ks[9], uint8_t k[64])
{
	uint64_t parity = KEYSCHEDULE_PARITY;
	unsigned i;

	for (i = 0; i < 8; i++) {
		ks[i] = le64dec(&k[8 * i]);
		parity ^= ks[i];
	}
	ks[8] = parity;
}
/*
 * g_bde_keyloc_decrypt(sha2, input, output):
 * Decrypt one 16-byte key-location block with AES keyed from sha2, and
 * return the first 8 bytes decoded as a little-endian integer in
 * *output.  The plaintext and cipher state are zeroed before returning.
 * Always returns 0.
 */
int
g_bde_keyloc_decrypt(u_char *sha2, void *input, uint64_t *output)
{
	cipherInstance cipher;
	keyInstance key;
	u_char plaintext[16];

	AES_init(&cipher);
	AES_makekey(&key, DIR_DECRYPT, G_BDE_KKEYBITS, sha2 + 0);
	AES_decrypt(&cipher, &key, input, plaintext, sizeof plaintext);
	*output = le64dec(plaintext);

	/* Scrub key material off the stack. */
	bzero(plaintext, sizeof plaintext);
	bzero(&cipher, sizeof cipher);
	bzero(&key, sizeof key);
	return (0);
}
/*
 * g_bsd_writelabel(gp, bootcode):
 * Write the in-memory disklabel back to the underlying provider.  If
 * bootcode is NULL, read-modify-write only the sector containing the
 * label; otherwise patch the label into the caller-supplied BBSIZE boot
 * image and write the whole image at offset 0.  If the label lives at
 * ALPHA_LABEL_OFFSET, recompute the boot-block checksum (sum of the
 * first 63 quadwords, stored at offset 504) first.  Returns 0 or an
 * error number.
 */
static int
g_bsd_writelabel(struct g_geom *gp, u_char *bootcode)
{
	off_t secoff;
	u_int secsize;
	struct g_consumer *cp;
	struct g_slicer *gsp;
	struct g_bsd_softc *ms;
	u_char *buf;
	uint64_t sum;
	int error, i;

	gsp = gp->softc;
	ms = gsp->softc;
	cp = LIST_FIRST(&gp->consumer);
	/* Get sector size, we need it to read data. */
	secsize = cp->provider->sectorsize;
	secoff = ms->labeloffset % secsize;
	if (bootcode == NULL) {
		/* We own buf (from g_read_data) and must g_free it below. */
		buf = g_read_data(cp, ms->labeloffset - secoff, secsize,
		    &error);
		if (buf == NULL)
			return (error);
		bcopy(ms->label, buf + secoff, sizeof(ms->label));
	} else {
		/* Caller owns bootcode; patch the label in place. */
		buf = bootcode;
		bcopy(ms->label, buf + ms->labeloffset, sizeof(ms->label));
	}
	if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
		/* Alpha boot-block checksum over the first 63 quadwords. */
		sum = 0;
		for (i = 0; i < 63; i++)
			sum += le64dec(buf + i * 8);
		le64enc(buf + 504, sum);
	}
	if (bootcode == NULL) {
		error = g_write_data(cp, ms->labeloffset - secoff, buf,
		    secsize);
		g_free(buf);
	} else {
		error = g_write_data(cp, 0, bootcode, BBSIZE);
	}
	return(error);
}
/*
 * elf_get_quad(e, base, member):
 * Fetch ELF header field `member` from the image at base, honouring the
 * class (32/64-bit offsets) and data encoding (big/little endian)
 * recorded in the ident bytes.  Exits via errx() on an invalid class or
 * data format; returns 0 for unrecognized encodings.
 */
static u_int64_t
elf_get_quad(Elf32_Ehdr *e, void *base, elf_member_t member)
{
	u_int64_t result;
	void *field;

	result = 0;
	switch (e->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		field = (char *)base + elf32_offsets[member];
		switch (e->e_ident[EI_DATA]) {
		case ELFDATA2MSB:
			result = be32dec(field);
			break;
		case ELFDATA2LSB:
			result = le32dec(field);
			break;
		case ELFDATANONE:
			errx(1, "invalid data format");
		}
		break;
	case ELFCLASS64:
		field = (char *)base + elf64_offsets[member];
		switch (e->e_ident[EI_DATA]) {
		case ELFDATA2MSB:
			result = be64dec(field);
			break;
		case ELFDATA2LSB:
			result = le64dec(field);
			break;
		case ELFDATANONE:
			errx(1, "invalid data format");
		}
		break;
	case ELFCLASSNONE:
		errx(1, "invalid class");
	}
	return result;
}
/*
 * threefish_encrypt_block(ks, ts, in, out):
 * Encrypt the 64-byte block in with Threefish-512, using the 9-word key
 * schedule ks (from threefish_expand_key) and 3-word tweak schedule ts,
 * writing the ciphertext to out.  Words are little-endian on the wire.
 */
void
threefish_encrypt_block(uint64_t ks[9], uint64_t ts[3], uint8_t in[64],
    uint8_t out[64])
{
	/* Load the eight plaintext words (little-endian). */
	uint64_t x0 = le64dec(&in[ 0]);
	uint64_t x1 = le64dec(&in[ 8]);
	uint64_t x2 = le64dec(&in[16]);
	uint64_t x3 = le64dec(&in[24]);
	uint64_t x4 = le64dec(&in[32]);
	uint64_t x5 = le64dec(&in[40]);
	uint64_t x6 = le64dec(&in[48]);
	uint64_t x7 = le64dec(&in[56]);

/* MIX: add, rotate-left, xor -- the Threefish mix function. */
#define MIX(a, b, rotk) ((a) += (b), (b) = ROTL64((b), (rotk)) ^ (a))

/* ER: one round -- four parallel MIXes with rotation constants RotK[r]. */
#define ER(n0, n1, n2, n3, n4, n5, n6, n7, r) do { \
	MIX(x##n0, x##n1, RotK[r][0]); \
	MIX(x##n2, x##n3, RotK[r][1]); \
	MIX(x##n4, x##n5, RotK[r][2]); \
	MIX(x##n6, x##n7, RotK[r][3]); \
} while (0)

/*
 * EI: inject subkey r+1.  Key words rotate mod 9, tweak words mod 3,
 * and the subkey number itself is folded into the last word.
 */
#define EI(r) do { \
	x0 += ks[((r)+1) % 9]; \
	x1 += ks[((r)+2) % 9]; \
	x2 += ks[((r)+3) % 9]; \
	x3 += ks[((r)+4) % 9]; \
	x4 += ks[((r)+5) % 9]; \
	x5 += ks[((r)+6) % 9] + ts[((r)+1) % 3]; \
	x6 += ks[((r)+7) % 9] + ts[((r)+2) % 3]; \
	x7 += ks[((r)+8) % 9] + (r)+1; \
} while (0)

/* EROUNDS: eight rounds (two groups of four) plus two subkey injections. */
#define EROUNDS(r) do { \
	ER(0, 1, 2, 3, 4, 5, 6, 7, 0); \
	ER(2, 1, 4, 7, 6, 5, 0, 3, 1); \
	ER(4, 1, 6, 3, 0, 5, 2, 7, 2); \
	ER(6, 1, 0, 7, 2, 5, 4, 3, 3); \
	EI(2*(r)); \
	ER(0, 1, 2, 3, 4, 5, 6, 7, 4); \
	ER(2, 1, 4, 7, 6, 5, 0, 3, 5); \
	ER(4, 1, 6, 3, 0, 5, 2, 7, 6); \
	ER(6, 1, 0, 7, 2, 5, 4, 3, 7); \
	EI(2*(r)+1); \
} while (0)

	/* Initial key injection (EI(-1) adds subkey 0), then 72 rounds. */
	EI(-1);
	EROUNDS(0);
	EROUNDS(1);
	EROUNDS(2);
	EROUNDS(3);
	EROUNDS(4);
	EROUNDS(5);
	EROUNDS(6);
	EROUNDS(7);
	EROUNDS(8);

#undef EROUNDS
#undef EI
#undef ER

	/* Store the ciphertext words (little-endian). */
	le64enc(&out[ 0], x0);
	le64enc(&out[ 8], x1);
	le64enc(&out[16], x2);
	le64enc(&out[24], x3);
	le64enc(&out[32], x4);
	le64enc(&out[40], x5);
	le64enc(&out[48], x6);
	le64enc(&out[56], x7);
}
/*
 * threefish_decrypt_block(ks, ts, in, out):
 * Decrypt the 64-byte block in with Threefish-512, using the 9-word key
 * schedule ks and 3-word tweak schedule ts, writing the plaintext to
 * out.  Exactly inverts threefish_encrypt_block: same subkey injections
 * and rounds, applied in reverse order with the inverse mix.
 */
void
threefish_decrypt_block(uint64_t ks[9], uint64_t ts[3], uint8_t in[64],
    uint8_t out[64])
{
	/* Load the eight ciphertext words (little-endian). */
	uint64_t x0 = le64dec(&in[ 0]);
	uint64_t x1 = le64dec(&in[ 8]);
	uint64_t x2 = le64dec(&in[16]);
	uint64_t x3 = le64dec(&in[24]);
	uint64_t x4 = le64dec(&in[32]);
	uint64_t x5 = le64dec(&in[40]);
	uint64_t x6 = le64dec(&in[48]);
	uint64_t x7 = le64dec(&in[56]);

/* UNMIX: xor, rotate-right, subtract -- inverse of the MIX function. */
#define UNMIX(a, b, rotk) ((b) = ROTR64((b) ^ ((a)), (rotk)), (a) -= (b))

/* DR: one inverse round -- four parallel UNMIXes, constants RotK[r]. */
#define DR(n0, n1, n2, n3, n4, n5, n6, n7, r) do { \
	UNMIX(x##n0, x##n1, RotK[r][0]); \
	UNMIX(x##n2, x##n3, RotK[r][1]); \
	UNMIX(x##n4, x##n5, RotK[r][2]); \
	UNMIX(x##n6, x##n7, RotK[r][3]); \
} while (0)

/* DI: remove subkey R+1 (mirrors EI in the encrypt path). */
#define DI(R) do { \
	x0 -= ks[((R)+1) % 9]; \
	x1 -= ks[((R)+2) % 9]; \
	x2 -= ks[((R)+3) % 9]; \
	x3 -= ks[((R)+4) % 9]; \
	x4 -= ks[((R)+5) % 9]; \
	x5 -= ks[((R)+6) % 9] + ts[((R)+1) % 3]; \
	x6 -= ks[((R)+7) % 9] + ts[((R)+2) % 3]; \
	x7 -= ks[((R)+8) % 9] + (R)+1; \
} while (0)

/* DROUNDS: undo one EROUNDS -- injections and rounds in reverse order. */
#define DROUNDS(R) do { \
	DI(2*(R)+1); \
	DR(6, 1, 0, 7, 2, 5, 4, 3, 7); \
	DR(4, 1, 6, 3, 0, 5, 2, 7, 6); \
	DR(2, 1, 4, 7, 6, 5, 0, 3, 5); \
	DR(0, 1, 2, 3, 4, 5, 6, 7, 4); \
	DI(2*(R)); \
	DR(6, 1, 0, 7, 2, 5, 4, 3, 3); \
	DR(4, 1, 6, 3, 0, 5, 2, 7, 2); \
	DR(2, 1, 4, 7, 6, 5, 0, 3, 1); \
	DR(0, 1, 2, 3, 4, 5, 6, 7, 0); \
} while (0)

	/* Undo the 72 rounds, then remove the initial subkey (DI(-1)). */
	DROUNDS(8);
	DROUNDS(7);
	DROUNDS(6);
	DROUNDS(5);
	DROUNDS(4);
	DROUNDS(3);
	DROUNDS(2);
	DROUNDS(1);
	DROUNDS(0);
	DI(-1);

#undef DROUNDS
#undef DI
#undef DR

	/* Store the plaintext words (little-endian). */
	le64enc(&out[ 0], x0);
	le64enc(&out[ 8], x1);
	le64enc(&out[16], x2);
	le64enc(&out[24], x3);
	le64enc(&out[32], x4);
	le64enc(&out[40], x5);
	le64enc(&out[48], x6);
	le64enc(&out[56], x7);
}
/**
 * chunks_directory_read(cachepath, dir, stats_unique, stats_all, stats_extra,
 *     mustexist, statstape):
 * Read stats_extra statistics (statistics on non-chunks which are stored)
 * and the chunk directory (if present) from "${cachepath}/directory" into
 * memory allocated and assigned to ${*dir}; and return a hash table
 * populated with struct chunkdata records.  Populate stats_all with
 * statistics for all the chunks listed in the directory (counting
 * multiplicity) and populate stats_unique with statistics reflecting the
 * unique chunks.  If ${mustexist}, error out if the directory does not exist.
 * If ${statstape}, allocate struct chunkdata_statstape records instead.
 */
RWHASHTAB *
chunks_directory_read(const char * cachepath, void ** dir,
    struct chunkstats * stats_unique, struct chunkstats * stats_all,
    struct chunkstats * stats_extra, int mustexist, int statstape)
{
	struct chunkdata_external che;
	struct chunkstats_external cse;
	struct stat sb;
	RWHASHTAB * HT;
	char * s;
	struct chunkdata * p = NULL;
	struct chunkdata_statstape * ps = NULL;
	FILE * f;
	size_t numchunks;

	/* Zero statistics. */
	chunks_stats_zero(stats_unique);
	chunks_stats_zero(stats_all);
	chunks_stats_zero(stats_extra);

	/* Create a hash table to hold the chunkdata structures. */
	HT = rwhashtab_init(offsetof(struct chunkdata, hash), 32);
	if (HT == NULL)
		goto err0;

	/* Construct the string "${cachepath}/directory". */
	if (asprintf(&s, "%s/directory", cachepath) == -1) {
		warnp("asprintf");
		goto err1;
	}
	if (stat(s, &sb)) {
		/* Could not stat ${cachepath}/directory.  Error? */
		if (errno != ENOENT) {
			warnp("stat(%s)", s);
			goto err2;
		}

		/* The directory doesn't exist; complain if mustexist != 0. */
		if (mustexist) {
			warn0("Error reading cache directory from %s",
			    cachepath);
			goto err2;
		}

		/*
		 * ${cachepath}/directory does not exist; set ${*dir} to NULL
		 * and return the empty hash table.
		 */
		free(s);
		*dir = NULL;
		return (HT);
	}

	/*
	 * Make sure the directory file isn't too large or too small, in
	 * order to avoid any possibility of integer overflows.
	 */
	if ((sb.st_size < 0) ||
	    ((sizeof(off_t) > sizeof(size_t)) && (sb.st_size > SIZE_MAX))) {
		warn0("on-disk directory has insane size (%jd bytes): %s",
		    (intmax_t)(sb.st_size), s);
		goto err2;
	}

	/* Make sure the number of chunks is an integer. */
	if ((size_t)(sb.st_size - sizeof(struct chunkstats_external)) %
	    (sizeof(struct chunkdata_external))) {
		warn0("on-disk directory is corrupt: %s", s);
		goto err2;
	}

	/* Compute the number of on-disk chunks. */
	numchunks =
	    (size_t)(sb.st_size - sizeof(struct chunkstats_external)) /
	    sizeof(struct chunkdata_external);

	/* Make sure we don't get an integer overflow. */
	if (numchunks >= SIZE_MAX / sizeof(struct chunkdata_statstape)) {
		warn0("on-disk directory is too large: %s", s);
		goto err2;
	}

	/*
	 * Allocate memory to ${*dir} large enough to store a struct
	 * chunkdata or struct chunkdata_statstape for each struct
	 * chunkdata_external in ${cachepath}/directory.
	 */
	if (statstape) {
		ps = malloc(numchunks * sizeof(struct chunkdata_statstape));
		*dir = ps;
	} else {
		p = malloc(numchunks * sizeof(struct chunkdata));
		*dir = p;
	}
	if (*dir == NULL)
		goto err2;

	/* Open the directory file. */
	if ((f = fopen(s, "r")) == NULL) {
		warnp("fopen(%s)", s);
		goto err3;
	}

	/* Read the extra files statistics (stored little-endian on disk). */
	if (fread(&cse, sizeof(cse), 1, f) != 1) {
		warnp("fread(%s)", s);
		goto err4;
	}
	stats_extra->nchunks = le64dec(cse.nchunks);
	stats_extra->s_len = le64dec(cse.s_len);
	stats_extra->s_zlen = le64dec(cse.s_zlen);

	/* Read the chunk structures. */
	for (; numchunks != 0; numchunks--) {
		/* Set p to point at the struct chunkdata. */
		if (statstape)
			p = &ps->d;

		/* Read the file one record at a time... */
		if (fread(&che, sizeof(che), 1, f) != 1) {
			warnp("fread(%s)", s);
			goto err4;
		}

		/* ... creating struct chunkdata records... */
		memcpy(p->hash, che.hash, 32);
		p->len = le32dec(che.len);
		p->zlen_flags = le32dec(che.zlen);
		p->nrefs = le32dec(che.nrefs);
		p->ncopies = le32dec(che.ncopies);

		/* ... inserting them into the hash table... */
		if (rwhashtab_insert(HT, p))
			goto err4;

		/* ... and updating the statistics. */
		chunks_stats_add(stats_unique, p->len, p->zlen_flags, 1);
		chunks_stats_add(stats_all, p->len, p->zlen_flags,
		    p->ncopies);

		/* Sanity check. */
		if ((p->len == 0) || (p->zlen_flags == 0) ||
		    (p->nrefs == 0)) {
			warn0("on-disk directory is corrupt: %s", s);
			goto err4;
		}

		/* Move to next record. */
		if (statstape)
			ps++;
		else
			p++;
	}
	if (fclose(f)) {
		warnp("fclose(%s)", s);
		goto err3;
	}

	/* Free string allocated by asprintf. */
	free(s);

	/* Success! */
	return (HT);

	/* Error paths unwind resources in reverse order of acquisition. */
err4:
	fclose(f);
err3:
	free(*dir);
err2:
	free(s);
err1:
	rwhashtab_free(HT);
err0:
	/* Failure! */
	return (NULL);
}
/*
 * print_hgst_info_ssd_perf(buf, subtype, res, size):
 * Print the HGST SSD performance subpage: a header line followed by
 * fifteen consecutive little-endian 64-bit counters decoded from buf.
 *
 * Rewritten from fifteen copy-pasted decode/print pairs into a label
 * table and a loop; also casts the value to uintmax_t so it matches the
 * %ju conversion on platforms where uintmax_t is wider than uint64_t.
 */
static void
print_hgst_info_ssd_perf(void *buf, uint16_t subtype __unused, uint8_t res,
    uint32_t size __unused)
{
	/* Counter labels, in the order the values appear in the page. */
	static const char *labels[] = {
		"Host Read Commands",
		"Host Read Blocks",
		"Host Cache Read Hits Commands",
		"Host Cache Read Hits Blocks",
		"Host Read Commands Stalled",
		"Host Write Commands",
		"Host Write Blocks",
		"Host Write Odd Start Commands",
		"Host Write Odd End Commands",
		"Host Write Commands Stalled",
		"NAND Read Commands",
		"NAND Read Blocks",
		"NAND Write Commands",
		"NAND Write Blocks",
		"NAND Read Before Writes",
	};
	uint8_t *walker = buf;
	size_t i;

	printf("SSD Performance Subpage Type %d:\n", res);
	for (i = 0; i < sizeof(labels) / sizeof(labels[0]); i++) {
		printf("  %-30s: %ju\n", labels[i],
		    (uintmax_t)le64dec(walker));
		walker += 8;
	}
}
/*-
 * This start routine is only called for non-trivial requests, all the
 * trivial ones are handled autonomously by the slice code.
 * For requests we handle here, we must call the g_io_deliver() on the
 * bio, and return non-zero to indicate to the slice code that we did so.
 * This code executes in the "DOWN" I/O path, this means:
 *    * No sleeping.
 *    * Don't grab the topology lock.
 *    * Don't call biowait, g_getattr(), g_setattr() or g_read_data()
 */
static int
g_bsd_ioctl(struct g_provider *pp, u_long cmd, void *data, int fflag,
    struct thread *td)
{
	struct g_geom *gp;
	struct g_bsd_softc *ms;
	struct g_slicer *gsp;
	u_char *label;
	int error;

	gp = pp->geom;
	gsp = gp->softc;
	ms = gsp->softc;
	switch(cmd) {
	case DIOCGDINFO:
		/* Return a copy of the disklabel to userland. */
		bsd_disklabel_le_dec(ms->label, data, MAXPARTITIONS);
		return(0);
	case DIOCBSDBB: {
		struct g_consumer *cp;
		u_char *buf;
		void *p;
		/* NOTE(review): this 'error' shadows the function-scope one. */
		int error, i;
		uint64_t sum;

		/* Writing boot blocks requires write permission. */
		if (!(fflag & FWRITE))
			return (EPERM);
		/* The disklabel to set is the ioctl argument. */
		buf = g_malloc(BBSIZE, M_WAITOK);
		p = *(void **)data;
		error = copyin(p, buf, BBSIZE);
		if (!error) {
			/* XXX: Rude, but supposedly safe */
			DROP_GIANT();
			g_topology_lock();
			/* Validate and modify our slice instance to match. */
			error = g_bsd_modify(gp, buf + ms->labeloffset);
			if (!error) {
				cp = LIST_FIRST(&gp->consumer);
				if (ms->labeloffset == ALPHA_LABEL_OFFSET) {
					/*
					 * Alpha boot-block checksum: sum of
					 * the first 63 quadwords, stored at
					 * offset 504.
					 */
					sum = 0;
					for (i = 0; i < 63; i++)
						sum += le64dec(buf + i * 8);
					le64enc(buf + 504, sum);
				}
				error = g_write_data(cp, 0, buf, BBSIZE);
			}
			g_topology_unlock();
			PICKUP_GIANT();
		}
		g_free(buf);
		return (error);
	}
	case DIOCSDINFO:
	case DIOCWDINFO: {
		if (!(fflag & FWRITE))
			return (EPERM);
		label = g_malloc(LABELSIZE, M_WAITOK);
		/* The disklabel to set is the ioctl argument. */
		bsd_disklabel_le_enc(label, data);
		DROP_GIANT();
		g_topology_lock();
		/* Validate and modify our slice instance to match. */
		error = g_bsd_modify(gp, label);
		/* DIOCWDINFO additionally writes the label back to disk. */
		if (error == 0 && cmd == DIOCWDINFO)
			error = g_bsd_writelabel(gp, NULL);
		g_topology_unlock();
		PICKUP_GIANT();
		g_free(label);
		return(error);
	}
	default:
		return (ENOIOCTL);
	}
}
/**
 * multitape_metadata_dec(mdat, buf, buflen):
 * Parse a buffer into a struct tapemetadata.  Return 0 on success, 1 if the
 * metadata is corrupt, or -1 on error.
 *
 * Layout: NUL-terminated name, 64-bit LE ctime, 32-bit LE argc, argc
 * NUL-terminated arguments, 32-byte index hash, 64-bit LE index length,
 * and a 256-byte RSA signature over everything preceding it.
 */
static int
multitape_metadata_dec(struct tapemetadata * mdat, uint8_t * buf,
    size_t buflen)
{
	uint8_t * p;
	size_t i;
	int arg;

	/* Start at the beginning... */
	p = buf;

	/* Make sure the archive name is NUL-terminated. */
	for (i = 0; i < buflen; i++)
		if (p[i] == '\0')
			break;
	if (i == buflen)
		goto bad0;

	/* Copy the archive name and move on to next field. */
	if ((mdat->name = strdup((char *)p)) == NULL)
		goto err0;
	buflen -= strlen((char *)p) + 1;
	p += strlen((char *)p) + 1;

	/* Parse ctime and argc. */
	if (buflen < 8)
		goto bad1;
	mdat->ctime = le64dec(p);
	buflen -= 8;
	p += 8;
	if (buflen < 4)
		goto bad1;
	mdat->argc = le32dec(p);
	buflen -= 4;
	p += 4;

	/* Sanity-check argc (bounds the argv allocation below). */
	if ((mdat->argc < 0) || ((size_t)(mdat->argc) > buflen))
		goto bad1;

	/* Allocate space for argv. */
	if ((mdat->argv = malloc(mdat->argc * sizeof(char *))) == NULL)
		goto err1;

	/* Parse argv.  Pre-NULL every slot so cleanup can free safely. */
	for (arg = 0; arg < mdat->argc; arg++)
		mdat->argv[arg] = NULL;
	for (arg = 0; arg < mdat->argc; arg++) {
		/* Make sure argument is NUL-terminated. */
		for (i = 0; i < buflen; i++)
			if (p[i] == '\0')
				break;
		if (i == buflen)
			goto bad2;

		/* Copy argument and move on to next field. */
		if ((mdat->argv[arg] = strdup((char *)p)) == NULL)
			goto err2;
		buflen -= strlen((char *)p) + 1;
		p += strlen((char *)p) + 1;
	}

	/* Copy indexhash. */
	if (buflen < 32)
		goto bad2;
	memcpy(mdat->indexhash, p, 32);
	buflen -= 32;
	p += 32;

	/* Parse index length. */
	if (buflen < 8)
		goto bad2;
	mdat->indexlen = le64dec(p);
	buflen -= 8;
	p += 8;

	/* Validate signature (covers buf[0 .. p - buf - 1]). */
	if (buflen < 256)
		goto bad2;
	switch (crypto_rsa_verify(CRYPTO_KEY_SIGN_PUB, buf, p - buf,
	    p, 256)) {
	case -1:
		/* Error in crypto_rsa_verify. */
		goto err2;
	case 1:
		/* Bad signature. */
		goto bad2;
	case 0:
		/* Signature is good. */
		break;
	}
	buflen -= 256;
	p += 256;

	/* We should be at the end of the metadata now. */
	if (buflen != 0)
		goto bad2;

	/* Success! */
	return (0);

	/* "bad" ladder: input is corrupt (return 1). */
bad2:
	for (arg = 0; arg < mdat->argc; arg++)
		free(mdat->argv[arg]);
	free(mdat->argv);
bad1:
	free(mdat->name);
bad0:
	/* Metadata is corrupt. */
	return (1);

	/* "err" ladder: internal failure, e.g. allocation (return -1). */
err2:
	for (arg = 0; arg < mdat->argc; arg++)
		free(mdat->argv[arg]);
	free(mdat->argv);
err1:
	free(mdat->name);
err0:
	/* Failure! */
	return (-1);
}
/*
 * sha3_update(C, data, len, rw):
 * Absorb len bytes at data into the Keccak sponge state C, where rw is
 * the rate in 64-bit words.  C->nb counts the bytes still needed to fill
 * the current rate block; whenever it reaches zero the state is permuted
 * with keccakf1600 and C->nb resets to 8*rw.
 */
static void
sha3_update(struct sha3 *C, const uint8_t *data, size_t len, unsigned rw)
{
	uint64_t T;
	unsigned ib, iw;	/* index of byte/word */

	assert(0 < C->nb);

	/* If there's a partial word, try to fill it. */
	if ((C->nb % 8) != 0) {
		/* Pack the incoming bytes little-endian into T... */
		T = 0;
		for (ib = 0; ib < MIN(len, C->nb % 8); ib++)
			T |= (uint64_t)data[ib] << (8*ib);
		/* ...and XOR them in above the bytes already absorbed. */
		C->A[rw - (C->nb + 7)/8] ^= T << (8*(8 - (C->nb % 8)));
		C->nb -= ib;
		data += ib;
		len -= ib;

		/* If we filled the buffer, permute now. */
		if (C->nb == 0) {
			keccakf1600(C->A);
			C->nb = 8*rw;
		}

		/* If that exhausted the input, we're done. */
		if (len == 0)
			return;
	}

	/* At a word boundary.  Fill any partial buffer. */
	assert((C->nb % 8) == 0);
	if (C->nb < 8*rw) {
		for (iw = 0; iw < MIN(len, C->nb)/8; iw++)
			C->A[rw - C->nb/8 + iw] ^= le64dec(data + 8*iw);
		C->nb -= 8*iw;
		data += 8*iw;
		len -= 8*iw;

		/* If we filled the buffer, permute now. */
		if (C->nb == 0) {
			keccakf1600(C->A);
			C->nb = 8*rw;
		} else {
			/* Otherwise, less than a word left. */
			assert(len < 8);
			goto partial;
		}
	}

	/* At a buffer boundary.  Absorb input one buffer at a time. */
	assert(C->nb == 8*rw);
	while (8*rw <= len) {
		for (iw = 0; iw < rw; iw++)
			C->A[iw] ^= le64dec(data + 8*iw);
		keccakf1600(C->A);
		data += 8*rw;
		len -= 8*rw;
	}

	/* Partially fill the buffer with as many words as we can. */
	for (iw = 0; iw < len/8; iw++)
		C->A[rw - C->nb/8 + iw] ^= le64dec(data + 8*iw);
	C->nb -= 8*iw;
	data += 8*iw;
	len -= 8*iw;

partial:
	/* Partially fill the last word with as many bytes as we can. */
	assert(len < 8);
	assert(0 < C->nb);
	assert((C->nb % 8) == 0);
	T = 0;
	for (ib = 0; ib < len; ib++)
		T |= (uint64_t)data[ib] << (8*ib);
	C->A[rw - C->nb/8] ^= T;
	C->nb -= ib;
	assert(0 < C->nb);
}
/*
 * read_rec(cookie):
 * Read one cache record from the stream held in the struct
 * ccache_read_internal pointed to by cookie, decode and sanity-check it,
 * read its path suffix into the shared path buffer, and return a newly
 * allocated struct ccache_record.  Returns NULL on read error, corrupt
 * data, or allocation failure.
 */
static struct ccache_record *
read_rec(void * cookie)
{
	struct ccache_record_external ccre;
	struct ccache_read_internal * R = cookie;
	struct ccache_record * ccr;
	size_t prefixlen, suffixlen;
	uint8_t * sbuf_new;

	/* Read a struct ccache_record_external. */
	if (fread(&ccre, sizeof(ccre), 1, R->f) != 1) {
		/* warnp (with errno) on I/O error, warn0 on short read. */
		if (ferror(R->f))
			warnp("Error reading cache: %s", R->s);
		else
			warn0("Error reading cache: %s", R->s);
		goto err0;
	}

	/* Allocate memory for a record. */
	if ((ccr = malloc(sizeof(struct ccache_record))) == NULL)
		goto err0;

	/* Decode record (fields are stored little-endian). */
	ccr->ino = le64dec(ccre.ino);
	ccr->size = le64dec(ccre.size);
	ccr->mtime = le64dec(ccre.mtime);
	ccr->nch = le64dec(ccre.nch);
	ccr->tlen = le32dec(ccre.tlen);
	ccr->tzlen = le32dec(ccre.tzlen);
	prefixlen = le32dec(ccre.prefixlen);
	suffixlen = le32dec(ccre.suffixlen);
	ccr->age = le32dec(ccre.age);

	/* Zero other fields. */
	ccr->nchalloc = 0;
	ccr->chp = NULL;
	ccr->ztrailer = NULL;
	ccr->flags = 0;

	/* Sanity check some fields. */
	if ((prefixlen == 0 && suffixlen == 0) ||
	    (ccr->nch > SIZE_MAX / sizeof(struct chunkheader)) ||
	    (ccr->nch == 0 && ccr->tlen == 0) ||
	    (ccr->tlen == 0 && ccr->tzlen != 0) ||
	    (ccr->tlen != 0 && ccr->tzlen == 0) ||
	    (ccr->age == INT_MAX))
		goto err2;

	/*
	 * The prefix length must be <= the length of the previous path; and
	 * the prefix length + suffix length must not overflow.
	 */
	if ((prefixlen > R->slen) || (prefixlen > prefixlen + suffixlen))
		goto err2;

	/* Make sure we have enough space for the entry path. */
	if (prefixlen + suffixlen > R->sbuflen) {
		sbuf_new = realloc(R->sbuf, prefixlen + suffixlen);
		if (sbuf_new == NULL)
			goto err1;
		R->sbuf = sbuf_new;
		R->sbuflen = prefixlen + suffixlen;
	}

	/* Read the entry path suffix. */
	if (fread(R->sbuf + prefixlen, suffixlen, 1, R->f) != 1) {
		if (ferror(R->f))
			warnp("Error reading cache: %s", R->s);
		else
			warn0("Error reading cache: %s", R->s);
		goto err1;
	}
	R->slen = prefixlen + suffixlen;

	/*
	 * Add chunk header and trailer data lengths to datalen, detecting
	 * unsigned wraparound after each addition.
	 */
	R->datalen += ccr->tzlen;
	if (R->datalen < ccr->tzlen)
		goto err2;
	R->datalen += ccr->nch * sizeof(struct chunkheader);
	if (R->datalen < ccr->nch * sizeof(struct chunkheader))
		goto err2;

	/* Success! */
	return (ccr);

err2:
	warn0("Cache file is corrupt: %s", R->s);
err1:
	free(ccr);
err0:
	/* Failure! */
	return (NULL);
}
/*
 * g_bde_decode_lock(sc, gl, ptr):
 * Decode the G_BDE_LOCKSIZE-byte lock sector at ptr into gl.  The field
 * order is shuffled according to a permutation derived from sc->sha2 by
 * g_bde_shuffle_lock().  Returns 0 on success, 1 if the embedded MD5
 * hash does not match, and -1 if the decoded fields do not span exactly
 * G_BDE_LOCKSIZE bytes.
 */
int
g_bde_decode_lock(struct g_bde_softc *sc, struct g_bde_key *gl, u_char *ptr)
{
	int shuffle[NLOCK_FIELDS];
	u_char *p;
	u_char hash[16], hash2[16];
	MD5_CTX c;
	int i;

	p = ptr;
	/* Derive the key-dependent field order. */
	g_bde_shuffle_lock(sc->sha2, shuffle);
	for (i = 0; i < NLOCK_FIELDS; i++) {
		switch(shuffle[i]) {
		case 0:
			gl->sector0 = le64dec(p);
			p += 8;
			break;
		case 1:
			gl->sectorN = le64dec(p);
			p += 8;
			break;
		case 2:
			gl->keyoffset = le64dec(p);
			p += 8;
			break;
		case 3:
			gl->sectorsize = le32dec(p);
			p += 4;
			break;
		case 4:
			gl->flags = le32dec(p);
			p += 4;
			break;
		case 5:
		case 6:
		case 7:
		case 8:
			/* Fields 5-8 map to lsector[0..3]. */
			gl->lsector[shuffle[i] - 5] = le64dec(p);
			p += 8;
			break;
		case 9:
			bcopy(p, gl->spare, sizeof gl->spare);
			p += sizeof gl->spare;
			break;
		case 10:
			bcopy(p, gl->salt, sizeof gl->salt);
			p += sizeof gl->salt;
			break;
		case 11:
			bcopy(p, gl->mkey, sizeof gl->mkey);
			p += sizeof gl->mkey;
			break;
		case 12:
			/*
			 * Save the stored hash, then zero its slot in the
			 * buffer so the MD5 below covers the lock with the
			 * hash field blanked.
			 */
			bcopy(p, hash2, sizeof hash2);
			bzero(p, sizeof hash2);
			p += sizeof hash2;
			break;
		}
	}
	/* All fields together must consume exactly G_BDE_LOCKSIZE bytes. */
	if(ptr + G_BDE_LOCKSIZE != p)
		return(-1);
	MD5Init(&c);
	MD5Update(&c, "0000", 4);	/* Versioning */
	MD5Update(&c, ptr, G_BDE_LOCKSIZE);
	MD5Final(hash, &c);
	if (bcmp(hash, hash2, sizeof hash2))
		return (1);
	return (0);
}