/*
 * Emulator memory-read callback: fetch a little-endian 32-bit value from
 * emulated memory at ${addr} and return it in host byte order.
 *
 * NOTE(review): if the address cannot be mapped, x86bios_set_fault() is
 * called and execution falls through to the dereference below.  This is
 * only safe if x86bios_set_fault() does not return (e.g. longjmps back
 * into the emulator) -- confirm, otherwise this is a NULL dereference.
 */
static uint32_t x86bios_emu_rdl(struct x86emu *emu, uint32_t addr)
{
	uint32_t *va;

	va = x86bios_get_pages(addr, sizeof(*va));
	if (va == NULL)
		x86bios_set_fault(emu, addr);

#ifndef __NO_STRICT_ALIGNMENT
	/* Misaligned address: assemble the word byte-by-byte. */
	if ((addr & 3) != 0)
		return (le32dec(va));
	else
#endif
	/* Aligned (or alignment-tolerant platform): direct load + byteswap. */
	return (le32toh(*va));
}
/*
 * SSE2 scrypt core with variable cost parameter: N = 2^(Nfactor+1), r = 1,
 * p = 1, producing a 32-byte output from an 80-byte input (input doubles as
 * both password and salt, as in the usual cryptocoin block-header usage --
 * NOTE(review): assumes ${input} has at least 80 readable bytes; confirm
 * against callers).
 *
 * ${scratchpad} must provide room for N * 128 bytes of V plus up to 63
 * bytes of alignment slack; it is aligned up to a 64-byte boundary here.
 *
 * NOTE(review): Nfactor >= 31 would make the shift below undefined
 * behavior -- confirm callers bound it.
 */
void scrypt_N_1_1_256_sp_sse2(const char *input, char *output, char *scratchpad, unsigned char Nfactor)
{
	uint8_t B[128];
	union {
		__m128i i128[8];
		uint32_t u32[32];
	} X;
	__m128i *V;
	uint32_t i, j, k, N;

	/* Align the scratchpad up to a 64-byte boundary for the V array. */
	V = (__m128i *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* B = PBKDF2-SHA256(input, input, 1 iteration, 128 bytes). */
	PBKDF2_SHA256((const uint8_t *)input, 80, (const uint8_t *)input, 80, 1, B, 128);

	/*
	 * Load B into X, applying the (i * 5 % 16) word shuffle that the
	 * SSE2 salsa implementation expects (must match the store below).
	 */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			X.u32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	N = (1 << (Nfactor + 1));

	/* SMix first loop: fill V with successive states of X. */
	for (i = 0; i < N; i++) {
		for (k = 0; k < 8; k++)
			V[i * 8 + k] = X.i128[k];
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* SMix second loop: random reads from V; N is a power of two. */
	for (i = 0; i < N; i++) {
		j = 8 * (X.u32[16] & (N-1));
		for (k = 0; k < 8; k++)
			X.i128[k] = _mm_xor_si128(X.i128[k], V[j + k]);
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* Store X back into B, undoing the word shuffle. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]);
		}
	}

	/* Final 32-byte output = PBKDF2-SHA256(input, B, 1 iteration). */
	PBKDF2_SHA256((const uint8_t *)input, 80, B, 128, 1, (uint8_t *)output, 32);
}
/*
 * SSE2 scrypt variant with a fixed cost parameter N = 123, r = 1, p = 1,
 * producing a 32-byte output from an 80-byte input (input doubles as both
 * password and salt).
 *
 * NOTE(review): N = 123 is not a power of two, so the second SMix loop
 * selects j with "% N" rather than the usual "& (N-1)" mask -- this is a
 * deliberate non-standard variant; confirm it matches the reference
 * implementation this must interoperate with.
 *
 * ${scratchpad} must provide room for N * 128 bytes of V plus up to 63
 * bytes of alignment slack; it is aligned up to a 64-byte boundary here.
 *
 * Bug fixed: the second SMix loop header was missing the semicolon after
 * its condition ("for (i = 0; i < N i++)"), which did not compile.
 */
void scrypt_8_4_1_256_sp_sse2(const char *input, char *output, char *scratchpad)
{
	const uint32_t N = 123;
	uint8_t B[128];
	union {
		__m128i i128[8];
		uint32_t u32[32];
	} X;
	__m128i *V;
	uint32_t i, j, k;

	/* Align the scratchpad up to a 64-byte boundary for the V array. */
	V = (__m128i *)(((uintptr_t)(scratchpad) + 63) & ~ (uintptr_t)(63));

	/* B = PBKDF2-SHA256(input, input, 1 iteration, 128 bytes). */
	PBKDF2_SHA256((const uint8_t *)input, 80, (const uint8_t *)input, 80, 1, B, 128);

	/*
	 * Load B into X, applying the (i * 5 % 16) word shuffle that the
	 * SSE2 salsa implementation expects (must match the store below).
	 */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			X.u32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]);
		}
	}

	/* SMix first loop: fill V with successive states of X. */
	for (i = 0; i < N; i++) {
		for (k = 0; k < 8; k++)
			V[i * 8 + k] = X.i128[k];
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* SMix second loop: data-dependent reads from V. */
	for (i = 0; i < N; i++) {
		j = 8 * (X.u32[16] % (N));
		for (k = 0; k < 8; k++)
			X.i128[k] = _mm_xor_si128(X.i128[k], V[j + k]);
		xor_salsa8_sse2(&X.i128[0], &X.i128[4]);
		xor_salsa8_sse2(&X.i128[4], &X.i128[0]);
	}

	/* Store X back into B, undoing the word shuffle. */
	for (k = 0; k < 2; k++) {
		for (i = 0; i < 16; i++) {
			le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]);
		}
	}

	/* Final 32-byte output = PBKDF2-SHA256(input, B, 1 iteration). */
	PBKDF2_SHA256((const uint8_t *)input, 80, B, 128, 1, (uint8_t *)output, 32);
}
/*
 * Fetch the ELF header/field ${member} from ${base} as a 64-bit value,
 * honoring the file's class (32- vs 64-bit field layout) and data
 * encoding (big- vs little-endian).  Exits via errx() on an explicitly
 * invalid class or data encoding; returns 0 for unrecognized values.
 */
static u_int64_t elf_get_quad(Elf32_Ehdr *e, void *base, elf_member_t member)
{
	u_int64_t result;
	int encoding;

	result = 0;
	encoding = e->e_ident[EI_DATA];

	switch (e->e_ident[EI_CLASS]) {
	case ELFCLASS32:
		/* 32-bit layout: field offset from the 32-bit table. */
		base = (char *)base + elf32_offsets[member];
		if (encoding == ELFDATA2MSB)
			result = be32dec(base);
		else if (encoding == ELFDATA2LSB)
			result = le32dec(base);
		else if (encoding == ELFDATANONE)
			errx(1, "invalid data format");
		break;
	case ELFCLASS64:
		/* 64-bit layout: field offset from the 64-bit table. */
		base = (char *)base + elf64_offsets[member];
		if (encoding == ELFDATA2MSB)
			result = be64dec(base);
		else if (encoding == ELFDATA2LSB)
			result = le64dec(base);
		else if (encoding == ELFDATANONE)
			errx(1, "invalid data format");
		break;
	case ELFCLASSNONE:
		errx(1, "invalid class");
	}

	return result;
}
/*
 * Pretty-print the HGST self-test log subpage.  ${buf} holds ${size}
 * bytes of 20-byte self-test result entries.
 *
 * Bug fixed: the LBA field was decoded as a 32-bit value with only a
 * 4-byte advance, so each iteration consumed 16 bytes of a 20-byte
 * entry and every entry after the first was misparsed.  The field is
 * 8 bytes (which also matches the existing %#jx/uintmax_t printing);
 * decoding it as 64-bit restores the 20-byte stride:
 * 2 (code) + 1 (flags) + 1 (len) + 1 (results) + 1 (number) +
 * 2 (hours) + 8 (LBA) + 4 (sense bytes) = 20.
 */
static void
print_hgst_info_self_test(void *buf, uint16_t subtype __unused, uint8_t res __unused, uint32_t size)
{
	size_t i;
	uint8_t *walker = buf;
	uint16_t code, hrs;
	uint64_t lba;

	printf("Self Test Subpage:\n");
	for (i = 0; i < size / 20; i++) {	/* Each entry is 20 bytes */
		code = le16dec(walker);
		walker += 2;
		walker++;			/* Ignore fixed flags */
		if (*walker == 0)		/* Last entry is zero length */
			break;
		if (*walker++ != 0x10) {
			printf("Bad length for self test report\n");
			return;
		}
		printf(" %-30s: %d\n", "Recent Test", code);
		printf(" %-28s: %#x\n", "Self-Test Results", *walker & 0xf);
		printf(" %-28s: %#x\n", "Self-Test Code", (*walker >> 5) & 0x7);
		walker++;
		printf(" %-28s: %#x\n", "Self-Test Number", *walker++);
		hrs = le16dec(walker);
		walker += 2;
		lba = le64dec(walker);
		walker += 8;
		printf(" %-28s: %u\n", "Total Power On Hrs", hrs);
		printf(" %-28s: %#jx (%jd)\n", "LBA", (uintmax_t)lba, (uintmax_t)lba);
		printf(" %-28s: %#x\n", "Sense Key", *walker++ & 0xf);
		printf(" %-28s: %#x\n", "Additional Sense Code", *walker++);
		printf(" %-28s: %#x\n", "Additional Sense Qualifier", *walker++);
		printf(" %-28s: %#x\n", "Vendor Specific Detail", *walker++);
	}
}
/**
 * multitape_metadata_dec(mdat, buf, buflen):
 * Parse a buffer into a struct tapemetadata.  Return 0 on success, 1 if the
 * metadata is corrupt, or -1 on error.
 *
 * Wire format: NUL-terminated name, 64-bit LE ctime, 32-bit LE argc,
 * argc NUL-terminated arguments, 32-byte index hash, 64-bit LE index
 * length, and a trailing 256-byte RSA signature over everything before it.
 * On the "bad*" / "err*" paths any partially-populated fields are freed
 * before returning.
 */
static int
multitape_metadata_dec(struct tapemetadata * mdat, uint8_t * buf,
    size_t buflen)
{
	uint8_t * p;
	size_t i;
	int arg;

	/* Start at the beginning... */
	p = buf;

	/* Make sure the archive name is NUL-terminated. */
	for (i = 0; i < buflen; i++)
		if (p[i] == '\0')
			break;
	if (i == buflen)
		goto bad0;

	/* Copy the archive name and move on to next field. */
	if ((mdat->name = strdup((char *)p)) == NULL)
		goto err0;
	buflen -= strlen((char *)p) + 1;
	p += strlen((char *)p) + 1;

	/* Parse ctime and argc. */
	if (buflen < 8)
		goto bad1;
	mdat->ctime = le64dec(p);
	buflen -= 8;
	p += 8;
	if (buflen < 4)
		goto bad1;
	mdat->argc = le32dec(p);
	buflen -= 4;
	p += 4;

	/*
	 * Sanity-check argc: each argument needs at least one byte, so
	 * argc can never legitimately exceed the remaining buffer length.
	 */
	if ((mdat->argc < 0) || ((size_t)(mdat->argc) > buflen))
		goto bad2;

	/* Allocate space for argv. */
	if ((mdat->argv = malloc(mdat->argc * sizeof(char *))) == NULL)
		goto err1;

	/* Parse argv (NULL-fill first so cleanup can free unconditionally). */
	for (arg = 0; arg < mdat->argc; arg++)
		mdat->argv[arg] = NULL;
	for (arg = 0; arg < mdat->argc; arg++) {
		/* Make sure argument is NUL-terminated. */
		for (i = 0; i < buflen; i++)
			if (p[i] == '\0')
				break;
		if (i == buflen)
			goto bad2;

		/* Copy argument and move on to next field. */
		if ((mdat->argv[arg] = strdup((char *)p)) == NULL)
			goto err2;
		buflen -= strlen((char *)p) + 1;
		p += strlen((char *)p) + 1;
	}

	/* Copy indexhash. */
	if (buflen < 32)
		goto bad2;
	memcpy(mdat->indexhash, p, 32);
	buflen -= 32;
	p += 32;

	/* Parse index length. */
	if (buflen < 8)
		goto bad2;
	mdat->indexlen = le64dec(p);
	buflen -= 8;
	p += 8;

	/* Validate signature (covers all bytes from buf up to p). */
	if (buflen < 256)
		goto bad2;
	switch (crypto_rsa_verify(CRYPTO_KEY_SIGN_PUB, buf, p - buf, p, 256)) {
	case -1:
		/* Error in crypto_rsa_verify. */
		goto err2;
	case 1:
		/* Bad signature. */
		goto bad2;
	case 0:
		/* Signature is good. */
		break;
	}
	buflen -= 256;
	p += 256;

	/* We should be at the end of the metadata now. */
	if (buflen != 0)
		goto bad2;

	/* Success! */
	return (0);

bad2:
	for (arg = 0; arg < mdat->argc; arg++)
		free(mdat->argv[arg]);
	free(mdat->argv);
bad1:
	free(mdat->name);
bad0:
	/* Metadata is corrupt. */
	return (1);

err2:
	for (arg = 0; arg < mdat->argc; arg++)
		free(mdat->argv[arg]);
	free(mdat->argv);
err1:
	free(mdat->name);
err0:
	/* Failure! */
	return (-1);
}
$NetBSD$ --- src/crypto/scrypt-sse2.cpp.orig 2015-10-31 14:49:41.000000000 +0000 +++ src/crypto/scrypt-sse2.cpp @@ -108,7 +108,7 @@ void scrypt_1024_1_1_256_sp_sse2(const c for (k = 0; k < 2; k++) { for (i = 0; i < 16; i++) { - X.u32[k * 16 + i] = le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); + X.u32[k * 16 + i] = scrypt_le32dec(&B[(k * 16 + (i * 5 % 16)) * 4]); } } @@ -128,7 +128,7 @@ void scrypt_1024_1_1_256_sp_sse2(const c for (k = 0; k < 2; k++) { for (i = 0; i < 16; i++) { - le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]); + scrypt_le32enc(&B[(k * 16 + (i * 5 % 16)) * 4], X.u32[k * 16 + i]); } }
/*
 * Decode a little-endian on-disk BSD disklabel at ${ptr} into ${d}.
 * Returns 0 on success or EINVAL if the magic numbers, partition count
 * (must not exceed ${maxpart}), or checksum are wrong.
 *
 * Fixed byte offsets below follow the on-disk struct disklabel layout;
 * the partition table starts at offset 148, 16 bytes per partition.
 */
int
bsd_disklabel_le_dec(u_char *ptr, struct disklabel *d, int maxpart)
{
	int i;
	u_char *p, *pe;
	uint16_t sum;

	/* Both magic numbers must match before anything else is trusted. */
	d->d_magic = le32dec(ptr + 0);
	if (d->d_magic != DISKMAGIC)
		return(EINVAL);
	d->d_magic2 = le32dec(ptr + 132);
	if (d->d_magic2 != DISKMAGIC) {
		return(EINVAL);
	}

	/* Partition count gates both the checksum span and the decode loop. */
	d->d_npartitions = le16dec(ptr + 138);
	if (d->d_npartitions > maxpart) {
		return(EINVAL);
	}

	/*
	 * Checksum: XOR of all 16-bit LE words from the start of the label
	 * through the end of the partition table must be zero (the stored
	 * d_checksum field at offset 136 makes the total cancel out).
	 */
	pe = ptr + 148 + 16 * d->d_npartitions;
	sum = 0;
	for (p = ptr; p < pe; p += 2)
		sum ^= le16dec(p);
	if (sum != 0) {
		return(EINVAL);
	}

	/* Decode the fixed-offset scalar fields. */
	d->d_type = le16dec(ptr + 4);
	d->d_subtype = le16dec(ptr + 6);
	bcopy(ptr + 8, d->d_typename, 16);
	bcopy(ptr + 24, d->d_packname, 16);
	d->d_secsize = le32dec(ptr + 40);
	d->d_nsectors = le32dec(ptr + 44);
	d->d_ntracks = le32dec(ptr + 48);
	d->d_ncylinders = le32dec(ptr + 52);
	d->d_secpercyl = le32dec(ptr + 56);
	d->d_secperunit = le32dec(ptr + 60);
	d->d_sparespertrack = le16dec(ptr + 64);
	d->d_sparespercyl = le16dec(ptr + 66);
	d->d_acylinders = le32dec(ptr + 68);
	d->d_rpm = le16dec(ptr + 72);
	d->d_interleave = le16dec(ptr + 74);
	d->d_trackskew = le16dec(ptr + 76);
	d->d_cylskew = le16dec(ptr + 78);
	d->d_headswitch = le32dec(ptr + 80);
	d->d_trkseek = le32dec(ptr + 84);
	d->d_flags = le32dec(ptr + 88);
	d->d_drivedata[0] = le32dec(ptr + 92);
	d->d_drivedata[1] = le32dec(ptr + 96);
	d->d_drivedata[2] = le32dec(ptr + 100);
	d->d_drivedata[3] = le32dec(ptr + 104);
	d->d_drivedata[4] = le32dec(ptr + 108);
	d->d_spare[0] = le32dec(ptr + 112);
	d->d_spare[1] = le32dec(ptr + 116);
	d->d_spare[2] = le32dec(ptr + 120);
	d->d_spare[3] = le32dec(ptr + 124);
	d->d_spare[4] = le32dec(ptr + 128);
	d->d_checksum = le16dec(ptr + 136);
	d->d_npartitions = le16dec(ptr + 138);
	d->d_bbsize = le32dec(ptr + 140);
	d->d_sbsize = le32dec(ptr + 144);

	/* Decode the partition table entries. */
	for (i = 0; i < d->d_npartitions; i++)
		bsd_partition_le_dec(ptr + 148 + 16 * i, &d->d_partitions[i]);
	return(0);
}
/**
 * chunks_directory_read(cachepath, dir, stats_unique, stats_all, stats_extra,
 *     mustexist, statstape):
 * Read stats_extra statistics (statistics on non-chunks which are stored)
 * and the chunk directory (if present) from "${cachepath}/directory" into
 * memory allocated and assigned to ${*dir}; and return a hash table
 * populated with struct chunkdata records.  Populate stats_all with
 * statistics for all the chunks listed in the directory (counting
 * multiplicity) and populate stats_unique with statistics reflecting the
 * unique chunks.  If ${mustexist}, error out if the directory does not exist.
 * If ${statstape}, allocate struct chunkdata_statstape records instead.
 */
RWHASHTAB *
chunks_directory_read(const char * cachepath, void ** dir,
    struct chunkstats * stats_unique, struct chunkstats * stats_all,
    struct chunkstats * stats_extra, int mustexist, int statstape)
{
	struct chunkdata_external che;
	struct chunkstats_external cse;
	struct stat sb;
	RWHASHTAB * HT;
	char * s;
	struct chunkdata * p = NULL;
	struct chunkdata_statstape * ps = NULL;
	FILE * f;
	size_t numchunks;

	/* Zero statistics. */
	chunks_stats_zero(stats_unique);
	chunks_stats_zero(stats_all);
	chunks_stats_zero(stats_extra);

	/* Create a hash table to hold the chunkdata structures. */
	HT = rwhashtab_init(offsetof(struct chunkdata, hash), 32);
	if (HT == NULL)
		goto err0;

	/* Construct the string "${cachepath}/directory". */
	if (asprintf(&s, "%s/directory", cachepath) == -1) {
		warnp("asprintf");
		goto err1;
	}

	if (stat(s, &sb)) {
		/* Could not stat ${cachepath}/directory.  Error? */
		if (errno != ENOENT) {
			warnp("stat(%s)", s);
			goto err2;
		}

		/* The directory doesn't exist; complain if mustexist != 0. */
		if (mustexist) {
			warn0("Error reading cache directory from %s",
			    cachepath);
			goto err2;
		}

		/*
		 * ${cachepath}/directory does not exist; set ${*dir} to NULL
		 * and return the empty hash table.
		 */
		free(s);
		*dir = NULL;
		return (HT);
	}

	/*
	 * Make sure the directory file isn't too large or too small, in
	 * order to avoid any possibility of integer overflows.
	 */
	if ((sb.st_size < 0) ||
	    ((sizeof(off_t) > sizeof(size_t)) && (sb.st_size > SIZE_MAX))) {
		warn0("on-disk directory has insane size (%jd bytes): %s",
		    (intmax_t)(sb.st_size), s);
		goto err2;
	}

	/*
	 * Make sure the number of chunks is an integer: after subtracting
	 * the fixed-size statistics header, the remainder must be an exact
	 * multiple of the per-chunk record size.
	 */
	if ((size_t)(sb.st_size - sizeof(struct chunkstats_external)) %
	    (sizeof(struct chunkdata_external))) {
		warn0("on-disk directory is corrupt: %s", s);
		goto err2;
	}

	/* Compute the number of on-disk chunks. */
	numchunks = (size_t)(sb.st_size - sizeof(struct chunkstats_external)) /
	    sizeof(struct chunkdata_external);

	/* Make sure we don't get an integer overflow. */
	if (numchunks >= SIZE_MAX / sizeof(struct chunkdata_statstape)) {
		warn0("on-disk directory is too large: %s", s);
		goto err2;
	}

	/*
	 * Allocate memory to ${*dir} large enough to store a struct
	 * chunkdata or struct chunkdata_statstape for each struct
	 * chunkdata_external in ${cachepath}/directory.
	 */
	if (statstape) {
		ps = malloc(numchunks * sizeof(struct chunkdata_statstape));
		*dir = ps;
	} else {
		p = malloc(numchunks * sizeof(struct chunkdata));
		*dir = p;
	}
	if (*dir == NULL)
		goto err2;

	/* Open the directory file. */
	if ((f = fopen(s, "r")) == NULL) {
		warnp("fopen(%s)", s);
		goto err3;
	}

	/* Read the extra files statistics. */
	if (fread(&cse, sizeof(cse), 1, f) != 1) {
		warnp("fread(%s)", s);
		goto err4;
	}
	stats_extra->nchunks = le64dec(cse.nchunks);
	stats_extra->s_len = le64dec(cse.s_len);
	stats_extra->s_zlen = le64dec(cse.s_zlen);

	/* Read the chunk structures. */
	for (; numchunks != 0; numchunks--) {
		/* Set p to point at the struct chunkdata. */
		if (statstape)
			p = &ps->d;

		/* Read the file one record at a time... */
		if (fread(&che, sizeof(che), 1, f) != 1) {
			warnp("fread(%s)", s);
			goto err4;
		}

		/* ... creating struct chunkdata records... */
		memcpy(p->hash, che.hash, 32);
		p->len = le32dec(che.len);
		p->zlen_flags = le32dec(che.zlen);
		p->nrefs = le32dec(che.nrefs);
		p->ncopies = le32dec(che.ncopies);

		/* ... inserting them into the hash table... */
		if (rwhashtab_insert(HT, p))
			goto err4;

		/* ... and updating the statistics. */
		chunks_stats_add(stats_unique, p->len, p->zlen_flags, 1);
		chunks_stats_add(stats_all, p->len, p->zlen_flags,
		    p->ncopies);

		/* Sanity check. */
		if ((p->len == 0) || (p->zlen_flags == 0) || (p->nrefs == 0)) {
			warn0("on-disk directory is corrupt: %s", s);
			goto err4;
		}

		/* Move to next record. */
		if (statstape)
			ps++;
		else
			p++;
	}
	if (fclose(f)) {
		warnp("fclose(%s)", s);
		goto err3;
	}

	/* Free string allocated by asprintf. */
	free(s);

	/* Success! */
	return (HT);

err4:
	fclose(f);
err3:
	free(*dir);
err2:
	free(s);
err1:
	rwhashtab_free(HT);
err0:
	/* Failure! */
	return (NULL);
}
static int pkg_get_myabi(char *dest, size_t sz) { Elf *elf; Elf_Data *data; Elf_Note note; Elf_Scn *scn; char *src, *osname; const char *abi; GElf_Ehdr elfhdr; GElf_Shdr shdr; int fd, i, ret; uint32_t version; version = 0; ret = -1; scn = NULL; abi = NULL; if (elf_version(EV_CURRENT) == EV_NONE) { warnx("ELF library initialization failed: %s", elf_errmsg(-1)); return (-1); } if ((fd = open("/bin/sh", O_RDONLY)) < 0) { warn("open()"); return (-1); } if ((elf = elf_begin(fd, ELF_C_READ, NULL)) == NULL) { ret = -1; warnx("elf_begin() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (gelf_getehdr(elf, &elfhdr) == NULL) { ret = -1; warn("getehdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } while ((scn = elf_nextscn(elf, scn)) != NULL) { if (gelf_getshdr(scn, &shdr) != &shdr) { ret = -1; warn("getshdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (shdr.sh_type == SHT_NOTE) break; } if (scn == NULL) { ret = -1; warn("failed to get the note section"); goto cleanup; } data = elf_getdata(scn, NULL); src = data->d_buf; for (;;) { memcpy(¬e, src, sizeof(Elf_Note)); src += sizeof(Elf_Note); if (note.n_type == NT_VERSION) break; src += note.n_namesz + note.n_descsz; } osname = src; src += note.n_namesz; if (elfhdr.e_ident[EI_DATA] == ELFDATA2MSB) version = be32dec(src); else version = le32dec(src); for (i = 0; osname[i] != '\0'; i++) osname[i] = (char)tolower(osname[i]); snprintf(dest, sz, "%s:%d:%s:%s", osname, version / 100000, elf_corres_to_string(mach_corres, (int)elfhdr.e_machine), elf_corres_to_string(wordsize_corres, (int)elfhdr.e_ident[EI_CLASS])); ret = 0; switch (elfhdr.e_machine) { case EM_ARM: snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s:%s", elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]), (elfhdr.e_flags & EF_ARM_NEW_ABI) > 0 ? "eabi" : "oabi", (elfhdr.e_flags & EF_ARM_VFP_FLOAT) > 0 ? 
"softfp" : "vfp"); break; case EM_MIPS: /* * this is taken from binutils sources: * include/elf/mips.h * mapping is figured out from binutils: * gas/config/tc-mips.c */ switch (elfhdr.e_flags & EF_MIPS_ABI) { case E_MIPS_ABI_O32: abi = "o32"; break; case E_MIPS_ABI_N32: abi = "n32"; break; default: if (elfhdr.e_ident[EI_DATA] == ELFCLASS32) abi = "o32"; else if (elfhdr.e_ident[EI_DATA] == ELFCLASS64) abi = "n64"; break; } snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s", elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]), abi); break; } cleanup: if (elf != NULL) elf_end(elf); close(fd); return (ret); }
/*
 * Fill the state array with 16 bytes of magic and 32 bytes of key,
 * repeating the key if necessary.  The 8-byte stream position and the
 * 8-byte nonce are initialized to all-zeroes.
 */
void
chacha20_init(chacha20_ctx *ctx, const uint8_t *key, size_t keylen)
{
	const uint8_t *magic;
	unsigned int w;

	memset(ctx, 0, sizeof *ctx);

	/* Select the constant block for the key size (256- vs 128-bit). */
	magic = (keylen == 32) ? magic256 : magic128;

	/* Words 0..3: the magic constant. */
	for (w = 0; w < 4; w++)
		ctx->state[w] = le32dec(magic + 4 * w);

	/* Words 4..7: the first 16 bytes of the key. */
	for (w = 0; w < 4; w++)
		ctx->state[4 + w] = le32dec(key + 4 * w);

	/*
	 * Words 8..11: the second 16 bytes of the key for a 256-bit key,
	 * or the first 16 bytes again for a 128-bit key.
	 */
	if (keylen == 32)
		key += 16;
	for (w = 0; w < 4; w++)
		ctx->state[8 + w] = le32dec(key + 4 * w);
}
/* ARGSUSED */
/*
 * Open the constant database at ${path} for reading: validate the
 * 40-byte "NBCDB" header, map (or, on Minix, load) the file, compute the
 * hash/offset/data region pointers from the header fields, and sanity
 * check them against the mapped size.  Returns NULL with errno set on
 * any failure.
 */
struct cdbr *
cdbr_open(const char *path, int flags)
{
	uint8_t buf[40];
	int fd;
	struct cdbr *cdbr;
	struct stat sb;

	if ((fd = open(path, O_RDONLY)) == -1)
		return NULL;
	/* Any validation failure below reports EINVAL. */
	errno = EINVAL;
	if (fstat(fd, &sb) == -1 ||
	    read(fd, buf, sizeof(buf)) != sizeof(buf) ||
	    memcmp(buf, "NBCDB\n\0\001", 8) ||
	    (cdbr = malloc(sizeof(*cdbr))) == NULL) {
		close(fd);
		return NULL;
	}

	/* Decode the fixed-offset header fields. */
	cdbr->data_size = le32dec(buf + 24);
	cdbr->entries = le32dec(buf + 28);
	cdbr->entries_index = le32dec(buf + 32);
	cdbr->seed = le32dec(buf + 36);

	/* Offsets into the data area use the smallest sufficient width. */
	if (cdbr->data_size < 0x100)
		cdbr->offset_size = 1;
	else if (cdbr->data_size < 0x10000)
		cdbr->offset_size = 2;
	else
		cdbr->offset_size = 4;

	/* Likewise for entries of the hash index. */
	if (cdbr->entries_index < 0x100)
		cdbr->index_size = 1;
	else if (cdbr->entries_index < 0x10000)
		cdbr->index_size = 2;
	else
		cdbr->index_size = 4;

	cdbr->mmap_size = (size_t)sb.st_size;
#ifdef __minix
	/*
	 * Minix has no usable mmap here; read the file into heap memory.
	 * NOTE(review): the file position is already 40 bytes in after the
	 * header read above, yet mmap_size (the full file size) bytes are
	 * requested and region pointers below assume the buffer starts at
	 * offset 0 -- verify this path actually works on Minix.
	 */
	if(!(cdbr->mmap_base = malloc(cdbr->mmap_size))) {
		free(cdbr);
		return NULL;
	}
	if ((size_t)read(fd, cdbr->mmap_base, cdbr->mmap_size) !=
	    cdbr->mmap_size) {
		free(cdbr->mmap_base);
		free(cdbr);
		return NULL;
	}
#else /* !__minix */
	cdbr->mmap_base = mmap(NULL, cdbr->mmap_size, PROT_READ,
	    MAP_FILE|MAP_SHARED, fd, 0);
#endif /* __minix */
	close(fd);

	if (cdbr->mmap_base == MAP_FAILED) {
		free(cdbr);
		return NULL;
	}

	/*
	 * Region layout: 40-byte header, hash index, offset table
	 * (padded to offset_size alignment), then the data area.
	 */
	cdbr->hash_base = cdbr->mmap_base + 40;
	cdbr->offset_base = cdbr->hash_base +
	    cdbr->entries_index * cdbr->index_size;
	if (cdbr->entries_index * cdbr->index_size % cdbr->offset_size)
		cdbr->offset_base += cdbr->offset_size -
		    cdbr->entries_index * cdbr->index_size % cdbr->offset_size;
	cdbr->data_base = cdbr->offset_base +
	    (cdbr->entries + 1) * cdbr->offset_size;

	/*
	 * Reject wrapped pointers or regions extending past the mapping,
	 * and empty databases.
	 */
	if (cdbr->hash_base < cdbr->mmap_base ||
	    cdbr->offset_base < cdbr->mmap_base ||
	    cdbr->data_base < cdbr->mmap_base ||
	    cdbr->data_base + cdbr->data_size < cdbr->mmap_base ||
	    cdbr->data_base + cdbr->data_size >
	    cdbr->mmap_base + cdbr->mmap_size ||
	    cdbr->entries == 0 || cdbr->entries_index == 0) {
		errno = EINVAL;
		cdbr_close(cdbr);
		return NULL;
	}

	/* Precompute magic constants for fast division by the counts. */
	fast_divide32_prepare(cdbr->entries, &cdbr->entries_m,
	    &cdbr->entries_s1, &cdbr->entries_s2);
	fast_divide32_prepare(cdbr->entries_index, &cdbr->entries_index_m,
	    &cdbr->entries_index_s1, &cdbr->entries_index_s2);

	return cdbr;
}
/**
 * multitape_chunkiter_tmd(S, C, tmd, func, cookie, quiet):
 * Call ${func} on ${cookie} and each struct chunkheader involved in the
 * archive associated with the metadata ${tmd}.  If ${C} is non-NULL, call
 * chunks_stats_extrastats on ${C} and the length of each metadata fragment.
 * If ${quiet}, don't print any warnings about corrupt or missing files.
 * Return 0 (success), 1 (a required file is missing), 2 (a required file is
 * corrupt), -1 (error), or the first non-zero value returned by ${func}.
 *
 * Bug fixed: when chunks_read_cache() failed, the code jumped to the
 * failure path without setting ${rc}, which was still 0 from the previous
 * successful call -- so the function could report success on an error.
 * ${rc} is now set to -1 on that path.
 */
int
multitape_chunkiter_tmd(STORAGE_R * S, CHUNKS_S * C,
    const struct tapemetadata * tmd,
    int func(void *, struct chunkheader *), void * cookie, int quiet)
{
	CHUNKS_R * CR;		/* Chunk layer read cookie. */
	struct tapemetaindex tmi;	/* Metaindex. */
	size_t hindexpos;	/* Header stream index position. */
	size_t cindexpos;	/* Chunk index stream index position. */
	size_t tindexpos;	/* Trailer stream index position. */
	uint8_t * ibuf;		/* Contains a tape index chunk. */
	size_t ibufpos;		/* Position within ibuf. */
	size_t ibuflen;		/* Length of valid data in ibuf. */
	struct chunkheader * ch;	/* Chunk header being processed. */
	size_t chunklen, chunkzlen;	/* Decoded chunk parameters. */
	int rc;

	/* Obtain a chunk layer read cookie. */
	if ((CR = chunks_read_init(S, MAXCHUNK)) == NULL) {
		rc = -1;
		goto err0;
	}

	/* Read the tape metaindex. */
	if ((rc = multitape_metaindex_get(S, C, &tmi, tmd, quiet)) != 0)
		goto err1;

	/* Allocate a buffer for holding chunks of index. */
	if ((ibuf = malloc(MAXCHUNK + sizeof(struct chunkheader))) == NULL) {
		rc = -1;
		goto err2;
	}
	ibuflen = 0;

	/* Iterate through the header stream index. */
	for (hindexpos = 0;
	    hindexpos + sizeof(struct chunkheader) <= tmi.hindexlen;
	    hindexpos += sizeof(struct chunkheader)) {
		ch = (struct chunkheader *)(&tmi.hindex[hindexpos]);
		if ((rc = func(cookie, ch)) != 0)
			goto err3;
	}

	/*
	 * Iterate through the chunk index stream index.  Each listed chunk
	 * is itself read and scanned for further chunk headers.
	 */
	for (cindexpos = 0;
	    cindexpos + sizeof(struct chunkheader) <= tmi.cindexlen;
	    cindexpos += sizeof(struct chunkheader)) {
		/* Call func on the next chunk from the stream. */
		ch = (struct chunkheader *)(&tmi.cindex[cindexpos]);
		if ((rc = func(cookie, ch)) != 0)
			goto err3;

		/* Decode chunk header. */
		chunklen = le32dec(ch->len);
		chunkzlen = le32dec(ch->zlen);

		/* Sanity check. */
		if (chunklen > MAXCHUNK) {
			if (quiet == 0)
				warn0("Chunk exceeds maximum size");
			rc = 2;
			goto err3;
		}

		/* We want to cache this chunk after reading it. */
		if (chunks_read_cache(CR, ch->hash)) {
			rc = -1;
			goto err3;
		}

		/* Read the chunk into buffer. */
		if ((rc = chunks_read_chunk(CR, ch->hash, chunklen,
		    chunkzlen, ibuf + ibuflen, quiet)) != 0)
			goto err3;
		ibuflen += chunklen;

		/* Handle any chunk headers within ibuf. */
		for (ibufpos = 0;
		    ibufpos + sizeof(struct chunkheader) <= ibuflen;
		    ibufpos += sizeof(struct chunkheader)) {
			/* Deal with a chunk header. */
			ch = (struct chunkheader *)(&ibuf[ibufpos]);
			if ((rc = func(cookie, ch)) != 0)
				goto err3;
		}

		/* Move buffered data to the start of the buffer. */
		memmove(ibuf, ibuf + ibufpos, ibuflen - ibufpos);
		ibuflen -= ibufpos;
	}

	/* Iterate through the trailer stream index. */
	for (tindexpos = 0;
	    tindexpos + sizeof(struct chunkheader) <= tmi.tindexlen;
	    tindexpos += sizeof(struct chunkheader)) {
		ch = (struct chunkheader *)(&tmi.tindex[tindexpos]);
		if ((rc = func(cookie, ch)) != 0)
			goto err3;
	}

	/* Free index chunk buffer. */
	free(ibuf);

	/* Free metaindex buffers. */
	multitape_metaindex_free(&tmi);

	/* Close handles. */
	chunks_read_free(CR);

	/* Success! */
	return (0);

err3:
	free(ibuf);
err2:
	multitape_metaindex_free(&tmi);
err1:
	chunks_read_free(CR);
err0:
	/* Failure! */
	return (rc);
}
int pkg_get_myarch(char *dest, size_t sz) { Elf *elf = NULL; GElf_Ehdr elfhdr; GElf_Shdr shdr; Elf_Data *data; Elf_Note note; Elf_Scn *scn = NULL; int fd; char *src = NULL; char *osname; uint32_t version = 0; int ret = EPKG_OK; int i; const char *abi, *endian_corres_str, *wordsize_corres_str; if (elf_version(EV_CURRENT) == EV_NONE) { pkg_emit_error("ELF library initialization failed: %s", elf_errmsg(-1)); return (EPKG_FATAL); } if ((fd = open(_PATH_BSHELL, O_RDONLY)) < 0) { pkg_emit_errno("open", _PATH_BSHELL); snprintf(dest, sz, "%s", "unknown"); return (EPKG_FATAL); } if ((elf = elf_begin(fd, ELF_C_READ, NULL)) == NULL) { ret = EPKG_FATAL; pkg_emit_error("elf_begin() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (gelf_getehdr(elf, &elfhdr) == NULL) { ret = EPKG_FATAL; pkg_emit_error("getehdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } while ((scn = elf_nextscn(elf, scn)) != NULL) { if (gelf_getshdr(scn, &shdr) != &shdr) { ret = EPKG_FATAL; pkg_emit_error("getshdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (shdr.sh_type == SHT_NOTE) break; } if (scn == NULL) { ret = EPKG_FATAL; pkg_emit_error("fail to get the note section"); goto cleanup; } data = elf_getdata(scn, NULL); src = data->d_buf; while (1) { memcpy(¬e, src, sizeof(Elf_Note)); src += sizeof(Elf_Note); if (note.n_type == NT_VERSION) break; src += note.n_namesz + note.n_descsz; } osname = src; src += roundup2(note.n_namesz, 4); if (elfhdr.e_ident[EI_DATA] == ELFDATA2MSB) version = be32dec(src); else version = le32dec(src); for (i = 0; osname[i] != '\0'; i++) osname[i] = (char)tolower(osname[i]); wordsize_corres_str = elf_corres_to_string(wordsize_corres, (int)elfhdr.e_ident[EI_CLASS]); #if defined(__DragonFly__) snprintf(dest, sz, "%s:%d.%d:%s:%s", osname, version / 100000, (((version / 100 % 1000)+1)/2)*2, #else snprintf(dest, sz, "%s:%d:%s:%s", osname, version / 100000, #endif elf_corres_to_string(mach_corres, (int) elfhdr.e_machine), wordsize_corres_str); switch (elfhdr.e_machine) { 
case EM_ARM: endian_corres_str = elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]); snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s:%s", endian_corres_str, (elfhdr.e_flags & EF_ARM_NEW_ABI) > 0 ? "eabi" : "oabi", (elfhdr.e_flags & EF_ARM_VFP_FLOAT) > 0 ? "softfp" : "vfp"); break; case EM_MIPS: /* * this is taken from binutils sources: * include/elf/mips.h * mapping is figured out from binutils: * gas/config/tc-mips.c */ switch (elfhdr.e_flags & EF_MIPS_ABI) { case E_MIPS_ABI_O32: abi = "o32"; break; case E_MIPS_ABI_N32: abi = "n32"; break; default: if (elfhdr.e_ident[EI_DATA] == ELFCLASS32) abi = "o32"; else if (elfhdr.e_ident[EI_DATA] == ELFCLASS64) abi = "n64"; else abi = "unknown"; break; } endian_corres_str = elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]); snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s", endian_corres_str, abi); break; default: break; } cleanup: if (elf != NULL) elf_end(elf); close(fd); return (ret); }
/***************************************************************************
Function: sInitChan
Purpose:  Initialization of a channel and channel structure
Call:     sInitChan(CtlP,ChP,AiopNum,ChanNum)
          CONTROLLER_T *CtlP; Ptr to controller structure
          CHANNEL_T *ChP; Ptr to channel structure
          int AiopNum; AIOP number within controller
          int ChanNum; Channel number within AIOP
Return:   int: TRUE if initialization succeeded, FALSE if it fails because channel
               number exceeds number of channels available in AIOP.
Comments: This function must be called before a channel can be used.
Warnings: No range checking on any of the parameters is done.

          No context switches are allowed while executing this function.
*/
int sInitChan(	CONTROLLER_T *CtlP,
		CHANNEL_T *ChP,
		int AiopNum,
		int ChanNum)
{
	int i, ChOff;
	Byte_t *ChR;
	/*
	 * NOTE(review): static scratch buffer -- not reentrant; the
	 * "no context switches" warning above presumably covers this,
	 * but confirm no concurrent initialization is possible.
	 */
	static Byte_t R[4];

	if(ChanNum >= CtlP->AiopNumChan[AiopNum])
		return(FALSE);		/* exceeds num chans in AIOP */

	/* Channel, AIOP, and controller identifiers */
	ChP->CtlP = CtlP;
	ChP->ChanID = CtlP->AiopID[AiopNum];
	ChP->AiopNum = AiopNum;
	ChP->ChanNum = ChanNum;

	/*
	 * Initialize the channel from the RData array: each 4-byte record
	 * is a register address/value pair, with the address adjusted by
	 * 0x10 per channel before being written via the index register.
	 */
	for(i=0; i < RDATASIZE; i+=4)
	{
		R[0] = RData[i];
		R[1] = RData[i+1] + 0x10 * ChanNum;
		R[2] = RData[i+2];
		R[3] = RData[i+3];
		rp_writech4(ChP,_INDX_ADDR,le32dec(R));
	}

	/* Cache a per-channel copy of the register data for later updates. */
	ChR = ChP->R;
	for(i=0; i < RREGDATASIZE; i+=4)
	{
		ChR[i] = RRegData[i];
		ChR[i+1] = RRegData[i+1] + 0x10 * ChanNum;
		ChR[i+2] = RRegData[i+2];
		ChR[i+3] = RRegData[i+3];
	}

	/* Indexed registers: each channel's register bank is 0x1000 apart. */
	ChOff = (Word_t)ChanNum * 0x1000;

	/* Baud rate divisor, preset to 9600 baud. */
	ChP->BaudDiv[0] = (Byte_t)(ChOff + _BAUD);
	ChP->BaudDiv[1] = (Byte_t)((ChOff + _BAUD) >> 8);
	ChP->BaudDiv[2] = (Byte_t)BRD9600;
	ChP->BaudDiv[3] = (Byte_t)(BRD9600 >> 8);
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->BaudDiv));

	/* Tx control register, initially all clear. */
	ChP->TxControl[0] = (Byte_t)(ChOff + _TX_CTRL);
	ChP->TxControl[1] = (Byte_t)((ChOff + _TX_CTRL) >> 8);
	ChP->TxControl[2] = 0;
	ChP->TxControl[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl));

	/* Rx control register, initially all clear. */
	ChP->RxControl[0] = (Byte_t)(ChOff + _RX_CTRL);
	ChP->RxControl[1] = (Byte_t)((ChOff + _RX_CTRL) >> 8);
	ChP->RxControl[2] = 0;
	ChP->RxControl[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl));

	/* Tx enables, initially all clear. */
	ChP->TxEnables[0] = (Byte_t)(ChOff + _TX_ENBLS);
	ChP->TxEnables[1] = (Byte_t)((ChOff + _TX_ENBLS) >> 8);
	ChP->TxEnables[2] = 0;
	ChP->TxEnables[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxEnables));

	/* Tx compare value, initially clear. */
	ChP->TxCompare[0] = (Byte_t)(ChOff + _TXCMP1);
	ChP->TxCompare[1] = (Byte_t)((ChOff + _TXCMP1) >> 8);
	ChP->TxCompare[2] = 0;
	ChP->TxCompare[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxCompare));

	/* Tx replace values, initially clear. */
	ChP->TxReplace1[0] = (Byte_t)(ChOff + _TXREP1B1);
	ChP->TxReplace1[1] = (Byte_t)((ChOff + _TXREP1B1) >> 8);
	ChP->TxReplace1[2] = 0;
	ChP->TxReplace1[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace1));

	ChP->TxReplace2[0] = (Byte_t)(ChOff + _TXREP2);
	ChP->TxReplace2[1] = (Byte_t)((ChOff + _TXREP2) >> 8);
	ChP->TxReplace2[2] = 0;
	ChP->TxReplace2[3] = 0;
	rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace2));

	/* Reset the Tx FIFO and clear its in/out pointers. */
	ChP->TxFIFOPtrs = ChOff + _TXF_OUTP;
	ChP->TxFIFO = ChOff + _TX_FIFO;

	rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */
	rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Tx FIFO count */
	rp_writech2(ChP,_INDX_ADDR,ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */
	rp_writech2(ChP,_INDX_DATA,0);

	/* Reset the Rx FIFO and clear its in/out pointers. */
	ChP->RxFIFOPtrs = ChOff + _RXF_OUTP;
	ChP->RxFIFO = ChOff + _RX_FIFO;

	rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */
	rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Rx FIFO count */
	rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs); /* clear Rx out ptr */
	rp_writech2(ChP,_INDX_DATA,0);
	rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
	rp_writech2(ChP,_INDX_DATA,0);

	/* Zero the Tx priority count and pointer. */
	ChP->TxPrioCnt = ChOff + _TXP_CNT;
	rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioCnt);
	rp_writech1(ChP,_INDX_DATA,0);
	ChP->TxPrioPtr = ChOff + _TXP_PNTR;
	rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioPtr);
	rp_writech1(ChP,_INDX_DATA,0);
	ChP->TxPrioBuf = ChOff + _TXP_BUF;
	sEnRxProcessor(ChP);		/* start the Rx processor */

	return(TRUE);
}
int pkg_get_myarch(char *dest, size_t sz) { Elf *elf = NULL; GElf_Ehdr elfhdr; GElf_Shdr shdr; Elf_Data *data; Elf_Note note; Elf_Scn *scn = NULL; int fd; char *src = NULL; char *osname; uint32_t version = 0; int ret = EPKG_OK; int i; const char *arch, *abi, *endian_corres_str, *wordsize_corres_str, *fpu; if (elf_version(EV_CURRENT) == EV_NONE) { pkg_emit_error("ELF library initialization failed: %s", elf_errmsg(-1)); return (EPKG_FATAL); } if ((fd = open(_PATH_BSHELL, O_RDONLY)) < 0) { pkg_emit_errno("open", _PATH_BSHELL); snprintf(dest, sz, "%s", "unknown"); return (EPKG_FATAL); } if ((elf = elf_begin(fd, ELF_C_READ, NULL)) == NULL) { ret = EPKG_FATAL; pkg_emit_error("elf_begin() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (gelf_getehdr(elf, &elfhdr) == NULL) { ret = EPKG_FATAL; pkg_emit_error("getehdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } while ((scn = elf_nextscn(elf, scn)) != NULL) { if (gelf_getshdr(scn, &shdr) != &shdr) { ret = EPKG_FATAL; pkg_emit_error("getshdr() failed: %s.", elf_errmsg(-1)); goto cleanup; } if (shdr.sh_type == SHT_NOTE) break; } if (scn == NULL) { ret = EPKG_FATAL; pkg_emit_error("failed to get the note section"); goto cleanup; } data = elf_getdata(scn, NULL); src = data->d_buf; while ((uintptr_t)src < ((uintptr_t)data->d_buf + data->d_size)) { memcpy(¬e, src, sizeof(Elf_Note)); src += sizeof(Elf_Note); if (note.n_type == NT_VERSION) break; src += note.n_namesz + note.n_descsz; } if ((uintptr_t)src >= ((uintptr_t)data->d_buf + data->d_size)) { ret = EPKG_FATAL; pkg_emit_error("failed to find the version elf note"); goto cleanup; } osname = src; src += roundup2(note.n_namesz, 4); if (elfhdr.e_ident[EI_DATA] == ELFDATA2MSB) version = be32dec(src); else version = le32dec(src); for (i = 0; osname[i] != '\0'; i++) osname[i] = (char)tolower(osname[i]); wordsize_corres_str = elf_corres_to_string(wordsize_corres, (int)elfhdr.e_ident[EI_CLASS]); arch = elf_corres_to_string(mach_corres, (int) elfhdr.e_machine); #if 
defined(__DragonFly__) snprintf(dest, sz, "%s:%d.%d", osname, version / 100000, (((version / 100 % 1000)+1)/2)*2); #else snprintf(dest, sz, "%s:%d", osname, version / 100000); #endif switch (elfhdr.e_machine) { case EM_ARM: endian_corres_str = elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]); /* FreeBSD doesn't support the hard-float ABI yet */ fpu = "softfp"; if ((elfhdr.e_flags & 0xFF000000) != 0) { const char *sh_name = NULL; size_t shstrndx; /* This is an EABI file, the conformance level is set */ abi = "eabi"; /* Find which TARGET_ARCH we are building for. */ elf_getshdrstrndx(elf, &shstrndx); while ((scn = elf_nextscn(elf, scn)) != NULL) { sh_name = NULL; if (gelf_getshdr(scn, &shdr) != &shdr) { scn = NULL; break; } sh_name = elf_strptr(elf, shstrndx, shdr.sh_name); if (sh_name == NULL) continue; if (strcmp(".ARM.attributes", sh_name) == 0) break; } if (scn != NULL && sh_name != NULL) { data = elf_getdata(scn, NULL); /* * Prior to FreeBSD 10.0 libelf would return * NULL from elf_getdata on the .ARM.attributes * section. As this was the first release to * get armv6 support assume a NULL value means * arm. * * This assumption can be removed when 9.x * is unsupported. */ if (data != NULL) { arch = aeabi_parse_arm_attributes( data->d_buf, data->d_size); if (arch == NULL) { ret = EPKG_FATAL; pkg_emit_error( "unknown ARM ARCH"); goto cleanup; } } } else { ret = EPKG_FATAL; pkg_emit_error("Unable to find the " ".ARM.attributes section"); goto cleanup; } } else if (elfhdr.e_ident[EI_OSABI] != ELFOSABI_NONE) { /* * EABI executables all have this field set to * ELFOSABI_NONE, therefore it must be an oabi file. */ abi = "oabi"; } else { /* * We may have failed to positively detect the ABI, * set the ABI to unknown. If we end up here one of * the above cases should be fixed for the binary. 
*/ ret = EPKG_FATAL; pkg_emit_error("unknown ARM ABI"); goto cleanup; } snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s:%s:%s:%s", arch, wordsize_corres_str, endian_corres_str, abi, fpu); break; case EM_MIPS: /* * this is taken from binutils sources: * include/elf/mips.h * mapping is figured out from binutils: * gas/config/tc-mips.c */ switch (elfhdr.e_flags & EF_MIPS_ABI) { case E_MIPS_ABI_O32: abi = "o32"; break; case E_MIPS_ABI_N32: abi = "n32"; break; default: if (elfhdr.e_ident[EI_DATA] == ELFCLASS32) abi = "o32"; else if (elfhdr.e_ident[EI_DATA] == ELFCLASS64) abi = "n64"; else abi = "unknown"; break; } endian_corres_str = elf_corres_to_string(endian_corres, (int)elfhdr.e_ident[EI_DATA]); snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s:%s:%s", arch, wordsize_corres_str, endian_corres_str, abi); break; default: snprintf(dest + strlen(dest), sz - strlen(dest), ":%s:%s", arch, wordsize_corres_str); break; } cleanup: if (elf != NULL) elf_end(elf); close(fd); return (ret); }
int g_bde_decode_lock(struct g_bde_softc *sc, struct g_bde_key *gl, u_char *ptr) { int shuffle[NLOCK_FIELDS]; u_char *p; u_char hash[16], hash2[16]; MD5_CTX c; int i; p = ptr; g_bde_shuffle_lock(sc->sha2, shuffle); for (i = 0; i < NLOCK_FIELDS; i++) { switch(shuffle[i]) { case 0: gl->sector0 = le64dec(p); p += 8; break; case 1: gl->sectorN = le64dec(p); p += 8; break; case 2: gl->keyoffset = le64dec(p); p += 8; break; case 3: gl->sectorsize = le32dec(p); p += 4; break; case 4: gl->flags = le32dec(p); p += 4; break; case 5: case 6: case 7: case 8: gl->lsector[shuffle[i] - 5] = le64dec(p); p += 8; break; case 9: bcopy(p, gl->spare, sizeof gl->spare); p += sizeof gl->spare; break; case 10: bcopy(p, gl->salt, sizeof gl->salt); p += sizeof gl->salt; break; case 11: bcopy(p, gl->mkey, sizeof gl->mkey); p += sizeof gl->mkey; break; case 12: bcopy(p, hash2, sizeof hash2); bzero(p, sizeof hash2); p += sizeof hash2; break; } } if(ptr + G_BDE_LOCKSIZE != p) return(-1); MD5Init(&c); MD5Update(&c, "0000", 4); /* Versioning */ MD5Update(&c, ptr, G_BDE_LOCKSIZE); MD5Final(hash, &c); if (bcmp(hash, hash2, sizeof hash2)) return (1); return (0); }
/* * Table 19. 5.4 SMART Attributes */ static void print_intel_add_smart(void *buf, uint32_t size __unused) { uint8_t *walker = buf; uint8_t *end = walker + 150; const char *name; uint64_t raw; uint8_t normalized; static struct kv_name kv[] = { { 0xab, "Program Fail Count" }, { 0xac, "Erase Fail Count" }, { 0xad, "Wear Leveling Count" }, { 0xb8, "End to End Error Count" }, { 0xc7, "CRC Error Count" }, { 0xe2, "Timed: Media Wear" }, { 0xe3, "Timed: Host Read %" }, { 0xe4, "Timed: Elapsed Time" }, { 0xea, "Thermal Throttle Status" }, { 0xf0, "Retry Buffer Overflows" }, { 0xf3, "PLL Lock Loss Count" }, { 0xf4, "NAND Bytes Written" }, { 0xf5, "Host Bytes Written" }, }; printf("Additional SMART Data Log\n"); printf("=========================\n"); /* * walker[0] = Key * walker[1,2] = reserved * walker[3] = Normalized Value * walker[4] = reserved * walker[5..10] = Little Endian Raw value * (or other represenations) * walker[11] = reserved */ while (walker < end) { name = kv_lookup(kv, nitems(kv), *walker); normalized = walker[3]; raw = le48dec(walker + 5); switch (*walker){ case 0: break; case 0xad: printf("%-32s: %3d min: %u max: %u ave: %u\n", name, normalized, le16dec(walker + 5), le16dec(walker + 7), le16dec(walker + 9)); break; case 0xe2: printf("%-32s: %3d %.3f%%\n", name, normalized, raw / 1024.0); break; case 0xea: printf("%-32s: %3d %d%% %d times\n", name, normalized, walker[5], le32dec(walker+6)); break; default: printf("%-32s: %3d %ju\n", name, normalized, (uintmax_t)raw); break; } walker += 12; } }
/**
 * ccache_read(path):
 * Read the chunkification cache (if present) from the directory ${path};
 * return a Patricia tree mapping absolute paths to cache entries.
 * On any failure, all partially-constructed state is released via the
 * goto-chain below and NULL is returned.
 */
CCACHE *
ccache_read(const char * path)
{
	struct ccache_internal * C;
	struct ccache_read_internal R;
	struct ccache_record * ccr;
#ifdef HAVE_MMAP
	struct stat sb;
	off_t fpos;
	long int pagesize;
#endif
	size_t i;
	uint8_t N[4];

	/* The caller must pass a file name to be read. */
	assert(path != NULL);

	/* Allocate memory for the cache. */
	if ((C = malloc(sizeof(struct ccache_internal))) == NULL)
		goto err0;
	memset(C, 0, sizeof(struct ccache_internal));

	/* Create a Patricia tree to store cache entries. */
	if ((C->tree = patricia_init()) == NULL)
		goto err1;

	/* Construct the name of cache file. */
	if (asprintf(&R.s, "%s/cache", path) == -1) {
		warnp("asprintf");
		goto err2;
	}

	/* Open the cache file. */
	if ((R.f = fopen(R.s, "r")) == NULL) {
		/* ENOENT isn't an error. */
		if (errno != ENOENT) {
			warnp("fopen(%s)", R.s);
			goto err3;
		}

		/* No cache exists on disk; return an empty cache. */
		goto emptycache;
	}

	/**
	 * We read the cache file in three steps:
	 * 1. Read a little-endian uint32_t which indicates the number of
	 * records in the cache file.
	 * 2. Read N (record, path suffix) pairs and insert them into a
	 * Patricia tree.
	 * 3. Iterate through the tree and read chunk headers and compressed
	 * entry trailers.
	 */

	/* Read the number of cache entries. */
	if (fread(N, 4, 1, R.f) != 1) {
		if (ferror(R.f))
			warnp("Error reading cache: %s", R.s);
		else
			warn0("Error reading cache: %s", R.s);
		goto err4;
	}
	R.N = le32dec(N);

	/*
	 * Read N (record, path suffix) pairs.  read_rec() accumulates the
	 * total header/trailer byte count into R.datalen as it goes.
	 */
	R.sbuf = NULL;
	R.sbuflen = R.slen = R.datalen = 0;
	for (i = 0; i < R.N; i++) {
		if ((ccr = read_rec(&R)) == NULL)
			goto err5;
		if (patricia_insert(C->tree, R.sbuf, R.slen, ccr))
			goto err5;
		C->chunksusage += ccr->nch * sizeof(struct chunkheader);
		C->trailerusage += ccr->tzlen;
	}

#ifdef HAVE_MMAP
	/* Obtain page size, since mmapped regions must be page-aligned. */
	if ((pagesize = sysconf(_SC_PAGESIZE)) == -1) {
		warnp("sysconf(_SC_PAGESIZE)");
		goto err5;
	}

	/* Map the remainder of the cache into memory. */
	fpos = ftello(R.f);
	if (fpos == -1) {
		warnp("ftello(%s)", R.s);
		goto err5;
	}
	if (fstat(fileno(R.f), &sb)) {
		warnp("fstat(%s)", R.s);
		goto err5;
	}
	/* The file must contain exactly the data the records promised. */
	if (sb.st_size != (off_t)(fpos + R.datalen)) {
		warn0("Cache has incorrect size (%jd, expected %jd)\n",
		    (intmax_t)(sb.st_size), (intmax_t)(fpos + R.datalen));
		goto err5;
	}
	/*
	 * Map from the page boundary at or below fpos; C->datalen includes
	 * the (fpos % pagesize) bytes of slack before the real data.
	 */
	C->datalen = R.datalen + (fpos % pagesize);
	if ((C->data = mmap(NULL, C->datalen, PROT_READ,
#ifdef MAP_NOCORE
	    MAP_PRIVATE | MAP_NOCORE,
#else
	    MAP_PRIVATE,
#endif
	    fileno(R.f), fpos - (fpos % pagesize))) == MAP_FAILED) {
		warnp("mmap(%s)", R.s);
		goto err5;
	}
	R.data = (uint8_t *)C->data + (fpos % pagesize);
#else
	/* Allocate space. */
	C->datalen = R.datalen;
	if (((C->data = malloc(C->datalen)) == NULL) && (C->datalen > 0))
		goto err5;
	if (fread(C->data, C->datalen, 1, R.f) != 1) {
		warnp("fread(%s)", R.s);
		goto err6;
	}
	R.data = (uint8_t *)C->data;
#endif

	/* Iterate through the tree reading chunk headers and trailers. */
	if (patricia_foreach(C->tree, callback_read_data, &R)) {
		warnp("Error reading cache: %s", R.s);
		goto err6;
	}

	/* Free buffer used for storing paths. */
	free(R.sbuf);

	/* Close the cache file. */
	fclose(R.f);

	/* Free string allocated by asprintf. */
	free(R.s);

	/* Success! */
	return (C);

emptycache:
	/* Nothing went wrong, but there's nothing on disk. */
	free(R.s);
	return (C);

err6:
#ifdef HAVE_MMAP
	if (C->datalen > 0)
		munmap(C->data, C->datalen);
#else
	free(C->data);
#endif
err5:
	/* Free the path buffer and every record inserted so far. */
	free(R.sbuf);
	patricia_foreach(C->tree, callback_free, NULL);
err4:
	fclose(R.f);
err3:
	free(R.s);
err2:
	patricia_free(C->tree);
err1:
	free(C);
err0:
	/* Failure! */
	return (NULL);
}
static void print_hgst_info_background_scan(void *buf, uint16_t subtype __unused, uint8_t res __unused, uint32_t size) { uint8_t *walker = buf; uint8_t status; uint16_t code, nscan, progress; uint32_t pom, nand; printf("Background Media Scan Subpage:\n"); /* Decode the header */ code = le16dec(walker); walker += 2; walker++; /* Ignore fixed flags */ if (*walker++ != 0x10) { printf("Bad length for background scan header\n"); return; } if (code != 0) { printf("Expceted code 0, found code %#x\n", code); return; } pom = le32dec(walker); walker += 4; walker++; /* Reserved */ status = *walker++; nscan = le16dec(walker); walker += 2; progress = le16dec(walker); walker += 2; walker += 6; /* Reserved */ printf(" %-30s: %d\n", "Power On Minutes", pom); printf(" %-30s: %x (%s)\n", "BMS Status", status, status == 0 ? "idle" : (status == 1 ? "active" : (status == 8 ? "suspended" : "unknown"))); printf(" %-30s: %d\n", "Number of BMS", nscan); printf(" %-30s: %d\n", "Progress Current BMS", progress); /* Report retirements */ if (walker - (uint8_t *)buf != 20) { printf("Coding error, offset not 20\n"); return; } size -= 20; printf(" %-30s: %d\n", "BMS retirements", size / 0x18); while (size > 0) { code = le16dec(walker); walker += 2; walker++; if (*walker++ != 0x14) { printf("Bad length parameter\n"); return; } pom = le32dec(walker); walker += 4; /* * Spec sheet says the following are hard coded, if true, just * print the NAND retirement. */ if (walker[0] == 0x41 && walker[1] == 0x0b && walker[2] == 0x01 && walker[3] == 0x00 && walker[4] == 0x00 && walker[5] == 0x00 && walker[6] == 0x00 && walker[7] == 0x00) { walker += 8; walker += 4; /* Skip reserved */ nand = le32dec(walker); walker += 4; printf(" %-30s: %d\n", "Retirement number", code); printf(" %-28s: %#x\n", "NAND (C/T)BBBPPP", nand); } else { printf("Parameter %#x entry corrupt\n", code); walker += 16; } } }
/*
 * Read a cache record.
 * ${cookie} is a struct ccache_read_internal tracking the open cache file,
 * the previous entry's path (for prefix compression), and the running total
 * of chunk header / trailer bytes still to be read.  Returns a freshly
 * allocated record, or NULL on I/O error or corrupt data.
 */
static struct ccache_record *
read_rec(void * cookie)
{
	struct ccache_record_external ccre;
	struct ccache_read_internal * R = cookie;
	struct ccache_record * ccr;
	size_t prefixlen, suffixlen;
	uint8_t * sbuf_new;

	/* Read a struct ccache_record_external. */
	if (fread(&ccre, sizeof(ccre), 1, R->f) != 1) {
		if (ferror(R->f))
			warnp("Error reading cache: %s", R->s);
		else
			warn0("Error reading cache: %s", R->s);
		goto err0;
	}

	/* Allocate memory for a record. */
	if ((ccr = malloc(sizeof(struct ccache_record))) == NULL)
		goto err0;

	/* Decode record (all on-disk fields are little-endian). */
	ccr->ino = le64dec(ccre.ino);
	ccr->size = le64dec(ccre.size);
	ccr->mtime = le64dec(ccre.mtime);
	ccr->nch = le64dec(ccre.nch);
	ccr->tlen = le32dec(ccre.tlen);
	ccr->tzlen = le32dec(ccre.tzlen);
	prefixlen = le32dec(ccre.prefixlen);
	suffixlen = le32dec(ccre.suffixlen);
	ccr->age = le32dec(ccre.age);

	/* Zero other fields. */
	ccr->nchalloc = 0;
	ccr->chp = NULL;
	ccr->ztrailer = NULL;
	ccr->flags = 0;

	/*
	 * Sanity check some fields: the path must be non-empty, the chunk
	 * count must not overflow a later size computation, an entry must
	 * have chunks and/or a trailer, the trailer's plain and compressed
	 * lengths must be zero/nonzero together, and the age must be
	 * incrementable without overflow.
	 */
	if ((prefixlen == 0 && suffixlen == 0) ||
	    (ccr->nch > SIZE_MAX / sizeof(struct chunkheader)) ||
	    (ccr->nch == 0 && ccr->tlen == 0) ||
	    (ccr->tlen == 0 && ccr->tzlen != 0) ||
	    (ccr->tlen != 0 && ccr->tzlen == 0) ||
	    (ccr->age == INT_MAX))
		goto err2;

	/*
	 * The prefix length must be <= the length of the previous path; and
	 * the prefix length + suffix length must not overflow.
	 */
	if ((prefixlen > R->slen) || (prefixlen > prefixlen + suffixlen))
		goto err2;

	/* Make sure we have enough space for the entry path. */
	if (prefixlen + suffixlen > R->sbuflen) {
		sbuf_new = realloc(R->sbuf, prefixlen + suffixlen);
		if (sbuf_new == NULL)
			goto err1;
		R->sbuf = sbuf_new;
		R->sbuflen = prefixlen + suffixlen;
	}

	/*
	 * Read the entry path suffix; the first prefixlen bytes are shared
	 * with the previous entry and already sit in R->sbuf.
	 */
	if (fread(R->sbuf + prefixlen, suffixlen, 1, R->f) != 1) {
		if (ferror(R->f))
			warnp("Error reading cache: %s", R->s);
		else
			warn0("Error reading cache: %s", R->s);
		goto err1;
	}
	R->slen = prefixlen + suffixlen;

	/*
	 * Add chunk header and trailer data lengths to datalen; each
	 * addition is followed by a wrap-around check (unsigned overflow
	 * would make the total appear smaller than an addend).
	 */
	R->datalen += ccr->tzlen;
	if (R->datalen < ccr->tzlen)
		goto err2;
	R->datalen += ccr->nch * sizeof(struct chunkheader);
	if (R->datalen < ccr->nch * sizeof(struct chunkheader))
		goto err2;

	/* Success! */
	return (ccr);

err2:
	warn0("Cache file is corrupt: %s", R->s);
err1:
	free(ccr);
err0:
	/* Failure! */
	return (NULL);
}
/*
 * Intercept management frames to collect beacon rssi data
 * and to do ibss merges.
 *
 * For beacons from our BSS this also compares the beacon's TSF against
 * the previous one to measure how far the AP's beacon timer is drifting,
 * and optionally resyncs our beacon timers (STA mode only).
 */
void
ath_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m, int subtype,
    const struct ieee80211_rx_stats *rxs, int rssi, int nf)
{
	struct ieee80211vap *vap = ni->ni_vap;
	struct ath_softc *sc = vap->iv_ic->ic_softc;
	uint64_t tsf_beacon_old, tsf_beacon;
	uint64_t nexttbtt;
	int64_t tsf_delta;
	int32_t tsf_delta_bmiss;
	int32_t tsf_remainder;
	uint64_t tsf_beacon_target;
	int tsf_intval;

	/* TSF from the previously received beacon (two LE 32-bit halves). */
	tsf_beacon_old = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
	tsf_beacon_old |= le32dec(ni->ni_tstamp.data);

	/* Beacon interval in TSF units (1 TU = 1024 us). */
#define	TU_TO_TSF(_tu)	(((u_int64_t)(_tu)) << 10)
	tsf_intval = 1;
	if (ni->ni_intval > 0) {
		tsf_intval = TU_TO_TSF(ni->ni_intval);
	}
#undef	TU_TO_TSF

	/*
	 * Call up first so subsequent work can use information
	 * potentially stored in the node (e.g. for ibss merge).
	 */
	ATH_VAP(vap)->av_recv_mgmt(ni, m, subtype, rxs, rssi, nf);
	switch (subtype) {
	case IEEE80211_FC0_SUBTYPE_BEACON:
		/*
		 * Only do the following processing if it's for
		 * the current BSS.
		 *
		 * In scan and IBSS mode we receive all beacons,
		 * which means we need to filter out stuff
		 * that isn't for us or we'll end up constantly
		 * trying to sync / merge to BSSes that aren't
		 * actually us.
		 */
		if (IEEE80211_ADDR_EQ(ni->ni_bssid, vap->iv_bss->ni_bssid)) {
			/* update rssi statistics for use by the hal */
			/* XXX unlocked check against vap->iv_bss? */
			ATH_RSSI_LPF(sc->sc_halstats.ns_avgbrssi, rssi);

			/* TSF carried in the beacon we just received. */
			tsf_beacon = ((uint64_t) le32dec(ni->ni_tstamp.data + 4)) << 32;
			tsf_beacon |= le32dec(ni->ni_tstamp.data);

			nexttbtt = ath_hal_getnexttbtt(sc->sc_ah);

			/*
			 * Let's calculate the delta and remainder, so we can see
			 * if the beacon timer from the AP is varying by more than
			 * a few TU.  (Which would be a huge, huge problem.)
			 */
			tsf_delta = (long long) tsf_beacon - (long long) tsf_beacon_old;

			tsf_delta_bmiss = tsf_delta / tsf_intval;

			/*
			 * If our delta is greater than half the beacon interval,
			 * let's round the bmiss value up to the next beacon
			 * interval.  Ie, we're running really, really early
			 * on the next beacon.
			 */
			if (tsf_delta % tsf_intval > (tsf_intval / 2))
				tsf_delta_bmiss ++;

			tsf_beacon_target = tsf_beacon_old +
			    (((unsigned long long) tsf_delta_bmiss) * (long long) tsf_intval);

			/*
			 * The remainder using '%' is between 0 .. intval-1.
			 * If we're actually running too fast, then the remainder
			 * will be some large number just under intval-1.
			 * So we need to look at whether we're running
			 * before or after the target beacon interval
			 * and if we are, modify how we do the remainder
			 * calculation.
			 */
			if (tsf_beacon < tsf_beacon_target) {
				tsf_remainder = -(tsf_intval - ((tsf_beacon - tsf_beacon_old) % tsf_intval));
			} else {
				tsf_remainder = (tsf_beacon - tsf_beacon_old) % tsf_intval;
			}

			DPRINTF(sc, ATH_DEBUG_BEACON, "%s: old_tsf=%llu, new_tsf=%llu, target_tsf=%llu, delta=%lld, bmiss=%d, remainder=%d\n",
			    __func__,
			    (unsigned long long) tsf_beacon_old,
			    (unsigned long long) tsf_beacon,
			    (unsigned long long) tsf_beacon_target,
			    (long long) tsf_delta,
			    tsf_delta_bmiss,
			    tsf_remainder);

			DPRINTF(sc, ATH_DEBUG_BEACON, "%s: tsf=%llu, nexttbtt=%llu, delta=%d\n",
			    __func__,
			    (unsigned long long) tsf_beacon,
			    (unsigned long long) nexttbtt,
			    (int32_t) tsf_beacon - (int32_t) nexttbtt + tsf_intval);

			/* We only do syncbeacon on STA VAPs; not on IBSS */
			if (vap->iv_opmode == IEEE80211_M_STA &&
			    sc->sc_syncbeacon &&
			    ni == vap->iv_bss &&
			    (vap->iv_state == IEEE80211_S_RUN ||
			     vap->iv_state == IEEE80211_S_SLEEP)) {
				DPRINTF(sc, ATH_DEBUG_BEACON,
				    "%s: syncbeacon=1; syncing\n",
				    __func__);
				/*
				 * Resync beacon timers using the tsf of the
				 * beacon frame we just received.
				 */
				ath_beacon_config(sc, vap);
				sc->sc_syncbeacon = 0;
			}
		}

		/* fall thru... */
	case IEEE80211_FC0_SUBTYPE_PROBE_RESP:
		if (vap->iv_opmode == IEEE80211_M_IBSS &&
		    vap->iv_state == IEEE80211_S_RUN &&
		    ieee80211_ibss_merge_check(ni)) {
			uint32_t rstamp = sc->sc_lastrs->rs_tstamp;
			uint64_t tsf = ath_extend_tsf(sc, rstamp,
				ath_hal_gettsf64(sc->sc_ah));

			/*
			 * Handle ibss merge as needed; check the tsf on the
			 * frame before attempting the merge.  The 802.11 spec
			 * says the station should change it's bssid to match
			 * the oldest station with the same ssid, where oldest
			 * is determined by the tsf.  Note that hardware
			 * reconfiguration happens through callback to
			 * ath_newstate as the state machine will go from
			 * RUN -> RUN when this happens.
			 */
			if (le64toh(ni->ni_tstamp.tsf) >= tsf) {
				DPRINTF(sc, ATH_DEBUG_STATE,
				    "ibss merge, rstamp %u tsf %ju "
				    "tstamp %ju\n", rstamp, (uintmax_t)tsf,
				    (uintmax_t)ni->ni_tstamp.tsf);
				(void) ieee80211_ibss_merge(ni);
			}
		}
		break;
	}
}
struct cdbr * cdbr_open_mem(void *base, size_t size, int flags, void (*unmap)(void *, void *, size_t), void *cookie) { struct cdbr *cdbr; uint8_t *buf = base; if (size < 40 || memcmp(buf, "NBCDB\n\0\001", 8)) { SET_ERRNO(EINVAL); return NULL; } cdbr = malloc(sizeof(*cdbr)); cdbr->unmap = unmap; cdbr->cookie = cookie; cdbr->data_size = le32dec(buf + 24); cdbr->entries = le32dec(buf + 28); cdbr->entries_index = le32dec(buf + 32); cdbr->seed = le32dec(buf + 36); if (cdbr->data_size < 0x100) cdbr->offset_size = 1; else if (cdbr->data_size < 0x10000) cdbr->offset_size = 2; else cdbr->offset_size = 4; if (cdbr->entries_index < 0x100) cdbr->index_size = 1; else if (cdbr->entries_index < 0x10000) cdbr->index_size = 2; else cdbr->index_size = 4; cdbr->mmap_base = base; cdbr->mmap_size = size; cdbr->hash_base = cdbr->mmap_base + 40; cdbr->offset_base = cdbr->hash_base + cdbr->entries_index * cdbr->index_size; if (cdbr->entries_index * cdbr->index_size % cdbr->offset_size) cdbr->offset_base += cdbr->offset_size - cdbr->entries_index * cdbr->index_size % cdbr->offset_size; cdbr->data_base = cdbr->offset_base + (cdbr->entries + 1) * cdbr->offset_size; if (cdbr->hash_base < cdbr->mmap_base || cdbr->offset_base < cdbr->mmap_base || cdbr->data_base < cdbr->mmap_base || cdbr->data_base + cdbr->data_size < cdbr->mmap_base || cdbr->data_base + cdbr->data_size > cdbr->mmap_base + cdbr->mmap_size) { SET_ERRNO(EINVAL); free(cdbr); return NULL; } if (cdbr->entries) { fast_divide32_prepare(cdbr->entries, &cdbr->entries_m, &cdbr->entries_s1, &cdbr->entries_s2); } if (cdbr->entries_index) { fast_divide32_prepare(cdbr->entries_index, &cdbr->entries_index_m, &cdbr->entries_index_s1, &cdbr->entries_index_s2); } return cdbr; }