/**
 * Compute the SHA1 hash of the given file.
 * @param filename The filename (optionally prefixed by a path). If the file
 * is not found the exception 'FileNotFoundException' is thrown.
 * @param bufferSize The size of the buffer used to feed the data to 'QCryptographicHash'.
 * @return The SHA1 hash.
 */
QByteArray computeSHA1(const QString& filename, qint32 bufferSize) throw (FileNotFoundException)
{
#if not WITH_SHA1_LINUS
   QCryptographicHash crypto(QCryptographicHash::Sha1);
#endif

   QFile file(filename);
   if (!file.open(QIODevice::ReadOnly))
      throw FileNotFoundException();

#if WITH_SHA1_LINUS
   blk_SHA_CTX sha1State;
   blk_SHA1_Init(&sha1State);
   unsigned char bufferHash[20];
#endif

   char buffer[bufferSize];
   qint64 bytesRead = 0;

   while ((bytesRead = file.read(buffer, bufferSize)) > 0)
   {
#if WITH_SHA1_LINUS
      blk_SHA1_Update(&sha1State, buffer, bytesRead);
#else
      crypto.addData(buffer, bytesRead);
#endif
   }

#if WITH_SHA1_LINUS
   blk_SHA1_Final(bufferHash, &sha1State);
   return QByteArray((const char*)bufferHash, 20);
#else
   return crypto.result();
#endif
}

/*
 * Write a portion of a blob to local storage and derive a partial digest.
 * Return 1 if the accumulated digest matches the expected digest,
 * 0 if it does not.
 */
static int
eat_chunk(struct request *r, blk_SHA_CTX *p_ctx, int fd, unsigned char *buf,
	  int buf_size)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	blk_SHA_CTX ctx;
	unsigned char digest[20];

	/*
	 * Update the incremental digest.
	 */
	blk_SHA1_Update(p_ctx, buf, buf_size);

	/*
	 * Write the chunk to the local temp file.
	 */
	if (io_write_buf(fd, buf, buf_size))
		_panic2(r, "eat_chunk: write(tmp) failed", strerror(errno));

	/*
	 * Determine if we have seen the whole blob by copying the
	 * incremental digest, finalizing the copy, then comparing
	 * to the expected digest.
	 */
	memcpy(&ctx, p_ctx, sizeof *p_ctx);
	blk_SHA1_Final(digest, &ctx);
	return memcmp(sp->digest, digest, 20) == 0 ? 1 : 0;
}

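/*
 * A minimal, self-contained sketch of the snapshot-and-finalize pattern used
 * by eat_chunk() above: blk_SHA1_Final() consumes the context, so a running
 * digest is obtained by finalizing a *copy* of the incremental context.
 * Assumes git's block-sha1 implementation is linked in; the
 * "block-sha1/sha1.h" include path is an assumption.
 */
#include <stdio.h>
#include <string.h>

#include "block-sha1/sha1.h"	/* assumed include path for git's block-sha1 */

/*
 * Finalize a copy of the running context to peek at the digest-so-far
 * without disturbing the incremental state.
 */
static void peek_digest(const blk_SHA_CTX *running, unsigned char out[20])
{
	blk_SHA_CTX snapshot;

	memcpy(&snapshot, running, sizeof snapshot);
	blk_SHA1_Final(out, &snapshot);	/* only the copy is consumed */
}

int main(void)
{
	blk_SHA_CTX ctx;
	unsigned char partial[20];
	const char *chunks[] = { "hello, ", "world" };
	size_t i;

	blk_SHA1_Init(&ctx);
	for (i = 0; i < 2; i++) {
		blk_SHA1_Update(&ctx, chunks[i], strlen(chunks[i]));
		peek_digest(&ctx, partial);
		printf("after chunk %zu: first byte of partial digest = %02x\n",
		       i, partial[0]);
	}
	return 0;
}
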
uint8_t *SHA_final(SHA_CTX *ctx)
{
	unsigned char hashout[20];

	blk_SHA1_Final(hashout, ctx);
	ftl_memcpy(ctx->H, hashout, sizeof(ctx->H));
	return (uint8_t *)ctx->H;
}

void SHA1_Encrypt(const unsigned char *pszMessage, unsigned int uPlainTextLen,
		  unsigned char *pszDigest)
{
	blk_SHA_CTX ctx;

	blk_SHA1_Init(&ctx);
	blk_SHA1_Update(&ctx, pszMessage, uPlainTextLen);
	blk_SHA1_Final(pszDigest, &ctx);
}

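/*
 * A minimal usage sketch for the one-shot wrapper SHA1_Encrypt() above
 * (despite the name, it hashes rather than encrypts).  The message is
 * illustrative only; the sketch assumes the definition above is linked in.
 */
#include <stdio.h>
#include <string.h>

void SHA1_Encrypt(const unsigned char *pszMessage, unsigned int uPlainTextLen,
		  unsigned char *pszDigest);

int main(void)
{
	const unsigned char msg[] = "abc";
	unsigned char digest[20];	/* SHA1 always produces 20 bytes */
	int i;

	SHA1_Encrypt(msg, (unsigned int)strlen((const char *)msg), digest);

	for (i = 0; i < 20; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}
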
void SHA1_EncryptLR(const unsigned char *pszMessageL, const unsigned char *pszMessageR,
		    unsigned char *pszDigest)
{
	//char empty[SHA1_DIGEST_BLOCKLEN] = { 0, };
	//if (!memcmp(pszMessageL, empty, SHA1_DIGEST_BLOCKLEN) && !memcmp(pszMessageR, empty, SHA1_DIGEST_BLOCKLEN)) return;

	blk_SHA_CTX ctx;

	blk_SHA1_Init(&ctx);
	blk_SHA1_Update(&ctx, pszMessageL, SHA1_DIGEST_BLOCKLEN);
	blk_SHA1_Update(&ctx, pszMessageR, SHA1_DIGEST_BLOCKLEN);
	blk_SHA1_Final(pszDigest, &ctx);
}

/**
 * Compute a SHA1 hash for each chunk of the given file.
 * @param filename The filename (optionally prefixed by a path). If the file
 * is not found the exception 'FileNotFoundException' is thrown.
 * @param chunkSize The size of each chunk. It must be a multiple of 'bufferSize'.
 * @param bufferSize The size of the buffer used to feed the data to 'QCryptographicHash'.
 * @return A list of chunk hashes.
 */
QList<QByteArray> computeMultiSHA1(const QString& filename, qint32 chunkSize, qint32 bufferSize) throw (FileNotFoundException)
{
   QList<QByteArray> result;

#if not WITH_SHA1_LINUS
   QCryptographicHash crypto(QCryptographicHash::Sha1);
#endif

   QFile file(filename);
   if (!file.open(QIODevice::ReadOnly))
      throw FileNotFoundException();

#if WITH_SHA1_LINUS
   blk_SHA_CTX sha1State;
   unsigned char bufferHash[20];
#endif

   char buffer[bufferSize];
   bool endOfFile = false;

   while (!endOfFile)
   {
#if WITH_SHA1_LINUS
      blk_SHA1_Init(&sha1State);
#endif

      qint64 bytesReadTotal = 0;
      while (bytesReadTotal < chunkSize)
      {
         qint64 bytesRead = file.read(buffer, bufferSize);
         if (bytesRead <= 0) // End of file (or read error): stop after the current chunk.
         {
            endOfFile = true;
            break;
         }
#if WITH_SHA1_LINUS
         blk_SHA1_Update(&sha1State, buffer, bytesRead);
#else
         crypto.addData(buffer, bytesRead);
#endif
         bytesReadTotal += bytesRead;
      }

      // Avoid emitting a hash for an empty trailing chunk.
      if (endOfFile && bytesReadTotal == 0)
         break;

#if WITH_SHA1_LINUS
      blk_SHA1_Final(bufferHash, &sha1State);
      result.append(QByteArray((const char*)bufferHash, 20));
#else
      result.append(crypto.result());
      crypto.reset();
#endif
   }

   return result;
}

uint64_t ccv_cache_generate_signature(const char* msg, int len, uint64_t sig_start, ...)
{
	blk_SHA_CTX ctx;
	blk_SHA1_Init(&ctx);
	uint64_t sigi;
	va_list arguments;
	va_start(arguments, sig_start);
	for (sigi = sig_start; sigi != 0; sigi = va_arg(arguments, uint64_t))
		blk_SHA1_Update(&ctx, &sigi, 8);
	va_end(arguments);
	blk_SHA1_Update(&ctx, msg, len);
	union {
		uint64_t u;
		uint8_t chr[20];
	} sig;
	blk_SHA1_Final(sig.chr, &ctx);
	return sig.u;
}

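/*
 * A usage sketch for ccv_cache_generate_signature() above.  As the loop in
 * that definition shows, the variadic tail is a zero-terminated list of
 * 64-bit signatures that are folded into the hash before the message, so a
 * previously computed signature can be chained into a new one.  The strings
 * below are illustrative only, and the sketch assumes the definition above
 * is linked in.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

uint64_t ccv_cache_generate_signature(const char* msg, int len, uint64_t sig_start, ...);

int main(void)
{
	/* base signature of some parameters alone (no prior signatures) */
	const char *params = "scale=1.5;min_size=24";
	uint64_t base = ccv_cache_generate_signature(params, (int)strlen(params), 0);

	/* chain the base signature into the signature of dependent data;
	   the trailing (uint64_t)0 terminates the variadic list */
	const char *data = "image-payload";
	uint64_t sig = ccv_cache_generate_signature(data, (int)strlen(data), base, (uint64_t)0);

	printf("base=%016llx sig=%016llx\n",
	       (unsigned long long)base, (unsigned long long)sig);
	return 0;
}
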
/*
 * Calculate a SHA1 hash of the contents of all config files
 * sorted by their full path.
 *
 * *hash must hold at least 20 bytes.
 */
int get_config_hash(unsigned char *hash)
{
	struct file_list **sorted_flist;
	unsigned int num_files = 0, i = 0;
	blk_SHA_CTX ctx;

	blk_SHA1_Init(&ctx);

	sorted_flist = get_sorted_oconf_files(&num_files);
	for (i = 0; i < num_files; i++) {
		hash_add_file(sorted_flist[i]->name, &ctx);
		sorted_flist[i]->next = NULL;
		file_list_free(sorted_flist[i]);
	}
	blk_SHA1_Final(hash, &ctx);
	free(sorted_flist);

	return 0;
}

char *hashtree_compute(HashTreeNode * tree)
{
	unsigned int i = 0;
	char *hash;
	void *bhash;
	blk_SHA_CTX ctx;

	assert(tree != NULL);

	/* precomputed */
	if (tree->hash != NULL)
		return (char *)tree->hash;

	/* space for binary hash */
	bhash = smalloc(20 * sizeof(unsigned char));

	/* get some memory to store the hex and bin representation */
	hash = smalloc(41 * sizeof(char));

	/* initialize */
	blk_SHA1_Init(&ctx);

	for (i = 0; IS_VALID_CHILD(tree, i); i++) {
		if (!IS_LEAF(tree, i))
			tree->children[i]->hash = hashtree_compute(tree->children[i]);
		blk_SHA1_Update(&ctx, tree->children[i]->hash, 40);
	}

	blk_SHA1_Final((unsigned char *)bhash, &ctx);

	/* generate hex representation. binary hash is *big endian* */
	sprintf(hash, "%08x%08x%08x%08x%08x",
		PIECE(0), PIECE(1), PIECE(2), PIECE(3), PIECE(4));

	free(bhash);

	tree->hash = hash;
	return hash;
}

/*
 * Digest a local blob stream and store the digested blob.
 */
static int
sha_fs_digest(struct request *r, int fd, char *hex_digest, int do_put)
{
	char unsigned buf[4096], digest[20], *d, *d_end;
	char *h;
	blk_SHA_CTX ctx;
	int nread;
	int tmp_fd = -1;
	char tmp_path[MAX_FILE_PATH_LEN];
	int status = 0;

	tmp_path[0] = 0;

	if (do_put) {
		static int drift = 0;

		if (drift++ >= 999)
			drift = 0;

		/*
		 * Open a temporary file in $sha_fs_root/tmp to accumulate the
		 * blob read from the stream.  The file looks like
		 *
		 *	digest-time-pid-drift
		 */
		snprintf(tmp_path, sizeof tmp_path, "%s/digest-%d-%u-%d",
				boot_data.tmp_dir_path,
				/*
				 *  Warning:
				 *	Casting time() to int is
				 *	incorrect!!
				 */
				(int)time((time_t *)0),
				getpid(), drift);

		/*
		 * Open the file ... need O_LARGEFILE support!!
		 * Need to catch EINTR!!!!
		 */
		tmp_fd = io_open(tmp_path, O_CREAT|O_EXCL|O_WRONLY|O_APPEND,
								S_IRUSR);
		if (tmp_fd < 0)
			_panic3(r, "digest: open(tmp) failed", tmp_path,
							strerror(errno));
	}

	blk_SHA1_Init(&ctx);
	while ((nread = io_read(fd, buf, sizeof buf)) > 0) {
		blk_SHA1_Update(&ctx, buf, nread);
		if (do_put && io_write_buf(tmp_fd, buf, nread) != 0)
			_panic2(r, "digest: write_buf(tmp) failed",
							strerror(errno));
	}
	if (nread < 0) {
		_error(r, "digest: _read() failed");
		goto croak;
	}
	blk_SHA1_Final(digest, &ctx);

	if (do_put) {
		status = io_close(tmp_fd);
		tmp_fd = -1;
		if (status)
			_panic2(r, "digest: close(tmp) failed",
							strerror(errno));
	}

	/*
	 * Convert the binary sha digest to text.
	 */
	h = hex_digest;
	d = digest;
	d_end = d + 20;
	while (d < d_end) {
		*h++ = nib2hex[(*d & 0xf0) >> 4];
		*h++ = nib2hex[*d & 0xf];
		d++;
	}
	*h = 0;

	/*
	 * Move the blob from the temporary file to the blob file.
	 */
	if (do_put) {
		blob_path(r, hex_digest);
		arbor_rename(tmp_path,
			((struct sha_fs_request *)r->open_data)->blob_path);
		tmp_path[0] = 0;
	}
	goto cleanup;

croak:
	status = -1;
cleanup:
	if (tmp_fd > -1 && io_close(tmp_fd))
		_panic2(r, "digest: close(tmp) failed", strerror(errno));
	if (tmp_path[0] && io_unlink(tmp_path))
		_panic3(r, "digest: unlink(tmp) failed", tmp_path,
							strerror(errno));
	return status;
}

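/*
 * A minimal, self-contained sketch of the digest-to-hex conversion used in
 * sha_fs_digest() above.  The 'nib2hex' table here is a local stand-in for
 * the one assumed by that code, and the digest bytes are illustrative only.
 * The output buffer must hold 41 bytes (40 hex characters plus the NUL).
 */
#include <stdio.h>

static const char nib2hex[] = "0123456789abcdef";

static void digest_to_hex(const unsigned char digest[20], char hex[41])
{
	const unsigned char *d = digest, *d_end = digest + 20;
	char *h = hex;

	while (d < d_end) {
		*h++ = nib2hex[(*d & 0xf0) >> 4];	/* high nibble */
		*h++ = nib2hex[*d & 0x0f];		/* low nibble */
		d++;
	}
	*h = 0;
}

int main(void)
{
	unsigned char digest[20] = { 0xda, 0x39, 0xa3, 0xee, 0x5e };	/* example bytes */
	char hex[41];

	digest_to_hex(digest, hex);
	printf("%s\n", hex);
	return 0;
}
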
static int
sha_fs_eat(struct request *r)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	int status = 0;
	blk_SHA_CTX ctx;
	unsigned char digest[20];
	int fd;
	unsigned char chunk[CHUNK_SIZE];
	int nread;

	blob_path(r, r->digest);

	/*
	 * Open the file to the blob.
	 */
	switch (_open(r, sp->blob_path, &fd)) {
	case 0:
		break;
	/*
	 * Blob not found.
	 */
	case ENOENT:
		return 1;
	default:
		_panic(r, "_open(blob) failed");
	}

	blk_SHA1_Init(&ctx);

	/*
	 * Read a chunk from the file and chew.
	 */
	while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0)
		/*
		 * Update the incremental digest.
		 */
		blk_SHA1_Update(&ctx, chunk, nread);
	if (nread < 0)
		_panic(r, "_read(blob) failed");

	/*
	 * Finalize the digest.
	 */
	blk_SHA1_Final(digest, &ctx);

	/*
	 * If the calculated digest does NOT match the stored digest,
	 * then zap the blob from storage and get panicky.
	 * A corrupt blob is a bad, bad thang.
	 *
	 * Note: unfortunately we've already deceived the client
	 *       by sending "ok".  Probably need to improve for
	 *       the special case when the entire blob is read
	 *       in the first chunk.
	 */
	if (memcmp(sp->digest, digest, 20))
		_panic2(r, "stored blob doesn't match digest", r->digest);

	if (_close(r, &fd))
		_panic(r, "_close(blob) failed");
	return status;
}

/*
 * Copy a local blob to a local stream.
 *
 * Return 0 if the stream matches the signature, 1 if the blob is not
 * found, -1 otherwise.
 *
 * Note: this needs to be folded into sha_fs_get().
 */
static int
sha_fs_copy(struct request *r, int out_fd)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	int status = 0;
	blk_SHA_CTX ctx;
	unsigned char digest[20];
	int fd;
	unsigned char chunk[CHUNK_SIZE];
	int nread;
	static char n[] = "sha_fs_write";

	blob_path(r, r->digest);

	/*
	 * Open the file to the blob.
	 */
	switch (_open(r, sp->blob_path, &fd)) {
	case 0:
		break;
	case ENOENT:
		_warn3(r, n, "open(blob): not found", r->digest);
		return 1;
	default:
		_panic2(r, n, "_open(blob) failed");
	}

	blk_SHA1_Init(&ctx);

	/*
	 * Read a chunk from the file, write the chunk to the local stream,
	 * update the incremental digest.
	 */
	while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) {
		if (io_write_buf(out_fd, chunk, nread)) {
			_error2(r, n, "write_buf() failed");
			goto croak;
		}

		/*
		 * Update the incremental digest.
		 */
		blk_SHA1_Update(&ctx, chunk, nread);
	}
	if (nread < 0)
		_panic2(r, n, "_read(blob) failed");

	/*
	 * Finalize the digest.
	 */
	blk_SHA1_Final(digest, &ctx);

	/*
	 * If the calculated digest does NOT match the stored digest,
	 * then zap the blob from storage and get panicky.
	 * A corrupt blob is a bad, bad thang.
	 */
	if (memcmp(sp->digest, digest, 20))
		_panic3(r, n, "stored blob doesn't match digest", r->digest);
	goto cleanup;

croak:
	status = -1;
cleanup:
	if (_close(r, &fd))
		_panic2(r, n, "_close(blob) failed");
	return status;
}

static int
sha_fs_get(struct request *r)
{
	struct sha_fs_request *sp = (struct sha_fs_request *)r->open_data;
	int status = 0;
	blk_SHA_CTX ctx;
	unsigned char digest[20];
	int fd;
	unsigned char chunk[CHUNK_SIZE];
	int nread;

	blob_path(r, r->digest);

	/*
	 * Open the file to the blob.
	 */
	switch (_open(r, sp->blob_path, &fd)) {
	case 0:
		break;
	case ENOENT:
		return 1;
	default:
		_panic(r, "_open(blob) failed");
	}

	/*
	 * Tell the client we have the blob.
	 */
	if (write_ok(r)) {
		_error(r, "write_ok() failed");
		goto croak;
	}

	blk_SHA1_Init(&ctx);

	/*
	 * Read a chunk from the file, write the chunk to the client,
	 * update the incremental digest.
	 *
	 * In principle, we ought to first scan the blob file
	 * before sending "ok" to the requestor.
	 */
	while ((nread = _read(r, fd, chunk, sizeof chunk)) > 0) {
		if (blob_write(r, chunk, nread)) {
			_error(r, "blob_write(blob chunk) failed");
			goto croak;
		}

		/*
		 * Update the incremental digest.
		 */
		blk_SHA1_Update(&ctx, chunk, nread);
	}
	if (nread < 0)
		_panic(r, "_read(blob) failed");

	/*
	 * Finalize the digest.
	 */
	blk_SHA1_Final(digest, &ctx);

	/*
	 * If the calculated digest does NOT match the stored digest,
	 * then zap the blob from storage and get panicky.
	 * A corrupt blob is a bad, bad thang.
	 *
	 * Note: unfortunately we've already deceived the client
	 *       by sending "ok".  Probably need to improve for
	 *       the special case when the entire blob is read
	 *       in the first chunk.
	 */
	if (memcmp(sp->digest, digest, 20)) {
		_error2(r, "PANIC: stored blob doesn't match digest",
							r->digest);
		if (zap_blob(r))
			_panic(r, "zap_blob() failed");
		goto croak;
	}
	goto cleanup;

croak:
	status = -1;
cleanup:
	if (_close(r, &fd))
		_panic(r, "_close(blob) failed");
	return status;
}

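/*
 * A minimal standalone sketch of the verify-while-streaming pattern used by
 * sha_fs_get() and sha_fs_copy() above: read a file in fixed-size chunks,
 * fold each chunk into an incremental SHA1, then compare the final digest
 * against an expected 20-byte digest.  Assumes git's block-sha1
 * implementation is available; the "block-sha1/sha1.h" include path is an
 * assumption.
 */
#include <stdio.h>
#include <string.h>

#include "block-sha1/sha1.h"

/* Return 0 if the file's SHA1 matches 'expected', -1 on mismatch or error. */
static int verify_file_sha1(const char *path, const unsigned char expected[20])
{
	unsigned char chunk[4096], digest[20];
	blk_SHA_CTX ctx;
	size_t nread;
	FILE *fp = fopen(path, "rb");

	if (fp == NULL)
		return -1;

	blk_SHA1_Init(&ctx);
	while ((nread = fread(chunk, 1, sizeof chunk, fp)) > 0)
		blk_SHA1_Update(&ctx, chunk, nread);

	if (ferror(fp)) {
		fclose(fp);
		return -1;
	}
	fclose(fp);

	blk_SHA1_Final(digest, &ctx);
	return memcmp(digest, expected, 20) == 0 ? 0 : -1;
}
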