// Stream `source` through `hash` in block_-sized chunks; on clean EOF,
// finalize the digest into block_ and write it to `target` via writexln().
// Returns 0 on success, 1 on any initialize/read/hash/finalize/write failure.
virtual int copy_file_to_file_hash (source_file_t& source, target_file_t& target, hash_t& hash) {
    int err = 1;
    if (0 <= (hash.initialize())) {
        err = 0;
        // Read loop: `amount` is bytes read this pass, `count` the running
        // total; a negative `amount` (read error) terminates the loop header.
        for (ssize_t count = 0, amount = 0; 0 <= amount; count += amount) {
            if (0 < (amount = source.read(block_, block_size_))) {
                if (amount != (hash.hash(block_, amount))) {
                    err = 1;  // hash() did not process the full block
                } else {
                    continue;  // more data may follow
                }
            } else {
                if (0 > (amount)) {
                    err = 1;  // read error
                }
            }
            // Reached only on EOF, read error, or hash failure.  On the
            // clean-EOF path, finalize the digest (reusing `count` for its
            // length) and emit it; writexln presumably appends a line
            // terminator — confirm against target_file_t.
            if (!(err)) {
                if (0 < (count = hash.finalize(block_, block_size_))) {
                    if (count > (target.writexln(block_, count))) {
                        err = 1;  // short write
                    }
                } else {
                    err = 1;  // finalize failed
                }
            }
            break;  // the loop body always runs at most once past EOF/error
        }
    }
    return err;
}
/*
 * Remove every node for which arg_f() returns >0, invoking the table's
 * delete callback (if registered) on each removed datum.
 * Returns the number of nodes removed, or -1 with errno=EINVAL on
 * bad arguments.
 */
int hash_delete_if (hash_t h, hash_arg_f arg_f, void *arg)
{
    struct hash_node **link;
    struct hash_node *node;
    int bucket;
    int removed = 0;

    if (!h || !arg_f) {
        errno = EINVAL;
        return (-1);
    }
    lsd_mutex_lock (&h->mutex);
    for (bucket = 0; bucket < h->size; bucket++) {
        /* Walk the chain via a pointer-to-link so removal needs no
         * "previous node" bookkeeping. */
        link = &(h->table[bucket]);
        while ((node = *link) != NULL) {
            if (arg_f (node->data, node->hkey, arg) > 0) {
                if (h->del_f)
                    h->del_f (node->data);
                *link = node->next;
                hash_node_free (node);
                h->count--;
                removed++;
            }
            else {
                link = &(node->next);
            }
        }
    }
    lsd_mutex_unlock (&h->mutex);
    return (removed);
}
/*
 * Tear down hash table h: free every node (running the delete callback on
 * each datum when one is registered), then release the bucket array, the
 * mutex, and the table object itself.  A NULL handle sets errno=EINVAL.
 */
void hash_destroy (hash_t h)
{
    struct hash_node *node, *next;
    int bucket;

    if (!h) {
        errno = EINVAL;
        return;
    }
    lsd_mutex_lock (&h->mutex);
    for (bucket = 0; bucket < h->size; bucket++) {
        node = h->table[bucket];
        while (node != NULL) {
            next = node->next;  /* grab before the node is freed */
            if (h->del_f)
                h->del_f (node->data);
            hash_node_free (node);
            node = next;
        }
    }
    lsd_mutex_unlock (&h->mutex);
    lsd_mutex_destroy (&h->mutex);
    free (h->table);
    free (h);
}
void start(int th_id) { SparseMatrix<uint64_t>* mx = gcp_mx->getThreadMatrix(th_id); for(size_t i = slice_id++; i < nb_slices; i = slice_id++) { typename hash_t::iterator it = hash->iterator_slice(i, nb_slices); while(it.next()) { string kmer = it.get_dna_str(); uint64_t kmer_count = it.get_val(); uint16_t g_or_c = 0; for(uint16_t i = 0; i < kmer.length(); i++) { char c = kmer[i]; if (c == 'G' || c == 'g' || c == 'C' || c == 'c') g_or_c++; } // Apply scaling factor uint64_t cvg_pos = kmer_count == 0 ? 0 : ceil((double)kmer_count * args->cvg_scale); if(cvg_pos > args->cvg_bins) mx->inc(g_or_c, args->cvg_bins, 1); else mx->inc(g_or_c, cvg_pos, 1); } } }
/*
 * Tear down hash table h: verify its magic number, free every node (running
 * the delete callback on each datum when one is registered), invalidate the
 * magic, then release the bucket array, the mutex, and the table itself.
 * A NULL handle sets errno=EINVAL.
 */
void hash_destroy (hash_t h)
{
    int i;
    struct hash_node *p, *q;

    if (!h) {
        errno = EINVAL;
        return;
    }
    lsd_mutex_lock (&h->mutex);
    assert (h->magic == HASH_MAGIC);
    for (i = 0; i < h->size; i++) {
        for (p = h->table[i]; p != NULL; p = q) {
            q = p->next;
            if (h->del_f)
                h->del_f (p->data);
            hash_node_free (p);
        }
    }
    /* FIX: the magic number was previously cleared inside an assert()
     * ("assert (h->magic = ~HASH_MAGIC)"), so builds compiled with NDEBUG
     * never invalidated it.  Clear it unconditionally instead. */
    h->magic = ~HASH_MAGIC;
    lsd_mutex_unlock (&h->mutex);
    lsd_mutex_destroy (&h->mutex);
    free (h->table);
    free (h);
    return;
}
void print(std::ostream &out) { // Output header out << mme::KEY_TITLE << "K-mer spectra for: " << args->db_path << endl; out << mme::KEY_X_LABEL << "K" << hash->get_mer_len() << " multiplicity: " << args->db_path << endl; out << mme::KEY_Y_LABEL << "Number of distinct K" << hash->get_mer_len() << " mers" << endl; out << mme::MX_META_END << endl; uint64_t col = base; for(uint64_t i = 0; i < nb_buckets; i++, col += inc) { uint64_t count = 0; for(uint_t j = 0; j < args->threads; j++) count += data[j * nb_buckets + i]; out << col << " " << count << "\n"; } }
// Digest a key/value pair into `out`: SHA-256 over the big-endian 32-bit
// key length, then the key bytes, then the value bytes.  (The value needs
// no length prefix because it is the final field.)
static void hash_kvp(hash_t& out, const merkle_cow::key_type& key, const merkle_cow::mapped_type& value) {
    SHA256_CTX sha;
    SHA256_Init(&sha);
    const uint32_t key_len_be = htonl(uint32_t(key->size()));
    SHA256_Update(&sha, (const char *) &key_len_be, sizeof(uint32_t));
    SHA256_Update(&sha, key->data(), key->size());
    SHA256_Update(&sha, value->data(), value->size());
    // NOTE(review): assumes out.data() provides at least SHA256_DIGEST_LENGTH
    // writable bytes — confirm against hash_t's definition.
    SHA256_Final((unsigned char*) out.data(), &sha);
}
/* Find the slot for `key` in the bucket selected by `hash`; returns the
 * matching iterator, or that bucket's list_end() when the key is absent. */
static list_iter_t locate(hash_t tab, void *key, unsigned hash)
{
    list_t bucket = tab->ptr[hash];
    list_iter_t it = list_begin(bucket);

    while (it != list_end(bucket)) {
        if (!tab->cmp(key, KEY(it)))
            break;  /* comparator reports equality with zero */
        it = list_next(it);
    }
    return it;
}
// Stream `source` through the `target` hash using the caller-supplied block
// buffer; on clean EOF, finalize the digest into `block` and return its
// length.  Returns 0 on any initialize/read/hash/finalize failure.
virtual ssize_t hash_file (source_file_t& source, hash_t& target, char block[]) {
    if (0 <= (target.initialize())) {
        // `amount` holds the bytes read per pass; `count` accumulates them
        // and is then reused to hold the finalized digest length.
        for (ssize_t count = 0, amount = 0; 0 <= amount; count += amount) {
            if (0 < (amount = source.read(block, block_size_))) {
                if (amount != (target.hash(block, amount))) {
                    return 0;  // hash() did not process the full block
                } else {
                    continue;  // keep reading
                }
            } else {
                if (0 > (amount)) {
                    return 0;  // read error
                }
            }
            // Clean EOF: produce the digest into the caller's buffer.
            if (0 < (count = target.finalize(block, block_size_))) {
                return count;
            }
            break;  // finalize failed
        }
    }
    return 0;
}
// Hash `source` into a file at `target`, optionally appending the hash
// algorithm's name to the target path.  Handles the already-exists,
// overwrite/append, and create-directory cases, and optionally copies the
// source's timestamps onto the result.  Returns 0 on success, 1 on failure.
virtual int copy_file_to_hash (const entry_t& source, const path_t& target, hash_t& hash) {
    entry_t& entry = target_entry_;
    string_t target_path(target.chars());
    const char_t* chars = 0;
    int err = 1;
    // When configured, suffix the target path with
    // "<sep><prefix><hash-name><suffix>" (e.g. an extension naming the
    // algorithm).  Note: the && short-circuits, so hash.name() is only
    // consulted when the option is enabled.
    if ((append_hash_name_to_target_path_) && (chars = hash.name())) {
        target_path.append(&target.extension_separator(), 1);
        target_path.append(hash_name_prefix_);
        target_path.append(chars);
        target_path.append(hash_name_suffix_);
    }
    // `chars` now aliases the final target path for the rest of the method.
    if ((entry.exists(chars = target_path.chars()))) {
        // Existing target: refuse unless overwrite or append mode is set.
        if ((write_overwrite != write_) && (write_append != write_)) {
            errf("target file \"%s\" already exists\n", chars);
        } else {
            fs::entry_type type = fs::entry_type_none;
            switch (type = entry.type()) {
            case fs::entry_type_file:
                // Only plain files are overwritten; other entry types
                // (directories etc.) leave err = 1.
                err = copy_file_to_file_hash(source, entry, hash);
                break;
            default:
                break;
            }
        }
    } else {
        // Target absent: ensure the parent directory exists, then write.
        if (!(err = make_directory(entry, target))) {
            entry.set_path(chars);
            err = copy_file_to_file_hash(source, entry, hash);
        } else {
            errf("failed to make directory \"%s\"\n", target.directory().chars());
        }
    }
    // On success, optionally propagate the source's timestamps — presumably
    // skipped when a "same destination" mode modified the target; confirm
    // the to_/target_modified_ semantics against the enclosing class.
    if (!(err) && (!(to_same != to_) || !(target_modified_))) {
        if ((entry.set_times_to_set(source))) {
            if ((entry.set_times_set())) {
            } else {
            }
        }
    }
    return err;
}
void start(int th_id) { uint64_t *hist = &data[th_id * nb_buckets]; for(size_t i = slice_id++; i < nb_slices; i = slice_id++) { typename hash_t::iterator it = hash->iterator_slice(i, nb_slices); while(it.next()) { if(it.get_val() < base) ++hist[0]; else if(it.get_val() > ceil) ++hist[nb_buckets - 1]; else ++hist[(it.get_val() - base) / inc]; } } }
/* Remove `key` from the table: frees the stored value through the table's
 * free callback (when one is set), marks the slot invalid, and decrements
 * the count unless the slot was already invalid.  No-op on a NULL table
 * or a missing key. */
void hash_del(hash_t ht, hash_key_t key)
{
    struct hash_element *entry;

    if (!ht)
        return;
    entry = hash_find(ht, key, 0);
    if (!entry)
        return;
    if (ht->free && entry->value)
        ht->free(entry->value);
    if (entry->key != HASH_KEY_INVALID)
        ht->count--;
    entry->key = HASH_KEY_INVALID;
    entry->value = NULL;
}
void do_it() { // Setup output stream for jellyfish initialisation std::ostream* out_stream = args->verbose ? &cerr : (std::ostream*)0; // Load the jellyfish hash for sequential access hash = jfh->loadHash(true, out_stream); // Create matrix of appropriate size (adds 1 to cvg bins to account for 0) gcp_mx = new ThreadedSparseMatrix<uint64_t>(hash->get_mer_len(), args->cvg_bins + 1, args->threads_arg); // Process batch with worker threads // Process each sequence is processed in a different thread. // In each thread lookup each K-mer in the hash exec_join(args->threads_arg); // Merge the contamination matrix gcp_mx->mergeThreadedMatricies(); }
// Serialize the device properties, kernel properties/hash, and each
// kernel's metadata record into the build-info file for this kernel hash.
void device_v::writeKernelBuildFile(const std::string &filename,
                                    const hash_t &kernelHash,
                                    const occa::properties &kernelProps,
                                    const lang::kernelMetadataMap &metadataMap) const {
    occa::properties infoProps;

    infoProps["device"]       = properties;
    infoProps["device/hash"]  = versionedHash().toFullString();
    infoProps["kernel/props"] = kernelProps;
    infoProps["kernel/hash"]  = kernelHash.toFullString();

    // Append every kernel's metadata to the JSON array in map order.
    json &metadataJson = infoProps["kernel/metadata"].asArray();
    for (lang::kernelMetadataMap::const_iterator kIt = metadataMap.begin();
         kIt != metadataMap.end();
         ++kIt) {
        metadataJson += (kIt->second).toJson();
    }

    io::writeBuildFile(filename, kernelHash, infoProps);
}
/* Destroy the table: free every stored value through the table's free
 * callback (when one is set), invalidate every slot, then release the slot
 * array and the table object itself.  Safe to call with NULL. */
void hash_destroy(hash_t ht)
{
    int i;

    if (!ht)
        return;
    if (ht->hash_table) {
        for (i = 0; i < ht->size; i++) {
            if (ht->hash_table[i].key != HASH_KEY_INVALID) {
                if (ht->free && ht->hash_table[i].value) {
                    ht->free(ht->hash_table[i].value);
                }
                ht->hash_table[i].key = HASH_KEY_INVALID;
                ht->hash_table[i].value = NULL;
            }
        }
        free(ht->hash_table);
        ht->hash_table = NULL;
    }
    free(ht);
    /* FIX: removed the dead "ht = NULL;" store — ht is a by-value
     * parameter, so the assignment had no effect on the caller. */
}
std::string device_v::getKernelHash(const hash_t &kernelHash, const std::string &kernelName) { return getKernelHash(kernelHash.toFullString(), kernelName); }