/*
 * Adjust the "commit drain" reference count for SVN revision 'rev' by
 * 'incr' (positive to add references, negative to release them).
 *
 * The count lives in g_commitdrain_hash, keyed by revision, and is
 * stored in the r_hi field of a fetchdone_range (reused as a refcount).
 * The entry is created on first positive increment and removed when the
 * count drops back to zero.  Dies on refcount underflow.
 */
void _isvn_commitdrain_add(unsigned rev, int incr)
{
	struct fetchdone_range *newr, *exist, key;

	key.r_lo = rev;
	hashmap_entry_init(&key.r_entry, memhash(&key.r_lo, sizeof(key.r_lo)));

	/* Preallocate outside the global lock in case we need to insert. */
	if (incr > 0) {
		newr = xmalloc(sizeof(*newr));
		newr->r_lo = rev;
		newr->r_hi = incr;	/* reused as refcnt */
		hashmap_entry_init(&newr->r_entry,
		    memhash(&newr->r_lo, sizeof(newr->r_lo)));
	} else
		newr = NULL;

	isvn_g_lock();
	exist = hashmap_get(&g_commitdrain_hash, &key);
	if (incr > 0) {
		if (exist)
			/* Entry already present: fold the increment in and
			 * discard the preallocation after unlocking. */
			exist->r_hi += incr;
		else
			hashmap_add(&g_commitdrain_hash, newr);
	} else {
		int refcnt;

		/* INVARIANTS */
		if (exist == NULL)
			die("negative refcnt %d (ne)", incr);
		refcnt = (int)exist->r_hi + incr;
		/* INVARIANTS */
		if (refcnt < 0)
			die("negative refcnt %d", refcnt);
		if (refcnt > 0)
			exist->r_hi = refcnt;
		else {
			/* Count hit zero: drop the entry. */
			hashmap_remove(&g_commitdrain_hash, exist);
			/* free it below */
			newr = exist;
		}
	}
	isvn_g_unlock();

	/* 'newr' here is either the unused preallocation (exist absorbed the
	 * increment) or the removed entry aliased above; free it outside
	 * the lock. */
	if (exist && newr)
		free(newr);
}
/* * Basically keep a cache of X->Y so that we can repeatedly replace * the same anonymized string with another. The actual generation * is farmed out to the generate function. */ static const void *anonymize_mem(struct hashmap *map, void *(*generate)(const void *, size_t *), const void *orig, size_t *len) { struct anonymized_entry key, *ret; if (!map->cmpfn) hashmap_init(map, anonymized_entry_cmp, 0); hashmap_entry_init(&key, memhash(orig, *len)); key.orig = orig; key.orig_len = *len; ret = hashmap_get(map, &key, NULL); if (!ret) { ret = xmalloc(sizeof(*ret)); hashmap_entry_init(&ret->hash, key.hash.hash); ret->orig = xstrdup(orig); ret->orig_len = *len; ret->anon = generate(orig, len); ret->anon_len = *len; hashmap_put(map, ret); } *len = ret->anon_len; return ret->anon; }
/*
 * Allocate a fresh symbol cell from the free list and initialize it
 * with a private copy of 'sym' plus its precomputed length and hash.
 */
cell_t *new_symbol(secd_t *secd, const char *sym)
{
    cell_t *c = pop_free(secd);
    size_t len = strlen(sym);

    c->type = CELL_SYM;
    c->as.sym.size = len;
    c->as.sym.data = strdup(sym);
    c->as.sym.hash = memhash(sym, len);
    return c;
}
/*
 * Hash of the string contents.
 * Short strings (length < 15) are packed into a zeroed 16-byte block
 * with the length stored at byte index SLEN, then combined with
 * CombineHash; longer strings fall back to memhash() over the data.
 * NOTE(review): assumes the byte at index SLEN is never overwritten by
 * the copied characters (i.e. SLEN >= 14) -- confirm against the
 * String0 small-string layout.
 */
unsigned String0::LHashValue() const
{
	int l = LLen();
	if(l < 15) {
		dword w[4];
		w[0] = w[1] = w[2] = w[3] = 0;
		memcpy(w, ptr, l);
		((byte *)w)[SLEN] = l;
		return CombineHash(w[0], w[1], w[2], w[3]);
	}
	return memhash(ptr, l);
}
void isvn_mark_fetchdone(unsigned revlo, unsigned revhi) { struct fetchdone_range *done, *exist, key; done = xmalloc(sizeof(*done)); if (done == NULL) die("malloc"); isvn_g_lock(); if (g_rev_fetchdone == revlo - 1) { g_rev_fetchdone = revhi; while (true) { key.r_lo = revhi + 1; hashmap_entry_init(&key.r_entry, memhash(&key.r_lo, sizeof(key.r_lo))); exist = hashmap_remove(&g_fetchdone_hash, &key); if (!exist) break; g_rev_fetchdone = revhi = exist->r_hi; free(exist); } cond_broadcast(&g_rev_cond); } else { done->r_lo = revlo; done->r_hi = revhi; hashmap_entry_init(&done->r_entry, memhash(&done->r_lo, sizeof(done->r_lo))); hashmap_add(&g_fetchdone_hash, done); done = NULL; } isvn_g_unlock(); if (done) free(done); }
/*
 * Hash 'sz' bytes of raw bit data at 'b'.
 * Sizes 1/2/4/8 take fast fixed-width paths; any other size falls back
 * to memhash()/memhash32() depending on pointer width.
 * NOTE(review): the 2/4/8-byte cases load through a cast pointer, which
 * assumes 'b' is suitably aligned -- confirm callers guarantee this (a
 * later revision of this function in the same codebase switched to
 * explicit unaligned loads).
 */
static uptrint_t bits_hash(void *b, size_t sz)
{
    switch (sz) {
    case 1: return int32hash(*(int8_t*)b);
    case 2: return int32hash(*(int16_t*)b);
    case 4: return int32hash(*(int32_t*)b);
    case 8: return hash64(*(int64_t*)b);
    default:
#ifdef _P64
        return memhash((char*)b, sz);
#else
        return memhash32((char*)b, sz);
#endif
    }
}
static jl_sym_t *mk_symbol(const char *str) { jl_sym_t *sym; size_t len = strlen(str); sym = (jl_sym_t*)malloc((sizeof(jl_sym_t)-sizeof(void*)+len+1+7)&-8); sym->type = (jl_value_t*)jl_sym_type; sym->left = sym->right = NULL; #ifdef _P64 sym->hash = memhash(str, len)^0xAAAAAAAAAAAAAAAAL; #else sym->hash = memhash32(str, len)^0xAAAAAAAA; #endif strcpy(&sym->name[0], str); return sym; }
/* * Retrieve the 'value' stored in a hashmap given the provided 'key'. * If there is no matching entry, return NULL. */ static void *attr_hashmap_get(struct attr_hashmap *map, const char *key, size_t keylen) { struct attr_hash_entry k; struct attr_hash_entry *e; if (!map->map.tablesize) attr_hashmap_init(map); hashmap_entry_init(&k, memhash(key, keylen)); k.key = key; k.keylen = keylen; e = hashmap_get(&map->map, &k, NULL); return e ? e->value : NULL; }
/*
 * Add 'value' to a hashmap based on the provided 'key'.
 * The entry keeps a pointer to 'key' (no copy is made), so the caller
 * must keep it alive for the map's lifetime.
 */
static void attr_hashmap_add(struct attr_hashmap *map,
			     const char *key, size_t keylen,
			     void *value)
{
	struct attr_hash_entry *entry;

	/* lazily initialize the map on first use */
	if (!map->map.tablesize)
		attr_hashmap_init(map);

	entry = xmalloc(sizeof(*entry));
	hashmap_entry_init(entry, memhash(key, keylen));
	entry->key = key;
	entry->keylen = keylen;
	entry->value = value;

	hashmap_add(&map->map, entry);
}
/*
 * Hash 'sz' bytes of raw bit data at 'b'.
 * Sizes 1/2/4/8 use fixed-width hashes via alignment-safe loads; any
 * other size falls back to memhash()/memhash32() depending on pointer
 * width.
 */
static uintptr_t bits_hash(const void *b, size_t sz)
{
    if (sz == 1)
        return int32hash(*(const int8_t*)b);
    if (sz == 2)
        return int32hash(jl_load_unaligned_i16(b));
    if (sz == 4)
        return int32hash(jl_load_unaligned_i32(b));
    if (sz == 8) {
#ifdef _P64
        return int64hash(jl_load_unaligned_i64(b));
#else
        return int64to32hash(jl_load_unaligned_i64(b));
#endif
    }
#ifdef _P64
    return memhash((const char*)b, sz);
#else
    return memhash32((const char*)b, sz);
#endif
}
/*
 * Compute an identity hash for a Julia value.
 *
 * Dispatch by type:
 *  - symbols reuse their precomputed hash;
 *  - bits types hash their raw bytes mixed with a hash of the type
 *    pointer (fast fixed-width paths for 1/2/4/8 bytes);
 *  - union kinds hash their single field XORed with a fixed pattern;
 *  - struct types hash by object address (reference identity);
 *  - tuples recursively mix the ids of their elements.
 */
DLLEXPORT uptrint_t jl_object_id(jl_value_t *v)
{
    if (jl_is_symbol(v))
        return ((jl_sym_t*)v)->hash;
    jl_value_t *tv = (jl_value_t*)jl_typeof(v);
    if (jl_is_bits_type(tv)) {
        size_t nb = jl_bitstype_nbits(tv)/8;
        /* mix in the type so equal bit patterns of different types
         * hash differently */
        uptrint_t h = inthash((uptrint_t)tv);
        switch (nb) {
        case 1:
            return int32hash(*(int8_t*)jl_bits_data(v) ^ h);
        case 2:
            return int32hash(*(int16_t*)jl_bits_data(v) ^ h);
        case 4:
            return int32hash(*(int32_t*)jl_bits_data(v) ^ h);
        case 8:
            return hash64(*(int64_t*)jl_bits_data(v) ^ h);
        default:
#ifdef __LP64__
            return h ^ memhash((char*)jl_bits_data(v), nb);
#else
            return h ^ memhash32((char*)jl_bits_data(v), nb);
#endif
        }
    }
    if (tv == (jl_value_t*)jl_union_kind) {
#ifdef __LP64__
        return jl_object_id(jl_fieldref(v,0))^0xA5A5A5A5A5A5A5A5L;
#else
        return jl_object_id(jl_fieldref(v,0))^0xA5A5A5A5;
#endif
    }
    if (jl_is_struct_type(tv))
        /* mutable structs: identity hash by address */
        return inthash((uptrint_t)v);
    assert(jl_is_tuple(v));
    uptrint_t h = 0;
    size_t l = jl_tuple_len(v);
    for(size_t i = 0; i < l; i++) {
        uptrint_t u = jl_object_id(jl_tupleref(v,i));
        h = bitmix(h, u);
    }
    return h;
}
/* Convenience wrapper: hash a NUL-terminated string with memhash(). */
uint32_t memhash_string(const char *s)
{
	size_t n = strlen(s);
	return memhash(s, n);
}
/*
 * Scrub the block range [blockstart, blockmax): re-read data blocks,
 * verify their recorded hashes (rehashing with the new hash function
 * when a rehash is pending), then recompute and compare parity.
 *
 * Blocks are selected by age: unused blocks are skipped; bad blocks are
 * always checked; otherwise only blocks older than 'timelimit' are
 * scrubbed, with at most 'countlimit' blocks taken at exactly
 * 'timelimit'.  Verified blocks get their info timestamp set to 'now';
 * blocks with silent data errors are marked bad.  Periodically
 * autosaves the state.
 *
 * Returns 0 on success, -1 on error -- inverted when
 * state->opt.expect_recoverable is set (test mode expecting errors).
 *
 * NOTE(review): body preserved verbatim from the original single-line
 * form; only this header comment and one comment typo were changed.
 */
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity** parity, block_off_t blockstart, block_off_t blockmax, time_t timelimit, block_off_t countlimit, time_t now) { struct snapraid_handle* handle; void* rehandle_alloc; struct snapraid_rehash* rehandle; unsigned diskmax; block_off_t i; unsigned j; void* buffer_alloc; void** buffer; unsigned buffermax; data_off_t countsize; block_off_t countpos; block_off_t countmax; block_off_t recountmax; block_off_t autosavedone; block_off_t autosavelimit; block_off_t autosavemissing; int ret; unsigned error; unsigned silent_error; unsigned l; /* maps the disks to handles */ handle = handle_map(state, &diskmax); /* rehash buffers */ rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc); /* we need disk + 2 for each parity level buffers */ buffermax = diskmax + state->level * 2; buffer = malloc_nofail_vector_align(diskmax, buffermax, state->block_size, &buffer_alloc); if (!state->opt.skip_self) mtest_vector(buffermax, state->block_size, buffer); error = 0; silent_error = 0; /* first count the number of blocks to process */ countmax = 0; for(i=blockstart;i<blockmax;++i) { time_t blocktime; snapraid_info info; /* if it's unused */ info = info_get(&state->infoarr, i); if (info == 0) { /* skip it */ continue; } /* blocks marked as bad are always checked */ if (!info_get_bad(info)) { /* if it's too new */ blocktime = info_get_time(info); if (blocktime > timelimit) { /* skip it */ continue; } /* skip odd blocks, used only for testing */ if (state->opt.force_scrub_even && (i % 2) != 0) { /* skip it */ continue; } /* if the time is less than the limit, always include */ /* otherwise, check if we reached the max count */ if (blocktime == timelimit) { /* if we reached the count limit */ if (countmax >= countlimit) { /* skip it */ continue; } } } ++countmax; } /* compute the autosave size for all disk, even if not read */ /* this makes sense because the speed should be almost the same */ /* if the disks are read in parallel */ autosavelimit = state->autosave / (diskmax * state->block_size); autosavemissing = countmax; /* blocks to do */ autosavedone = 0; /* blocks done */ countsize = 0; countpos = 0; state_progress_begin(state, blockstart, blockmax, countmax); recountmax = 0; for(i=blockstart;i<blockmax;++i) { time_t blocktime; snapraid_info info; int error_on_this_block; int silent_error_on_this_block; int block_is_unsynced; int rehash; /* if it's unused */ info = info_get(&state->infoarr, i); if (info == 0) { /* skip it */ continue; } /* blocks marked as bad are always checked */ if (!info_get_bad(info)) { /* if it's too new */ blocktime = info_get_time(info); if (blocktime > timelimit) { /* skip it */ continue; } /* skip odd blocks, used only for testing */ if (state->opt.force_scrub_even && (i % 2) != 0) { /* skip it */ continue; } /* if the time is less than the limit, always include */ /* otherwise, check if we reached the count max */ if (blocktime == timelimit) { /* if we reached the count limit */ if (recountmax >= countlimit) { /* skip it */ continue; } } } ++recountmax; /* one more block processed for autosave */ ++autosavedone; --autosavemissing; /* by default process the block, and skip it if something goes wrong */ error_on_this_block = 0; silent_error_on_this_block = 0; /* if all the blocks at this address are synced */ /* if not, parity is not even checked */ block_is_unsynced = 0; /* if we have to use the old hash */ rehash = info_get_rehash(info); /* for each disk, process the block */ for(j=0;j<diskmax;++j) { int read_size; unsigned char hash[HASH_SIZE]; struct snapraid_block* block; int file_is_unsynced; /* if the file on this disk is synced */ /* if not, silent errors are assumed as expected error */ file_is_unsynced = 0; /* by default no rehash in case of "continue" */ rehandle[j].block = 0; /* if the disk position is not used */ if (!handle[j].disk) { /* use an empty block */ memset(buffer[j], 0, state->block_size); continue; } /* if the block is not used */ block = disk_block_get(handle[j].disk, i); if (!block_has_file(block)) { /* use an empty block */ memset(buffer[j], 0, state->block_size); continue; } /* if the block is unsynced, errors are expected */ if (block_has_invalid_parity(block)) { /* report that the block and the file are not synced */ block_is_unsynced = 1; file_is_unsynced = 1; /* follow */ } /* if the file is different than the current one, close it */ if (handle[j].file != 0 && handle[j].file != block_file_get(block)) { /* keep a pointer at the file we are going to close for error reporting */ struct snapraid_file* file = handle[j].file; ret = handle_close(&handle[j]); if (ret == -1) { /* LCOV_EXCL_START */ /* This one is really an unexpected error, because we are only reading */ /* and closing a descriptor should never fail */ fprintf(stdlog, "error:%u:%s:%s: Close error. %s\n", i, handle[j].disk->name, file->sub, strerror(errno)); fprintf(stderr, "DANGER! Unexpected close error in a data disk, it isn't possible to scrub.\n"); printf("Stopping at block %u\n", i); ++error; goto bail; /* LCOV_EXCL_STOP */ } } ret = handle_open(&handle[j], block_file_get(block), state->opt.skip_sequential, stderr); if (ret == -1) { /* file we have tried to open for error reporting */ struct snapraid_file* file = block_file_get(block); fprintf(stdlog, "error:%u:%s:%s: Open error. %s\n", i, handle[j].disk->name, file->sub, strerror(errno)); ++error; error_on_this_block = 1; continue; } /* check if the file is changed */ if (handle[j].st.st_size != block_file_get(block)->size || handle[j].st.st_mtime != block_file_get(block)->mtime_sec || STAT_NSEC(&handle[j].st) != block_file_get(block)->mtime_nsec || handle[j].st.st_ino != block_file_get(block)->inode ) { /* report that the block and the file are not synced */ block_is_unsynced = 1; file_is_unsynced = 1; /* follow */ } /* note that we intentionally don't abort if the file has different attributes */ /* from the last sync, as we are expected to return errors if running */ /* in an unsynced array. This is just like the check command. */ read_size = handle_read(&handle[j], block, buffer[j], state->block_size, stderr); if (read_size == -1) { fprintf(stdlog, "error:%u:%s:%s: Read error at position %u\n", i, handle[j].disk->name, handle[j].file->sub, block_file_pos(block)); ++error; error_on_this_block = 1; continue; } countsize += read_size; /* now compute the hash */ if (rehash) { memhash(state->prevhash, state->prevhashseed, hash, buffer[j], read_size); /* compute the new hash, and store it */ rehandle[j].block = block; memhash(state->hash, state->hashseed, rehandle[j].hash, buffer[j], read_size); } else { memhash(state->hash, state->hashseed, hash, buffer[j], read_size); } if (block_has_updated_hash(block)) { /* compare the hash */ if (memcmp(hash, block->hash, HASH_SIZE) != 0) { fprintf(stdlog, "error:%u:%s:%s: Data error at position %u\n", i, handle[j].disk->name, handle[j].file->sub, block_file_pos(block)); /* it's a silent error only if we are dealing with synced files */ if (file_is_unsynced) { ++error; error_on_this_block = 1; } else { fprintf(stderr, "Data error in file '%s' at position '%u'\n", handle[j].path, block_file_pos(block)); fprintf(stderr, "WARNING! Unexpected data error in a data disk! The block is now marked as bad!\n"); fprintf(stderr, "Try with 'snapraid -e fix' to recover!\n"); ++silent_error; silent_error_on_this_block = 1; } continue; } } } /* if we have read all the data required and it's correct, proceed with the parity check */ if (!error_on_this_block && !silent_error_on_this_block) { unsigned char* buffer_recov[LEV_MAX]; /* buffers for parity read and not computed */ for(l=0;l<state->level;++l) buffer_recov[l] = buffer[diskmax + state->level + l]; for(;l<LEV_MAX;++l) buffer_recov[l] = 0; /* read the parity */ for(l=0;l<state->level;++l) { ret = parity_read(parity[l], i, buffer_recov[l], state->block_size, stdlog); if (ret == -1) { buffer_recov[l] = 0; fprintf(stdlog, "parity_error:%u:%s: Read error\n", i, lev_config_name(l)); ++error; error_on_this_block = 1; /* follow */ } } /* compute the parity */ raid_gen(diskmax, state->level, state->block_size, buffer); /* compare the parity */ for(l=0;l<state->level;++l) { if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) { fprintf(stdlog, "parity_error:%u:%s: Data error\n", i, lev_config_name(l)); /* it's a silent error only if we are dealing with synced blocks */ if (block_is_unsynced) { ++error; error_on_this_block = 1; } else { fprintf(stderr, "Data error in parity '%s' at position '%u'\n", lev_config_name(l), i); fprintf(stderr, "WARNING! Unexpected data error in a parity disk! The block is now marked as bad!\n"); fprintf(stderr, "Try with 'snapraid -e fix' to recover!\n"); ++silent_error; silent_error_on_this_block = 1; } } } } if (silent_error_on_this_block) { /* set the error status keeping the existing time and hash */ info_set(&state->infoarr, i, info_set_bad(info)); } else if (error_on_this_block) { /* do nothing, as this is a generic error */ /* likely caused by a not synced array */ } else { /* if rehash is needed */ if (rehash) { /* store all the new hash already computed */ for(j=0;j<diskmax;++j) { if (rehandle[j].block) memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE); } } /* update the time info of the block */ /* and clear any other flag */ info_set(&state->infoarr, i, info_make(now, 0, 0)); } /* mark the state as needing write */ state->need_write = 1; /* count the number of processed block */ ++countpos; /* progress */ if (state_progress(state, i, countpos, countmax, countsize)) { /* LCOV_EXCL_START */ break; /* LCOV_EXCL_STOP */ } /* autosave */ if (state->autosave != 0 && autosavedone >= autosavelimit /* if we have reached the limit */ && autosavemissing >= autosavelimit /* if we have at least a full step to do */ ) { autosavedone = 0; /* restart the counter */ state_progress_stop(state); printf("Autosaving...\n"); state_write(state); state_progress_restart(state); } } state_progress_end(state, countpos, countmax, countsize); if (error || silent_error) { printf("\n"); printf("%8u read errors\n", error); printf("%8u data errors\n", silent_error); printf("WARNING! There are errors!\n"); } else { /* print the result only if processed something */ if (countpos != 0) printf("Everything OK\n"); } fprintf(stdlog, "summary:error_read:%u\n", error); fprintf(stdlog, "summary:error_data:%u\n", silent_error); if (error + silent_error == 0) fprintf(stdlog, "summary:exit:ok\n"); else fprintf(stdlog, "summary:exit:error\n"); fflush(stdlog); bail: for(j=0;j<diskmax;++j) { ret = handle_close(&handle[j]); if (ret == -1) { /* LCOV_EXCL_START */ fprintf(stderr, "DANGER! Unexpected close error in a data disk.\n"); ++error; /* continue, as we are already exiting */ /* LCOV_EXCL_STOP */ } } free(handle); free(buffer_alloc); free(buffer); free(rehandle_alloc); if (state->opt.expect_recoverable) { if (error + silent_error == 0) return -1; } else { if (error + silent_error != 0) return -1; } return 0; }
static void test_hash(void) { unsigned i; unsigned char* seed_aligned; void* seed_alloc; unsigned char* buffer_aligned; void* buffer_alloc; uint32_t seed32; uint64_t seed64; seed_aligned = malloc_nofail_align(HASH_SIZE, &seed_alloc); buffer_aligned = malloc_nofail_align(HASH_TEST_MAX, &buffer_alloc); seed32 = 0xa766795d; seed64 = 0x2f022773a766795dULL; seed_aligned[0] = 0x5d; seed_aligned[1] = 0x79; seed_aligned[2] = 0x66; seed_aligned[3] = 0xa7; seed_aligned[4] = 0x73; seed_aligned[5] = 0x27; seed_aligned[6] = 0x02; seed_aligned[7] = 0x2f; seed_aligned[8] = 0x6a; seed_aligned[9] = 0xa1; seed_aligned[10] = 0x9e; seed_aligned[11] = 0xc1; seed_aligned[12] = 0x14; seed_aligned[13] = 0x8c; seed_aligned[14] = 0x9e; seed_aligned[15] = 0x43; for(i=0;TEST_HASH32[i].data;++i) { uint32_t digest; memcpy(buffer_aligned, TEST_HASH32[i].data, TEST_HASH32[i].len); digest = tommy_hash_u32(seed32, buffer_aligned, TEST_MURMUR3[i].len); if (digest != TEST_HASH32[i].digest) { /* LCOV_EXCL_START */ fprintf(stderr, "Failed hash32 test\n"); exit(EXIT_FAILURE); /* LCOV_EXCL_STOP */ } } for(i=0;TEST_HASH64[i].data;++i) { uint64_t digest; memcpy(buffer_aligned, TEST_HASH64[i].data, TEST_HASH64[i].len); digest = tommy_hash_u64(seed64, buffer_aligned, TEST_MURMUR3[i].len); if (digest != TEST_HASH64[i].digest) { /* LCOV_EXCL_START */ fprintf(stderr, "Failed hash64 test\n"); exit(EXIT_FAILURE); /* LCOV_EXCL_STOP */ } } for(i=0;TEST_MURMUR3[i].data;++i) { unsigned char digest[HASH_SIZE]; memcpy(buffer_aligned, TEST_MURMUR3[i].data, TEST_MURMUR3[i].len); memhash(HASH_MURMUR3, seed_aligned, digest, buffer_aligned, TEST_MURMUR3[i].len); if (memcmp(digest, TEST_MURMUR3[i].digest, HASH_SIZE) != 0) { /* LCOV_EXCL_START */ fprintf(stderr, "Failed Murmur3 test\n"); exit(EXIT_FAILURE); /* LCOV_EXCL_STOP */ } } for(i=0;TEST_SPOOKY2[i].data;++i) { unsigned char digest[HASH_SIZE]; memcpy(buffer_aligned, TEST_SPOOKY2[i].data, TEST_SPOOKY2[i].len); memhash(HASH_SPOOKY2, seed_aligned, digest, buffer_aligned, 
TEST_SPOOKY2[i].len); if (memcmp(digest, TEST_SPOOKY2[i].digest, HASH_SIZE) != 0) { /* LCOV_EXCL_START */ fprintf(stderr, "Failed Spooky2 test\n"); exit(EXIT_FAILURE); /* LCOV_EXCL_STOP */ } } free(buffer_alloc); free(seed_alloc); }
/*
 * Hash a symbol name of 'len' bytes.
 * The result is XORed with the 0b1010... bit pattern (two thirds of
 * all-ones) to decorrelate symbol hashes from plain memhash() users.
 */
static uintptr_t hash_symbol(const char *str, size_t len)
{
    const uintptr_t tag = ~(uintptr_t)0 / 3 * 2; /* 0xAAAA...AA */
    return memhash(str, len) ^ tag;
}
// Hash of the buffer 's'; length comes from sIdLen(s) (presumably the
// stored identifier length -- confirm against sIdLen's definition).
unsigned GetHashValue() const { return memhash(s, sIdLen(s)); }
/*
 * Combined hash over a (sha1, string) pair: hash the 20 raw SHA-1
 * bytes, then add the string's hash.
 */
static unsigned int hash_sha1_string(const unsigned char *sha1,
				     const char *string)
{
	unsigned int hash = memhash(sha1, 20); /* 20 == SHA-1 raw size */
	hash += strhash(string);
	return hash;
}
/*
 * Scrub the block range [blockstart, blockmax) using the asynchronous
 * I/O worker threads: data and parity blocks are read by reader
 * threads (scrub_data_reader/scrub_parity_reader), then each block's
 * hash is verified (rehashing with the new hash when pending) and the
 * recomputed parity is compared with what was read.
 *
 * Block selection is delegated to block_is_enabled(plan, ...).
 * Verified blocks get their info timestamp set to 'now'; blocks with
 * silent or I/O errors are marked bad.  Periodically autosaves the
 * state and tracks per-phase usage statistics.
 *
 * Returns 0 on success, -1 on error -- inverted when
 * state->opt.expect_recoverable is set (test mode expecting errors).
 *
 * NOTE(review): body preserved verbatim from the original single-line
 * form; only this header comment was added.
 */
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity_handle* parity_handle, block_off_t blockstart, block_off_t blockmax, struct snapraid_plan* plan, time_t now) { struct snapraid_io io; struct snapraid_handle* handle; void* rehandle_alloc; struct snapraid_rehash* rehandle; unsigned diskmax; block_off_t blockcur; unsigned j; unsigned buffermax; data_off_t countsize; block_off_t countpos; block_off_t countmax; block_off_t autosavedone; block_off_t autosavelimit; block_off_t autosavemissing; int ret; unsigned error; unsigned silent_error; unsigned io_error; unsigned l; unsigned* waiting_map; unsigned waiting_mac; char esc_buffer[ESC_MAX]; /* maps the disks to handles */ handle = handle_mapping(state, &diskmax); /* rehash buffers */ rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc); /* we need 1 * data + 2 * parity */ buffermax = diskmax + 2 * state->level; /* initialize the io threads */ io_init(&io, state, state->opt.io_cache, buffermax, scrub_data_reader, handle, diskmax, scrub_parity_reader, 0, parity_handle, state->level); /* possibly waiting disks */ waiting_mac = diskmax > RAID_PARITY_MAX ? diskmax : RAID_PARITY_MAX; waiting_map = malloc_nofail(waiting_mac * sizeof(unsigned)); error = 0; silent_error = 0; io_error = 0; /* first count the number of blocks to process */ countmax = 0; plan->countlast = 0; for (blockcur = blockstart; blockcur < blockmax; ++blockcur) { if (!block_is_enabled(plan, blockcur)) continue; ++countmax; } /* compute the autosave size for all disk, even if not read */ /* this makes sense because the speed should be almost the same */ /* if the disks are read in parallel */ autosavelimit = state->autosave / (diskmax * state->block_size); autosavemissing = countmax; /* blocks to do */ autosavedone = 0; /* blocks done */ /* drop until now */ state_usage_waste(state); countsize = 0; countpos = 0; plan->countlast = 0; /* start all the worker threads */ io_start(&io, blockstart, blockmax, &block_is_enabled, plan); state_progress_begin(state, blockstart, blockmax, countmax); while (1) { unsigned char* buffer_recov[LEV_MAX]; snapraid_info info; int error_on_this_block; int silent_error_on_this_block; int io_error_on_this_block; int block_is_unsynced; int rehash; void** buffer; /* go to the next block */ blockcur = io_read_next(&io, &buffer); if (blockcur >= blockmax) break; /* until now is scheduling */ state_usage_sched(state); /* one more block processed for autosave */ ++autosavedone; --autosavemissing; /* by default process the block, and skip it if something goes wrong */ error_on_this_block = 0; silent_error_on_this_block = 0; io_error_on_this_block = 0; /* if all the blocks at this address are synced */ /* if not, parity is not even checked */ block_is_unsynced = 0; /* get block specific info */ info = info_get(&state->infoarr, blockcur); /* if we have to use the old hash */ rehash = info_get_rehash(info); /* for each disk, process the block */ for (j = 0; j < diskmax; ++j) { struct snapraid_task* task; int read_size; unsigned char hash[HASH_SIZE]; struct snapraid_block* block; int file_is_unsynced; struct snapraid_disk* disk; struct snapraid_file* file; block_off_t file_pos; unsigned diskcur; /* if the file on this disk is synced */ /* if not, silent errors are assumed as expected error */ file_is_unsynced = 0; /* until now is misc */ state_usage_misc(state); /* get the next task */ task = io_data_read(&io, &diskcur, waiting_map, &waiting_mac); /* until now is disk */ state_usage_disk(state, handle, waiting_map, waiting_mac); /* get the task results */ disk = task->disk; block = task->block; file = task->file; file_pos = task->file_pos; read_size = task->read_size; /* by default no rehash in case of "continue" */ rehandle[diskcur].block = 0; /* if the disk position is not used */ if (!disk) continue; /* if the block is unsynced, errors are expected */ if (block_has_invalid_parity(block)) { /* report that the block and the file are not synced */ block_is_unsynced = 1; file_is_unsynced = 1; /* follow */ } /* if the block is not used */ if (!block_has_file(block)) continue; /* if the block is unsynced, errors are expected */ if (task->is_timestamp_different) { /* report that the block and the file are not synced */ block_is_unsynced = 1; file_is_unsynced = 1; /* follow */ } /* handle error conditions */ if (task->state == TASK_STATE_IOERROR) { ++io_error; goto bail; } if (task->state == TASK_STATE_ERROR) { ++error; goto bail; } if (task->state == TASK_STATE_ERROR_CONTINUE) { ++error; error_on_this_block = 1; continue; } if (task->state == TASK_STATE_IOERROR_CONTINUE) { ++io_error; if (io_error >= state->opt.io_error_limit) { /* LCOV_EXCL_START */ log_fatal("DANGER! Too many input/output read error in a data disk, it isn't possible to scrub.\n"); log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, task->path); log_fatal("Stopping at block %u\n", blockcur); goto bail; /* LCOV_EXCL_STOP */ } /* otherwise continue */ io_error_on_this_block = 1; continue; } if (task->state != TASK_STATE_DONE) { /* LCOV_EXCL_START */ log_fatal("Internal inconsistency in task state\n"); os_abort(); /* LCOV_EXCL_STOP */ } countsize += read_size; /* now compute the hash */ if (rehash) { memhash(state->prevhash, state->prevhashseed, hash, buffer[diskcur], read_size); /* compute the new hash, and store it */ rehandle[diskcur].block = block; memhash(state->hash, state->hashseed, rehandle[diskcur].hash, buffer[diskcur], read_size); } else { memhash(state->hash, state->hashseed, hash, buffer[diskcur], read_size); } /* until now is hash */ state_usage_hash(state); if (block_has_updated_hash(block)) { /* compare the hash */ if (memcmp(hash, block->hash, HASH_SIZE) != 0) { unsigned diff = memdiff(hash, block->hash, HASH_SIZE); log_tag("error:%u:%s:%s: Data error at position %u, diff bits %u\n", blockcur, disk->name, esc(file->sub, esc_buffer), file_pos, diff); /* it's a silent error only if we are dealing with synced files */ if (file_is_unsynced) { ++error; error_on_this_block = 1; } else { log_error("Data error in file '%s' at position '%u', diff bits %u\n", task->path, file_pos, diff); ++silent_error; silent_error_on_this_block = 1; } continue; } } } /* buffers for parity read and not computed */ for (l = 0; l < state->level; ++l) buffer_recov[l] = buffer[diskmax + state->level + l]; for (; l < LEV_MAX; ++l) buffer_recov[l] = 0; /* until now is misc */ state_usage_misc(state); /* read the parity */ for (l = 0; l < state->level; ++l) { struct snapraid_task* task; unsigned levcur; task = io_parity_read(&io, &levcur, waiting_map, &waiting_mac); /* until now is parity */ state_usage_parity(state, waiting_map, waiting_mac); /* handle error conditions */ if (task->state == TASK_STATE_IOERROR) { ++io_error; goto bail; } if (task->state == TASK_STATE_ERROR) { ++error; goto bail; } if (task->state == TASK_STATE_ERROR_CONTINUE) { ++error; error_on_this_block = 1; /* if continuing on error, clear the missing buffer */ buffer_recov[levcur] = 0; continue; } if (task->state == TASK_STATE_IOERROR_CONTINUE) { ++io_error; if (io_error >= state->opt.io_error_limit) { /* LCOV_EXCL_START */ log_fatal("DANGER! Too many input/output read error in the %s disk, it isn't possible to scrub.\n", lev_name(levcur)); log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(levcur)); log_fatal("Stopping at block %u\n", blockcur); goto bail; /* LCOV_EXCL_STOP */ } /* otherwise continue */ io_error_on_this_block = 1; /* if continuing on error, clear the missing buffer */ buffer_recov[levcur] = 0; continue; } if (task->state != TASK_STATE_DONE) { /* LCOV_EXCL_START */ log_fatal("Internal inconsistency in task state\n"); os_abort(); /* LCOV_EXCL_STOP */ } } /* if we have read all the data required and it's correct, proceed with the parity check */ if (!error_on_this_block && !silent_error_on_this_block && !io_error_on_this_block) { /* compute the parity */ raid_gen(diskmax, state->level, state->block_size, buffer); /* compare the parity */ for (l = 0; l < state->level; ++l) { if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) { unsigned diff = memdiff(buffer[diskmax + l], buffer_recov[l], state->block_size); log_tag("parity_error:%u:%s: Data error, diff bits %u\n", blockcur, lev_config_name(l), diff); /* it's a silent error only if we are dealing with synced blocks */ if (block_is_unsynced) { ++error; error_on_this_block = 1; } else { log_fatal("Data error in parity '%s' at position '%u', diff bits %u\n", lev_config_name(l), blockcur, diff); ++silent_error; silent_error_on_this_block = 1; } } } /* until now is raid */ state_usage_raid(state); } if (silent_error_on_this_block || io_error_on_this_block) { /* set the error status keeping other info */ info_set(&state->infoarr, blockcur, info_set_bad(info)); } else if (error_on_this_block) { /* do nothing, as this is a generic error */ /* likely caused by a not synced array */ } else { /* if rehash is needed */ if (rehash) { /* store all the new hash already computed */ for (j = 0; j < diskmax; ++j) { if (rehandle[j].block) memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE); } } /* update the time info of the block */ /* and clear any other flag */ info_set(&state->infoarr, blockcur, info_make(now, 0, 0, 0)); } /* mark the state as needing write */ state->need_write = 1; /* count the number of processed block */ ++countpos; /* progress */ if (state_progress(state, &io, blockcur, countpos, countmax, countsize)) { /* LCOV_EXCL_START */ break; /* LCOV_EXCL_STOP */ } /* autosave */ if (state->autosave != 0 && autosavedone >= autosavelimit /* if we have reached the limit */ && autosavemissing >= autosavelimit /* if we have at least a full step to do */ ) { autosavedone = 0; /* restart the counter */ /* until now is misc */ state_usage_misc(state); state_progress_stop(state); msg_progress("Autosaving...\n"); state_write(state); state_progress_restart(state); /* drop until now */ state_usage_waste(state); } } state_progress_end(state, countpos, countmax, countsize); state_usage_print(state); if (error || silent_error || io_error) { msg_status("\n"); msg_status("%8u file errors\n", error); msg_status("%8u io errors\n", io_error); msg_status("%8u data errors\n", silent_error); } else { /* print the result only if processed something */ if (countpos != 0) msg_status("Everything OK\n"); } if (error) log_fatal("WARNING! Unexpected file errors!\n"); if (io_error) log_fatal("DANGER! Unexpected input/output errors! The failing blocks are now marked as bad!\n"); if (silent_error) log_fatal("DANGER! Unexpected data errors! The failing blocks are now marked as bad!\n"); if (io_error || silent_error) { log_fatal("Use 'snapraid status' to list the bad blocks.\n"); log_fatal("Use 'snapraid -e fix' to recover.\n"); } log_tag("summary:error_file:%u\n", error); log_tag("summary:error_io:%u\n", io_error); log_tag("summary:error_data:%u\n", silent_error); if (error + silent_error + io_error == 0) log_tag("summary:exit:ok\n"); else log_tag("summary:exit:error\n"); log_flush(); bail: /* stop all the worker threads */ io_stop(&io); for (j = 0; j < diskmax; ++j) { struct snapraid_file* file = handle[j].file; struct snapraid_disk* disk = handle[j].disk; ret = handle_close(&handle[j]); if (ret == -1) { /* LCOV_EXCL_START */ log_tag("error:%u:%s:%s: Close error. %s\n", blockcur, disk->name, esc(file->sub, esc_buffer), strerror(errno)); log_fatal("DANGER! Unexpected close error in a data disk.\n"); ++error; /* continue, as we are already exiting */ /* LCOV_EXCL_STOP */ } } free(handle); free(rehandle_alloc); free(waiting_map); io_done(&io); if (state->opt.expect_recoverable) { if (error + silent_error + io_error == 0) return -1; } else { if (error + silent_error + io_error != 0) return -1; } return 0; }
/*
 * Mark SVN revisions [revlo, revhi] as committed.
 *
 * If the revision still has outstanding commit-drain references (see
 * _isvn_commitdrain_add), just drop one reference and return without
 * marking.  Otherwise, advance the g_rev_commitdone watermark when the
 * range is contiguous with it (coalescing any stashed ranges and waking
 * waiters), or stash the range in g_commitdone_hash for later.
 * Currently only single-revision ranges are supported.
 */
void isvn_mark_commitdone(unsigned revlo, unsigned revhi)
{
	struct fetchdone_range *done, *exist, key;
	struct fetchdone_range *drainex, drainkey;

	/* In particular, checking for refs in need of draining ... for each
	 * rev in range. */
	if (revlo != revhi)
		die("XXX batched commitdones notimpl.");

	/* Preallocate outside the lock in case we need to stash the range. */
	done = xmalloc(sizeof(*done));

	drainkey.r_lo = revlo;
	hashmap_entry_init(&drainkey.r_entry,
	    memhash(&drainkey.r_lo, sizeof(drainkey.r_lo)));

	isvn_g_lock();
	/* For revs with multiple branch edits (rare), wait until all commits
	 * are in before marking done. */
	drainex = hashmap_get(&g_commitdrain_hash, &drainkey);
	if (drainex) {
		drainex->r_hi--;
		if (drainex->r_hi == 0) {
			/* last reference: fall through and mark done */
			hashmap_remove(&g_commitdrain_hash, drainex);
			free(drainex);
		} else
			goto out;
	}

	if (g_rev_commitdone == revlo - 1) {
		/* Contiguous: advance the watermark and absorb any stashed
		 * ranges that now touch it. */
		g_rev_commitdone = revhi;
		while (true) {
			key.r_lo = revhi + 1;
			hashmap_entry_init(&key.r_entry,
			    memhash(&key.r_lo, sizeof(key.r_lo)));
			exist = hashmap_remove(&g_commitdone_hash, &key);
			if (!exist)
				break;
			g_rev_commitdone = revhi = exist->r_hi;
			free(exist);
		}
		cond_broadcast(&g_commitdone_cond);
	} else {
		/* Out of order: stash; ownership of 'done' passes to the hash. */
		done->r_lo = revlo;
		done->r_hi = revhi;
		hashmap_entry_init(&done->r_entry,
		    memhash(&done->r_lo, sizeof(done->r_lo)));
		hashmap_add(&g_commitdone_hash, done);
		done = NULL;
	}
out:
	isvn_g_unlock();
	/* Free the preallocation if it was not consumed above. */
	if (done)
		free(done);
}
/*
 * Scrub the blocks selected by 'plan' in [blockstart, blockmax):
 * re-read each data block, verify its stored hash, re-read the parity,
 * recompute it and compare.  Blocks with silent-data or I/O errors are
 * marked bad in the info array; good blocks get a fresh scrub timestamp.
 *
 * Returns 0 on success, -1 on failure (inverted when
 * state->opt.expect_recoverable is set, for test runs that expect errors).
 */
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity_handle** parity, block_off_t blockstart, block_off_t blockmax, struct snapraid_plan* plan, time_t now)
{
	struct snapraid_handle* handle;
	void* rehandle_alloc;
	struct snapraid_rehash* rehandle;
	unsigned diskmax;
	block_off_t i;
	unsigned j;
	void* buffer_alloc;
	void** buffer;
	unsigned buffermax;
	data_off_t countsize;
	block_off_t countpos;
	block_off_t countmax;
	block_off_t autosavedone;
	block_off_t autosavelimit;
	block_off_t autosavemissing;
	int ret;
	unsigned error;
	unsigned silent_error;
	unsigned io_error;
	unsigned l;

	/* maps the disks to handles */
	handle = handle_map(state, &diskmax);

	/* rehash buffers */
	rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc);

	/* we need disk + 2 for each parity level buffers */
	buffermax = diskmax + state->level * 2;
	buffer = malloc_nofail_vector_align(diskmax, buffermax, state->block_size, &buffer_alloc);
	if (!state->opt.skip_self)
		mtest_vector(buffermax, state->block_size, buffer);

	error = 0;
	silent_error = 0;
	io_error = 0;

	/* first count the number of blocks to process */
	countmax = 0;
	plan->countlast = 0;
	for (i = blockstart; i < blockmax; ++i) {
		if (!block_is_enabled(state, i, plan))
			continue;
		++countmax;
	}

	/* compute the autosave size for all disk, even if not read */
	/* this makes sense because the speed should be almost the same */
	/* if the disks are read in parallel */
	autosavelimit = state->autosave / (diskmax * state->block_size);
	autosavemissing = countmax; /* blocks to do */
	autosavedone = 0; /* blocks done */

	/* drop until now */
	state_usage_waste(state);

	countsize = 0;
	countpos = 0;
	plan->countlast = 0;
	state_progress_begin(state, blockstart, blockmax, countmax);
	for (i = blockstart; i < blockmax; ++i) {
		snapraid_info info;
		int error_on_this_block;
		int silent_error_on_this_block;
		int io_error_on_this_block;
		int block_is_unsynced;
		int rehash;

		if (!block_is_enabled(state, i, plan))
			continue;

		/* one more block processed for autosave */
		++autosavedone;
		--autosavemissing;

		/* by default process the block, and skip it if something goes wrong */
		error_on_this_block = 0;
		silent_error_on_this_block = 0;
		io_error_on_this_block = 0;

		/* if all the blocks at this address are synced */
		/* if not, parity is not even checked */
		block_is_unsynced = 0;

		/* get block specific info */
		info = info_get(&state->infoarr, i);

		/* if we have to use the old hash */
		rehash = info_get_rehash(info);

		/* for each disk, process the block */
		for (j = 0; j < diskmax; ++j) {
			int read_size;
			unsigned char hash[HASH_SIZE];
			struct snapraid_block* block;
			int file_is_unsynced;
			struct snapraid_disk* disk = handle[j].disk;
			struct snapraid_file* file;
			block_off_t file_pos;

			/* if the file on this disk is synced */
			/* if not, silent errors are assumed as expected error */
			file_is_unsynced = 0;

			/* by default no rehash in case of "continue" */
			rehandle[j].block = 0;

			/* if the disk position is not used */
			if (!disk) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* if the block is not used */
			block = fs_par2block_get(disk, i);
			if (!block_has_file(block)) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* get the file of this block */
			file = fs_par2file_get(disk, i, &file_pos);

			/* if the block is unsynced, errors are expected */
			if (block_has_invalid_parity(block)) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* until now is CPU */
			state_usage_cpu(state);

			/* if the file is different than the current one, close it */
			if (handle[j].file != 0 && handle[j].file != file) {
				/* keep a pointer at the file we are going to close for error reporting */
				struct snapraid_file* report = handle[j].file;
				ret = handle_close(&handle[j]);
				if (ret == -1) {
					/* LCOV_EXCL_START */
					/* This one is really an unexpected error, because we are only reading */
					/* and closing a descriptor should never fail */
					if (errno == EIO) {
						log_tag("error:%u:%s:%s: Close EIO error. %s\n", i, disk->name, esc(report->sub), strerror(errno));
						log_fatal("DANGER! Unexpected input/output close error in a data disk, it isn't possible to scrub.\n");
						log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
						log_fatal("Stopping at block %u\n", i);
						++io_error;
						goto bail;
					}

					log_tag("error:%u:%s:%s: Close error. %s\n", i, disk->name, esc(report->sub), strerror(errno));
					log_fatal("WARNING! Unexpected close error in a data disk, it isn't possible to scrub.\n");
					log_fatal("Ensure that file '%s' can be accessed.\n", handle[j].path);
					log_fatal("Stopping at block %u\n", i);
					++error;
					goto bail;
					/* LCOV_EXCL_STOP */
				}
			}

			ret = handle_open(&handle[j], file, state->file_mode, log_error, 0);
			if (ret == -1) {
				if (errno == EIO) {
					/* LCOV_EXCL_START */
					log_tag("error:%u:%s:%s: Open EIO error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
					log_fatal("DANGER! Unexpected input/output open error in a data disk, it isn't possible to scrub.\n");
					log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
					log_fatal("Stopping at block %u\n", i);
					++io_error;
					goto bail;
					/* LCOV_EXCL_STOP */
				}

				log_tag("error:%u:%s:%s: Open error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
				++error;
				error_on_this_block = 1;
				continue;
			}

			/* check if the file is changed */
			if (handle[j].st.st_size != file->size
				|| handle[j].st.st_mtime != file->mtime_sec
				|| STAT_NSEC(&handle[j].st) != file->mtime_nsec
				/* don't check the inode to support filesystem without persistent inodes */
			) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* note that we intentionally don't abort if the file has different attributes */
			/* from the last sync, as we are expected to return errors if running */
			/* in an unsynced array. This is just like the check command. */

			read_size = handle_read(&handle[j], file_pos, buffer[j], state->block_size, log_error, 0);
			if (read_size == -1) {
				if (errno == EIO) {
					log_tag("error:%u:%s:%s: Read EIO error at position %u. %s\n", i, disk->name, esc(file->sub), file_pos, strerror(errno));
					if (io_error >= state->opt.io_error_limit) {
						/* LCOV_EXCL_START */
						log_fatal("DANGER! Too many input/output read error in a data disk, it isn't possible to scrub.\n");
						log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
						log_fatal("Stopping at block %u\n", i);
						++io_error;
						goto bail;
						/* LCOV_EXCL_STOP */
					}

					log_error("Input/Output error in file '%s' at position '%u'\n", handle[j].path, file_pos);
					++io_error;
					io_error_on_this_block = 1;
					continue;
				}

				log_tag("error:%u:%s:%s: Read error at position %u. %s\n", i, disk->name, esc(file->sub), file_pos, strerror(errno));
				++error;
				error_on_this_block = 1;
				continue;
			}

			/* until now is disk */
			state_usage_disk(state, disk);

			countsize += read_size;

			/* now compute the hash */
			if (rehash) {
				memhash(state->prevhash, state->prevhashseed, hash, buffer[j], read_size);

				/* compute the new hash, and store it */
				rehandle[j].block = block;
				memhash(state->hash, state->hashseed, rehandle[j].hash, buffer[j], read_size);
			} else {
				memhash(state->hash, state->hashseed, hash, buffer[j], read_size);
			}

			if (block_has_updated_hash(block)) {
				/* compare the hash */
				if (memcmp(hash, block->hash, HASH_SIZE) != 0) {
					unsigned diff = memdiff(hash, block->hash, HASH_SIZE);

					log_tag("error:%u:%s:%s: Data error at position %u, diff bits %u\n", i, disk->name, esc(file->sub), file_pos, diff);

					/* it's a silent error only if we are dealing with synced files */
					if (file_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_error("Data error in file '%s' at position '%u', diff bits %u\n", handle[j].path, file_pos, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
					continue;
				}
			}
		}

		/* if we have read all the data required and it's correct, proceed with the parity check */
		if (!error_on_this_block && !silent_error_on_this_block && !io_error_on_this_block) {
			unsigned char* buffer_recov[LEV_MAX];

			/* until now is CPU */
			state_usage_cpu(state);

			/* buffers for parity read and not computed */
			for (l = 0; l < state->level; ++l)
				buffer_recov[l] = buffer[diskmax + state->level + l];
			for (; l < LEV_MAX; ++l)
				buffer_recov[l] = 0;

			/* read the parity */
			for (l = 0; l < state->level; ++l) {
				ret = parity_read(parity[l], i, buffer_recov[l], state->block_size, log_error);
				if (ret == -1) {
					buffer_recov[l] = 0;

					if (errno == EIO) {
						log_tag("parity_error:%u:%s: Read EIO error. %s\n", i, lev_config_name(l), strerror(errno));
						if (io_error >= state->opt.io_error_limit) {
							/* LCOV_EXCL_START */
							log_fatal("DANGER! Too many input/output read error in the %s disk, it isn't possible to scrub.\n", lev_name(l));
							log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(l));
							log_fatal("Stopping at block %u\n", i);
							++io_error;
							goto bail;
							/* LCOV_EXCL_STOP */
						}

						log_error("Input/Output error in parity '%s' at position '%u'\n", lev_config_name(l), i);
						++io_error;
						io_error_on_this_block = 1;
						continue;
					}

					log_tag("parity_error:%u:%s: Read error. %s\n", i, lev_config_name(l), strerror(errno));
					++error;
					error_on_this_block = 1;
					continue;
				}

				/* until now is parity */
				state_usage_parity(state, l);
			}

			/* compute the parity */
			raid_gen(diskmax, state->level, state->block_size, buffer);

			/* compare the parity */
			for (l = 0; l < state->level; ++l) {
				if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) {
					unsigned diff = memdiff(buffer[diskmax + l], buffer_recov[l], state->block_size);

					log_tag("parity_error:%u:%s: Data error, diff bits %u\n", i, lev_config_name(l), diff);

					/* it's a silent error only if we are dealing with synced blocks */
					if (block_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_fatal("Data error in parity '%s' at position '%u', diff bits %u\n", lev_config_name(l), i, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
				}
			}
		}

		if (silent_error_on_this_block || io_error_on_this_block) {
			/* set the error status keeping other info */
			info_set(&state->infoarr, i, info_set_bad(info));
		} else if (error_on_this_block) {
			/* do nothing, as this is a generic error */
			/* likely caused by a not synced array */
		} else {
			/* if rehash is needed */
			if (rehash) {
				/* store all the new hash already computed */
				for (j = 0; j < diskmax; ++j) {
					if (rehandle[j].block)
						memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE);
				}
			}

			/* update the time info of the block */
			/* and clear any other flag */
			info_set(&state->infoarr, i, info_make(now, 0, 0, 0));
		}

		/* mark the state as needing write */
		state->need_write = 1;

		/* count the number of processed block */
		++countpos;

		/* progress */
		if (state_progress(state, i, countpos, countmax, countsize)) {
			/* LCOV_EXCL_START */
			break;
			/* LCOV_EXCL_STOP */
		}

		/* autosave */
		if (state->autosave != 0
			&& autosavedone >= autosavelimit /* if we have reached the limit */
			&& autosavemissing >= autosavelimit /* if we have at least a full step to do */
		) {
			autosavedone = 0; /* restart the counter */

			/* until now is CPU */
			state_usage_cpu(state);

			state_progress_stop(state);

			msg_progress("Autosaving...\n");
			state_write(state);

			state_progress_restart(state);

			/* drop until now */
			state_usage_waste(state);
		}
	}

	state_progress_end(state, countpos, countmax, countsize);

	state_usage_print(state);

	if (error || silent_error || io_error) {
		msg_status("\n");
		msg_status("%8u file errors\n", error);
		msg_status("%8u io errors\n", io_error);
		msg_status("%8u data errors\n", silent_error);
	} else {
		/* print the result only if processed something */
		if (countpos != 0)
			msg_status("Everything OK\n");
	}

	if (error)
		log_fatal("WARNING! Unexpected file errors!\n");
	if (io_error)
		log_fatal("DANGER! Unexpected input/output errors! The failing blocks are now marked as bad!\n");
	if (silent_error)
		log_fatal("DANGER! Unexpected data errors! The failing blocks are now marked as bad!\n");
	if (io_error || silent_error) {
		log_fatal("Use 'snapraid status' to list the bad blocks.\n");
		log_fatal("Use 'snapraid -e fix' to recover.\n");
	}

	log_tag("summary:error_file:%u\n", error);
	log_tag("summary:error_io:%u\n", io_error);
	log_tag("summary:error_data:%u\n", silent_error);
	if (error + silent_error + io_error == 0)
		log_tag("summary:exit:ok\n");
	else
		log_tag("summary:exit:error\n");
	log_flush();

bail:
	/* close all the open handles before freeing them */
	for (j = 0; j < diskmax; ++j) {
		struct snapraid_file* file = handle[j].file;
		struct snapraid_disk* disk = handle[j].disk;
		ret = handle_close(&handle[j]);
		if (ret == -1) {
			/* LCOV_EXCL_START */
			log_tag("error:%u:%s:%s: Close error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
			log_fatal("DANGER! Unexpected close error in a data disk.\n");
			++error;
			/* continue, as we are already exiting */
			/* LCOV_EXCL_STOP */
		}
	}

	free(handle);
	free(buffer_alloc);
	free(buffer);
	free(rehandle_alloc);

	if (state->opt.expect_recoverable) {
		if (error + silent_error + io_error == 0)
			return -1;
	} else {
		if (error + silent_error + io_error != 0)
			return -1;
	}

	return 0;
}
/* Hash a double by feeding its raw byte representation to memhash(). */
unsigned GetHashValue0(const double& d)
{
	return memhash(&d, sizeof(d));
}
/*
 * Comparison callback used when searching for a file that can provide the
 * data of a missing block.
 *
 * void_arg  - const struct search_file_compare_arg*: the block we need
 *             (expected file metadata, offset, size and stored hash).
 * void_data - const struct snapraid_search_file*: a candidate file on disk.
 *
 * Returns 0 if the candidate matches (metadata equal AND the block read at
 * arg->offset hashes to arg->block->hash), -1 otherwise.  On a match the
 * block content is left in arg->buffer, zero-padded to state->block_size.
 *
 * Any I/O failure on the candidate file is treated as fatal (exit), not as
 * a mismatch.
 */
int search_file_compare(const void* void_arg, const void* void_data)
{
	const struct search_file_compare_arg* arg = void_arg;
	const struct snapraid_search_file* file = void_data;
	const struct snapraid_state* state = arg->state;
	unsigned char buffer_hash[HASH_SIZE];
	const char* path = file->path;
	int f;
	ssize_t ret;

	/* compare file info; any difference means a different file version */
	if (arg->file->size != file->size)
		return -1;

	if (arg->file->mtime_sec != file->mtime_sec)
		return -1;

	if (arg->file->mtime_nsec != file->mtime_nsec)
		return -1;

	/* read the block and compare the hash */
	f = open(path, O_RDONLY | O_BINARY);
	if (f == -1) {
		/* LCOV_EXCL_START */
		msg_error("Error opening file '%s'. %s.\n", path, strerror(errno));
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	if (lseek(f, arg->offset, SEEK_SET) != arg->offset) {
		/* LCOV_EXCL_START */
		msg_error("Error seeking file '%s'. %s.\n", path, strerror(errno));
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* short read is also an error: the block must be fully present */
	ret = read(f, arg->buffer, arg->read_size);
	if (ret < 0 || (unsigned)ret != arg->read_size) {
		/* LCOV_EXCL_START */
		msg_error("Error reading file '%s'. %s.\n", path, strerror(errno));
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	ret = close(f);
	if (ret != 0) {
		/* LCOV_EXCL_START */
		msg_error("Error closing file '%s'. %s.\n", path, strerror(errno));
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* compute the hash, with the old seed/algorithm if the block predates a rehash */
	if (arg->prevhash)
		memhash(state->prevhash, state->prevhashseed, buffer_hash, arg->buffer, arg->read_size);
	else
		memhash(state->hash, state->hashseed, buffer_hash, arg->buffer, arg->read_size);

	/* check if the hash is matching */
	if (memcmp(buffer_hash, arg->block->hash, HASH_SIZE) != 0)
		return -1;

	if (arg->read_size != state->block_size) {
		/* fill the remaining with 0 */
		memset(arg->buffer + arg->read_size, 0, state->block_size - arg->read_size);
	}

	return 0;
}
// *oob: output argument, means we hit the limit specified by 'bound' static uptrint_t bounded_hash(value_t a, int bound, int *oob) { *oob = 0; union { double d; int64_t i64; } u; numerictype_t nt; size_t i, len; cvalue_t *cv; cprim_t *cp; void *data; uptrint_t h = 0; int oob2, tg = tag(a); switch(tg) { case TAG_NUM : case TAG_NUM1: u.d = (double)numval(a); return doublehash(u.i64); case TAG_FUNCTION: if (uintval(a) > N_BUILTINS) return bounded_hash(((function_t*)ptr(a))->bcode, bound, oob); return inthash(a); case TAG_SYM: return ((symbol_t*)ptr(a))->hash; case TAG_CPRIM: cp = (cprim_t*)ptr(a); data = cp_data(cp); if (cp_class(cp) == wchartype) return inthash(*(int32_t*)data); nt = cp_numtype(cp); u.d = conv_to_double(data, nt); return doublehash(u.i64); case TAG_CVALUE: cv = (cvalue_t*)ptr(a); data = cv_data(cv); return memhash(data, cv_len(cv)); case TAG_VECTOR: if (bound <= 0) { *oob = 1; return 1; } len = vector_size(a); for(i=0; i < len; i++) { h = MIX(h, bounded_hash(vector_elt(a,i), bound/2, &oob2)^1); if (oob2) bound/=2; *oob = *oob || oob2; } return h; case TAG_CONS: do { if (bound <= 0) { *oob = 1; return h; } h = MIX(h, bounded_hash(car_(a), bound/2, &oob2)); // bounds balancing: try to share the bounds efficiently // so we can hash better when a list is cdr-deep (a common case) if (oob2) bound/=2; else bound--; // recursive OOB propagation. otherwise this case is slow: // (hash '#2=((#0=(#1=(#1#) . #0#)) . #2#)) *oob = *oob || oob2; a = cdr_(a); } while (iscons(a)); h = MIX(h, bounded_hash(a, bound-1, &oob2)^2); *oob = *oob || oob2; return h; } return 0; }
/*
 * Combine the hash of an object id's raw bytes with the hash of a string,
 * for use as a hashmap bucket key.
 */
static unsigned int hash_oid_string(const struct object_id *oid,
				    const char *string)
{
	unsigned int oid_part = memhash(oid->hash, the_hash_algo->rawsz);
	unsigned int str_part = strhash(string);

	return oid_part + str_part;
}
/*
 * Read stdin line by line and print result of commands to stdout:
 *
 * hash key -> strhash(key) memhash(key) strihash(key) memihash(key)
 * put key value -> NULL / old value
 * get key -> NULL / value
 * remove key -> NULL / old value
 * iterate -> key1 value1\nkey2 value2\n...
 * size -> tablesize numentries
 *
 * perfhashmap method rounds -> test hashmap.[ch] performance
 */
int main(int argc, char *argv[])
{
	char line[1024];
	struct hashmap map;
	int icase;

	/* init hash map; "ignorecase" as first argument selects the
	 * case-insensitive comparison function */
	icase = argc > 1 && !strcmp("ignorecase", argv[1]);
	hashmap_init(&map, (hashmap_cmp_fn) (icase ? test_entry_cmp_icase
			: test_entry_cmp), 0);

	/* process commands from stdin */
	while (fgets(line, sizeof(line), stdin)) {
		char *cmd, *p1 = NULL, *p2 = NULL;
		int l1 = 0, l2 = 0, hash = 0;
		struct test_entry *entry;

		/* break line into command and up to two parameters */
		cmd = strtok(line, DELIM);
		/* ignore empty lines */
		if (!cmd || *cmd == '#')
			continue;

		p1 = strtok(NULL, DELIM);
		if (p1) {
			l1 = strlen(p1);
			/* precompute the key hash once for all commands */
			hash = icase ? strihash(p1) : strhash(p1);
			p2 = strtok(NULL, DELIM);
			if (p2)
				l2 = strlen(p2);
		}

		if (!strcmp("hash", cmd) && l1) {

			/* print results of different hash functions */
			printf("%u %u %u %u\n", strhash(p1), memhash(p1, l1),
					strihash(p1), memihash(p1, l1));

		} else if (!strcmp("add", cmd) && l1 && l2) {

			/* create entry with key = p1, value = p2 */
			entry = alloc_test_entry(hash, p1, l1, p2, l2);

			/* add to hashmap */
			hashmap_add(&map, entry);

		} else if (!strcmp("put", cmd) && l1 && l2) {

			/* create entry with key = p1, value = p2 */
			entry = alloc_test_entry(hash, p1, l1, p2, l2);

			/* add / replace entry */
			entry = hashmap_put(&map, entry);

			/* print and free replaced entry, if any */
			puts(entry ? get_value(entry) : "NULL");
			free(entry);

		} else if (!strcmp("get", cmd) && l1) {

			/* lookup entry in hashmap */
			entry = hashmap_get_from_hash(&map, hash, p1);

			/* print result; walk chained entries with equal keys */
			if (!entry)
				puts("NULL");
			while (entry) {
				puts(get_value(entry));
				entry = hashmap_get_next(&map, entry);
			}

		} else if (!strcmp("remove", cmd) && l1) {

			/* setup static key */
			struct hashmap_entry key;
			hashmap_entry_init(&key, hash);

			/* remove entry from hashmap */
			entry = hashmap_remove(&map, &key, p1);

			/* print result and free entry*/
			puts(entry ? get_value(entry) : "NULL");
			free(entry);

		} else if (!strcmp("iterate", cmd)) {

			struct hashmap_iter iter;
			hashmap_iter_init(&map, &iter);
			while ((entry = hashmap_iter_next(&iter)))
				printf("%s %s\n", entry->key, get_value(entry));

		} else if (!strcmp("size", cmd)) {

			/* print table sizes */
			printf("%u %u\n", map.tablesize, map.size);

		} else if (!strcmp("intern", cmd) && l1) {

			/* test that strintern works */
			const char *i1 = strintern(p1);
			const char *i2 = strintern(p1);
			if (strcmp(i1, p1))
				printf("strintern(%s) returns %s\n", p1, i1);
			else if (i1 == p1)
				printf("strintern(%s) returns input pointer\n", p1);
			else if (i1 != i2)
				printf("strintern(%s) != strintern(%s)", i1, i2);
			else
				printf("%s\n", i1);

		} else if (!strcmp("perfhashmap", cmd) && l1 && l2) {

			perf_hashmap(atoi(p1), atoi(p2));

		} else {

			printf("Unknown command %s\n", cmd);

		}
	}

	hashmap_free(&map, 1);
	return 0;
}