//** Read every block of every allocation (old oplist API).
void read_allocs(ibp_capset_t *caps, int n, int asize, int block_size)
{
    int i, j, nblocks, rem, len, err;
    oplist_t *iolist;
    ibp_op_t *op;
    char *buffer = (char *)malloc(asize);

    iolist = new_ibp_oplist(NULL);

    nblocks = asize / block_size;
    rem = asize % block_size;
    if (rem > 0) nblocks++;

    // for (j=0; j<nblocks; j++) {     //** Issue the blocks in reverse order
    for (j=nblocks-1; j>=0; j--) {
        for (i=0; i<n; i++) {
            if ((j == (nblocks-1)) && (rem > 0)) {
                len = rem;
            } else {
                len = block_size;
            }

            op = new_ibp_read_op(get_ibp_cap(&(caps[i]), IBP_READCAP), j*block_size, len,
                                 buffer, ibp_timeout, NULL, cc);
            add_ibp_oplist(iolist, op);
        }
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("read_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
    }
    free_oplist(iolist);

    free(buffer);
}
//** Remove all allocations and free the capability list (old oplist API).
void remove_allocs(ibp_capset_t *caps_list, int nallocs)
{
    int i, err;
    oplist_t *iolist;
    ibp_op_t *op;

    iolist = new_ibp_oplist(NULL);

    for (i=0; i<nallocs; i++) {
        op = new_ibp_remove_op(get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP),
                               ibp_timeout, NULL, NULL);
        add_ibp_oplist(iolist, op);
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("remove_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
        abort();
    }
    free_oplist(iolist);

    //** Lastly free all the caps and the array
    for (i=0; i<nallocs; i++) {
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_READCAP));
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_WRITECAP));
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP));
    }
    free(caps_list);

    return;
}
//** Create nallocs allocations of asize bytes spread across the depots (opque API).
ibp_capset_t *create_allocs(int nallocs, int asize)
{
    int i, err;
    ibp_attributes_t attr;
    ibp_depot_t *depot;
    opque_t *q;
    op_generic_t *op;
    ibp_capset_t *caps = (ibp_capset_t *)malloc(sizeof(ibp_capset_t)*nallocs);

    set_ibp_attributes(&attr, time(NULL) + a_duration, IBP_HARD, IBP_BYTEARRAY);
    q = new_opque();

    for (i=0; i<nallocs; i++) {
        depot = &(depot_list[i % n_depots]);   //** Round-robin over the depots
        op = new_ibp_alloc_op(ic, &(caps[i]), asize, depot, &attr,
                              disk_cs_type, disk_blocksize, ibp_timeout);
        opque_add(q, op);
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("create_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
        abort();
    }
    opque_free(q, OP_DESTROY);

    return(caps);
}
//** Create alias allocations referencing the given base allocations (old oplist API).
ibp_capset_t *create_alias_allocs(int nallocs, ibp_capset_t *base_caps, int n_base)
{
    int i, err;
    oplist_t *iolist;
    ibp_op_t *op;
    ibp_capset_t *bcap;
    ibp_capset_t *caps = (ibp_capset_t *)malloc(sizeof(ibp_capset_t)*nallocs);

    iolist = new_ibp_oplist(NULL);

    for (i=0; i<nallocs; i++) {
        bcap = &(base_caps[i % n_base]);   //** Round-robin over the base allocations
        op = new_ibp_alias_alloc_op(&(caps[i]), get_ibp_cap(bcap, IBP_MANAGECAP),
                                    0, 0, 0, ibp_timeout, NULL, NULL);
        add_ibp_oplist(iolist, op);
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("create_alias_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
    }
    free_oplist(iolist);

    return(caps);
}
//** Create alias allocations referencing the given base allocations (opque API).
ibp_capset_t *create_alias_allocs(int nallocs, ibp_capset_t *base_caps, int n_base)
{
    int i, err;
    opque_t *q;
    op_generic_t *op;
    ibp_capset_t *bcap;
    ibp_capset_t *caps = (ibp_capset_t *)malloc(sizeof(ibp_capset_t)*nallocs);

    q = new_opque();

    for (i=0; i<nallocs; i++) {
        bcap = &(base_caps[i % n_base]);   //** Round-robin over the base allocations
        op = new_ibp_alias_alloc_op(ic, &(caps[i]), get_ibp_cap(bcap, IBP_MANAGECAP),
                                    0, 0, 0, ibp_timeout);
        opque_add(q, op);
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("create_alias_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    return(caps);
}
//** Create nallocs allocations of asize bytes spread across the depots (old oplist API).
ibp_capset_t *create_allocs(int nallocs, int asize)
{
    int i, err;
    ibp_attributes_t attr;
    ibp_depot_t *depot;
    oplist_t *iolist;
    ibp_op_t *op;
    ibp_capset_t *caps = (ibp_capset_t *)malloc(sizeof(ibp_capset_t)*nallocs);

    set_ibp_attributes(&attr, time(NULL) + a_duration, IBP_HARD, IBP_BYTEARRAY);
    iolist = new_ibp_oplist(NULL);

    for (i=0; i<nallocs; i++) {
        depot = &(depot_list[i % n_depots]);   //** Round-robin over the depots
        op = new_ibp_alloc_op(&(caps[i]), asize, depot, &attr, ibp_timeout, NULL, NULL);
        add_ibp_oplist(iolist, op);
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("create_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
        abort();
    }
    free_oplist(iolist);

    return(caps);
}
//** Remove all allocations and free the capability list (opque API).
void remove_allocs(ibp_capset_t *caps_list, int nallocs)
{
    int i, err;
    opque_t *q;
    op_generic_t *op;

    q = new_opque();

    for (i=0; i<nallocs; i++) {
        op = new_ibp_remove_op(ic, get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP), ibp_timeout);
        opque_add(q, op);
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("remove_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    //** Lastly free all the caps and the array
    for (i=0; i<nallocs; i++) {
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_READCAP));
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_WRITECAP));
        destroy_ibp_cap(get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP));
    }
    free(caps_list);

    return;
}
int main(int argc, char *argv[])
{
    int dev;

    buf_size = sysconf(_SC_PAGESIZE);
    init(argc, argv);
    verbose && fprintf(stderr, "%s compiled " __DATE__ " " __TIME__ "\n", argv[0]);
    /* an ioctl transfer size is limited to _IOC_SIZEBITS bits */
    if (io_type == ioctl_io && buf_size >= 1 << _IOC_SIZEBITS)
        buf_size = (1 << _IOC_SIZEBITS) - 1;
    inbuf = malloc(buf_size);
    outbuf = malloc(buf_size);
    chkne(dev = open(dev_name, O_CREAT | O_RDWR, 0666));
    if (io_type == mmap_io) {
        /* map a page-aligned window; keep the in-page part of offset separately */
        mm = mmap(NULL, buf_size, PROT_READ | PROT_WRITE, MAP_SHARED, dev,
                  offset & ~(sysconf(_SC_PAGESIZE) - 1));
        if (mm == MAP_FAILED) {
            warn("mmap() failed");
            goto exit;
        }
        mem = mm + (offset & (sysconf(_SC_PAGESIZE) - 1));
    }
    if (verbose) {
        trvs_(dev_name);
        trvd_(io_type);
        trvd_(buf_size);
        trvd_(ignore_eof);
        trvd_(verbose);
        trvp_(mm);
        trvp_(mem);
        trln();
    }
    switch (io_type) {
    case mmap_io:
    case ioctl_io:
        if (!ro) {
            chkne(ret = read(fileno(stdin), inbuf, buf_size));
            if (ret < 0)
                goto exit;
            chkne(ret = output(dev, inbuf, ret));
        }
        if (!wo) {
            chkne(ret = input(dev, outbuf, buf_size));
            if (ret < 0)
                goto exit;
            write(fileno(stdout), outbuf, ret);
        }
        break;
    case file_io:
    default:
        io_start(dev);
    }
exit:
    if (mm && mm != MAP_FAILED)
        munmap(mm, buf_size);
    free(outbuf);
    free(inbuf);
    close(dev);
    exit(EXIT_SUCCESS);
}
int user_startup (void)
{
    sys_start ();
    net_start ();
    io_start ();
    pm_start ();

    return 0;
}
//** Issue small_count random small reads/writes against random allocations (old oplist API).
//** Returns the total number of bytes moved.
double small_random_allocs(ibp_capset_t *caps, int n, int asize, double readfrac,
                           int small_count, int min_size, int max_size)
{
    int i, io_size, offset, slot, err;
    oplist_t *iolist;
    ibp_op_t *op;
    double rnd, lmin, lmax;
    double nbytes;

    iolist = new_ibp_oplist(NULL);

    lmin = log(min_size);
    lmax = log(max_size);

    if (asize < max_size) {
        max_size = asize;
        log_printf(0, "small_random_allocs: Adjusting max_size=%d\n", max_size);
    }

    char *buffer = (char *)malloc(max_size);
    memset(buffer, 0, max_size);

    nbytes = 0;
    for (i=0; i<small_count; i++) {
        rnd = rand()/(RAND_MAX+1.0);
        slot = n * rnd;                     //** Pick a random allocation

        rnd = rand()/(RAND_MAX+1.0);
        rnd = lmin + (lmax - lmin) * rnd;   //** Log-uniform I/O size
        io_size = exp(rnd);
        if (io_size == 0) io_size = 1;
        nbytes = nbytes + io_size;

        rnd = rand()/(RAND_MAX+1.0);
        offset = (asize - io_size) * rnd;   //** Random offset that fits

        // log_printf(15, "small_random_allocs: slot=%d offset=%d size=%d\n", slot, offset, io_size);

        rnd = rand()/(RAND_MAX+1.0);
        if (rnd < readfrac) {
            op = new_ibp_read_op(get_ibp_cap(&(caps[slot]), IBP_READCAP), offset, io_size,
                                 buffer, ibp_timeout, NULL, cc);
        } else {
            op = new_ibp_write_op(get_ibp_cap(&(caps[slot]), IBP_WRITECAP), offset, io_size,
                                  buffer, ibp_timeout, NULL, cc);
        }

        add_ibp_oplist(iolist, op);
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("small_random_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
    }
    free_oplist(iolist);

    free(buffer);

    return(nbytes);
}
//** Issue small_count random small reads against random allocations (opque API).
//** Returns the total number of bytes read.
double small_read_allocs(ibp_capset_t *caps, int n, int asize, int small_count,
                         int min_size, int max_size)
{
    int i, io_size, offset, slot, err;
    opque_t *q;
    op_generic_t *op;
    double rnd, lmin, lmax;
    double nbytes;
    tbuffer_t *buf;

    q = new_opque();

    lmin = log(min_size);
    lmax = log(max_size);

    if (asize < max_size) {
        max_size = asize;
        log_printf(0, "small_read_allocs: Adjusting max_size=%d\n", max_size);
    }

    char *buffer = (char *)malloc(max_size);
    init_buffer(buffer, 'r', max_size);

    type_malloc_clear(buf, tbuffer_t, small_count);

    nbytes = 0;
    for (i=0; i<small_count; i++) {
        rnd = rand()/(RAND_MAX+1.0);
        slot = n * rnd;                     //** Pick a random allocation

        rnd = rand()/(RAND_MAX+1.0);
        rnd = lmin + (lmax - lmin) * rnd;   //** Log-uniform I/O size
        io_size = exp(rnd);
        if (io_size == 0) io_size = 1;
        nbytes = nbytes + io_size;

        rnd = rand()/(RAND_MAX+1.0);
        offset = (asize - io_size) * rnd;   //** Random offset that fits

        tbuffer_single(&(buf[i]), io_size, buffer);
        op = new_ibp_read_op(ic, get_ibp_cap(&(caps[slot]), IBP_READCAP), offset,
                             &(buf[i]), 0, io_size, ibp_timeout);
        opque_add(q, op);
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("small_read_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    free(buf);
    free(buffer);

    return(nbytes);
}
//** Random mix of block reads and writes over every allocation (opque API).
void random_allocs(ibp_capset_t *caps, int n, int asize, int block_size, double rfrac)
{
    int i, err, bslot;
    int j, nblocks, rem, len;
    opque_t *q;
    op_generic_t *op;
    double rnd;
    tbuffer_t *buf;
    char *rbuffer = (char *)malloc(block_size);
    char *wbuffer = (char *)malloc(block_size);

    init_buffer(rbuffer, 'r', block_size);
    init_buffer(wbuffer, 'w', block_size);

    q = new_opque();

    nblocks = asize / block_size;
    rem = asize % block_size;
    if (rem > 0) nblocks++;

    type_malloc_clear(buf, tbuffer_t, n*nblocks);

    for (j=0; j<nblocks; j++) {
        for (i=0; i<n; i++) {
            rnd = rand()/(RAND_MAX + 1.0);   //** Decides read vs write for this block

            if ((j == (nblocks-1)) && (rem > 0)) {
                len = rem;
            } else {
                len = block_size;
            }

            bslot = j*n + i;

            if (rnd < rfrac) {
                tbuffer_single(&(buf[bslot]), len, rbuffer);
                op = new_ibp_read_op(ic, get_ibp_cap(&(caps[i]), IBP_READCAP), j*block_size,
                                     &(buf[bslot]), 0, len, ibp_timeout);
            } else {
                tbuffer_single(&(buf[bslot]), len, wbuffer);
                op = new_ibp_write_op(ic, get_ibp_cap(&(caps[i]), IBP_WRITECAP), j*block_size,
                                      &(buf[bslot]), 0, len, ibp_timeout);
            }
            opque_add(q, op);
        }
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("random_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    free(buf);
    free(rbuffer);
    free(wbuffer);
}
//** Random mix of block reads and writes over every allocation (old oplist API).
void random_allocs(ibp_capset_t *caps, int n, int asize, int block_size, double rfrac)
{
    int i, err;
    int j, nblocks, rem, len;
    oplist_t *iolist;
    ibp_op_t *op;
    double rnd;
    char *buffer = (char *)malloc(asize);

    memset(buffer, 'R', asize);

    iolist = new_ibp_oplist(NULL);

    nblocks = asize / block_size;
    rem = asize % block_size;
    if (rem > 0) nblocks++;

    for (j=0; j<nblocks; j++) {
        for (i=0; i<n; i++) {
            rnd = rand()/(RAND_MAX + 1.0);   //** Decides read vs write for this block

            if ((j == (nblocks-1)) && (rem > 0)) {
                len = rem;
            } else {
                len = block_size;
            }

            if (rnd < rfrac) {
                op = new_ibp_read_op(get_ibp_cap(&(caps[i]), IBP_READCAP), j*block_size, len,
                                     buffer, ibp_timeout, NULL, cc);
            } else {
                op = new_ibp_write_op(get_ibp_cap(&(caps[i]), IBP_WRITECAP), j*block_size, len,
                                      buffer, ibp_timeout, NULL, cc);
            }
            add_ibp_oplist(iolist, op);
        }
    }

    io_start(iolist);
    err = io_waitall(iolist);
    if (err != IBP_OK) {
        printf("random_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, oplist_nfailed(iolist));
    }
    free_oplist(iolist);

    free(buffer);
}
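/*
 * Sketch only, not part of the original source: one plausible way to wire the
 * oplist-style helpers above together into a benchmark pass.  The parameter
 * values are illustrative, the globals the helpers rely on (ibp_timeout,
 * depot_list, n_depots, cc, a_duration) must already be configured, and a
 * real run would fill the allocations before reading them back.
 */
void example_oplist_run(void)
{
    int nallocs = 16;               //** Hypothetical test parameters
    int asize = 1024 * 1024;
    int block_size = 64 * 1024;

    ibp_capset_t *caps = create_allocs(nallocs, asize);

    //** readfrac=0.0 makes every small op a write; 1.0 would make them all reads
    small_random_allocs(caps, nallocs, asize, 0.0, 1000, 64, 8192);
    random_allocs(caps, nallocs, asize, block_size, 0.5);   //** 50/50 read-write mix
    read_allocs(caps, nallocs, asize, block_size);          //** Sequential read-back

    remove_allocs(caps, nallocs);   //** Also destroys the caps and frees the array
}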
//** Write every block of every allocation, issuing the blocks in reverse order (opque API).
void write_allocs(ibp_capset_t *caps, int n, int asize, int block_size)
{
    int i, j, nblocks, rem, len, err, slot;
    opque_t *q;
    op_generic_t *op;
    tbuffer_t *buf;
    char *buffer = (char *)malloc(block_size);

    init_buffer(buffer, 'W', block_size);

    q = new_opque();

    nblocks = asize / block_size;
    rem = asize % block_size;
    if (rem > 0) nblocks++;

    type_malloc_clear(buf, tbuffer_t, n*nblocks);

    //for (j=0; j<nblocks; j++) {
    for (j=nblocks-1; j>=0; j--) {
        for (i=0; i<n; i++) {
            if ((j == (nblocks-1)) && (rem > 0)) {
                len = rem;
            } else {
                len = block_size;
            }

            slot = j*n + i;
            tbuffer_single(&(buf[slot]), len, buffer);
            op = new_ibp_write_op(ic, get_ibp_cap(&(caps[i]), IBP_WRITECAP), j*block_size,
                                  &(buf[slot]), 0, len, ibp_timeout);
            opque_add(q, op);
        }
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("write_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));
    }
    opque_free(q, OP_DESTROY);

    free(buf);
    free(buffer);
}
//** Ask each depot to validate the chksums of every allocation (opque API).
void validate_allocs(ibp_capset_t *caps_list, int nallocs)
{
    int i, err;
    int nalloc_bad, nblocks_bad;
    opque_t *q;
    op_generic_t *op;
    int *bad_blocks = (int *)malloc(sizeof(int)*nallocs);
    int correct_errors = 0;

    q = new_opque();

    for (i=0; i<nallocs; i++) {
        bad_blocks[i] = 0;
        op = new_ibp_validate_chksum_op(ic, get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP),
                                        correct_errors, &(bad_blocks[i]), ibp_timeout);
        opque_add(q, op);
    }

    io_start(q);
    err = io_waitall(q);
    if (err != 0) {
        printf("validate_allocs: At least 1 error occurred! * ibp_errno=%d * nfailed=%d\n",
               err, opque_tasks_failed(q));

        nalloc_bad = 0;
        nblocks_bad = 0;
        for (i=0; i<nallocs; i++) {
            if (bad_blocks[i] != 0) {
                printf(" %d cap=%s blocks_bad=%d\n",
                       i, get_ibp_cap(&(caps_list[i]), IBP_MANAGECAP), bad_blocks[i]);
                nalloc_bad++;
                nblocks_bad = nblocks_bad + bad_blocks[i];
            }
        }

        printf(" Total Bad allocations: %d Total Bad blocks: %d\n", nalloc_bad, nblocks_bad);
    }
    opque_free(q, OP_DESTROY);

    free(bad_blocks);

    return;
}
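/*
 * Sketch only, not part of the original source: the same flow for the
 * opque-based variants above, which thread the ibp context 'ic' through
 * every operation.  write_allocs fills the allocations first so that the
 * chksum validation and the small reads have data to work against; the
 * parameter values are illustrative.
 */
void example_opque_run(void)
{
    int nallocs = 16;
    int asize = 1024 * 1024;
    int block_size = 64 * 1024;

    ibp_capset_t *caps = create_allocs(nallocs, asize);

    write_allocs(caps, nallocs, asize, block_size);
    validate_allocs(caps, nallocs);   //** Only meaningful on chksum-enabled allocations
    small_read_allocs(caps, nallocs, asize, 1000, 64, 8192);
    random_allocs(caps, nallocs, asize, block_size, 0.5);

    remove_allocs(caps, nallocs);
}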
int io_cgi (HTTP_INFO *info)
{
    int chan = info->chan ? info->chan - 1 : 0;
    int i;
    tIO *conf;
    tIO temp;
    int ret = 0;

    conf = &sIo;
    memcpy (&temp, conf, sizeof (temp));

    for (i = 0; i < info->argc; i++) {
        char *ptr = info->argv[i];

        /* the "IO_OUTPUT_n_*_STATE=" prefixes are 26 characters long */
        if (strncmp (ptr, "IO_OUTPUT_1_CURRENT_STATE=", 26) == 0)
            temp.ext_outputs[0].current_state = atol (ptr + 26);
        else if (strncmp (ptr, "IO_OUTPUT_2_CURRENT_STATE=", 26) == 0)
            temp.ext_outputs[1].current_state = atol (ptr + 26);
        else if (strncmp (ptr, "IO_OUTPUT_1_DEFAULT_STATE=", 26) == 0)
            temp.ext_outputs[0].default_state = atol (ptr + 26);
        else if (strncmp (ptr, "IO_OUTPUT_2_DEFAULT_STATE=", 26) == 0)
            temp.ext_outputs[1].default_state = atol (ptr + 26);
    }

    if (ret == 0) {
        /* persist and apply the new settings only if something actually changed */
        if (memcmp (conf, &temp, sizeof (temp)) != 0) {
            if (io_write_config (&temp) == 0) {
                memcpy (conf, &temp, sizeof (temp));
                io_stop ();
                io_start ();
            }
        }
    }

    return ret;
}
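/*
 * Sketch only, not part of the original source: one way the handler above
 * could be invoked after query-string parsing.  The HTTP_INFO layout is
 * assumed from the fields io_cgi() actually touches (chan, argc, argv);
 * everything else here is hypothetical.
 */
int example_set_outputs (void)
{
    char *args[] = {
        "IO_OUTPUT_1_CURRENT_STATE=1",
        "IO_OUTPUT_2_DEFAULT_STATE=0",
    };
    HTTP_INFO info;

    memset (&info, 0, sizeof (info));
    info.chan = 1;        /* channel numbers are 1-based in this handler */
    info.argc = 2;
    info.argv = args;

    /* applies the new state and, if it changed, persists it and restarts I/O */
    return io_cgi (&info);
}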
/* Scrub loop: read data and parity through the io threads, verify block hashes and parity. */
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity_handle* parity_handle, block_off_t blockstart, block_off_t blockmax, struct snapraid_plan* plan, time_t now)
{
    struct snapraid_io io;
    struct snapraid_handle* handle;
    void* rehandle_alloc;
    struct snapraid_rehash* rehandle;
    unsigned diskmax;
    block_off_t blockcur;
    unsigned j;
    unsigned buffermax;
    data_off_t countsize;
    block_off_t countpos;
    block_off_t countmax;
    block_off_t autosavedone;
    block_off_t autosavelimit;
    block_off_t autosavemissing;
    int ret;
    unsigned error;
    unsigned silent_error;
    unsigned io_error;
    unsigned l;
    unsigned* waiting_map;
    unsigned waiting_mac;
    char esc_buffer[ESC_MAX];

    /* maps the disks to handles */
    handle = handle_mapping(state, &diskmax);

    /* rehash buffers */
    rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc);

    /* we need 1 * data + 2 * parity */
    buffermax = diskmax + 2 * state->level;

    /* initialize the io threads */
    io_init(&io, state, state->opt.io_cache, buffermax, scrub_data_reader, handle, diskmax, scrub_parity_reader, 0, parity_handle, state->level);

    /* possibly waiting disks */
    waiting_mac = diskmax > RAID_PARITY_MAX ? diskmax : RAID_PARITY_MAX;
    waiting_map = malloc_nofail(waiting_mac * sizeof(unsigned));

    error = 0;
    silent_error = 0;
    io_error = 0;

    /* first count the number of blocks to process */
    countmax = 0;
    plan->countlast = 0;
    for (blockcur = blockstart; blockcur < blockmax; ++blockcur) {
        if (!block_is_enabled(plan, blockcur))
            continue;
        ++countmax;
    }

    /* compute the autosave size for all disks, even if not read */
    /* this makes sense because the speed should be almost the same */
    /* if the disks are read in parallel */
    autosavelimit = state->autosave / (diskmax * state->block_size);
    autosavemissing = countmax; /* blocks to do */
    autosavedone = 0; /* blocks done */

    /* drop until now */
    state_usage_waste(state);

    countsize = 0;
    countpos = 0;
    plan->countlast = 0;

    /* start all the worker threads */
    io_start(&io, blockstart, blockmax, &block_is_enabled, plan);

    state_progress_begin(state, blockstart, blockmax, countmax);
    while (1) {
        unsigned char* buffer_recov[LEV_MAX];
        snapraid_info info;
        int error_on_this_block;
        int silent_error_on_this_block;
        int io_error_on_this_block;
        int block_is_unsynced;
        int rehash;
        void** buffer;

        /* go to the next block */
        blockcur = io_read_next(&io, &buffer);
        if (blockcur >= blockmax)
            break;

        /* until now is scheduling */
        state_usage_sched(state);

        /* one more block processed for autosave */
        ++autosavedone;
        --autosavemissing;

        /* by default process the block, and skip it if something goes wrong */
        error_on_this_block = 0;
        silent_error_on_this_block = 0;
        io_error_on_this_block = 0;

        /* if all the blocks at this address are synced */
        /* if not, parity is not even checked */
        block_is_unsynced = 0;

        /* get block specific info */
        info = info_get(&state->infoarr, blockcur);

        /* if we have to use the old hash */
        rehash = info_get_rehash(info);

        /* for each disk, process the block */
        for (j = 0; j < diskmax; ++j) {
            struct snapraid_task* task;
            int read_size;
            unsigned char hash[HASH_SIZE];
            struct snapraid_block* block;
            int file_is_unsynced;
            struct snapraid_disk* disk;
            struct snapraid_file* file;
            block_off_t file_pos;
            unsigned diskcur;

            /* if the file on this disk is synced */
            /* if not, silent errors are assumed as expected error */
            file_is_unsynced = 0;

            /* until now is misc */
            state_usage_misc(state);

            /* get the next task */
            task = io_data_read(&io, &diskcur, waiting_map, &waiting_mac);

            /* until now is disk */
            state_usage_disk(state, handle, waiting_map, waiting_mac);

            /* get the task results */
            disk = task->disk;
            block = task->block;
            file = task->file;
            file_pos = task->file_pos;
            read_size = task->read_size;

            /* by default no rehash in case of "continue" */
            rehandle[diskcur].block = 0;

            /* if the disk position is not used */
            if (!disk)
                continue;

            /* if the block is unsynced, errors are expected */
            if (block_has_invalid_parity(block)) {
                /* report that the block and the file are not synced */
                block_is_unsynced = 1;
                file_is_unsynced = 1;
                /* follow */
            }

            /* if the block is not used */
            if (!block_has_file(block))
                continue;

            /* if the block is unsynced, errors are expected */
            if (task->is_timestamp_different) {
                /* report that the block and the file are not synced */
                block_is_unsynced = 1;
                file_is_unsynced = 1;
                /* follow */
            }

            /* handle error conditions */
            if (task->state == TASK_STATE_IOERROR) {
                ++io_error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR) {
                ++error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR_CONTINUE) {
                ++error;
                error_on_this_block = 1;
                continue;
            }
            if (task->state == TASK_STATE_IOERROR_CONTINUE) {
                ++io_error;
                if (io_error >= state->opt.io_error_limit) {
                    /* LCOV_EXCL_START */
                    log_fatal("DANGER! Too many input/output read errors in a data disk, it isn't possible to scrub.\n");
                    log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, task->path);
                    log_fatal("Stopping at block %u\n", blockcur);
                    goto bail;
                    /* LCOV_EXCL_STOP */
                }

                /* otherwise continue */
                io_error_on_this_block = 1;
                continue;
            }
            if (task->state != TASK_STATE_DONE) {
                /* LCOV_EXCL_START */
                log_fatal("Internal inconsistency in task state\n");
                os_abort();
                /* LCOV_EXCL_STOP */
            }

            countsize += read_size;

            /* now compute the hash */
            if (rehash) {
                memhash(state->prevhash, state->prevhashseed, hash, buffer[diskcur], read_size);

                /* compute the new hash, and store it */
                rehandle[diskcur].block = block;
                memhash(state->hash, state->hashseed, rehandle[diskcur].hash, buffer[diskcur], read_size);
            } else {
                memhash(state->hash, state->hashseed, hash, buffer[diskcur], read_size);
            }

            /* until now is hash */
            state_usage_hash(state);

            if (block_has_updated_hash(block)) {
                /* compare the hash */
                if (memcmp(hash, block->hash, HASH_SIZE) != 0) {
                    unsigned diff = memdiff(hash, block->hash, HASH_SIZE);

                    log_tag("error:%u:%s:%s: Data error at position %u, diff bits %u\n", blockcur, disk->name, esc(file->sub, esc_buffer), file_pos, diff);

                    /* it's a silent error only if we are dealing with synced files */
                    if (file_is_unsynced) {
                        ++error;
                        error_on_this_block = 1;
                    } else {
                        log_error("Data error in file '%s' at position '%u', diff bits %u\n", task->path, file_pos, diff);
                        ++silent_error;
                        silent_error_on_this_block = 1;
                    }
                    continue;
                }
            }
        }

        /* buffers for parity read and not computed */
        for (l = 0; l < state->level; ++l)
            buffer_recov[l] = buffer[diskmax + state->level + l];
        for (; l < LEV_MAX; ++l)
            buffer_recov[l] = 0;

        /* until now is misc */
        state_usage_misc(state);

        /* read the parity */
        for (l = 0; l < state->level; ++l) {
            struct snapraid_task* task;
            unsigned levcur;

            task = io_parity_read(&io, &levcur, waiting_map, &waiting_mac);

            /* until now is parity */
            state_usage_parity(state, waiting_map, waiting_mac);

            /* handle error conditions */
            if (task->state == TASK_STATE_IOERROR) {
                ++io_error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR) {
                ++error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR_CONTINUE) {
                ++error;
                error_on_this_block = 1;
                /* if continuing on error, clear the missing buffer */
                buffer_recov[levcur] = 0;
                continue;
            }
            if (task->state == TASK_STATE_IOERROR_CONTINUE) {
                ++io_error;
                if (io_error >= state->opt.io_error_limit) {
                    /* LCOV_EXCL_START */
                    log_fatal("DANGER! Too many input/output read errors in the %s disk, it isn't possible to scrub.\n", lev_name(levcur));
                    log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(levcur));
                    log_fatal("Stopping at block %u\n", blockcur);
                    goto bail;
                    /* LCOV_EXCL_STOP */
                }

                /* otherwise continue */
                io_error_on_this_block = 1;
                /* if continuing on error, clear the missing buffer */
                buffer_recov[levcur] = 0;
                continue;
            }
            if (task->state != TASK_STATE_DONE) {
                /* LCOV_EXCL_START */
                log_fatal("Internal inconsistency in task state\n");
                os_abort();
                /* LCOV_EXCL_STOP */
            }
        }

        /* if we have read all the data required and it's correct, proceed with the parity check */
        if (!error_on_this_block && !silent_error_on_this_block && !io_error_on_this_block) {
            /* compute the parity */
            raid_gen(diskmax, state->level, state->block_size, buffer);

            /* compare the parity */
            for (l = 0; l < state->level; ++l) {
                if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) {
                    unsigned diff = memdiff(buffer[diskmax + l], buffer_recov[l], state->block_size);

                    log_tag("parity_error:%u:%s: Data error, diff bits %u\n", blockcur, lev_config_name(l), diff);

                    /* it's a silent error only if we are dealing with synced blocks */
                    if (block_is_unsynced) {
                        ++error;
                        error_on_this_block = 1;
                    } else {
                        log_fatal("Data error in parity '%s' at position '%u', diff bits %u\n", lev_config_name(l), blockcur, diff);
                        ++silent_error;
                        silent_error_on_this_block = 1;
                    }
                }
            }

            /* until now is raid */
            state_usage_raid(state);
        }

        if (silent_error_on_this_block || io_error_on_this_block) {
            /* set the error status keeping other info */
            info_set(&state->infoarr, blockcur, info_set_bad(info));
        } else if (error_on_this_block) {
            /* do nothing, as this is a generic error */
            /* likely caused by a not synced array */
        } else {
            /* if rehash is needed */
            if (rehash) {
                /* store all the new hash already computed */
                for (j = 0; j < diskmax; ++j) {
                    if (rehandle[j].block)
                        memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE);
                }
            }

            /* update the time info of the block */
            /* and clear any other flag */
            info_set(&state->infoarr, blockcur, info_make(now, 0, 0, 0));
        }

        /* mark the state as needing write */
        state->need_write = 1;

        /* count the number of processed block */
        ++countpos;

        /* progress */
        if (state_progress(state, &io, blockcur, countpos, countmax, countsize)) {
            /* LCOV_EXCL_START */
            break;
            /* LCOV_EXCL_STOP */
        }

        /* autosave */
        if (state->autosave != 0
            && autosavedone >= autosavelimit /* if we have reached the limit */
            && autosavemissing >= autosavelimit /* if we have at least a full step to do */
        ) {
            autosavedone = 0; /* restart the counter */

            /* until now is misc */
            state_usage_misc(state);

            state_progress_stop(state);

            msg_progress("Autosaving...\n");
            state_write(state);

            state_progress_restart(state);

            /* drop until now */
            state_usage_waste(state);
        }
    }

    state_progress_end(state, countpos, countmax, countsize);

    state_usage_print(state);

    if (error || silent_error || io_error) {
        msg_status("\n");
        msg_status("%8u file errors\n", error);
        msg_status("%8u io errors\n", io_error);
        msg_status("%8u data errors\n", silent_error);
    } else {
        /* print the result only if processed something */
        if (countpos != 0)
            msg_status("Everything OK\n");
    }

    if (error)
        log_fatal("WARNING! Unexpected file errors!\n");
    if (io_error)
        log_fatal("DANGER! Unexpected input/output errors! The failing blocks are now marked as bad!\n");
    if (silent_error)
        log_fatal("DANGER! Unexpected data errors! The failing blocks are now marked as bad!\n");
    if (io_error || silent_error) {
        log_fatal("Use 'snapraid status' to list the bad blocks.\n");
        log_fatal("Use 'snapraid -e fix' to recover.\n");
    }

    log_tag("summary:error_file:%u\n", error);
    log_tag("summary:error_io:%u\n", io_error);
    log_tag("summary:error_data:%u\n", silent_error);
    if (error + silent_error + io_error == 0)
        log_tag("summary:exit:ok\n");
    else
        log_tag("summary:exit:error\n");
    log_flush();

bail:
    /* stop all the worker threads */
    io_stop(&io);

    for (j = 0; j < diskmax; ++j) {
        struct snapraid_file* file = handle[j].file;
        struct snapraid_disk* disk = handle[j].disk;
        ret = handle_close(&handle[j]);
        if (ret == -1) {
            /* LCOV_EXCL_START */
            log_tag("error:%u:%s:%s: Close error. %s\n", blockcur, disk->name, esc(file->sub, esc_buffer), strerror(errno));
            log_fatal("DANGER! Unexpected close error in a data disk.\n");
            ++error;
            /* continue, as we are already exiting */
            /* LCOV_EXCL_STOP */
        }
    }

    free(handle);
    free(rehandle_alloc);
    free(waiting_map);
    io_done(&io);

    if (state->opt.expect_recoverable) {
        if (error + silent_error + io_error == 0)
            return -1;
    } else {
        if (error + silent_error + io_error != 0)
            return -1;
    }

    return 0;
}
/* Dry-run loop: read data and parity through the io threads without verifying or fixing anything. */
static int state_dry_process(struct snapraid_state* state, struct snapraid_parity_handle* parity_handle, block_off_t blockstart, block_off_t blockmax)
{
    struct snapraid_io io;
    struct snapraid_handle* handle;
    unsigned diskmax;
    block_off_t blockcur;
    unsigned j;
    unsigned buffermax;
    int ret;
    data_off_t countsize;
    block_off_t countpos;
    block_off_t countmax;
    unsigned error;
    unsigned io_error;
    unsigned l;
    unsigned* waiting_map;
    unsigned waiting_mac;
    char esc_buffer[ESC_MAX];

    handle = handle_mapping(state, &diskmax);

    /* we need 1 * data + 2 * parity */
    buffermax = diskmax + 2 * state->level;

    /* initialize the io threads */
    io_init(&io, state, state->opt.io_cache, buffermax, dry_data_reader, handle, diskmax, dry_parity_reader, 0, parity_handle, state->level);

    /* possibly waiting disks */
    waiting_mac = diskmax > RAID_PARITY_MAX ? diskmax : RAID_PARITY_MAX;
    waiting_map = malloc_nofail(waiting_mac * sizeof(unsigned));

    error = 0;
    io_error = 0;

    /* drop until now */
    state_usage_waste(state);

    countmax = blockmax - blockstart;
    countsize = 0;
    countpos = 0;

    /* start all the worker threads */
    io_start(&io, blockstart, blockmax, &block_is_enabled, 0);

    state_progress_begin(state, blockstart, blockmax, countmax);
    while (1) {
        void** buffer;

        /* go to the next block */
        blockcur = io_read_next(&io, &buffer);
        if (blockcur >= blockmax)
            break;

        /* until now is scheduling */
        state_usage_sched(state);

        /* for each disk, process the block */
        for (j = 0; j < diskmax; ++j) {
            struct snapraid_task* task;
            int read_size;
            struct snapraid_block* block;
            struct snapraid_disk* disk;
            unsigned diskcur;

            /* until now is misc */
            state_usage_misc(state);

            /* get the next task */
            task = io_data_read(&io, &diskcur, waiting_map, &waiting_mac);

            /* until now is disk */
            state_usage_disk(state, handle, waiting_map, waiting_mac);

            /* get the task results */
            disk = task->disk;
            block = task->block;
            read_size = task->read_size;

            /* if the disk position is not used */
            if (!disk)
                continue;

            /* if the block is not used */
            if (!block_has_file(block))
                continue;

            /* handle error conditions */
            if (task->state == TASK_STATE_IOERROR) {
                ++io_error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR) {
                ++error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR_CONTINUE) {
                ++error;
                continue;
            }
            if (task->state == TASK_STATE_IOERROR_CONTINUE) {
                ++io_error;
                if (io_error >= state->opt.io_error_limit) {
                    /* LCOV_EXCL_START */
                    log_fatal("DANGER! Too many input/output read errors in a data disk, it isn't possible to scrub.\n");
                    log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, task->path);
                    log_fatal("Stopping at block %u\n", blockcur);
                    goto bail;
                    /* LCOV_EXCL_STOP */
                }

                /* otherwise continue */
                continue;
            }
            if (task->state != TASK_STATE_DONE) {
                /* LCOV_EXCL_START */
                log_fatal("Internal inconsistency in task state\n");
                os_abort();
                /* LCOV_EXCL_STOP */
            }

            countsize += read_size;
        }

        /* until now is misc */
        state_usage_misc(state);

        /* read the parity */
        for (l = 0; l < state->level; ++l) {
            struct snapraid_task* task;
            unsigned levcur;

            task = io_parity_read(&io, &levcur, waiting_map, &waiting_mac);

            /* until now is parity */
            state_usage_parity(state, waiting_map, waiting_mac);

            /* handle error conditions */
            if (task->state == TASK_STATE_IOERROR) {
                ++io_error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR) {
                ++error;
                goto bail;
            }
            if (task->state == TASK_STATE_ERROR_CONTINUE) {
                ++error;
                continue;
            }
            if (task->state == TASK_STATE_IOERROR_CONTINUE) {
                ++io_error;
                if (io_error >= state->opt.io_error_limit) {
                    /* LCOV_EXCL_START */
                    log_fatal("DANGER! Too many input/output read errors in the %s disk, it isn't possible to scrub.\n", lev_name(levcur));
                    log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(levcur));
                    log_fatal("Stopping at block %u\n", blockcur);
                    goto bail;
                    /* LCOV_EXCL_STOP */
                }

                continue;
            }
            if (task->state != TASK_STATE_DONE) {
                /* LCOV_EXCL_START */
                log_fatal("Internal inconsistency in task state\n");
                os_abort();
                /* LCOV_EXCL_STOP */
            }
        }

        /* count the number of processed block */
        ++countpos;

        /* progress */
        if (state_progress(state, &io, blockcur, countpos, countmax, countsize)) {
            /* LCOV_EXCL_START */
            break;
            /* LCOV_EXCL_STOP */
        }
    }

    state_progress_end(state, countpos, countmax, countsize);

    state_usage_print(state);

bail:
    /* stop all the worker threads */
    io_stop(&io);

    for (j = 0; j < diskmax; ++j) {
        struct snapraid_file* file = handle[j].file;
        struct snapraid_disk* disk = handle[j].disk;
        ret = handle_close(&handle[j]);
        if (ret == -1) {
            /* LCOV_EXCL_START */
            log_tag("error:%u:%s:%s: Close error. %s\n", blockmax, disk->name, esc(file->sub, esc_buffer), strerror(errno));
            log_fatal("DANGER! Unexpected close error in a data disk.\n");
            ++error;
            /* continue, as we are already exiting */
            /* LCOV_EXCL_STOP */
        }
    }

    if (error || io_error) {
        msg_status("\n");
        msg_status("%8u file errors\n", error);
        msg_status("%8u io errors\n", io_error);
    } else {
        msg_status("Everything OK\n");
    }

    if (error)
        log_fatal("DANGER! Unexpected errors!\n");
    if (io_error)
        log_fatal("DANGER! Unexpected input/output errors!\n");

    free(handle);
    free(waiting_map);
    io_done(&io);

    if (error + io_error != 0)
        return -1;
    return 0;
}