Example #1
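/* Excerpt from a model driver. It assumes <mpi.h>, <stdio.h> and <time.h>, plus
 * project headers declaring init(), time_step(), state_write(), state_write_vort(),
 * error_check(), finalize(), the externally defined config_file_name and time_model,
 * and the __FLUID_STRLEN_MAX constant. */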
int main(int argc, char *argv[]) {
  int i, status;

  char file_name[__FLUID_STRLEN_MAX];

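  /* initialize the MPI environment */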
  MPI_Init(&argc, &argv);

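  /* optionally override the config file name (defined elsewhere in this program) */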
  if(argc > 1) {
    config_file_name = argv[1];
    printf("Using config file %s\n", config_file_name);
  }

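  /* initialize the model and bail out if the initialization failed */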
  status = init();
  error_check(&status, "error in init\n");
  if(status) return status;

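  /* time the main loop with the process CPU clock */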
  clock_t begin, end;
  double time_spent;

  begin = clock();

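  /* main time-stepping loop: the i%1 test writes restart and vorticity snapshots every step */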
  for(i = 0; i < 1000; i++) {
    if(i%1 == 0) {
      snprintf(file_name,__FLUID_STRLEN_MAX-1,"out/restart_%1.1lf.bin",time_model);
      state_write(file_name);
      snprintf(file_name,__FLUID_STRLEN_MAX-1,"out/vort_%1.1lf.bin",time_model);
      state_write_vort(file_name);
    }
    time_step();
  }

  end = clock();

  time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
  printf("Time Spent: %1.16lf\n", time_spent);


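  /* write the final restart and vorticity state */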
  snprintf(file_name,__FLUID_STRLEN_MAX-1,"out/restart_%1.1lf.bin",time_model);
  state_write(file_name);
  snprintf(file_name,__FLUID_STRLEN_MAX-1,"out/vort_%1.1lf.bin",time_model);
  state_write_vort(file_name);

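  /* wait for all ranks before cleaning up */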
  MPI_Barrier(MPI_COMM_WORLD);

  finalize();

  MPI_Finalize();

  return 0;
}
Example #2
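/* Assumes a 'view' struct with a count n and an array list, and a state_write()
 * variant that serializes one entry to an already open FILE stream. */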
void view_save(view *V, const char *name)
{
    FILE *fp;

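    /* write every entry of the view to the named file; do nothing if the file cannot be opened */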
    if ((fp = fopen(name, "w")))
    {
        int i;

        for (i = 0; i < V->n; ++i)
            state_write(fp, V->list + i);

        fclose(fp);
    }
}
Example #3
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity** parity, block_off_t blockstart, block_off_t blockmax, time_t timelimit, block_off_t countlimit, time_t now)
{
	struct snapraid_handle* handle;
	void* rehandle_alloc;
	struct snapraid_rehash* rehandle;
	unsigned diskmax;
	block_off_t i;
	unsigned j;
	void* buffer_alloc;
	void** buffer;
	unsigned buffermax;
	data_off_t countsize;
	block_off_t countpos;
	block_off_t countmax;
	block_off_t recountmax;
	block_off_t autosavedone;
	block_off_t autosavelimit;
	block_off_t autosavemissing;
	int ret;
	unsigned error;
	unsigned silent_error;
	unsigned l;

	/* maps the disks to handles */
	handle = handle_map(state, &diskmax);

	/* rehash buffers */
	rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc);

	/* we need one buffer per disk, plus 2 for each parity level */
	buffermax = diskmax + state->level * 2;

	buffer = malloc_nofail_vector_align(diskmax, buffermax, state->block_size, &buffer_alloc);
	if (!state->opt.skip_self)
		mtest_vector(buffermax, state->block_size, buffer);

	error = 0;
	silent_error = 0;

	/* first count the number of blocks to process */
	countmax = 0;
	for(i=blockstart;i<blockmax;++i) {
		time_t blocktime;
		snapraid_info info;

		/* if it's unused */
		info = info_get(&state->infoarr, i);
		if (info == 0) {
			/* skip it */
			continue;
		}

		/* blocks marked as bad are always checked */
		if (!info_get_bad(info)) {

			/* if it's too new */
			blocktime = info_get_time(info);
			if (blocktime > timelimit) {
				/* skip it */
				continue;
			}

			/* skip odd blocks, used only for testing */
			if (state->opt.force_scrub_even && (i % 2) != 0) {
				/* skip it */
				continue;
			}

			/* if the time is less than the limit, always include */
			/* otherwise, check if we reached the max count */
			if (blocktime == timelimit) {
				/* if we reached the count limit */
				if (countmax >= countlimit) {
					/* skip it */
					continue;
				}
			}
		}

		++countmax;
	}

	/* compute the autosave size for all disks, even if not read */
	/* this makes sense because the speed should be almost the same */
	/* if the disks are read in parallel */
	autosavelimit = state->autosave / (diskmax * state->block_size);
	autosavemissing = countmax; /* blocks to do */
	autosavedone = 0; /* blocks done */

	countsize = 0;
	countpos = 0;
	state_progress_begin(state, blockstart, blockmax, countmax);
	recountmax = 0;
	for(i=blockstart;i<blockmax;++i) {
		time_t blocktime;
		snapraid_info info;
		int error_on_this_block;
		int silent_error_on_this_block;
		int block_is_unsynced;
		int rehash;

		/* if it's unused */
		info = info_get(&state->infoarr, i);
		if (info == 0) {
			/* skip it */
			continue;
		}

		/* blocks marked as bad are always checked */
		if (!info_get_bad(info)) {

			/* if it's too new */
			blocktime = info_get_time(info);
			if (blocktime > timelimit) {
				/* skip it */
				continue;
			}

			/* skip odd blocks, used only for testing */
			if (state->opt.force_scrub_even && (i % 2) != 0) {
				/* skip it */
				continue;
			}

			/* if the time is less than the limit, always include */
			/* otherwise, check if we reached the max count */
			if (blocktime == timelimit) {
				/* if we reached the count limit */
				if (recountmax >= countlimit) {
					/* skip it */
					continue;
				}
			}
		}

		++recountmax;

		/* one more block processed for autosave */
		++autosavedone;
		--autosavemissing;

		/* by default process the block, and skip it if something goes wrong */
		error_on_this_block = 0;
		silent_error_on_this_block = 0;

		/* if all the blocks at this address are synced */
		/* if not, parity is not even checked */
		block_is_unsynced = 0;

		/* if we have to use the old hash */
		rehash = info_get_rehash(info);

		/* for each disk, process the block */
		for(j=0;j<diskmax;++j) {
			int read_size;
			unsigned char hash[HASH_SIZE];
			struct snapraid_block* block;
			int file_is_unsynced;

			/* if the file on this disk is synced */
			/* if not, silent errors are assumed to be expected errors */
			file_is_unsynced = 0;

			/* by default no rehash in case of "continue" */
			rehandle[j].block = 0;

			/* if the disk position is not used */
			if (!handle[j].disk) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* if the block is not used */
			block = disk_block_get(handle[j].disk, i);
			if (!block_has_file(block)) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* if the block is unsynced, errors are expected */
			if (block_has_invalid_parity(block)) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;

				/* follow */
			}

			/* if the file is different than the current one, close it */
			if (handle[j].file != 0 && handle[j].file != block_file_get(block)) {
				/* keep a pointer at the file we are going to close for error reporting */
				struct snapraid_file* file = handle[j].file;
				ret = handle_close(&handle[j]);
				if (ret == -1) {
					/* LCOV_EXCL_START */
					/* This one is really an unexpected error, because we are only reading */
					/* and closing a descriptor should never fail */
					fprintf(stdlog, "error:%u:%s:%s: Close error. %s\n", i, handle[j].disk->name, file->sub, strerror(errno));
					fprintf(stderr, "DANGER! Unexpected close error in a data disk, it isn't possible to scrub.\n");
					printf("Stopping at block %u\n", i);
					++error;
					goto bail;
					/* LCOV_EXCL_STOP */
				}
			}

			ret = handle_open(&handle[j], block_file_get(block), state->opt.skip_sequential, stderr);
			if (ret == -1) {
				/* file we have tried to open for error reporting */
				struct snapraid_file* file = block_file_get(block);
				fprintf(stdlog, "error:%u:%s:%s: Open error. %s\n", i, handle[j].disk->name, file->sub, strerror(errno));
				++error;
				error_on_this_block = 1;
				continue;
			}

			/* check if the file is changed */
			if (handle[j].st.st_size != block_file_get(block)->size
				|| handle[j].st.st_mtime != block_file_get(block)->mtime_sec
				|| STAT_NSEC(&handle[j].st) != block_file_get(block)->mtime_nsec
				|| handle[j].st.st_ino != block_file_get(block)->inode
			) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;

				/* follow */
			}

			/* note that we intentionally don't abort if the file has different attributes */
			/* from the last sync, as we are expected to return errors if running */
			/* in an unsynced array. This is just like the check command. */

			read_size = handle_read(&handle[j], block, buffer[j], state->block_size, stderr);
			if (read_size == -1) {
				fprintf(stdlog, "error:%u:%s:%s: Read error at position %u\n", i, handle[j].disk->name, handle[j].file->sub, block_file_pos(block));
				++error;
				error_on_this_block = 1;
				continue;
			}

			countsize += read_size;

			/* now compute the hash */
			if (rehash) {
				memhash(state->prevhash, state->prevhashseed, hash, buffer[j], read_size);

				/* compute the new hash, and store it */
				rehandle[j].block = block;
				memhash(state->hash, state->hashseed, rehandle[j].hash, buffer[j], read_size);
			} else {
				memhash(state->hash, state->hashseed, hash, buffer[j], read_size);
			}

			if (block_has_updated_hash(block)) {
				/* compare the hash */
				if (memcmp(hash, block->hash, HASH_SIZE) != 0) {
					fprintf(stdlog, "error:%u:%s:%s: Data error at position %u\n", i, handle[j].disk->name, handle[j].file->sub, block_file_pos(block));

					/* it's a silent error only if we are dealing with synced files */
					if (file_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						fprintf(stderr, "Data error in file '%s' at position '%u'\n", handle[j].path, block_file_pos(block));
						fprintf(stderr, "WARNING! Unexpected data error in a data disk! The block is now marked as bad!\n");
						fprintf(stderr, "Try with 'snapraid -e fix' to recover!\n");

						++silent_error;
						silent_error_on_this_block = 1;
					}
					continue;
				}
			}
		}

		/* if we have read all the data required and it's correct, proceed with the parity check */
		if (!error_on_this_block && !silent_error_on_this_block) {
			unsigned char* buffer_recov[LEV_MAX];

			/* buffers for parity read and not computed */
			for(l=0;l<state->level;++l)
				buffer_recov[l] = buffer[diskmax + state->level + l];
			for(;l<LEV_MAX;++l)
				buffer_recov[l] = 0;

			/* read the parity */
			for(l=0;l<state->level;++l) {
				ret = parity_read(parity[l], i, buffer_recov[l], state->block_size, stdlog);
				if (ret == -1) {
					buffer_recov[l] = 0;
					fprintf(stdlog, "parity_error:%u:%s: Read error\n", i, lev_config_name(l));
					++error;
					error_on_this_block = 1;

					/* follow */
				}
			}

			/* compute the parity */
			raid_gen(diskmax, state->level, state->block_size, buffer);

			/* compare the parity */
			for(l=0;l<state->level;++l) {
				if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) {
					fprintf(stdlog, "parity_error:%u:%s: Data error\n", i, lev_config_name(l));

					/* it's a silent error only if we are dealing with synced blocks */
					if (block_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						fprintf(stderr, "Data error in parity '%s' at position '%u'\n", lev_config_name(l), i);
						fprintf(stderr, "WARNING! Unexpected data error in a parity disk! The block is now marked as bad!\n");
						fprintf(stderr, "Try with 'snapraid -e fix' to recover!\n");

						++silent_error;
						silent_error_on_this_block = 1;
					}
				}
			}
		}

		if (silent_error_on_this_block) {
			/* set the error status keeping the existing time and hash */
			info_set(&state->infoarr, i, info_set_bad(info));
		} else if (error_on_this_block) {
			/* do nothing, as this is a generic error */
			/* likely caused by a not synced array */
		} else {
			/* if rehash is needed */
			if (rehash) {
				/* store all the new hash already computed */
				for(j=0;j<diskmax;++j) {
					if (rehandle[j].block)
						memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE);
				}
			}

			/* update the time info of the block */
			/* and clear any other flag */
			info_set(&state->infoarr, i, info_make(now, 0, 0));
		}

		/* mark the state as needing write */
		state->need_write = 1;

		/* count the number of processed blocks */
		++countpos;

		/* progress */
		if (state_progress(state, i, countpos, countmax, countsize)) {
			/* LCOV_EXCL_START */
			break;
			/* LCOV_EXCL_STOP */
		}

		/* autosave */
		if (state->autosave != 0
			&& autosavedone >= autosavelimit /* if we have reached the limit */
			&& autosavemissing >= autosavelimit /* if we have at least a full step to do */
		) {
			autosavedone = 0; /* restart the counter */

			state_progress_stop(state);

			printf("Autosaving...\n");
			state_write(state);

			state_progress_restart(state);
		}
	}

	state_progress_end(state, countpos, countmax, countsize);

	if (error || silent_error) {
		printf("\n");
		printf("%8u read errors\n", error);
		printf("%8u data errors\n", silent_error);
		printf("WARNING! There are errors!\n");
	} else {
		/* print the result only if processed something */
		if (countpos != 0)
			printf("Everything OK\n");
	}

	fprintf(stdlog, "summary:error_read:%u\n", error);
	fprintf(stdlog, "summary:error_data:%u\n", silent_error);
	if (error + silent_error == 0)
		fprintf(stdlog, "summary:exit:ok\n");
	else
		fprintf(stdlog, "summary:exit:error\n");
	fflush(stdlog);

bail:
	for(j=0;j<diskmax;++j) {
		ret = handle_close(&handle[j]);
		if (ret == -1) {
			/* LCOV_EXCL_START */
			fprintf(stderr, "DANGER! Unexpected close error in a data disk.\n");
			++error;
			/* continue, as we are already exiting */
			/* LCOV_EXCL_STOP */
		}
	}

	free(handle);
	free(buffer_alloc);
	free(buffer);
	free(rehandle_alloc);

	if (state->opt.expect_recoverable) {
		if (error + silent_error == 0)
			return -1;
	} else {
		if (error + silent_error != 0)
			return -1;
	}
	return 0;
}
Example #4
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity_handle* parity_handle, block_off_t blockstart, block_off_t blockmax, struct snapraid_plan* plan, time_t now)
{
	struct snapraid_io io;
	struct snapraid_handle* handle;
	void* rehandle_alloc;
	struct snapraid_rehash* rehandle;
	unsigned diskmax;
	block_off_t blockcur;
	unsigned j;
	unsigned buffermax;
	data_off_t countsize;
	block_off_t countpos;
	block_off_t countmax;
	block_off_t autosavedone;
	block_off_t autosavelimit;
	block_off_t autosavemissing;
	int ret;
	unsigned error;
	unsigned silent_error;
	unsigned io_error;
	unsigned l;
	unsigned* waiting_map;
	unsigned waiting_mac;
	char esc_buffer[ESC_MAX];

	/* maps the disks to handles */
	handle = handle_mapping(state, &diskmax);

	/* rehash buffers */
	rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc);

	/* we need 1 * data + 2 * parity */
	buffermax = diskmax + 2 * state->level;

	/* initialize the io threads */
	io_init(&io, state, state->opt.io_cache, buffermax, scrub_data_reader, handle, diskmax, scrub_parity_reader, 0, parity_handle, state->level);

	/* possibly waiting disks */
	waiting_mac = diskmax > RAID_PARITY_MAX ? diskmax : RAID_PARITY_MAX;
	waiting_map = malloc_nofail(waiting_mac * sizeof(unsigned));

	error = 0;
	silent_error = 0;
	io_error = 0;

	/* first count the number of blocks to process */
	countmax = 0;
	plan->countlast = 0;
	for (blockcur = blockstart; blockcur < blockmax; ++blockcur) {
		if (!block_is_enabled(plan, blockcur))
			continue;
		++countmax;
	}

	/* compute the autosave size for all disks, even if not read */
	/* this makes sense because the speed should be almost the same */
	/* if the disks are read in parallel */
	autosavelimit = state->autosave / (diskmax * state->block_size);
	autosavemissing = countmax; /* blocks to do */
	autosavedone = 0; /* blocks done */

	/* drop until now */
	state_usage_waste(state);

	countsize = 0;
	countpos = 0;
	plan->countlast = 0;

	/* start all the worker threads */
	io_start(&io, blockstart, blockmax, &block_is_enabled, plan);

	state_progress_begin(state, blockstart, blockmax, countmax);
	while (1) {
		unsigned char* buffer_recov[LEV_MAX];
		snapraid_info info;
		int error_on_this_block;
		int silent_error_on_this_block;
		int io_error_on_this_block;
		int block_is_unsynced;
		int rehash;
		void** buffer;

		/* go to the next block */
		blockcur = io_read_next(&io, &buffer);
		if (blockcur >= blockmax)
			break;

		/* until now is scheduling */
		state_usage_sched(state);

		/* one more block processed for autosave */
		++autosavedone;
		--autosavemissing;

		/* by default process the block, and skip it if something goes wrong */
		error_on_this_block = 0;
		silent_error_on_this_block = 0;
		io_error_on_this_block = 0;

		/* if all the blocks at this address are synced */
		/* if not, parity is not even checked */
		block_is_unsynced = 0;

		/* get block specific info */
		info = info_get(&state->infoarr, blockcur);

		/* if we have to use the old hash */
		rehash = info_get_rehash(info);

		/* for each disk, process the block */
		for (j = 0; j < diskmax; ++j) {
			struct snapraid_task* task;
			int read_size;
			unsigned char hash[HASH_SIZE];
			struct snapraid_block* block;
			int file_is_unsynced;
			struct snapraid_disk* disk;
			struct snapraid_file* file;
			block_off_t file_pos;
			unsigned diskcur;

			/* if the file on this disk is synced */
			/* if not, silent errors are assumed to be expected errors */
			file_is_unsynced = 0;

			/* until now is misc */
			state_usage_misc(state);

			/* get the next task */
			task = io_data_read(&io, &diskcur, waiting_map, &waiting_mac);

			/* until now is disk */
			state_usage_disk(state, handle, waiting_map, waiting_mac);

			/* get the task results */
			disk = task->disk;
			block = task->block;
			file = task->file;
			file_pos = task->file_pos;
			read_size = task->read_size;

			/* by default no rehash in case of "continue" */
			rehandle[diskcur].block = 0;

			/* if the disk position is not used */
			if (!disk)
				continue;

			/* if the block is unsynced, errors are expected */
			if (block_has_invalid_parity(block)) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* if the block is not used */
			if (!block_has_file(block))
				continue;

			/* if the block is unsynced, errors are expected */
			if (task->is_timestamp_different) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* handle error conditions */
			if (task->state == TASK_STATE_IOERROR) {
				++io_error;
				goto bail;
			}
			if (task->state == TASK_STATE_ERROR) {
				++error;
				goto bail;
			}
			if (task->state == TASK_STATE_ERROR_CONTINUE) {
				++error;
				error_on_this_block = 1;
				continue;
			}
			if (task->state == TASK_STATE_IOERROR_CONTINUE) {
				++io_error;
				if (io_error >= state->opt.io_error_limit) {
					/* LCOV_EXCL_START */
					log_fatal("DANGER! Too many input/output read error in a data disk, it isn't possible to scrub.\n");
					log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, task->path);
					log_fatal("Stopping at block %u\n", blockcur);
					goto bail;
					/* LCOV_EXCL_STOP */
				}

				/* otherwise continue */
				io_error_on_this_block = 1;
				continue;
			}
			if (task->state != TASK_STATE_DONE) {
				/* LCOV_EXCL_START */
				log_fatal("Internal inconsistency in task state\n");
				os_abort();
				/* LCOV_EXCL_STOP */
			}

			countsize += read_size;

			/* now compute the hash */
			if (rehash) {
				memhash(state->prevhash, state->prevhashseed, hash, buffer[diskcur], read_size);

				/* compute the new hash, and store it */
				rehandle[diskcur].block = block;
				memhash(state->hash, state->hashseed, rehandle[diskcur].hash, buffer[diskcur], read_size);
			} else {
				memhash(state->hash, state->hashseed, hash, buffer[diskcur], read_size);
			}

			/* until now is hash */
			state_usage_hash(state);

			if (block_has_updated_hash(block)) {
				/* compare the hash */
				if (memcmp(hash, block->hash, HASH_SIZE) != 0) {
					unsigned diff = memdiff(hash, block->hash, HASH_SIZE);

					log_tag("error:%u:%s:%s: Data error at position %u, diff bits %u\n", blockcur, disk->name, esc(file->sub, esc_buffer), file_pos, diff);

					/* it's a silent error only if we are dealing with synced files */
					if (file_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_error("Data error in file '%s' at position '%u', diff bits %u\n", task->path, file_pos, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
					continue;
				}
			}
		}

		/* buffers for parity read and not computed */
		for (l = 0; l < state->level; ++l)
			buffer_recov[l] = buffer[diskmax + state->level + l];
		for (; l < LEV_MAX; ++l)
			buffer_recov[l] = 0;

		/* until now is misc */
		state_usage_misc(state);

		/* read the parity */
		for (l = 0; l < state->level; ++l) {
			struct snapraid_task* task;
			unsigned levcur;

			task = io_parity_read(&io, &levcur, waiting_map, &waiting_mac);

			/* until now is parity */
			state_usage_parity(state, waiting_map, waiting_mac);

			/* handle error conditions */
			if (task->state == TASK_STATE_IOERROR) {
				++io_error;
				goto bail;
			}
			if (task->state == TASK_STATE_ERROR) {
				++error;
				goto bail;
			}
			if (task->state == TASK_STATE_ERROR_CONTINUE) {
				++error;
				error_on_this_block = 1;

				/* if continuing on error, clear the missing buffer */
				buffer_recov[levcur] = 0;
				continue;
			}
			if (task->state == TASK_STATE_IOERROR_CONTINUE) {
				++io_error;
				if (io_error >= state->opt.io_error_limit) {
					/* LCOV_EXCL_START */
					log_fatal("DANGER! Too many input/output read error in the %s disk, it isn't possible to scrub.\n", lev_name(levcur));
					log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(levcur));
					log_fatal("Stopping at block %u\n", blockcur);
					goto bail;
					/* LCOV_EXCL_STOP */
				}

				/* otherwise continue */
				io_error_on_this_block = 1;

				/* if continuing on error, clear the missing buffer */
				buffer_recov[levcur] = 0;
				continue;
			}
			if (task->state != TASK_STATE_DONE) {
				/* LCOV_EXCL_START */
				log_fatal("Internal inconsistency in task state\n");
				os_abort();
				/* LCOV_EXCL_STOP */
			}
		}

		/* if we have read all the data required and it's correct, proceed with the parity check */
		if (!error_on_this_block && !silent_error_on_this_block && !io_error_on_this_block) {

			/* compute the parity */
			raid_gen(diskmax, state->level, state->block_size, buffer);

			/* compare the parity */
			for (l = 0; l < state->level; ++l) {
				if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) {
					unsigned diff = memdiff(buffer[diskmax + l], buffer_recov[l], state->block_size);

					log_tag("parity_error:%u:%s: Data error, diff bits %u\n", blockcur, lev_config_name(l), diff);

					/* it's a silent error only if we are dealing with synced blocks */
					if (block_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_fatal("Data error in parity '%s' at position '%u', diff bits %u\n", lev_config_name(l), blockcur, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
				}
			}

			/* until now is raid */
			state_usage_raid(state);
		}

		if (silent_error_on_this_block || io_error_on_this_block) {
			/* set the error status keeping other info */
			info_set(&state->infoarr, blockcur, info_set_bad(info));
		} else if (error_on_this_block) {
			/* do nothing, as this is a generic error */
			/* likely caused by a not synced array */
		} else {
			/* if rehash is needed */
			if (rehash) {
				/* store all the new hash already computed */
				for (j = 0; j < diskmax; ++j) {
					if (rehandle[j].block)
						memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE);
				}
			}

			/* update the time info of the block */
			/* and clear any other flag */
			info_set(&state->infoarr, blockcur, info_make(now, 0, 0, 0));
		}

		/* mark the state as needing write */
		state->need_write = 1;

		/* count the number of processed blocks */
		++countpos;

		/* progress */
		if (state_progress(state, &io, blockcur, countpos, countmax, countsize)) {
			/* LCOV_EXCL_START */
			break;
			/* LCOV_EXCL_STOP */
		}

		/* autosave */
		if (state->autosave != 0
			&& autosavedone >= autosavelimit /* if we have reached the limit */
			&& autosavemissing >= autosavelimit /* if we have at least a full step to do */
		) {
			autosavedone = 0; /* restart the counter */

			/* until now is misc */
			state_usage_misc(state);

			state_progress_stop(state);

			msg_progress("Autosaving...\n");
			state_write(state);

			state_progress_restart(state);

			/* drop until now */
			state_usage_waste(state);
		}
	}

	state_progress_end(state, countpos, countmax, countsize);

	state_usage_print(state);

	if (error || silent_error || io_error) {
		msg_status("\n");
		msg_status("%8u file errors\n", error);
		msg_status("%8u io errors\n", io_error);
		msg_status("%8u data errors\n", silent_error);
	} else {
		/* print the result only if processed something */
		if (countpos != 0)
			msg_status("Everything OK\n");
	}

	if (error)
		log_fatal("WARNING! Unexpected file errors!\n");
	if (io_error)
		log_fatal("DANGER! Unexpected input/output errors! The failing blocks are now marked as bad!\n");
	if (silent_error)
		log_fatal("DANGER! Unexpected data errors! The failing blocks are now marked as bad!\n");
	if (io_error || silent_error) {
		log_fatal("Use 'snapraid status' to list the bad blocks.\n");
		log_fatal("Use 'snapraid -e fix' to recover.\n");
	}

	log_tag("summary:error_file:%u\n", error);
	log_tag("summary:error_io:%u\n", io_error);
	log_tag("summary:error_data:%u\n", silent_error);
	if (error + silent_error + io_error == 0)
		log_tag("summary:exit:ok\n");
	else
		log_tag("summary:exit:error\n");
	log_flush();

bail:
	/* stop all the worker threads */
	io_stop(&io);

	for (j = 0; j < diskmax; ++j) {
		struct snapraid_file* file = handle[j].file;
		struct snapraid_disk* disk = handle[j].disk;
		ret = handle_close(&handle[j]);
		if (ret == -1) {
			/* LCOV_EXCL_START */
			log_tag("error:%u:%s:%s: Close error. %s\n", blockcur, disk->name, esc(file->sub, esc_buffer), strerror(errno));
			log_fatal("DANGER! Unexpected close error in a data disk.\n");
			++error;
			/* continue, as we are already exiting */
			/* LCOV_EXCL_STOP */
		}
	}

	free(handle);
	free(rehandle_alloc);
	free(waiting_map);
	io_done(&io);

	if (state->opt.expect_recoverable) {
		if (error + silent_error + io_error == 0)
			return -1;
	} else {
		if (error + silent_error + io_error != 0)
			return -1;
	}
	return 0;
}
Example #5
static int state_scrub_process(struct snapraid_state* state, struct snapraid_parity_handle** parity, block_off_t blockstart, block_off_t blockmax, struct snapraid_plan* plan, time_t now)
{
	struct snapraid_handle* handle;
	void* rehandle_alloc;
	struct snapraid_rehash* rehandle;
	unsigned diskmax;
	block_off_t i;
	unsigned j;
	void* buffer_alloc;
	void** buffer;
	unsigned buffermax;
	data_off_t countsize;
	block_off_t countpos;
	block_off_t countmax;
	block_off_t autosavedone;
	block_off_t autosavelimit;
	block_off_t autosavemissing;
	int ret;
	unsigned error;
	unsigned silent_error;
	unsigned io_error;
	unsigned l;

	/* maps the disks to handles */
	handle = handle_map(state, &diskmax);

	/* rehash buffers */
	rehandle = malloc_nofail_align(diskmax * sizeof(struct snapraid_rehash), &rehandle_alloc);

	/* we need one buffer per disk, plus 2 for each parity level */
	buffermax = diskmax + state->level * 2;

	buffer = malloc_nofail_vector_align(diskmax, buffermax, state->block_size, &buffer_alloc);
	if (!state->opt.skip_self)
		mtest_vector(buffermax, state->block_size, buffer);

	error = 0;
	silent_error = 0;
	io_error = 0;

	/* first count the number of blocks to process */
	countmax = 0;
	plan->countlast = 0;
	for (i = blockstart; i < blockmax; ++i) {
		if (!block_is_enabled(state, i, plan))
			continue;

		++countmax;
	}

	/* compute the autosave size for all disks, even if not read */
	/* this makes sense because the speed should be almost the same */
	/* if the disks are read in parallel */
	autosavelimit = state->autosave / (diskmax * state->block_size);
	autosavemissing = countmax; /* blocks to do */
	autosavedone = 0; /* blocks done */

	/* drop until now */
	state_usage_waste(state);

	countsize = 0;
	countpos = 0;
	plan->countlast = 0;
	state_progress_begin(state, blockstart, blockmax, countmax);
	for (i = blockstart; i < blockmax; ++i) {
		snapraid_info info;
		int error_on_this_block;
		int silent_error_on_this_block;
		int io_error_on_this_block;
		int block_is_unsynced;
		int rehash;

		if (!block_is_enabled(state, i, plan))
			continue;

		/* one more block processed for autosave */
		++autosavedone;
		--autosavemissing;

		/* by default process the block, and skip it if something goes wrong */
		error_on_this_block = 0;
		silent_error_on_this_block = 0;
		io_error_on_this_block = 0;

		/* if all the blocks at this address are synced */
		/* if not, parity is not even checked */
		block_is_unsynced = 0;

		/* get block specific info */
		info = info_get(&state->infoarr, i);

		/* if we have to use the old hash */
		rehash = info_get_rehash(info);

		/* for each disk, process the block */
		for (j = 0; j < diskmax; ++j) {
			int read_size;
			unsigned char hash[HASH_SIZE];
			struct snapraid_block* block;
			int file_is_unsynced;
			struct snapraid_disk* disk = handle[j].disk;
			struct snapraid_file* file;
			block_off_t file_pos;

			/* if the file on this disk is synced */
			/* if not, silent errors are assumed to be expected errors */
			file_is_unsynced = 0;

			/* by default no rehash in case of "continue" */
			rehandle[j].block = 0;

			/* if the disk position is not used */
			if (!disk) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* if the block is not used */
			block = fs_par2block_get(disk, i);
			if (!block_has_file(block)) {
				/* use an empty block */
				memset(buffer[j], 0, state->block_size);
				continue;
			}

			/* get the file of this block */
			file = fs_par2file_get(disk, i, &file_pos);

			/* if the block is unsynced, errors are expected */
			if (block_has_invalid_parity(block)) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* until now is CPU */
			state_usage_cpu(state);

			/* if the file is different than the current one, close it */
			if (handle[j].file != 0 && handle[j].file != file) {
				/* keep a pointer at the file we are going to close for error reporting */
				struct snapraid_file* report = handle[j].file;
				ret = handle_close(&handle[j]);
				if (ret == -1) {
					/* LCOV_EXCL_START */
					/* This one is really an unexpected error, because we are only reading */
					/* and closing a descriptor should never fail */
					if (errno == EIO) {
						log_tag("error:%u:%s:%s: Close EIO error. %s\n", i, disk->name, esc(report->sub), strerror(errno));
						log_fatal("DANGER! Unexpected input/output close error in a data disk, it isn't possible to scrub.\n");
						log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
						log_fatal("Stopping at block %u\n", i);
						++io_error;
						goto bail;
					}

					log_tag("error:%u:%s:%s: Close error. %s\n", i, disk->name, esc(report->sub), strerror(errno));
					log_fatal("WARNING! Unexpected close error in a data disk, it isn't possible to scrub.\n");
					log_fatal("Ensure that file '%s' can be accessed.\n", handle[j].path);
					log_fatal("Stopping at block %u\n", i);
					++error;
					goto bail;
					/* LCOV_EXCL_STOP */
				}
			}

			ret = handle_open(&handle[j], file, state->file_mode, log_error, 0);
			if (ret == -1) {
				if (errno == EIO) {
					/* LCOV_EXCL_START */
					log_tag("error:%u:%s:%s: Open EIO error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
					log_fatal("DANGER! Unexpected input/output open error in a data disk, it isn't possible to scrub.\n");
					log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
					log_fatal("Stopping at block %u\n", i);
					++io_error;
					goto bail;
					/* LCOV_EXCL_STOP */
				}

				log_tag("error:%u:%s:%s: Open error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
				++error;
				error_on_this_block = 1;
				continue;
			}

			/* check if the file is changed */
			if (handle[j].st.st_size != file->size
				|| handle[j].st.st_mtime != file->mtime_sec
				|| STAT_NSEC(&handle[j].st) != file->mtime_nsec
				/* don't check the inode to support filesystems without persistent inodes */
			) {
				/* report that the block and the file are not synced */
				block_is_unsynced = 1;
				file_is_unsynced = 1;
				/* follow */
			}

			/* note that we intentionally don't abort if the file has different attributes */
			/* from the last sync, as we are expected to return errors if running */
			/* in an unsynced array. This is just like the check command. */

			read_size = handle_read(&handle[j], file_pos, buffer[j], state->block_size, log_error, 0);
			if (read_size == -1) {
				if (errno == EIO) {
					log_tag("error:%u:%s:%s: Read EIO error at position %u. %s\n", i, disk->name, esc(file->sub), file_pos, strerror(errno));
					if (io_error >= state->opt.io_error_limit) {
						/* LCOV_EXCL_START */
						log_fatal("DANGER! Too many input/output read error in a data disk, it isn't possible to scrub.\n");
						log_fatal("Ensure that disk '%s' is sane and that file '%s' can be accessed.\n", disk->dir, handle[j].path);
						log_fatal("Stopping at block %u\n", i);
						++io_error;
						goto bail;
						/* LCOV_EXCL_STOP */
					}

					log_error("Input/Output error in file '%s' at position '%u'\n", handle[j].path, file_pos);
					++io_error;
					io_error_on_this_block = 1;
					continue;
				}

				log_tag("error:%u:%s:%s: Read error at position %u. %s\n", i, disk->name, esc(file->sub), file_pos, strerror(errno));
				++error;
				error_on_this_block = 1;
				continue;
			}

			/* until now is disk */
			state_usage_disk(state, disk);

			countsize += read_size;

			/* now compute the hash */
			if (rehash) {
				memhash(state->prevhash, state->prevhashseed, hash, buffer[j], read_size);

				/* compute the new hash, and store it */
				rehandle[j].block = block;
				memhash(state->hash, state->hashseed, rehandle[j].hash, buffer[j], read_size);
			} else {
				memhash(state->hash, state->hashseed, hash, buffer[j], read_size);
			}

			if (block_has_updated_hash(block)) {
				/* compare the hash */
				if (memcmp(hash, block->hash, HASH_SIZE) != 0) {
					unsigned diff = memdiff(hash, block->hash, HASH_SIZE);

					log_tag("error:%u:%s:%s: Data error at position %u, diff bits %u\n", i, disk->name, esc(file->sub), file_pos, diff);

					/* it's a silent error only if we are dealing with synced files */
					if (file_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_error("Data error in file '%s' at position '%u', diff bits %u\n", handle[j].path, file_pos, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
					continue;
				}
			}
		}

		/* if we have read all the data required and it's correct, proceed with the parity check */
		if (!error_on_this_block && !silent_error_on_this_block && !io_error_on_this_block) {
			unsigned char* buffer_recov[LEV_MAX];

			/* until now is CPU */
			state_usage_cpu(state);

			/* buffers for parity read and not computed */
			for (l = 0; l < state->level; ++l)
				buffer_recov[l] = buffer[diskmax + state->level + l];
			for (; l < LEV_MAX; ++l)
				buffer_recov[l] = 0;

			/* read the parity */
			for (l = 0; l < state->level; ++l) {
				ret = parity_read(parity[l], i, buffer_recov[l], state->block_size, log_error);
				if (ret == -1) {
					buffer_recov[l] = 0;

					if (errno == EIO) {
						log_tag("parity_error:%u:%s: Read EIO error. %s\n", i, lev_config_name(l), strerror(errno));
						if (io_error >= state->opt.io_error_limit) {
							/* LCOV_EXCL_START */
							log_fatal("DANGER! Too many input/output read error in the %s disk, it isn't possible to scrub.\n", lev_name(l));
							log_fatal("Ensure that disk '%s' is sane and can be read.\n", lev_config_name(l));
							log_fatal("Stopping at block %u\n", i);
							++io_error;
							goto bail;
							/* LCOV_EXCL_STOP */
						}

						log_error("Input/Output error in parity '%s' at position '%u'\n", lev_config_name(l), i);
						++io_error;
						io_error_on_this_block = 1;
						continue;
					}

					log_tag("parity_error:%u:%s: Read error. %s\n", i, lev_config_name(l), strerror(errno));
					++error;
					error_on_this_block = 1;
					continue;
				}

				/* until now is parity */
				state_usage_parity(state, l);
			}

			/* compute the parity */
			raid_gen(diskmax, state->level, state->block_size, buffer);

			/* compare the parity */
			for (l = 0; l < state->level; ++l) {
				if (buffer_recov[l] && memcmp(buffer[diskmax + l], buffer_recov[l], state->block_size) != 0) {
					unsigned diff = memdiff(buffer[diskmax + l], buffer_recov[l], state->block_size);

					log_tag("parity_error:%u:%s: Data error, diff bits %u\n", i, lev_config_name(l), diff);

					/* it's a silent error only if we are dealing with synced blocks */
					if (block_is_unsynced) {
						++error;
						error_on_this_block = 1;
					} else {
						log_fatal("Data error in parity '%s' at position '%u', diff bits %u\n", lev_config_name(l), i, diff);
						++silent_error;
						silent_error_on_this_block = 1;
					}
				}
			}
		}

		if (silent_error_on_this_block || io_error_on_this_block) {
			/* set the error status keeping other info */
			info_set(&state->infoarr, i, info_set_bad(info));
		} else if (error_on_this_block) {
			/* do nothing, as this is a generic error */
			/* likely caused by a not synced array */
		} else {
			/* if rehash is needed */
			if (rehash) {
				/* store all the new hash already computed */
				for (j = 0; j < diskmax; ++j) {
					if (rehandle[j].block)
						memcpy(rehandle[j].block->hash, rehandle[j].hash, HASH_SIZE);
				}
			}

			/* update the time info of the block */
			/* and clear any other flag */
			info_set(&state->infoarr, i, info_make(now, 0, 0, 0));
		}

		/* mark the state as needing write */
		state->need_write = 1;

		/* count the number of processed blocks */
		++countpos;

		/* progress */
		if (state_progress(state, i, countpos, countmax, countsize)) {
			/* LCOV_EXCL_START */
			break;
			/* LCOV_EXCL_STOP */
		}

		/* autosave */
		if (state->autosave != 0
			&& autosavedone >= autosavelimit /* if we have reached the limit */
			&& autosavemissing >= autosavelimit /* if we have at least a full step to do */
		) {
			autosavedone = 0; /* restart the counter */

			/* until now is CPU */
			state_usage_cpu(state);

			state_progress_stop(state);

			msg_progress("Autosaving...\n");
			state_write(state);

			state_progress_restart(state);

			/* drop until now */
			state_usage_waste(state);
		}
	}

	state_progress_end(state, countpos, countmax, countsize);

	state_usage_print(state);

	if (error || silent_error || io_error) {
		msg_status("\n");
		msg_status("%8u file errors\n", error);
		msg_status("%8u io errors\n", io_error);
		msg_status("%8u data errors\n", silent_error);
	} else {
		/* print the result only if processed something */
		if (countpos != 0)
			msg_status("Everything OK\n");
	}

	if (error)
		log_fatal("WARNING! Unexpected file errors!\n");
	if (io_error)
		log_fatal("DANGER! Unexpected input/output errors! The failing blocks are now marked as bad!\n");
	if (silent_error)
		log_fatal("DANGER! Unexpected data errors! The failing blocks are now marked as bad!\n");
	if (io_error || silent_error) {
		log_fatal("Use 'snapraid status' to list the bad blocks.\n");
		log_fatal("Use 'snapraid -e fix' to recover.\n");
	}

	log_tag("summary:error_file:%u\n", error);
	log_tag("summary:error_io:%u\n", io_error);
	log_tag("summary:error_data:%u\n", silent_error);
	if (error + silent_error + io_error == 0)
		log_tag("summary:exit:ok\n");
	else
		log_tag("summary:exit:error\n");
	log_flush();

bail:
	for (j = 0; j < diskmax; ++j) {
		struct snapraid_file* file = handle[j].file;
		struct snapraid_disk* disk = handle[j].disk;
		ret = handle_close(&handle[j]);
		if (ret == -1) {
			/* LCOV_EXCL_START */
			log_tag("error:%u:%s:%s: Close error. %s\n", i, disk->name, esc(file->sub), strerror(errno));
			log_fatal("DANGER! Unexpected close error in a data disk.\n");
			++error;
			/* continue, as we are already exiting */
			/* LCOV_EXCL_STOP */
		}
	}

	free(handle);
	free(buffer_alloc);
	free(buffer);
	free(rehandle_alloc);

	if (state->opt.expect_recoverable) {
		if (error + silent_error + io_error == 0)
			return -1;
	} else {
		if (error + silent_error + io_error != 0)
			return -1;
	}
	return 0;
}
Example #6
int main(int argc, char* argv[])
{
	int c;
	struct snapraid_option opt;
	char conf[PATH_MAX];
	struct snapraid_state state;
	int operation;
	block_off_t blockstart;
	block_off_t blockcount;
	int ret;
	tommy_list filterlist_file;
	tommy_list filterlist_disk;
	int filter_missing;
	int filter_error;
	int plan;
	int olderthan;
	char* e;
	const char* command;
	const char* import_timestamp;
	const char* import_content;
	const char* log_file;
	int lock;
	const char* gen_conf;
	const char* run;
	int speedtest;
	int period;
	time_t t;
	struct tm* tm;
	int i;

	/* defaults */
	config(conf, sizeof(conf), argv[0]);
	memset(&opt, 0, sizeof(opt));
	opt.io_error_limit = 100;
	blockstart = 0;
	blockcount = 0;
	tommy_list_init(&filterlist_file);
	tommy_list_init(&filterlist_disk);
	period = 1000;
	filter_missing = 0;
	filter_error = 0;
	plan = SCRUB_AUTO;
	olderthan = SCRUB_AUTO;
	import_timestamp = 0;
	import_content = 0;
	log_file = 0;
	lock = 0;
	gen_conf = 0;
	speedtest = 0;
	run = 0;

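	/* parse the command line options, using getopt_long when available */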
	opterr = 0;
	while ((c =
#if HAVE_GETOPT_LONG
		getopt_long(argc, argv, OPTIONS, long_options, 0))
#else
		getopt(argc, argv, OPTIONS))
#endif
		!= EOF) {
		switch (c) {
		case 'c' :
			pathimport(conf, sizeof(conf), optarg);
			break;
		case 'f' : {
			struct snapraid_filter* filter = filter_alloc_file(1, optarg);
			if (!filter) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid filter specification '%s'\n", optarg);
				log_fatal("Filters using relative paths are not supported. Ensure to add an initial slash\n");
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			tommy_list_insert_tail(&filterlist_file, &filter->node, filter);
		} break;
		case 'd' : {
			struct snapraid_filter* filter = filter_alloc_disk(1, optarg);
			if (!filter) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid filter specification '%s'\n", optarg);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			tommy_list_insert_tail(&filterlist_disk, &filter->node, filter);
		} break;
		case 'm' :
			filter_missing = 1;
			opt.expected_missing = 1;
			break;
		case 'e' :
			/* when processing only error, we filter both files and blocks */
			/* and we apply fixes only to synced ones */
			filter_error = 1;
			opt.badonly = 1;
			opt.syncedonly = 1;
			break;
		case 'p' :
			if (strcmp(optarg, "bad") == 0) {
				plan = SCRUB_BAD;
			} else if (strcmp(optarg, "new") == 0) {
				plan = SCRUB_NEW;
			} else if (strcmp(optarg, "full") == 0) {
				plan = SCRUB_FULL;
			} else {
				plan = strtoul(optarg, &e, 10);
				if (!e || *e || plan > 100) {
					/* LCOV_EXCL_START */
					log_fatal("Invalid plan/percentage '%s'\n", optarg);
					exit(EXIT_FAILURE);
					/* LCOV_EXCL_STOP */
				}
			}
			break;
		case 'o' :
			olderthan = strtoul(optarg, &e, 10);
			if (!e || *e || olderthan > 1000) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid number of days '%s'\n", optarg);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			break;
		case 'S' :
			blockstart = strtoul(optarg, &e, 0);
			if (!e || *e) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid start position '%s'\n", optarg);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			break;
		case 'B' :
			blockcount = strtoul(optarg, &e, 0);
			if (!e || *e) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid count number '%s'\n", optarg);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			break;
		case 'L' :
			opt.io_error_limit = strtoul(optarg, &e, 0);
			if (!e || *e) {
				/* LCOV_EXCL_START */
				log_fatal("Invalid error limit number '%s'\n", optarg);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			break;
		case 'i' :
			if (import_timestamp) {
				/* LCOV_EXCL_START */
				log_fatal("Import directory '%s' already specified as '%s'\n", optarg, import_timestamp);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			import_timestamp = optarg;
			break;
		case OPT_TEST_IMPORT_CONTENT :
			if (import_content) {
				/* LCOV_EXCL_START */
				log_fatal("Import directory '%s' already specified as '%s'\n", optarg, import_content);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			import_content = optarg;
			break;
		case 'l' :
			if (log_file) {
				/* LCOV_EXCL_START */
				log_fatal("Log file '%s' already specified as '%s'\n", optarg, log_file);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
			log_file = optarg;
			break;
		case 'Z' :
			opt.force_zero = 1;
			break;
		case 'E' :
			opt.force_empty = 1;
			break;
		case 'U' :
			opt.force_uuid = 1;
			break;
		case 'D' :
			opt.force_device = 1;
			break;
		case 'N' :
			opt.force_nocopy = 1;
			break;
		case 'F' :
			opt.force_full = 1;
			break;
		case 'a' :
			opt.auditonly = 1;
			break;
		case 'h' :
			opt.prehash = 1;
			break;
		case 'v' :
			++msg_level;
			break;
		case 'q' :
			--msg_level;
			break;
		case 'G' :
			opt.gui = 1;
			break;
		case 'H' :
			usage();
			exit(EXIT_SUCCESS);
		case 'V' :
			version();
			exit(EXIT_SUCCESS);
		case 'T' :
			speedtest = 1;
			break;
		case 'C' :
			gen_conf = optarg;
			break;
		case OPT_TEST_KILL_AFTER_SYNC :
			opt.kill_after_sync = 1;
			break;
		case OPT_TEST_EXPECT_UNRECOVERABLE :
			opt.expect_unrecoverable = 1;
			break;
		case OPT_TEST_EXPECT_RECOVERABLE :
			opt.expect_recoverable = 1;
			break;
		case OPT_TEST_SKIP_SELF :
			opt.skip_self = 1;
			break;
		case OPT_TEST_SKIP_SIGN :
			opt.skip_sign = 1;
			break;
		case OPT_TEST_SKIP_FALLOCATE :
			opt.skip_fallocate = 1;
			break;
		case OPT_TEST_SKIP_SEQUENTIAL :
			opt.skip_sequential = 1;
			break;
		case OPT_TEST_SKIP_DEVICE :
			opt.skip_device = 1;
			period = 50; /* reduce period of the speed test */
			break;
		case OPT_TEST_SKIP_CONTENT_CHECK :
			opt.skip_content_check = 1;
			break;
		case OPT_TEST_SKIP_PARITY_ACCESS :
			opt.skip_parity_access = 1;
			break;
		case OPT_TEST_SKIP_DISK_ACCESS :
			opt.skip_disk_access = 1;
			break;
		case OPT_TEST_FORCE_MURMUR3 :
			opt.force_murmur3 = 1;
			break;
		case OPT_TEST_FORCE_SPOOKY2 :
			opt.force_spooky2 = 1;
			break;
		case OPT_TEST_SKIP_LOCK :
			opt.skip_lock = 1;
			break;
		case OPT_TEST_FORCE_ORDER_PHYSICAL :
			opt.force_order = SORT_PHYSICAL;
			break;
		case OPT_TEST_FORCE_ORDER_INODE :
			opt.force_order = SORT_INODE;
			break;
		case OPT_TEST_FORCE_ORDER_ALPHA :
			opt.force_order = SORT_ALPHA;
			break;
		case OPT_TEST_FORCE_ORDER_DIR :
			opt.force_order = SORT_DIR;
			break;
		case OPT_TEST_FORCE_SCRUB_AT :
			opt.force_scrub_at = atoi(optarg);
			break;
		case OPT_TEST_FORCE_SCRUB_EVEN :
			opt.force_scrub_even = 1;
			break;
		case OPT_TEST_FORCE_CONTENT_WRITE :
			opt.force_content_write = 1;
			break;
		case OPT_TEST_EXPECT_FAILURE :
			/* invert the exit codes */
			exit_success = 1;
			exit_failure = 0;
			break;
		case OPT_TEST_EXPECT_NEED_SYNC :
			/* invert the exit codes */
			exit_success = 1;
			exit_sync_needed = 0;
			break;
		case OPT_TEST_RUN :
			run = optarg;
			break;
		case OPT_TEST_FORCE_SCAN_WINFIND :
			opt.force_scan_winfind = 1;
			break;
		case OPT_TEST_FORCE_PROGRESS :
			opt.force_progress = 1;
			break;
		case OPT_TEST_FORCE_AUTOSAVE_AT :
			opt.force_autosave_at = atoi(optarg);
			break;
		case OPT_TEST_FAKE_DEVICE :
			opt.fake_device = 1;
			break;
		case OPT_NO_WARNINGS :
			opt.no_warnings = 1;
			break;
		default :
			/* LCOV_EXCL_START */
			log_fatal("Unknown option '%c'\n", (char)c);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	os_init(opt.force_scan_winfind);
	raid_init();
	crc32c_init();

	if (speedtest != 0) {
		speed(period);
		os_done();
		exit(EXIT_SUCCESS);
	}

	if (gen_conf != 0) {
		generate_configuration(gen_conf);
		os_done();
		exit(EXIT_SUCCESS);
	}

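	/* after the options, exactly one command must remain */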
	if (optind + 1 != argc) {
		/* LCOV_EXCL_START */
		usage();
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	command = argv[optind];
	if (strcmp(command, "diff") == 0) {
		operation = OPERATION_DIFF;
	} else if (strcmp(argv[optind], "sync") == 0) {
		operation = OPERATION_SYNC;
	} else if (strcmp(argv[optind], "check") == 0) {
		operation = OPERATION_CHECK;
	} else if (strcmp(argv[optind], "fix") == 0) {
		operation = OPERATION_FIX;
	} else if (strcmp(argv[optind], "test-dry") == 0) {
		operation = OPERATION_DRY;
	} else if (strcmp(argv[optind], "dup") == 0) {
		operation = OPERATION_DUP;
	} else if (strcmp(argv[optind], "list") == 0) {
		operation = OPERATION_LIST;
	} else if (strcmp(argv[optind], "pool") == 0) {
		operation = OPERATION_POOL;
	} else if (strcmp(argv[optind], "rehash") == 0) {
		operation = OPERATION_REHASH;
	} else if (strcmp(argv[optind], "scrub") == 0) {
		operation = OPERATION_SCRUB;
	} else if (strcmp(argv[optind], "status") == 0) {
		operation = OPERATION_STATUS;
	} else if (strcmp(argv[optind], "test-rewrite") == 0) {
		operation = OPERATION_REWRITE;
	} else if (strcmp(argv[optind], "test-read") == 0) {
		operation = OPERATION_READ;
	} else if (strcmp(argv[optind], "test-nano") == 0) {
		operation = OPERATION_NANO;
	} else if (strcmp(argv[optind], "up") == 0) {
		operation = OPERATION_SPINUP;
	} else if (strcmp(argv[optind], "down") == 0) {
		operation = OPERATION_SPINDOWN;
	} else if (strcmp(argv[optind], "devices") == 0) {
		operation = OPERATION_DEVICES;
	} else if (strcmp(argv[optind], "smart") == 0) {
		operation = OPERATION_SMART;
	} else {
		/* LCOV_EXCL_START */
		log_fatal("Unknown command '%s'\n", argv[optind]);
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	/* check options compatibility */
	switch (operation) {
	case OPERATION_CHECK :
		break;
	default :
		if (opt.auditonly) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -a, --audit-only with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	switch (operation) {
	case OPERATION_FIX :
		break;
	default :
		if (opt.force_device) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -D, --force-device with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	switch (operation) {
	case OPERATION_SYNC :
	case OPERATION_CHECK :
	case OPERATION_FIX :
		break;
	default :
		if (opt.force_nocopy) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -N, --force-nocopy with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	switch (operation) {
	case OPERATION_SYNC :
		break;
	default :
		if (opt.prehash) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -h, --pre-hash with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}

		if (opt.force_full) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -F, --force-full with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	if (opt.force_full && opt.force_nocopy) {
		/* LCOV_EXCL_START */
		log_fatal("You cannot use the -F, --force-full and -N, --force-nocopy options at the same time\n");
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	if (opt.prehash && opt.force_nocopy) {
		/* LCOV_EXCL_START */
		log_fatal("You cannot use the -h, --pre-hash and -N, --force-nocopy options at the same time\n");
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	switch (operation) {
	case OPERATION_CHECK :
	case OPERATION_FIX :
	case OPERATION_DRY :
		break;
	default :
		if (!tommy_list_empty(&filterlist_file)) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -f, --filter with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
		if (!tommy_list_empty(&filterlist_disk)) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -d, --filter-disk with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
		if (filter_missing != 0) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -m, --filter-missing with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
		if (filter_error != 0) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot use -e, --filter-error with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	/* errors must always be fixed on all disks */
	/* because we don't keep the information about which disk has the error */
	if (filter_error != 0 && !tommy_list_empty(&filterlist_disk)) {
		/* LCOV_EXCL_START */
		log_fatal("You cannot use -e, --filter-error and -d, --filter-disk at the same time\n");
		exit(EXIT_FAILURE);
		/* LCOV_EXCL_STOP */
	}

	switch (operation) {
	case OPERATION_CHECK :
	case OPERATION_FIX :
		break;
	default :
		if (import_timestamp != 0 || import_content != 0) {
			/* LCOV_EXCL_START */
			log_fatal("You cannot import with the '%s' command\n", command);
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	switch (operation) {
	case OPERATION_LIST :
	case OPERATION_DUP :
	case OPERATION_STATUS :
	case OPERATION_REWRITE :
	case OPERATION_READ :
	case OPERATION_REHASH :
	case OPERATION_SPINUP : /* we want to do it in different threads to avoid blocking */
		/* avoid checking and accessing data disks if not needed */
		opt.skip_disk_access = 1;
		break;
	}

	switch (operation) {
	case OPERATION_DIFF :
	case OPERATION_LIST :
	case OPERATION_DUP :
	case OPERATION_POOL :
	case OPERATION_STATUS :
	case OPERATION_REWRITE :
	case OPERATION_READ :
	case OPERATION_REHASH :
	case OPERATION_NANO :
	case OPERATION_SPINUP : /* we want to do it in different threads to avoid blocking */
		/* avoid checking and accessing parity disks if not needed */
		opt.skip_parity_access = 1;
		break;
	}

	switch (operation) {
	case OPERATION_FIX :
	case OPERATION_CHECK :
		/* avoid stopping processing if a content file is not accessible */
		opt.skip_content_access = 1;
		break;
	}

	switch (operation) {
	case OPERATION_DIFF :
	case OPERATION_LIST :
	case OPERATION_DUP :
	case OPERATION_POOL :
	case OPERATION_NANO :
	case OPERATION_SPINUP :
	case OPERATION_SPINDOWN :
	case OPERATION_DEVICES :
	case OPERATION_SMART :
		opt.skip_self = 1;
		break;
	}

	switch (operation) {
	case OPERATION_DEVICES :
	case OPERATION_SMART :
		/* we may need to use these commands during operations */
		opt.skip_lock = 1;
		break;
	}

	/* open the log file */
	log_open(log_file);

	/* print generic info into the log */
	t = time(0);
	tm = localtime(&t);
	log_tag("version:%s\n", PACKAGE_VERSION);
	log_tag("unixtime:%" PRIi64 "\n", (int64_t)t);
	if (tm) {
		char datetime[64];
		strftime(datetime, sizeof(datetime), "%Y-%m-%d %H:%M:%S", tm);
		log_tag("time:%s\n", datetime);
	}
	log_tag("command:%s\n", command);
	for (i = 0; i < argc; ++i)
		log_tag("argv:%u:%s\n", i, argv[i]);
	log_flush();

	if (!opt.skip_self)
		selftest();

	state_init(&state);

	/* read the configuration file */
	state_config(&state, conf, command, &opt, &filterlist_disk);

	/* set the raid mode */
	raid_mode(state.raid_mode);

#if HAVE_LOCKFILE
	/* create the lock file */
	if (!opt.skip_lock && state.lockfile[0]) {
		lock = lock_lock(state.lockfile);
		if (lock == -1) {
			/* LCOV_EXCL_START */
			if (errno != EWOULDBLOCK) {
				log_fatal("Error creating the lock file '%s'. %s.\n", state.lockfile, strerror(errno));
			} else {
				log_fatal("The lock file '%s' is already locked!\n", state.lockfile);
				log_fatal("SnapRAID is already in use!\n");
			}
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}
#else
	(void)lock;
#endif

	if (operation == OPERATION_DIFF) {
		state_read(&state);

		ret = state_diff(&state);

		/* abort if sync needed */
		if (ret > 0)
			exit(EXIT_SYNC_NEEDED);
	} else if (operation == OPERATION_SYNC) {

		/* the next state read must clear all the past hashes in case */
		/* we are reading from an incomplete sync */
		/* The undetermined hashes are only for CHG/DELETED blocks for which we don't */
		/* know whether the previous interrupted sync was able to update the parity. */
		/* The sync process instead needs to trust this information because it's used */
		/* to avoid recomputing the parity if all the inputs are equal as before. */

		/* In these cases we don't know if the old state is still the one */
		/* stored inside the parity, because after an aborted sync the parity */
		/* may or may not have been updated with data that may now be */
		/* deleted. We then reset the hash to a bogus value. */

		/* An example for CHG blocks is: */
		/* - A file is added, creating a CHG block with ZERO state */
		/* - Sync is aborted after updating the parity to the new state, */
		/*   but without saving the content file representing this new BLK state */
		/* - The file is now deleted after the aborted sync */
		/* - Sync again, deleting the blocks over the CHG ones, */
		/*   with the hash of the CHG blocks not representing the real parity state */

		/* An example for DELETED blocks is: */
		/* - A file is deleted, creating DELETED blocks */
		/* - Sync is aborted after updating the parity to the new state, */
		/*   but without saving the content file representing this new EMPTY state */
		/* - Another file is added again over the DELETED ones, */
		/*   with the hash of the DELETED blocks not representing the real parity state */
		state.clear_past_hash = 1;

		state_read(&state);

		state_scan(&state);

		/* refresh the size info before the content write */
		state_refresh(&state);

		memory();

		/* intercept signals while operating */
		signal_init();

		/* save the new state before the sync */
		/* this allows recovering from changes made to the array after an aborted sync. */

		/* for example, consider this case: */
		/* - add some files to the array */
		/* - run a sync command; it will recompute the parity, adding the new files */
		/* - abort the sync command before it stores the new content file */
		/* - delete the not yet synced files from the array */
		/* - run a new sync command */

		/* the new sync command has no way to know that the parity file was modified */
		/* because the files triggering these changes are now deleted */
		/* and they aren't listed in the content file */

		if (state.need_write)
			state_write(&state);

		/* run a test command if required */
		if (run != 0) {
			ret = system(run); /* ignore error */
			if (ret != 0) {
				/* LCOV_EXCL_START */
				log_fatal("Error in running command '%s'.\n", run);
				exit(EXIT_FAILURE);
				/* LCOV_EXCL_STOP */
			}
		}

		/* wait some time to ensure that any concurrent modification made to the files, */
		/* carrying the same mtime already read by the scan process, will still be read by sync. */
		/* Note that any later modification, potentially not read by this sync, will have */
		/* a different mtime, and it will be synchronized at the next sync. */
		/* The worst case is the FAT filesystem with its two-second mtime resolution. */
		/* If you don't use FAT, the wait is not needed, because most filesystems now have */
		/* at least microsecond resolution, but it's better to be safe. */
		if (!opt.skip_self)
			sleep(2);

		ret = state_sync(&state, blockstart, blockcount);

		/* save the new state if required */
		if (!opt.kill_after_sync && (state.need_write || state.opt.force_content_write))
			state_write(&state);

		/* abort if required */
		if (ret != 0) {
			/* LCOV_EXCL_START */
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	} else if (operation == OPERATION_DRY) {
		state_read(&state);

		/* filter */
		state_skip(&state);
		state_filter(&state, &filterlist_file, &filterlist_disk, filter_missing, filter_error);

		memory();

		/* intercept signals while operating */
		signal_init();

		state_dry(&state, blockstart, blockcount);
	} else if (operation == OPERATION_REHASH) {
		state_read(&state);

		/* intercept signals while operating */
		signal_init();

		state_rehash(&state);

		/* save the new state if required */
		if (state.need_write)
			state_write(&state);
	} else if (operation == OPERATION_SCRUB) {
		state_read(&state);

		memory();

		/* intercept signals while operating */
		signal_init();

		ret = state_scrub(&state, plan, olderthan);

		/* save the new state if required */
		if (state.need_write || state.opt.force_content_write)
			state_write(&state);

		/* abort if required */
		if (ret != 0) {
			/* LCOV_EXCL_START */
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	} else if (operation == OPERATION_REWRITE) {
		state_read(&state);

		/* intercept signals while operating */
		signal_init();

		state_write(&state);

		memory();
	} else if (operation == OPERATION_READ) {
		state_read(&state);

		memory();
	} else if (operation == OPERATION_NANO) {
		state_read(&state);

		state_nano(&state);

		/* intercept signals while operating */
		signal_init();

		state_write(&state);

		memory();
	} else if (operation == OPERATION_SPINUP) {
		state_device(&state, DEVICE_UP);
	} else if (operation == OPERATION_SPINDOWN) {
		state_device(&state, DEVICE_DOWN);
	} else if (operation == OPERATION_DEVICES) {
		state_device(&state, DEVICE_LIST);
	} else if (operation == OPERATION_SMART) {
		state_device(&state, DEVICE_SMART);
	} else if (operation == OPERATION_STATUS) {
		state_read(&state);

		memory();

		state_status(&state);
	} else if (operation == OPERATION_DUP) {
		state_read(&state);

		state_dup(&state);
	} else if (operation == OPERATION_LIST) {
		state_read(&state);

		state_list(&state);
	} else if (operation == OPERATION_POOL) {
		state_read(&state);

		state_pool(&state);
	} else {
		state_read(&state);

		/* if we are also trying to recover */
		if (!state.opt.auditonly) {
			/* import the user-specified dirs */
			if (import_timestamp != 0)
				state_search(&state, import_timestamp);
			if (import_content != 0)
				state_import(&state, import_content);

			/* import from the whole array */
			if (!state.opt.force_nocopy)
				state_search_array(&state);
		}

		/* filter */
		state_skip(&state);
		state_filter(&state, &filterlist_file, &filterlist_disk, filter_missing, filter_error);

		memory();

		/* intercept signals while operating */
		signal_init();

		if (operation == OPERATION_CHECK) {
			ret = state_check(&state, 0, blockstart, blockcount);
		} else { /* it's fix */
			ret = state_check(&state, 1, blockstart, blockcount);
		}

		/* abort if required */
		if (ret != 0) {
			/* LCOV_EXCL_START */
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}

	/* close log file */
	log_close(log_file);

#if HAVE_LOCKFILE
	if (!opt.skip_lock && state.lockfile[0]) {
		if (lock_unlock(lock) == -1) {
			/* LCOV_EXCL_START */
			log_fatal("Error closing the lock file '%s'. %s.\n", state.lockfile, strerror(errno));
			exit(EXIT_FAILURE);
			/* LCOV_EXCL_STOP */
		}
	}
#endif

	state_done(&state);
	tommy_list_foreach(&filterlist_file, (tommy_foreach_func*)filter_free);
	tommy_list_foreach(&filterlist_disk, (tommy_foreach_func*)filter_free);

	os_done();

	return EXIT_SUCCESS;
}
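
A note on the pattern above: most commands follow the same round trip of reading the content file, mutating the in-memory state, and writing it back only when something changed. Below is a minimal sketch of that flow, assuming SnapRAID's internal state API as used in the example (state_init/state_read/state_write/state_done and the need_write flag); it is not part of the original source and omits option parsing, signal handling, and locking.

/* Minimal sketch of the content-file round trip used by the dispatch above.
 * Assumes SnapRAID's internal headers providing struct snapraid_state and
 * the state_* functions; everything else is omitted for brevity. */
void content_roundtrip_sketch(void)
{
	struct snapraid_state state;

	state_init(&state);        /* set up the empty in-memory state */

	state_read(&state);        /* load the content file(s) */

	/* ... mutate the state here: scan, scrub, rehash, ... */

	if (state.need_write)      /* persist only if something changed */
		state_write(&state);

	state_done(&state);        /* release resources */
}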
Example #7
0
u8 *cmd_save_game(u8 *c)
{
	u8 newline_orig;
	FILE *save_stream;	// file handle
	u8 *msg;
	
	clock_state = 1;
	newline_orig = msgstate.newline_char;
	msgstate.newline_char = '@';
		
	decrypt_string(inv_obj_string, inv_obj_string+inv_obj_string_size);
	
	if (save_dir == 0)
		save_dir = vstring_new(0, 200);
	if (save_filename == 0)
		save_filename = vstring_new(0, 250);
	
	if ( state_get_info('s') != 0)// select the game
	{
		if (strlen(save_filename->data) > strlen(save_dir->data))
			msg = alloca(200 + strlen(save_filename->data));
		else
			msg = alloca(200 + strlen(save_dir->data));
		if (state_name_auto[0] == 0)
		{
			sprintf(msg, "About to save the game\ndescribed as:\n\n%s\n\nin file:\n %s\n\n%s",
					save_description, save_filename->data,
					"Press ENTER to continue.\nPress ESC to cancel.");
			message_box_draw(msg, 0, 0x23, 0);
			
			if ( user_bolean_poll() == 0)
				goto save_end;
		}
		dir_preset_change(DIR_PRESET_GAME);
		save_stream = fopen(save_filename->data, "wb");
		if ( save_stream == 0)
		{
			sprintf(msg, "The directory\n   %s\n is full or the disk is write-protected.\nPress ENTER to continue."
				, save_dir->data);
			message_box(msg);
		}
		else
		{
			if (fwrite(save_description, sizeof(u8), 0x1f, save_stream) != 0x1f)
				goto save_err;
			if (state_write(save_stream, &state, sizeof(AGI_STATE)) == 0)
				goto save_err;
			if (state_write(save_stream, objtable, objtable_size) == 0)
				goto save_err;
			if (state_write(save_stream, inv_obj_table, inv_obj_table_size*sizeof(INV_OBJ)) == 0)
				goto save_err;
			if (state_write(save_stream, inv_obj_string, inv_obj_string_size) == 0)
				goto save_err;
			if (state_write(save_stream, script_head, state.script_size<<1) == 0)
				goto save_err;
			if (state_write(save_stream, (void *)scan_start_list, logic_save_scan_start()) != 0)
				goto save_close;
		save_err:
			fclose(save_stream);
			remove(save_filename->data);
			message_box("The disk is full.\nPress ENTER to continue.");
			goto save_end;
		save_close:
			fclose(save_stream);
		}
	}
save_end:
	cmd_close_window(0);
	msgstate.newline_char = newline_orig;
	clock_state = 0;
	decrypt_string(inv_obj_string, inv_obj_string+inv_obj_string_size);
	return c;
}
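
In this example state_write() takes the open save stream plus a buffer and its size, and returns 0 on any failure, so every write is checked and the partial save file is removed on error. The sketch below is a hypothetical illustration of that contract built on plain fwrite(); the names state_write_sketch and save_blob are invented for illustration and are not part of the original interpreter.

/* Hypothetical write-and-verify helpers mirroring the pattern above:
 * report 0 on a short write so the caller can close and remove the
 * partially written save file. */
#include <stdio.h>

typedef unsigned char u8;

static int state_write_sketch(FILE *fp, const void *buf, size_t size)
{
	/* fwrite() returns the number of items written; with an item size
	 * of 1, a full write returns exactly 'size' */
	return fwrite(buf, 1, size, fp) == size;
}

static int save_blob(const char *path, const u8 *data, size_t size)
{
	FILE *fp = fopen(path, "wb");

	if (fp == NULL)
		return 0;

	if (!state_write_sketch(fp, data, size)) {
		fclose(fp);
		remove(path); /* drop the partial file, as cmd_save_game does */
		return 0;
	}

	fclose(fp);
	return 1;
}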