Example No. 1
0
/* Called when the index sync state has been lost. Drops the in-memory
   copy of the cache file so it gets re-read from disk. */
void mail_cache_sync_lost_handler(struct mail_index *index)
{
	struct mail_cache *cache = index->cache;

	/* If the cache file is still usable, flush the kernel's read cache
	   first so we don't repopulate our file cache from stale pages.
	   An unusable cache has nothing worth flushing. */
	if (MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* nothing to flush; fall through to the invalidation */
	} else {
		mail_index_flush_read_cache(cache->index, cache->filepath,
					    cache->fd, cache->locked);
	}

	/* Invalidate the entire in-memory copy of the cache file. */
	file_cache_invalidate(cache->file_cache, 0, (uoff_t)-1);
}
Example No. 2
0
/* Index sync handler for cache-offset changes in a mail record.
   old_data/new_data point to the record's old and new cache offsets
   (new_data == NULL means the record is going away). Returns 1 on
   success or nothing-to-do, -1 on error.
   NOTE(review): return-value contract inferred from the visible early
   returns and the mail_cache_link() failure path — confirm against the
   sync-handler registration in the index code. */
int mail_cache_sync_handler(struct mail_index_sync_map_ctx *sync_ctx,
			    uint32_t seq ATTR_UNUSED,
			    void *old_data, const void *new_data,
			    void **context)
{
	struct mail_index_view *view = sync_ctx->view;
	struct mail_index *index = view->index;
	struct mail_cache *cache = index->cache;
	struct mail_cache_sync_context *ctx = *context;
	const uint32_t *old_cache_offset = old_data;
	const uint32_t *new_cache_offset = new_data;
	uint32_t cache_file_seq, cur_seq, tail_seq;
	uoff_t cur_offset, tail_offset;
	int ret;

	if (new_cache_offset == NULL) {
		/* no new offset: tear down the per-sync context */
		mail_cache_handler_deinit(sync_ctx, ctx);
		*context = NULL;
		return 1;
	}

	/* lazily (re)create the per-sync context */
	ctx = mail_cache_handler_init(context);
	if (cache->file_cache != NULL && !MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* flush read cache only once per sync */
		if (!ctx->nfs_read_cache_flushed &&
		    (index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
			ctx->nfs_read_cache_flushed = TRUE;
			mail_index_flush_read_cache(index,
						    cache->filepath, cache->fd,
						    cache->locked);
		}
		/* don't invalidate anything that's already been invalidated
		   within this sync. */
		if (*new_cache_offset < ctx->invalidate_highwater) {
			file_cache_invalidate(cache->file_cache,
					      *new_cache_offset,
					      ctx->invalidate_highwater -
					      *new_cache_offset);
			ctx->invalidate_highwater = *new_cache_offset;
		}
	}

	/* nothing to link when there was no old record, the offset didn't
	   change, or we're only syncing a view */
	if (*old_cache_offset == 0 || *old_cache_offset == *new_cache_offset ||
	    sync_ctx->type == MAIL_INDEX_SYNC_HANDLER_VIEW)
		return 1;

	/* compare our current log position against the mailbox sync
	   position to see whether this change was already handled */
	mail_transaction_log_view_get_prev_pos(view->log_view,
					       &cur_seq, &cur_offset);
	mail_transaction_log_get_mailbox_sync_pos(index->log,
						  &tail_seq, &tail_offset);
	if (LOG_IS_BEFORE(cur_seq, cur_offset, tail_seq, tail_offset)) {
		/* already been linked */
		return 1;
	}

	/* we'll need to link the old and new cache records */
	ret = mail_cache_handler_lock(ctx, cache);
	if (ret <= 0)
		return ret < 0 ? -1 : 1;

	if (!get_cache_file_seq(view, &cache_file_seq))
		return 1;

	if (cache_file_seq != cache->hdr->file_seq) {
		/* cache has been compressed, don't modify it */
		return 1;
	}

	if (mail_cache_link(cache, *old_cache_offset, *new_cache_offset) < 0)
		return -1;

	return 1;
}
/* Advance file->sync_offset past every fully written transaction record
   currently available in file->buffer.
   Returns 1 when syncing reached the end of the usable data (or an
   unfinished record), 0 when the mmaped file changed underneath us and
   the caller should re-map and retry, -1 on corruption or a failed
   syscall. */
static int
mail_transaction_log_file_sync(struct mail_transaction_log_file *file)
{
	const struct mail_transaction_header *hdr;
	const void *data;
	struct stat st;
	size_t size, avail;
	uint32_t trans_size = 0;
	int ret;

	i_assert(file->sync_offset >= file->buffer_offset);

	data = buffer_get_data(file->buffer, &size);
	if (file->buffer_offset + size < file->sync_offset) {
		mail_transaction_log_file_set_corrupted(file,
			"log file shrank (%"PRIuUOFF_T" < %"PRIuUOFF_T")",
			file->buffer_offset + (uoff_t)size, file->sync_offset);
		return -1;
	}
	/* walk the buffer one transaction record at a time, as long as a
	   full header fits in the remaining data */
	while (file->sync_offset - file->buffer_offset + sizeof(*hdr) <= size) {
		hdr = CONST_PTR_OFFSET(data, file->sync_offset -
				       file->buffer_offset);
		trans_size = mail_index_offset_to_uint32(hdr->size);
		if (trans_size == 0) {
			/* unfinished */
			return 1;
		}
		if (trans_size < sizeof(*hdr)) {
			mail_transaction_log_file_set_corrupted(file,
				"hdr.size too small (%u)", trans_size);
			return -1;
		}

		/* record body not fully present yet */
		if (file->sync_offset - file->buffer_offset + trans_size > size)
			break;

		/* transaction has been fully written */
		if ((ret = log_file_track_sync(file, hdr, trans_size)) <= 0) {
			if (ret < 0)
				return -1;
			break;
		}

		file->sync_offset += trans_size;
	}

	if (file->mmap_base != NULL && !file->locked) {
		/* Now that all the mmaped pages have page faulted, check if
		   the file had changed while doing that. Only after the last
		   page has faulted, the size returned by fstat() can be
		   trusted. Otherwise it might point to a page boundary while
		   the next page is still being written.

		   Without this check we might see partial transactions,
		   sometimes causing "Extension record updated without intro
		   prefix" errors. */
		if (fstat(file->fd, &st) < 0) {
			log_file_set_syscall_error(file, "fstat()");
			return -1;
		}
		if ((uoff_t)st.st_size != file->last_size) {
			file->last_size = st.st_size;
			return 0;
		}
	}

	avail = file->sync_offset - file->buffer_offset;
	if (avail != size) {
		/* There's more data than we could sync at the moment. If the
		   last record's size wasn't valid, we can't know if it will
		   be updated unless we've locked the log. */
		if (file->locked) {
			mail_transaction_log_file_set_corrupted(file,
				"Unexpected garbage at EOF");
			return -1;
		}
		/* The size field will be updated soon */
		mail_index_flush_read_cache(file->log->index, file->filepath,
					    file->fd, file->locked);
	}

	if (file->next != NULL &&
	    file->hdr.file_seq == file->next->hdr.prev_file_seq &&
	    file->next->hdr.prev_file_offset != file->sync_offset) {
		/* BUGFIX: report the value the condition actually compared
		   (file->next->hdr.prev_file_offset). The old code printed
		   file->log->head->hdr.prev_file_offset, which belongs to a
		   different log file whenever file->next != file->log->head,
		   making the corruption message misleading. */
		mail_transaction_log_file_set_corrupted(file,
			"Invalid transaction log size "
			"(%"PRIuUOFF_T" vs %u): %s", file->sync_offset,
			file->next->hdr.prev_file_offset, file->filepath);
		return -1;
	}

	return 1;
}