/* Choose the transaction-log range [from, to) that this view sync should
   cover and point view->log_view at it.

   The range normally starts at the view's expunge-sync position and ends at
   the index header's current head position. If the log was reset inside that
   range and the caller refuses to sync expunges (!sync_expunges), the end of
   the range is pulled back to just before the reset and *partial_sync_r is
   set so the caller knows the sync is incomplete.

   Returns 1 on success, 0/-1 as propagated from
   mail_transaction_log_view_set(), and -1 with an index error set if the
   header's log position is older than what the view has already seen. */
static int view_sync_set_log_view_range(struct mail_index_view *view,
					bool sync_expunges,
					bool *reset_r, bool *partial_sync_r)
{
	const struct mail_index_header *hdr = &view->index->map->hdr;
	uint32_t from_seq = view->log_file_expunge_seq;
	uoff_t from_offset = view->log_file_expunge_offset;
	uint32_t to_seq = hdr->log_file_seq;
	uoff_t to_offset = hdr->log_file_head_offset;
	const char *reason;
	bool went_backwards;
	int ret;

	*partial_sync_r = FALSE;

	/* a head position older than the view's head indicates index
	   corruption - refuse to sync */
	went_backwards = to_seq < view->log_file_head_seq ||
		(to_seq == view->log_file_head_seq &&
		 to_offset < view->log_file_head_offset);
	if (went_backwards) {
		mail_index_set_error(view->index,
			"%s log position went backwards "
			"(%u,%"PRIuUOFF_T" < %u,%"PRIuUOFF_T")",
			view->index->filepath, to_seq, to_offset,
			view->log_file_head_seq, view->log_file_head_offset);
		return -1;
	}

	do {
		/* the view begins from the first non-synced transaction */
		ret = mail_transaction_log_view_set(view->log_view,
						    from_seq, from_offset,
						    to_seq, to_offset,
						    reset_r, &reason);
		if (ret <= 0)
			return ret;
		if (!*reset_r || sync_expunges)
			return 1;

		/* log was reset, but we don't want to sync expunges.
		   we can't do this, so sync only up to the reset. */
		mail_transaction_log_view_get_prev_pos(view->log_view,
						       &to_seq, &to_offset);
		to_seq--;
		to_offset = (uoff_t)-1;
		if (to_seq < from_seq) {
			/* we have only this reset log */
			mail_transaction_log_view_clear(view->log_view,
				view->log_file_expunge_seq);
			return 1;
		}
		*partial_sync_r = TRUE;
	} while (TRUE);
}
/* Extension sync handler for cache-offset updates in index records.
   Called while syncing the index map: old_data/new_data point at the
   record's old and new cache file offsets.

   Side effects visible here: deinitializes the handler context when the
   record's cache data is removed, invalidates the in-memory file cache for
   the rewritten region, and links the old cache record to the new one via
   mail_cache_link() when this sync hasn't been linked yet.

   Returns 1 to continue syncing, -1 on fatal cache error. */
int mail_cache_sync_handler(struct mail_index_sync_map_ctx *sync_ctx,
			    uint32_t seq ATTR_UNUSED,
			    void *old_data, const void *new_data,
			    void **context)
{
	struct mail_index_view *view = sync_ctx->view;
	struct mail_index *index = view->index;
	struct mail_cache *cache = index->cache;
	struct mail_cache_sync_context *ctx = *context;
	const uint32_t *old_cache_offset = old_data;
	const uint32_t *new_cache_offset = new_data;
	uint32_t cache_file_seq, cur_seq, tail_seq;
	uoff_t cur_offset, tail_offset;
	int ret;

	if (new_cache_offset == NULL) {
		/* record's cache data is being dropped - tear down the
		   per-sync handler context */
		mail_cache_handler_deinit(sync_ctx, ctx);
		*context = NULL;
		return 1;
	}

	/* lazily (re)initialize the per-sync context on first use */
	ctx = mail_cache_handler_init(context);

	if (cache->file_cache != NULL && !MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* flush read cache only once per sync */
		if (!ctx->nfs_read_cache_flushed &&
		    (index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
			ctx->nfs_read_cache_flushed = TRUE;
			mail_index_flush_read_cache(index,
						    cache->filepath, cache->fd,
						    cache->locked);
		}
		/* don't invalidate anything that's already been
		   invalidated within this sync. invalidate_highwater is
		   treated as the lowest offset already invalidated, so only
		   the gap below it needs invalidating */
		if (*new_cache_offset < ctx->invalidate_highwater) {
			file_cache_invalidate(cache->file_cache,
					      *new_cache_offset,
					      ctx->invalidate_highwater -
					      *new_cache_offset);
			ctx->invalidate_highwater = *new_cache_offset;
		}
	}

	/* nothing to link: no old record, offset unchanged, or this is a
	   view-only sync that must not modify the cache file */
	if (*old_cache_offset == 0 ||
	    *old_cache_offset == *new_cache_offset ||
	    sync_ctx->type == MAIL_INDEX_SYNC_HANDLER_VIEW)
		return 1;

	mail_transaction_log_view_get_prev_pos(view->log_view,
					       &cur_seq, &cur_offset);
	mail_transaction_log_get_mailbox_sync_pos(index->log,
						  &tail_seq, &tail_offset);
	if (LOG_IS_BEFORE(cur_seq, cur_offset, tail_seq, tail_offset)) {
		/* already been linked */
		return 1;
	}

	/* we'll need to link the old and new cache records */
	ret = mail_cache_handler_lock(ctx, cache);
	if (ret <= 0) {
		/* ret == 0 presumably means the lock wasn't obtainable but
		   the sync can proceed without linking - TODO confirm
		   against mail_cache_handler_lock() */
		return ret < 0 ? -1 : 1;
	}

	if (!get_cache_file_seq(view, &cache_file_seq))
		return 1;

	if (cache_file_seq != cache->hdr->file_seq) {
		/* cache has been compressed, don't modify it */
		return 1;
	}

	if (mail_cache_link(cache, *old_cache_offset,
			    *new_cache_offset) < 0)
		return -1;
	return 1;
}