/* Expunge handler for the cache extension: when a mail is expunged, drop
   its record from the cache file, but only if the file is still the same
   one the stored offset refers to. */
int mail_cache_expunge_handler(struct mail_index_sync_map_ctx *sync_ctx,
			       uint32_t seq ATTR_UNUSED, const void *data,
			       void **sync_context, void *context)
{
	struct mail_cache *cache = context;
	struct mail_cache_sync_context *ctx = *sync_context;
	const uint32_t *cache_offset = data;
	uint32_t cache_file_seq;
	int ret;

	if (data == NULL) {
		/* NULL data: the handler is being deinitialized */
		mail_cache_handler_deinit(sync_ctx, ctx);
		*sync_context = NULL;
		return 0;
	}
	if (*cache_offset == 0)
		return 0;

	ctx = mail_cache_handler_init(sync_context);
	ret = mail_cache_handler_lock(ctx, cache);
	if (ret <= 0)
		return ret;

	if (!get_cache_file_seq(sync_ctx->view, &cache_file_seq))
		return 0;

	if (!MAIL_CACHE_IS_UNUSABLE(cache) &&
	    cache_file_seq == cache->hdr->file_seq)
		(void)mail_cache_delete(cache, *cache_offset);
	return 0;
}
void mail_cache_decision_add(struct mail_cache_view *view, uint32_t seq,
			     unsigned int field)
{
	struct mail_cache *cache = view->cache;
	uint32_t uid;

	i_assert(field < cache->fields_count);

	if (MAIL_CACHE_IS_UNUSABLE(cache) || view->no_decision_updates)
		return;

	if (cache->fields[field].field.decision != MAIL_CACHE_DECISION_NO) {
		/* a) forced decision
		   b) we're already caching it, so it just wasn't in cache */
		return;
	}

	/* field used the first time */
	cache->fields[field].field.decision = MAIL_CACHE_DECISION_TEMP;
	cache->fields[field].decision_dirty = TRUE;
	cache->field_header_write_pending = TRUE;

	mail_index_lookup_uid(view->view, seq, &uid);
	cache->fields[field].uid_highwater = uid;
}
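/*
 * Decision flow sketch, derived from the function above. The TEMP -> YES
 * promotion happens on the lookup side of the decision code and is not
 * shown here; the description of that step is an assumption about the
 * companion code, not something this function implements:
 *
 *   NO   --first miss in mail_cache_decision_add()--> TEMP
 *   TEMP --(roughly) field wanted again for a mail older
 *          than uid_highwater--------------------------> YES
 *
 * Forced decisions and fields already being cached are never touched here.
 * uid_highwater records the highest UID for which the field was wanted,
 * which is what the later promotion check compares against.
 */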
void mail_cache_lookup_iter_init(struct mail_cache_view *view, uint32_t seq,
				 struct mail_cache_lookup_iterate_ctx *ctx_r)
{
	struct mail_cache_lookup_iterate_ctx *ctx = ctx_r;
	int ret;

	if (!view->cache->opened)
		(void)mail_cache_open_and_verify(view->cache);

	memset(ctx, 0, sizeof(*ctx));
	ctx->view = view;
	ctx->seq = seq;

	if (!MAIL_CACHE_IS_UNUSABLE(view->cache)) {
		/* look up the first offset */
		ret = mail_cache_lookup_offset(view->cache, view->view, seq,
					       &ctx->offset);
		if (ret <= 0) {
			ctx->stop = TRUE;
			ctx->failed = ret < 0;
		}
	}
	ctx->remap_counter = view->cache->remap_counter;

	memset(&view->loop_track, 0, sizeof(view->loop_track));
}
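/*
 * Usage sketch, not code from this file. It assumes the companion
 * mail_cache_lookup_iter_next() from this iterator API, which fills a
 * struct mail_cache_iterate_field; the exact struct member names are my
 * understanding of the lookup API and may differ between versions:
 *
 *   struct mail_cache_lookup_iterate_ctx iter;
 *   struct mail_cache_iterate_field field;
 *   int ret;
 *
 *   mail_cache_lookup_iter_init(view, seq, &iter);
 *   while ((ret = mail_cache_lookup_iter_next(&iter, &field)) > 0) {
 *           ... handle field.field_idx, field.data, field.size ...
 *   }
 *   if (ret < 0) {
 *           ... the cache was corrupted or an I/O error occurred ...
 *   }
 */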
void mail_cache_sync_lost_handler(struct mail_index *index)
{
	struct mail_cache *cache = index->cache;

	if (!MAIL_CACHE_IS_UNUSABLE(cache)) {
		mail_index_flush_read_cache(cache->index, cache->filepath,
					    cache->fd, cache->locked);
	}

	file_cache_invalidate(cache->file_cache, 0, (uoff_t)-1);
}
/* Advance the iterator to the next cache record for this mail.
   Returns 1 if a record was found, 0 at the end of the record list,
   -1 on error or corruption. */
static int
mail_cache_lookup_iter_next_record(struct mail_cache_lookup_iterate_ctx *ctx)
{
	struct mail_cache_view *view = ctx->view;

	if (ctx->failed)
		return -1;

	if (ctx->rec != NULL)
		ctx->offset = ctx->rec->prev_offset;
	if (ctx->offset == 0) {
		/* end of this record list. check newly appended data. */
		if (view->trans_seq1 > ctx->seq ||
		    view->trans_seq2 < ctx->seq)
			return 0;
		/* check data still in memory. this works for recent mails
		   even with INDEX=MEMORY */
		if (!ctx->memory_appends_checked) {
			if (mail_cache_lookup_iter_transaction(ctx))
				return 1;
			ctx->memory_appends_checked = TRUE;
		}
		if (MAIL_CACHE_IS_UNUSABLE(view->cache))
			return 0;

		/* check data already written to cache file */
		if (ctx->disk_appends_checked ||
		    mail_cache_lookup_offset(view->cache, view->trans_view,
					     ctx->seq, &ctx->offset) <= 0)
			return 0;
		ctx->disk_appends_checked = TRUE;
		ctx->remap_counter = view->cache->remap_counter;
		memset(&view->loop_track, 0, sizeof(view->loop_track));
	}
	if (ctx->stop)
		return 0;

	/* look up the next record */
	if (mail_cache_get_record(view->cache, ctx->offset, &ctx->rec) < 0)
		return -1;
	if (mail_cache_track_loops(&view->loop_track, ctx->offset,
				   ctx->rec->size)) {
		mail_cache_set_corrupted(view->cache,
					 "record list is circular");
		return -1;
	}
	ctx->remap_counter = view->cache->remap_counter;

	ctx->pos = sizeof(*ctx->rec);
	ctx->rec_size = ctx->rec->size;
	return 1;
}
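/*
 * Record list layout, as implied by the walk above: each mail's cache
 * records form a singly linked list from newest to oldest via prev_offset,
 * terminated by offset 0:
 *
 *   newest record --prev_offset--> older record --prev_offset--> ... --> 0
 *
 * mail_cache_track_loops() exists because a corrupted file can turn this
 * list into a cycle; without the check the iterator would loop forever.
 */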
/* Lock the cache file once per sync, remembering a failed attempt so it
   isn't retried for every record. Returns 1 when the cache is locked and
   usable, 0 when it can't be locked or is unusable, -1 on error. */
static int mail_cache_handler_lock(struct mail_cache_sync_context *ctx,
				   struct mail_cache *cache)
{
	int ret;

	if (ctx->locked)
		return MAIL_CACHE_IS_UNUSABLE(cache) ? 0 : 1;
	if (ctx->lock_failed)
		return 0;

	if ((ret = mail_cache_lock(cache, TRUE)) <= 0) {
		ctx->lock_failed = TRUE;
		return ret;
	}
	ctx->locked = TRUE;
	return 1;
}
int mail_cache_sync_handler(struct mail_index_sync_map_ctx *sync_ctx,
			    uint32_t seq ATTR_UNUSED,
			    void *old_data, const void *new_data,
			    void **context)
{
	struct mail_index_view *view = sync_ctx->view;
	struct mail_index *index = view->index;
	struct mail_cache *cache = index->cache;
	struct mail_cache_sync_context *ctx = *context;
	const uint32_t *old_cache_offset = old_data;
	const uint32_t *new_cache_offset = new_data;
	uint32_t cache_file_seq, cur_seq, tail_seq;
	uoff_t cur_offset, tail_offset;
	int ret;

	if (new_cache_offset == NULL) {
		mail_cache_handler_deinit(sync_ctx, ctx);
		*context = NULL;
		return 1;
	}

	ctx = mail_cache_handler_init(context);

	if (cache->file_cache != NULL && !MAIL_CACHE_IS_UNUSABLE(cache)) {
		/* flush read cache only once per sync */
		if (!ctx->nfs_read_cache_flushed &&
		    (index->flags & MAIL_INDEX_OPEN_FLAG_NFS_FLUSH) != 0) {
			ctx->nfs_read_cache_flushed = TRUE;
			mail_index_flush_read_cache(index,
						    cache->filepath, cache->fd,
						    cache->locked);
		}
		/* don't invalidate anything that's already been invalidated
		   within this sync. */
		if (*new_cache_offset < ctx->invalidate_highwater) {
			file_cache_invalidate(cache->file_cache,
					      *new_cache_offset,
					      ctx->invalidate_highwater -
					      *new_cache_offset);
			ctx->invalidate_highwater = *new_cache_offset;
		}
	}

	if (*old_cache_offset == 0 || *old_cache_offset == *new_cache_offset ||
	    sync_ctx->type == MAIL_INDEX_SYNC_HANDLER_VIEW)
		return 1;

	mail_transaction_log_view_get_prev_pos(view->log_view,
					       &cur_seq, &cur_offset);
	mail_transaction_log_get_mailbox_sync_pos(index->log,
						  &tail_seq, &tail_offset);
	if (LOG_IS_BEFORE(cur_seq, cur_offset, tail_seq, tail_offset)) {
		/* already been linked */
		return 1;
	}

	/* we'll need to link the old and new cache records */
	ret = mail_cache_handler_lock(ctx, cache);
	if (ret <= 0)
		return ret < 0 ? -1 : 1;

	if (!get_cache_file_seq(view, &cache_file_seq))
		return 1;

	if (cache_file_seq != cache->hdr->file_seq) {
		/* cache has been compressed, don't modify it */
		return 1;
	}

	if (mail_cache_link(cache, *old_cache_offset, *new_cache_offset) < 0)
		return -1;
	return 1;
}
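/*
 * Registration sketch. This is an assumption inferred from the handler
 * signatures above, not code from this file; the exact registration
 * helpers, their parameters and the sync-handler flags vary between
 * Dovecot versions:
 *
 *   mail_index_register_expunge_handler(index, cache->ext_id, FALSE,
 *                                       mail_cache_expunge_handler, cache);
 *   mail_index_register_sync_handler(index, cache->ext_id,
 *                                    mail_cache_sync_handler, ...);
 *
 * Once registered, the index sync machinery invokes
 * mail_cache_sync_handler() for updates to the cache extension records and
 * mail_cache_expunge_handler() for expunged mails, threading the per-sync
 * struct mail_cache_sync_context through the **context pointer.
 */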