/*
 * Decide where a maildir message's size (physical or virtual, selected by
 * @vsize) should be cached, and keep the in-memory copy consistent with
 * what's already stored.
 *
 * If @quick_check is set and the size can already be found cheaply (from the
 * maildir filename or the uidlist, via maildir_quick_size_lookup()), the size
 * is not added anywhere new; instead the cached in-memory value is verified
 * against it and the cache is flagged corrupted on mismatch.  Otherwise, when
 * POP3 may use this mailbox, the size is written to the uidlist as a backup
 * that survives cache-file loss.
 */
static void maildir_handle_size_caching(struct index_mail *mail,
					bool quick_check, bool vsize)
{
	struct mailbox *box = mail->mail.mail.box;
	struct maildir_mailbox *mbox = (struct maildir_mailbox *)box;
	enum mail_fetch_field field;
	uoff_t size;
	int pop3_state;

	/* select which of the two size fields this call handles */
	field = vsize ? MAIL_FETCH_VIRTUAL_SIZE : MAIL_FETCH_PHYSICAL_SIZE;
	/* already decided earlier not to cache this field - nothing to do */
	if ((mail->data.dont_cache_fetch_fields & field) != 0)
		return;

	if (quick_check && maildir_quick_size_lookup(mail, vsize, &size) > 0) {
		/* already in filename / uidlist. don't add it anywhere,
		   including to the uidlist if it's already in filename.
		   do some extra checks here to catch potential cache bugs. */
		if (vsize && mail->data.virtual_size != size) {
			mail_cache_set_corrupted(box->cache,
				"Corrupted virtual size for uid=%u: "
				"%"PRIuUOFF_T" != %"PRIuUOFF_T,
				mail->mail.mail.uid,
				mail->data.virtual_size, size);
			/* trust the filename/uidlist value over the cache */
			mail->data.virtual_size = size;
		} else if (!vsize && mail->data.physical_size != size) {
			mail_cache_set_corrupted(box->cache,
				"Corrupted physical size for uid=%u: "
				"%"PRIuUOFF_T" != %"PRIuUOFF_T,
				mail->mail.mail.uid,
				mail->data.physical_size, size);
			mail->data.physical_size = size;
		}
		/* mark the field so it won't also get written to the
		   cache file */
		mail->data.dont_cache_fetch_fields |= field;
		return;
	}

	/* 1 = pop3-only, 0 = mixed, -1 = no pop3 */
	pop3_state = maildir_get_pop3_state(mail);
	/* uid == 0 would mean the mail isn't assigned a UID yet - can't
	   store per-UID uidlist extensions for it */
	if (pop3_state >= 0 && mail->mail.mail.uid != 0) {
		/* if size is wanted permanently, store it to uidlist
		   so that in case cache file gets lost we can get it
		   quickly */
		mail->data.dont_cache_fetch_fields |= field;
		/* NOTE(review): assumes the in-memory size has already been
		   computed by the caller at this point - confirm */
		size = vsize ? mail->data.virtual_size :
			mail->data.physical_size;
		maildir_uidlist_set_ext(mbox->uidlist, mail->mail.mail.uid,
					vsize ? MAILDIR_UIDLIST_REC_EXT_VSIZE :
					MAILDIR_UIDLIST_REC_EXT_PSIZE,
					dec2str(size));
	}
}
/*
 * Verify that an already-registered cache field still matches the type and
 * (for fixed-size types) the size recorded for it.  On any mismatch the
 * cache is marked corrupted.
 *
 * Returns 0 when the registration is consistent, -1 on mismatch.
 */
static int field_type_verify(struct mail_cache *cache, unsigned int idx,
			     enum mail_cache_field_type type,
			     unsigned int size)
{
	const struct mail_cache_field *reg = &cache->fields[idx].field;

	/* the field's type must never change after registration */
	if (reg->type != type) {
		mail_cache_set_corrupted(cache,
			"registered field %s type changed", reg->name);
		return -1;
	}

	/* only fixed-size types carry a meaningful size to compare */
	if (field_has_fixed_size(type) && reg->field_size != size) {
		mail_cache_set_corrupted(cache,
			"registered field %s size changed", reg->name);
		return -1;
	}

	return 0;
}
/*
 * Map the cache record stored at @offset and return a pointer to it in
 * *rec_r.  The returned pointer stays valid only until the next remap of
 * the cache file.
 *
 * Returns 0 on success, -1 on I/O error or if the record is corrupted
 * (misaligned offset, truncated record, or size pointing past EOF); in the
 * corruption cases the cache is also marked corrupted.
 */
int mail_cache_get_record(struct mail_cache *cache, uint32_t offset,
			  const struct mail_cache_record **rec_r)
{
	const struct mail_cache_record *rec;
	const void *data;
	int ret;

	i_assert(offset != 0);

	if (offset % sizeof(uint32_t) != 0) {
		/* records are always 32-bit aligned */
		mail_cache_set_corrupted(cache, "invalid record offset");
		return -1;
	}

	/* we don't know yet how large the record is, so just guess */
	if (mail_cache_map(cache, offset, sizeof(*rec) + CACHE_PREFETCH,
			   &data) < 0)
		return -1;

	/* the map may have been truncated at EOF - make sure at least the
	   record header is inside the file */
	if (offset + sizeof(*rec) > cache->mmap_length) {
		mail_cache_set_corrupted(cache, "record points outside file");
		return -1;
	}
	rec = data;
	/* size includes the header itself, so it can never be smaller */
	if (rec->size < sizeof(*rec)) {
		mail_cache_set_corrupted(cache, "invalid record size");
		return -1;
	}
	if (rec->size > CACHE_PREFETCH) {
		/* larger than we guessed. map the rest of the record. */
		if ((ret = mail_cache_map(cache, offset, rec->size,
					  &data)) < 0)
			return -1;
		/* ret == 0 means the requested range extends past EOF */
		if (ret == 0) {
			mail_cache_set_corrupted(cache,
						 "record points outside file");
			return -1;
		}
		rec = data;
	}
	/* NOTE(review): when rec->size <= CACHE_PREFETCH there is no explicit
	   check that offset + rec->size fits in mmap_length; presumably the
	   callers validate field boundaries against rec->size themselves -
	   confirm against mail_cache_map()'s guarantees */

	*rec_r = rec;
	return 0;
}
/*
 * Advance the lookup iterator to the next cache record for the message.
 * Records form a singly linked list via prev_offset; when the on-disk list
 * ends, newly appended in-memory (transaction) data and then freshly
 * written disk appends are checked once each.
 *
 * Returns 1 if a record was found (ctx->rec/pos/rec_size updated),
 * 0 when there are no more records, -1 on error/corruption.
 */
static int
mail_cache_lookup_iter_next_record(struct mail_cache_lookup_iterate_ctx *ctx)
{
	struct mail_cache_view *view = ctx->view;

	if (ctx->failed)
		return -1;

	/* follow the linked list backwards through older records */
	if (ctx->rec != NULL)
		ctx->offset = ctx->rec->prev_offset;
	if (ctx->offset == 0) {
		/* end of this record list. check newly appended data. */
		if (view->trans_seq1 > ctx->seq ||
		    view->trans_seq2 < ctx->seq)
			return 0;
		/* check data still in memory. this works for recent mails
		   even with INDEX=MEMORY */
		if (!ctx->memory_appends_checked) {
			if (mail_cache_lookup_iter_transaction(ctx))
				return 1;
			/* only check the in-memory appends once */
			ctx->memory_appends_checked = TRUE;
		}
		if (MAIL_CACHE_IS_UNUSABLE(view->cache))
			return 0;

		/* check data already written to cache file */
		if (ctx->disk_appends_checked ||
		    mail_cache_lookup_offset(view->cache, view->trans_view,
					     ctx->seq, &ctx->offset) <= 0)
			return 0;
		ctx->disk_appends_checked = TRUE;
		ctx->remap_counter = view->cache->remap_counter;
		/* starting a fresh record chain - reset loop detection */
		memset(&view->loop_track, 0, sizeof(view->loop_track));
	}

	if (ctx->stop)
		return 0;

	/* look up the next record */
	if (mail_cache_get_record(view->cache, ctx->offset, &ctx->rec) < 0)
		return -1;
	/* guard against a corrupted, circular prev_offset chain */
	if (mail_cache_track_loops(&view->loop_track, ctx->offset,
				   ctx->rec->size)) {
		mail_cache_set_corrupted(view->cache,
					 "record list is circular");
		return -1;
	}
	ctx->remap_counter = view->cache->remap_counter;

	/* position the field cursor just past the record header */
	ctx->pos = sizeof(*ctx->rec);
	ctx->rec_size = ctx->rec->size;
	return 1;
}
static struct message_part *get_unserialized_parts(struct index_mail *mail) { unsigned int field_idx = mail->ibox->cache_fields[MAIL_CACHE_MESSAGE_PARTS].idx; struct message_part *parts; buffer_t *part_buf; const char *error; int ret; part_buf = buffer_create_dynamic(pool_datastack_create(), 128); ret = index_mail_cache_lookup_field(mail, part_buf, field_idx); if (ret <= 0) return NULL; parts = message_part_deserialize(mail->data_pool, part_buf->data, part_buf->used, &error); if (parts == NULL) { mail_cache_set_corrupted(mail->mail.mail.box->cache, "Corrupted cached message_part data (%s)", error); } return parts; }
/*
 * Return the next cached field from the lookup iterator in *field_r.
 * Fields are packed inside each record as:
 *   [32-bit file field index][optional 32-bit size][data, 32-bit aligned]
 * where the size word is present only for variable-size fields
 * (field_size == UINT_MAX).  Advances to the next record when the current
 * one is exhausted.
 *
 * Returns 1 if a field was returned, 0 at end of data, -1 on
 * error/corruption.
 */
int mail_cache_lookup_iter_next(struct mail_cache_lookup_iterate_ctx *ctx,
				struct mail_cache_iterate_field *field_r)
{
	struct mail_cache *cache = ctx->view->cache;
	unsigned int field_idx;
	unsigned int data_size;
	uint32_t file_field;
	int ret;

	/* the cached rec pointer is only valid while the file hasn't been
	   remapped since we took it */
	i_assert(ctx->remap_counter == cache->remap_counter);
	if (ctx->pos + sizeof(uint32_t) > ctx->rec_size) {
		/* a record must end exactly at rec_size - anything else
		   means it's truncated mid-field */
		if (ctx->pos != ctx->rec_size) {
			mail_cache_set_corrupted(cache,
						 "record has invalid size");
			return -1;
		}
		if ((ret = mail_cache_lookup_iter_next_record(ctx)) <= 0)
			return ret;
	}

	/* return the next field */
	file_field = *((const uint32_t *)
		       CONST_PTR_OFFSET(ctx->rec, ctx->pos));
	ctx->pos += sizeof(uint32_t);

	if (file_field >= cache->file_fields_count) {
		/* new field, have to re-read fields header to figure out
		   its size. don't do this if we're compressing. */
		/* NOTE(review): cache->locked presumably means a compress
		   is in progress - confirm */
		if (!cache->locked) {
			if (mail_cache_header_fields_read(cache) < 0)
				return -1;
		}
		/* still unknown after re-reading: the index is bogus */
		if (file_field >= cache->file_fields_count) {
			mail_cache_set_corrupted(cache,
				"field index too large (%u >= %u)",
				file_field, cache->file_fields_count);
			return -1;
		}

		/* field reading might have re-mmaped the file and caused
		   rec pointer to break. need to get it again. */
		if (mail_cache_get_record(cache, ctx->offset, &ctx->rec) < 0)
			return -1;
		ctx->remap_counter = cache->remap_counter;
	}

	/* translate the file's field numbering into ours */
	field_idx = cache->file_field_map[file_field];
	data_size = cache->fields[field_idx].field.field_size;
	if (data_size == UINT_MAX &&
	    ctx->pos + sizeof(uint32_t) <= ctx->rec->size) {
		/* variable size field. get its size from the file. */
		data_size = *((const uint32_t *)
			      CONST_PTR_OFFSET(ctx->rec, ctx->pos));
		ctx->pos += sizeof(uint32_t);
	}

	/* the field's data must fit within the record */
	if (ctx->rec->size - ctx->pos < data_size) {
		mail_cache_set_corrupted(cache,
			"record continues outside its allocated size");
		return -1;
	}

	field_r->field_idx = field_idx;
	field_r->data = CONST_PTR_OFFSET(ctx->rec, ctx->pos);
	field_r->size = data_size;
	field_r->offset = ctx->offset + ctx->pos;

	/* each record begins from 32bit aligned position */
	ctx->pos += (data_size + sizeof(uint32_t)-1) &
		~(sizeof(uint32_t)-1);
	return 1;
}