/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data. These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data, this
 * is managed directly by the DBM wrapper.
 *
 * @param dw		the DBM wrapper
 * @param key		the key (constant-width, determined at open time)
 * @param lenptr	if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT gpointer
dbmw_read(dbmw_t *dw, gconstpointer key, size_t *lenptr)
{
	struct cached *entry;
	dbmap_datum_t dval;

	dbmw_check(dw);
	g_assert(key);

	dw->r_access++;		/* Total read accesses, hit or miss */

	/*
	 * Fast path: value already cached (and already deserialized).
	 * The returned pointer aliases the cache entry's arena.
	 */

	entry = map_lookup(dw->values, key);
	if (entry) {
		dw->r_hits++;
		if (lenptr)
			*lenptr = entry->len;
		return entry->data;
	}

	/*
	 * Not cached, must read from DB.
	 */

	dw->ioerr = FALSE;
	dval = dbmap_lookup(dw->dm, key);

	if (dbmap_has_ioerr(dw->dm)) {
		/* Record errno for callers; errno is set by the failing dbmap I/O */
		dw->ioerr = TRUE;
		dw->error = errno;
		g_warning("DBMW \"%s\" I/O error whilst reading entry: %s",
			dw->name, dbmap_strerror(dw->dm));
		return NULL;
	} else if (NULL == dval.data)
		return NULL;	/* Not found in DB */

	/*
	 * Value was found, allocate a cache entry object for it.
	 *
	 * NOTE(review): dval.data is assumed to be owned/reclaimed by the
	 * dbmap layer -- it is copied below, never freed here; confirm with
	 * dbmap_lookup()'s contract.
	 */

	WALLOC0(entry);

	/*
	 * Deserialize data if needed.
	 */

	if (dw->unpack) {
		/*
		 * Allocate cache entry arena to hold the deserialized version.
		 */

		entry->data = walloc(dw->value_size);
		entry->len = dw->value_size;

		bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

		if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
			g_carp("DBMW \"%s\" deserialization error in %s(): %s",
				dw->name,
				stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
				bstr_error(dw->bs));
			/* Not calling value free routine on deserialization failures */
			wfree(entry->data, dw->value_size);
			WFREE(entry);
			return NULL;
		}

		if (lenptr)
			*lenptr = dw->value_size;
	} else {
		/* Raw (unserialized) storage: value must fit the fixed arena size */
		g_assert(dw->value_size >= dval.len);

		if (dval.len) {
			entry->len = dval.len;
			entry->data = wcopy(dval.data, dval.len);
		} else {
			/* Zero-length value: represented as a NULL data pointer */
			entry->data = NULL;
			entry->len = 0;
		}

		if (lenptr)
			*lenptr = dval.len;
	}

	/* Invariant: empty value <=> NULL data pointer */
	g_assert((entry->len != 0) == (entry->data != NULL));

	/*
	 * Insert into cache.
	 */

	(void) allocate_entry(dw, key, entry);

	return entry->data;
}
/**
 * Flush a dirty cached value back to the underlying DB map.
 *
 * When the cached entry is flagged "absent", the key is physically removed
 * from the database.  Otherwise the value is serialized first (if a packing
 * routine was configured at open time) and then stored.  The entry's dirty
 * bit is cleared only when the underlying operation succeeded.
 *
 * @param dw		the DBM wrapper
 * @param key		the key whose cached value is being written back
 * @param value		the dirty cached entry
 *
 * @return TRUE on success
 */
static gboolean
write_back(dbmw_t *dw, gconstpointer key, struct cached *value)
{
	dbmap_datum_t d;
	gboolean success;
	const char *action = value->absent ? "deleting" : "flushing";

	g_assert(value->dirty);

	if (value->absent) {
		/* Key not present, value is null item */
		d.data = NULL;
		d.len = 0;
	} else if (dw->pack != NULL) {
		/*
		 * Serialize value into our reused message block if a
		 * serialization routine was provided.
		 */

		pmsg_reset(dw->mb);
		(*dw->pack)(dw->mb, value->data);
		d.data = pmsg_start(dw->mb);
		d.len = pmsg_size(dw->mb);

		/*
		 * We allocated the message block one byte larger than the
		 * maximum size, in order to detect unexpected serialization
		 * overflows.
		 */

		if (d.len > dw->value_data_size) {
			/* Don't g_carp() as this is asynchronous wrt data change */
			g_warning("DBMW \"%s\" serialization overflow in %s() "
				"whilst %s dirty entry",
				dw->name,
				stacktrace_routine_name(func_to_pointer(dw->pack), FALSE),
				action);
			return FALSE;
		}
	} else {
		/* No serializer: store the raw cached bytes directly */
		d.data = value->data;
		d.len = value->len;
	}

	/*
	 * If cached entry is absent, delete the key.
	 * Otherwise store the serialized value.
	 *
	 * Dirty bit is cleared on success.
	 */

	if (common_dbg > 4)
		g_debug("DBMW \"%s\" %s dirty value (%lu byte%s)",
			dw->name, action,
			(unsigned long) d.len, 1 == d.len ? "" : "s");

	dw->ioerr = FALSE;
	success = value->absent ?
		dbmap_remove(dw->dm, key) : dbmap_insert(dw->dm, key, d);

	if (success) {
		value->dirty = FALSE;
	} else if (dbmap_has_ioerr(dw->dm)) {
		dw->ioerr = TRUE;
		dw->error = errno;
		g_warning("DBMW \"%s\" I/O error whilst %s dirty entry: %s",
			dw->name, action, dbmap_strerror(dw->dm));
	} else {
		g_warning("DBMW \"%s\" error whilst %s dirty entry: %s",
			dw->name, action, dbmap_strerror(dw->dm));
	}

	return success;
}
/** * Main task scheduling timer. */ static gboolean bg_sched_timer(void *unused_arg) { struct bgtask * volatile bt; volatile int remain = MAX_LIFE; volatile int target; volatile unsigned schedules = 0; volatile int ticks; bgret_t ret; g_assert(current_task == NULL); g_assert(bg_runcount >= 0); (void) unused_arg; bg_ticker_adjust_period(); /* * Loop as long as there are tasks to be scheduled and we have some * time left to spend. */ while (bg_runcount > 0 && remain > 0) { /* * Compute how much time we can spend for this task. */ target = MAX_LIFE / bg_runcount; target = MIN(target, remain); bt = bg_sched_pick(); g_assert(bt != NULL); /* bg_runcount > 0 => there is a task */ g_assert(bt->flags & TASK_F_RUNNABLE); bt->flags &= ~TASK_F_NOTICK; /* We'll want tick cost update */ /* * Compute how many ticks we can ask for this processing step. * * We don't allow brutal variations of the amount of ticks larger * than DELTA_FACTOR. */ if (bt->tick_cost > 0.0) { g_assert(bt->prev_ticks >= 0); g_assert(bt->prev_ticks <= INT_MAX / DELTA_FACTOR); if (target < bt->tick_cost * (INT_MAX / DELTA_FACTOR - 1)) ticks = 1 + target / bt->tick_cost; else ticks = INT_MAX / DELTA_FACTOR; if (bt->prev_ticks) { if (ticks > bt->prev_ticks * DELTA_FACTOR) { ticks = bt->prev_ticks * DELTA_FACTOR; } else if (ticks < bt->prev_ticks / DELTA_FACTOR) { if (bt->prev_ticks > DELTA_FACTOR) ticks = bt->prev_ticks / DELTA_FACTOR; else ticks = 1; } } g_assert(ticks > 0); } else { ticks = 1; } bt->ticks = ticks; bt->ticks_used = ticks; /* * Switch to the selected task. */ bg_task_switch(bt, 0); schedules++; g_assert(current_task == bt); g_assert(bt->flags & TASK_F_RUNNING); /* * Before running the step, ensure we setjmp(), so that they * may call bg_task_exit() and immediately come back here. */ if (setjmp(bt->env)) { /* * So they exited, or someone is killing the task. 
*/ if (bg_debug > 1) g_debug("BGTASK back from setjmp() for \"%s\"", bt->name); bt->flags |= TASK_F_NOTICK; bg_task_switch(NULL, target); if (bg_debug > 0 && remain < bt->elapsed) { g_debug("%s: remain=%d, bt->elapsed=%d", G_STRFUNC, remain, bt->elapsed); } remain -= MIN(remain, bt->elapsed); bg_task_terminate(bt); continue; } /* * Run the next step. */ if (bg_debug > 2 && 0 == bt->seqno) { g_debug("BGTASK \"%s\" starting step #%d (%s)", bt->name, bt->step, stacktrace_routine_name( func_to_pointer(bt->stepvec[bt->step]), FALSE)); } if (bg_debug > 4) { g_debug("BGTASK \"%s\" running step #%d.%d with %d tick%s", bt->name, bt->step, bt->seqno, ticks, ticks == 1 ? "" : "s"); } bg_task_deliver_signals(bt); /* Send any queued signal */ /* * If task is a daemon task, and we're starting at the first step, * process the first item in the work queue. */ if ((bt->flags & TASK_F_DAEMON) && bt->step == 0 && bt->seqno == 0) { gpointer item; g_assert(bt->wq != NULL); /* Runnable daemon, must have work */ item = bt->wq->data; if (bg_debug > 2) g_debug("BGTASK daemon \"%s\" starting with item 0x%lx", bt->name, (gulong) item); (*bt->start_cb)(bt, bt->ucontext, item); } g_assert(bt->step < bt->stepcnt); ret = (*bt->stepvec[bt->step])(bt, bt->ucontext, ticks); bg_task_switch(NULL, target); /* Stop current task, update stats */ if (bg_debug > 0 && remain < bt->elapsed) { g_debug("%s: remain=%d, bt->elapsed=%d", G_STRFUNC, remain, bt->elapsed); } remain -= MIN(remain, bt->elapsed); if (bg_debug > 4) g_debug("BGTASK \"%s\" step #%d.%d ran %d tick%s " "in %d usecs [ret=%d]", bt->name, bt->step, bt->seqno, bt->ticks_used, bt->ticks_used == 1 ? "" : "s", bt->elapsed, ret); /* * Analyse return code from processing callback. 
*/ switch (ret) { case BGR_DONE: /* OK, end processing */ bg_task_ended(bt); break; case BGR_NEXT: /* OK, move to next step */ if (bt->step == (bt->stepcnt - 1)) bg_task_ended(bt); else { bt->seqno = 0; bt->step++; bt->tick_cost = 0.0; /* Don't know cost of this new step */ } break; case BGR_MORE: bt->seqno++; break; case BGR_ERROR: bt->exitcode = -1; /* Fake an exit(-1) */ bg_task_terminate(bt); break; } } if (dead_tasks != NULL) bg_reclaim_dead(); /* Free dead tasks */ if (bg_debug > 3 && MAX_LIFE != remain) { g_debug("BGTASK runable=%d, ran for %lu usecs, scheduling %u task%s", bg_runcount, MAX_LIFE - remain, schedules, 1 == schedules ? "" : "s"); } return TRUE; /* Keep calling */ }
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 *
 * Invoked by the underlying dbmap iterator for each on-disk key/value pair.
 * The cache is consulted first so that user callbacks always see the most
 * recent (possibly dirty) value instead of the stale disk copy.
 *
 * @param removing	TRUE when iterating for removal (callback decides deletion)
 * @param key		the current key
 * @param d		the raw (serialized) datum read from disk
 * @param arg		the "struct foreach_ctx *" iteration context
 *
 * @return TRUE when the lower layer must physically delete the entry.
 */
static gboolean
dbmw_foreach_common(gboolean removing, gpointer key, dbmap_datum_t *d, gpointer arg)
{
	struct foreach_ctx *ctx = arg;
	dbmw_t *dw = ctx->dw;
	struct cached *entry;

	dbmw_check(dw);

	entry = map_lookup(dw->values, key);
	if (entry != NULL) {
		/*
		 * Key / value pair is present in the cache.
		 *
		 * This affects us in two ways:
		 *
		 * - We may already know that the key was deleted, in which case
		 * that entry is just skipped: no further access is possible
		 * through DBMW until that key is recreated. We still return
		 * TRUE to make sure the lower layers will delete the entry
		 * physically, since deletion has not been flushed yet (that's
		 * the reason we're still iterating on it).
		 *
		 * - Should the cached key need to be deleted (as determined by
		 * the user callback, we make sure we delete the entry in the
		 * cache upon callback return).
		 */

		entry->traversed = TRUE;	/* Signal we iterated on cached value */

		if (entry->absent)
			return TRUE;		/* Key was already deleted, info cached */

		if (removing) {
			gboolean status;

			/* User callback decides whether this key must go away */
			status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
			if (status) {
				/* Cannot delete while iterating: mark for later purge */
				entry->removable = TRUE;	/* Discard it after traversal */
			}
			return status;
		} else {
			(*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
			/* FALSE: plain traversal never deletes the on-disk entry */
			return FALSE;
		}
	} else {
		gboolean status = FALSE;
		gpointer data = d->data;
		size_t len = d->len;

		/*
		 * Deserialize data if needed, but do not cache this value.
		 * Iterating over the map must not disrupt the cache.
		 */

		if (dw->unpack) {
			/* Deserialized copy lives only for the callback's duration */
			len = dw->value_size;
			data = walloc(len);

			bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

			if (!dbmw_deserialize(dw, dw->bs, data, len)) {
				g_carp("DBMW \"%s\" deserialization error in %s(): %s",
					dw->name,
					stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
					bstr_error(dw->bs));
				/* Not calling value free routine on deserialization failures */
				wfree(data, len);
				/* FALSE: keep the (undecodable) entry on disk untouched */
				return FALSE;
			}
		}

		if (removing) {
			status = (*ctx->u.cbr)(key, data, len, ctx->arg);
		} else {
			(*ctx->u.cb)(key, data, len, ctx->arg);
		}

		if (dw->unpack) {
			/* Dispose of the transient deserialized copy made above */
			if (dw->valfree)
				(*dw->valfree)(data, len);
			wfree(data, len);
		}

		return status;
	}
}
/** * Map iterator to traverse cached entries that were not already flagged * as being traversed, invoking the supplied trampoline callback. */ static void cache_finish_traversal(void *key, void *value, void *data) { struct cached *entry = value; struct cache_foreach_ctx *fctx = data; dbmap_datum_t d; if (entry->traversed) return; #define dw fctx->foreach->dw if (dbg_ds_debugging(dw->dbg, 3, DBG_DSF_ITERATOR)) { dbg_ds_log(dw->dbg, dw, "%s: traversing cached %s key=%s%s", G_STRFUNC, entry->dirty ? "dirty" : "clean", dbg_ds_keystr(dw->dbg, key, (size_t) -1), entry->absent ? " (absent)" : ""); } if (entry->absent) return; /* Entry not there, just caching it is missing */ /* * We should not be traversing a clean entry at this stage: if the entry * is clean, it means it was flushed to the database, and we should * have traversed it already. Loudly warn, but process anyway! */ if (!entry->dirty && !fctx->warned_clean_key) { fctx->warned_clean_key = TRUE; s_critical("%s(): DBMW \"%s\" " "iterating via %s over a clean key in cache", G_STRFUNC, dw->name, stacktrace_routine_name(fctx->foreach->u.any, FALSE)); } #undef dw d.data = entry->data; d.len = entry->len; /* * We ignore the returned value because to-be-removed data (when traversing * for removal) will be marked as "removable": we can't delete them yet as * we are traversing the cache structure already. * * For values we're keeping, we increment the cached count: these are keys * present in the cache but not in the underlying database because they * have not been flushed yet. Knowing this count allows us to compute an * accurate item count without flushing. */ if (fctx->removing) { if ((*fctx->u.cbr)(key, &d, fctx->foreach)) { fctx->removed++; /* Item removed in cache_free_removable() */ entry->removable = TRUE; } else { fctx->cached++; } } else { (*fctx->u.cb)(key, &d, fctx->foreach); fctx->cached++; } }