/**
 * Copy all the data from one DBMW map to another, replacing values if the
 * destination is not empty and already holds some data.
 *
 * @return TRUE on success.
 */
gboolean
dbmw_copy(dbmw_t *from, dbmw_t *to)
{
	dbmw_check(to);
	dbmw_check(from);

	/* Flush both sides, then drop the destination's cached view entirely */

	dbmw_sync(from, DBMW_SYNC_CACHE);
	dbmw_sync(to, DBMW_SYNC_CACHE);
	dbmw_clear_cache(to);

	/*
	 * With the source flushed and the destination cache emptied, both
	 * caches can be ignored and the copy delegated to the dbmap layer.
	 */

	return dbmap_copy(from->dm, to->dm);
}
/**
 * Snapshot all the keys, returning them into a singly linked list.
 * To free the returned keys, use the dbmw_free_all_keys() helper.
 */
GSList *
dbmw_all_keys(dbmw_t *dw)
{
	dbmw_check(dw);

	/* Push dirty cached entries down so the map view is complete */
	dbmw_sync(dw, DBMW_SYNC_CACHE);

	return dbmap_all_keys(dw->dm);
}
/**
 * Store DBMW map to disk in an SDBM database, at the specified base.
 * Two files are created (using suffixes .pag and .dir).
 *
 * @param dw		the DBMW map to store
 * @param base		base path for the persistent database
 * @param inplace	if TRUE and map was an SDBM already, persist as itself
 *
 * @return TRUE on success.
 */
gboolean
dbmw_store(dbmw_t *dw, const char *base, gboolean inplace)
{
	dbmw_check(dw);

	/* Persist pending cached changes before handing off to the map layer */
	dbmw_sync(dw, DBMW_SYNC_CACHE);

	return dbmap_store(dw->dm, base, inplace);
}
/**
 * @return amount of items held.
 */
size_t
dbmw_count(dbmw_t *dw)
{
	dbmw_check(dw);		/* Was missing: every sibling entry point checks dw */

	/*
	 * Must write pending new items first and delete removed items to allow
	 * proper count in the underlying map.
	 */

	if (dw->count_needs_sync)
		dbmw_sync(dw, DBMW_SYNC_CACHE);

	return dbmap_count(dw->dm);
}
/**
 * Attempt to rebuild the DB on disk.
 *
 * @return TRUE if successful.
 */
bool
dbmw_rebuild(dbmw_t *dw)
{
	/*
	 * The rebuild happens at the SDBM level, hence SDBM must be given the
	 * latest database state: flush every dirty cached value, pending
	 * deletions included, before handing control down.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE);

	return dbmap_rebuild(dw->dm);
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 */
void
dbmw_destroy(dbmw_t *dw, bool close_map)
{
	dbmw_check(dw);

	{
		/* Hit ratios; MAX() guards against division by zero */
		double rh = dw->r_hits * 100.0 / MAX(1, dw->r_access);
		double wh = dw->w_hits * 100.0 / MAX(1, dw->w_access);

		if (common_stats) {
			s_debug("DBMW destroying \"%s\" with %s back-end "
				"(read cache hits = %.2f%% on %s request%s, "
				"write cache hits = %.2f%% on %s request%s)",
				dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
				rh, uint64_to_string(dw->r_access), plural(dw->r_access),
				wh, uint64_to_string2(dw->w_access), plural(dw->w_access));
		}

		if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_DESTROY)) {
			dbg_ds_log(dw->dbg, dw, "%s: with %s back-end "
				"(read cache hits = %.2f%% on %s request%s, "
				"write cache hits = %.2f%% on %s request%s)",
				G_STRFUNC,
				dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
				rh, uint64_to_string(dw->r_access), plural(dw->r_access),
				wh, uint64_to_string2(dw->w_access), plural(dw->w_access));
		}
	}

	/*
	 * A volatile map being closed is about to vanish anyway, hence
	 * flushing its cache would be wasted work: only sync otherwise.
	 */

	if (!(close_map && dw->is_volatile))
		dbmw_sync(dw, DBMW_SYNC_CACHE);

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	WFREE_TYPE_NULL(dw->dbmap_dbg);

	dw->magic = 0;
	WFREE(dw);
}
/**
 * Synchronize a DBMW database, flushing its local cache.
 */
void
dbstore_flush(dbmw_t *dw)
{
	ssize_t flushed = dbmw_sync(dw, DBMW_SYNC_CACHE);

	if (-1 == flushed) {
		g_warning("DBSTORE could not flush cache for DBMW \"%s\": %m",
			dbmw_name(dw));
	} else if (flushed != 0 && dbstore_debug > 1) {
		g_debug("DBSTORE flushed %u dirty value%s in DBMW \"%s\"",
			(unsigned) flushed, 1 == flushed ? "" : "s", dbmw_name(dw));
	}
}
/**
 * Synchronize a DBMW database, flushing its SDBM cache.
 */
void
dbstore_sync(dbmw_t *dw)
{
	ssize_t flushed = dbmw_sync(dw, DBMW_SYNC_MAP);

	if (-1 == flushed) {
		g_warning("DBSTORE could not synchronize DBMW \"%s\": %m",
			dbmw_name(dw));
	} else if (flushed != 0 && dbstore_debug > 1) {
		g_debug("DBSTORE flushed %u SDBM page%s in DBMW \"%s\"",
			(unsigned) flushed, 1 == flushed ? "" : "s", dbmw_name(dw));
	}
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 */
void
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, gpointer arg)
{
	struct foreach_ctx ctx;
	struct cache_foreach_ctx fctx;

	dbmw_check(dw);

	/*
	 * Before iterating we flush the deleted keys we know about in the cache
	 * and whose deletion was deferred, so that the underlying map will
	 * not have to iterate on them.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

	/*
	 * Some values may be present only in the cache.  Hence we clear all
	 * marks in the cache and each traversed value that happens to be
	 * present in the cache will be marked as "traversed".
	 *
	 * We flushed deleted keys above, but that does not remove them from
	 * the cache structure.  We don't need to traverse these after iterating
	 * on the map, so we make sure they are artifically set to "traversed".
	 */

	ctx.u.cbr = cbr;
	ctx.arg = arg;
	ctx.dw = dw;

	map_foreach(dw->values, cache_reset_before_traversal, NULL);
	dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

	/*
	 * Fix: only three of the context fields were initialized, leaving the
	 * remaining ones indeterminate (undefined behaviour if read during the
	 * traversal below).  Zero the whole structure before filling it in.
	 */
	memset(&fctx, 0, sizeof fctx);
	fctx.removing = TRUE;
	fctx.foreach = &ctx;
	fctx.u.cbr = dbmw_foreach_remove_trampoline;

	/*
	 * Continue traversal with all the cached entries that were not traversed
	 * already because they do not exist in the underlying map.
	 *
	 * Any cached entry that needs to be removed will be marked as such
	 * and we'll complete processing by discarding from the cache all
	 * the entries that have been marked as "removable" during the traversal.
	 */

	map_foreach(dw->values, cache_finish_traversal, &fctx);
	map_foreach_remove(dw->values, cache_free_removable, dw);
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 */
void
dbmw_destroy(dbmw_t *dw, gboolean close_map)
{
	dbmw_check(dw);

	if (common_stats) {
		g_debug("DBMW destroying \"%s\" with %s back-end "
			"(read cache hits = %.2f%% on %s request%s, "
			"write cache hits = %.2f%% on %s request%s)",
			dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
			dw->r_hits * 100.0 / MAX(1, dw->r_access),
			uint64_to_string(dw->r_access), 1 == dw->r_access ? "" : "s",
			dw->w_hits * 100.0 / MAX(1, dw->w_access),
			uint64_to_string2(dw->w_access), 1 == dw->w_access ? "" : "s");
	}

	/*
	 * A volatile map being closed is about to vanish anyway, hence
	 * flushing its cache would be wasted work: only sync otherwise.
	 */

	if (!(close_map && dw->is_volatile))
		dbmw_sync(dw, DBMW_SYNC_CACHE);

	dbmw_clear_cache(dw);
	hash_list_free(&dw->keys);
	map_destroy(dw->values);

	if (dw->mb)
		pmsg_free(dw->mb);
	bstr_free(&dw->bs);

	if (close_map)
		dbmap_destroy(dw->dm);

	dw->magic = 0;
	WFREE(dw);
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 *
 * @return the amount of removed entries.
 */
size_t
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, void *arg)
{
	struct foreach_ctx iter_ctx;
	struct cache_foreach_ctx cache_ctx;
	size_t pruned;

	dbmw_check(dw);

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: starting with %s(%p)", G_STRFUNC,
			stacktrace_function_name(cbr), arg);
	}

	/*
	 * Flush the deferred deletions we know about in the cache before
	 * iterating, so that the underlying map never has to present keys
	 * that are already logically gone.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

	/*
	 * Some values live only in the cache.  Clear every traversal mark
	 * there first: each value met during the map walk that also sits in
	 * the cache gets flagged "traversed".
	 *
	 * The deleted keys flushed above still have cache entries; those are
	 * artificially flagged as "traversed" too, since the post-map pass
	 * below must not revisit them.
	 */

	iter_ctx.u.cbr = cbr;
	iter_ctx.arg = arg;
	iter_ctx.dw = dw;

	map_foreach(dw->values, cache_reset_before_traversal, NULL);
	pruned = dbmap_foreach_remove(dw->dm,
		dbmw_foreach_remove_trampoline, &iter_ctx);

	ZERO(&cache_ctx);
	cache_ctx.removing = TRUE;
	cache_ctx.foreach = &iter_ctx;
	cache_ctx.u.cbr = dbmw_foreach_remove_trampoline;

	/*
	 * Second pass: visit the cached entries the map walk never reached
	 * because they exist only in the cache.
	 *
	 * Counting these lets us maintain a correct overall item count after
	 * an iteration without having to flush all the dirty values!
	 *
	 * Entries the callback wants gone are merely marked "removable" here;
	 * the final sweep discards every entry so marked from the cache.
	 */

	map_foreach(dw->values, cache_finish_traversal, &cache_ctx);
	map_foreach_remove(dw->values, cache_free_removable, dw);

	dw->cached = cache_ctx.cached;
	dw->count_needs_sync = FALSE;	/* We just counted items the slow way! */

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: done with %s(%p): "
			"pruned %zu from dbmap, %zu from cache (%zu total), "
			"has %zu unflushed entr%s in cache",
			G_STRFUNC, stacktrace_function_name(cbr), arg,
			pruned, cache_ctx.removed, pruned + cache_ctx.removed,
			dw->cached, plural_y(dw->cached));
	}

	return pruned + cache_ctx.removed;
}