/**
 * Set the map cache size, as an amount of 1 KiB pages.
 *
 * @param dw		the DBM wrapper
 * @param pages		amount of 1 KiB pages to devote to the map cache
 *
 * @return TRUE on success.
 */
gboolean
dbmw_set_map_cache(dbmw_t *dw, long pages)
{
    dbmw_check(dw);

    /* Delegate to the underlying DB map layer; 0 means success there */
    return dbmap_set_cachesize(dw->dm, pages) == 0 ? TRUE : FALSE;
}
/**
 * Helper routine to free list and keys returned by dbmw_all_keys().
 *
 * @param dw	the DBM wrapper the keys were snapshot from
 * @param keys	the singly-linked list returned by dbmw_all_keys()
 */
void
dbmw_free_all_keys(const dbmw_t *dw, GSList *keys)
{
    dbmw_check(dw);
    /* Keys were allocated by the dbmap layer, so it must free them */
    dbmap_free_all_keys(dw->dm, keys);
}
/**
 * Record debugging configuration.
 *
 * The configuration is attached as-is for the DBMW layer, and a private
 * patched copy is propagated to the underlying DBMAP layer.
 *
 * @param dw	the DBM wrapper
 * @param dbg	the debugging configuration to attach
 */
void
dbmw_set_debugging(dbmw_t *dw, const dbg_config_t *dbg)
{
    dbmw_check(dw);

    dw->dbg = dbg;

    if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_DEBUGGING)) {
        dbg_ds_log(dw->dbg, dw, "%s: attached with %s back-end "
            "(max cached = %zu, key=%zu bytes, value=%zu bytes, "
            "%zu max serialized)", G_STRFUNC,
            dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
            dw->max_cached, dw->key_size, dw->value_size,
            dw->value_data_size);
    }

    /*
     * Patch in place for the DBMAP.
     */

    WFREE_TYPE_NULL(dw->dbmap_dbg);		/* Release any previous copy */

    dw->dbmap_dbg = WCOPY(dbg);			/* Private copy we may patch */
    /* NOTE(review): o2str cleared — presumably the DBMW object stringifier
     * does not apply at the DBMAP level; confirm against dbg_config_t docs. */
    dw->dbmap_dbg->o2str = NULL;
    dw->dbmap_dbg->type = "DBMAP";

    dbmap_set_debugging(dw->dm, dw->dbmap_dbg);
}
/**
 * Write value to the database file, possibly caching it and deferring write.
 *
 * Any registered value cleanup callback will be invoked right after the value
 * is written to disk (for immediated writes) or removed from the cache (for
 * deferred writes).
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param value the start of the value in memory
 * @param length length of the value
 */
void
dbmw_write(dbmw_t *dw, gconstpointer key, gpointer value, size_t length)
{
    struct cached *entry;

    dbmw_check(dw);
    g_assert(key);
    g_assert(length <= dw->value_size);
    /* A zero length mandates a NULL value and vice-versa */
    g_assert(length || value == NULL);
    g_assert(length == 0 || value);

    dw->w_access++;		/* Statistics: one more write access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        /* Cached entry: update it in place and defer the disk write */
        if (entry->dirty)
            dw->w_hits++;
        else if (entry->absent)
            dw->count_needs_sync = TRUE;	/* Key exists now */
        fill_entry(dw, entry, value, length);
        /* Most recently used entries sit at the tail of the LRU list */
        hash_list_moveto_tail(dw->keys, key);
    } else if (dw->max_cached > 1) {
        /* Not cached yet but caching enabled: create a deferred entry */
        entry = allocate_entry(dw, key, NULL);
        fill_entry(dw, entry, value, length);
        dw->count_needs_sync = TRUE;	/* Does not know whether key exists */
    } else {
        /* Caching disabled (single-entry cache): write through immediately */
        write_immediately(dw, key, value, length);
    }
}
/**
 * Flag whether database is volatile (never outlives a close).
 *
 * @param dw			the DBM wrapper
 * @param is_volatile	whether the database is volatile
 *
 * @return TRUE on success.
 */
gboolean
dbmw_set_volatile(dbmw_t *dw, gboolean is_volatile)
{
    dbmw_check(dw);

    /*
     * BUG FIX: the flag was unconditionally set to TRUE, ignoring the
     * ``is_volatile'' parameter even though that parameter was being
     * propagated to the dbmap layer below.  Record the requested value so
     * the wrapper's view stays consistent with the underlying map's.
     */
    dw->is_volatile = is_volatile;
    return 0 == dbmap_set_volatile(dw->dm, is_volatile);
}
/**
 * Store DBMW map to disk in an SDBM database, at the specified base.
 * Two files are created (using suffixes .pag and .dir).
 *
 * @param dw the DBMW map to store
 * @param base base path for the persistent database
 * @param inplace if TRUE and map was an SDBM already, persist as itself
 *
 * @return TRUE on success.
 */
gboolean
dbmw_store(dbmw_t *dw, const char *base, gboolean inplace)
{
    gboolean ok;

    dbmw_check(dw);

    /* Flush dirty cached values first so the persisted image is complete */
    dbmw_sync(dw, DBMW_SYNC_CACHE);

    ok = dbmap_store(dw->dm, base, inplace);
    return ok;
}
/**
 * Snapshot all the keys, returning them into a singly linked list.
 * To free the returned keys, use the dbmw_free_all_keys() helper.
 *
 * @param dw	the DBM wrapper
 *
 * @return a list of all the keys, to be freed via dbmw_free_all_keys().
 */
GSList *
dbmw_all_keys(dbmw_t *dw)
{
    GSList *keys;

    dbmw_check(dw);

    /* Push pending (dirty) cached values down first, so that keys known
     * only to the cache are visible at the dbmap level. */
    dbmw_sync(dw, DBMW_SYNC_CACHE);

    keys = dbmap_all_keys(dw->dm);
    return keys;
}
/**
 * Copy all the data from one DBMW map to another, replacing values if the
 * destination is not empty and already holds some data.
 *
 * @param from	the source DBMW map
 * @param to	the destination DBMW map
 *
 * @return TRUE on success.
 */
gboolean
dbmw_copy(dbmw_t *from, dbmw_t *to)
{
    dbmw_check(from);
    dbmw_check(to);

    /* Flush source so its dbmap holds everything; empty destination cache */
    dbmw_sync(from, DBMW_SYNC_CACHE);
    dbmw_sync(to, DBMW_SYNC_CACHE);
    dbmw_clear_cache(to);

    /*
     * Since ``from'' was sync'ed and the cache from ``to'' was cleared,
     * we can ignore caches and handle the copy at the dbmap level.
     */

    return dbmap_copy(from->dm, to->dm);
}
/**
 * Write value to the database file immediately, without caching for write-back
 * nor for future reading. If defined, the registered value cleanup callback
 * is invoked before returning.
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param value the start of the value in memory
 * @param length length of the value
 */
void
dbmw_write_nocache(dbmw_t *dw, gconstpointer key, gpointer value, size_t length)
{
    dbmw_check(dw);
    g_assert(key);
    g_assert(length <= dw->value_size);
    g_assert(length || value == NULL);
    g_assert(length == 0 || value);

    /*
     * BUG FIX: flush the value BEFORE discarding any cached entry.
     *
     * The data allocation model of DBMW allows one to issue a dbmw_read(),
     * modify the returned value in place and immediately request a write of
     * that same data.  Removing the cached entry first would free the very
     * memory we are about to serialize — a use-after-free.  Hence we must
     * remove the cached entry only after flushing its value.
     */
    write_immediately(dw, key, value, length);
    (void) remove_entry(dw, key, TRUE, FALSE); /* Discard any cached data */
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 *
 * @param dw		the DBM wrapper to destroy
 * @param close_map	whether to also destroy the underlying DB map
 */
void
dbmw_destroy(dbmw_t *dw, bool close_map)
{
    dbmw_check(dw);

    if (common_stats) {
        s_debug("DBMW destroying \"%s\" with %s back-end "
            "(read cache hits = %.2f%% on %s request%s, "
            "write cache hits = %.2f%% on %s request%s)",
            dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
            dw->r_hits * 100.0 / MAX(1, dw->r_access),
            uint64_to_string(dw->r_access), plural(dw->r_access),
            dw->w_hits * 100.0 / MAX(1, dw->w_access),
            uint64_to_string2(dw->w_access), plural(dw->w_access));
    }

    if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_DESTROY)) {
        dbg_ds_log(dw->dbg, dw, "%s: with %s back-end "
            "(read cache hits = %.2f%% on %s request%s, "
            "write cache hits = %.2f%% on %s request%s)",
            G_STRFUNC, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
            dw->r_hits * 100.0 / MAX(1, dw->r_access),
            uint64_to_string(dw->r_access), plural(dw->r_access),
            dw->w_hits * 100.0 / MAX(1, dw->w_access),
            uint64_to_string2(dw->w_access), plural(dw->w_access));
    }

    /*
     * If we close the map and we're volatile, there's no need to flush
     * the cache as the data is going to be gone soon anyway.
     */

    if (!close_map || !dw->is_volatile) {
        dbmw_sync(dw, DBMW_SYNC_CACHE);
    }

    dbmw_clear_cache(dw);
    hash_list_free(&dw->keys);
    map_destroy(dw->values);

    if (dw->mb)
        pmsg_free(dw->mb);	/* NOTE(review): serialization buffer, presumably */
    bstr_free(&dw->bs);

    if (close_map)
        dbmap_destroy(dw->dm);

    WFREE_TYPE_NULL(dw->dbmap_dbg);

    dw->magic = 0;		/* Invalidate magic before freeing the structure */
    WFREE(dw);
}
/**
 * Clear the cache, discard everything.
 *
 * @param dw	the DBM wrapper whose cache must be emptied
 */
static void
dbmw_clear_cache(dbmw_t *dw)
{
    dbmw_check(dw);

    /*
     * In the cache, the hash list and the value cache share the same
     * key pointers. Therefore, we need to iterate on the map only
     * to free both at the same time.
     */

    /* Clear the LRU list first: free_cached() frees the shared keys */
    hash_list_clear(dw->keys);
    map_foreach_remove(dw->values, free_cached, dw);
}
/**
 * Map iterator to free cached entries.
 *
 * @param key	the cached key (shared with the LRU hash list)
 * @param value	the struct cached entry to dispose of
 * @param data	the dbmw_t wrapper
 *
 * @return TRUE always, so the entry is removed from the map.
 */
static gboolean
free_cached(gpointer key, gpointer value, gpointer data)
{
    struct cached *cv = value;
    dbmw_t *dw = data;

    dbmw_check(dw);
    g_assert((0 == cv->len) == (NULL == cv->data));

    free_value(dw, cv, TRUE);
    wfree(key, dbmw_keylen(dw, key));
    WFREE(cv);

    return TRUE;	/* Drop entry from the map */
}
/**
 * Map iterator to free cached entries.
 *
 * @param key	the cached key (shared with the key hash list)
 * @param value	the struct cached entry to dispose of
 * @param data	the dbmw_t wrapper
 *
 * @return TRUE always, so that the entry is removed from the map.
 */
static bool
free_cached(void *key, void *value, void *data)
{
    dbmw_t *dw = data;
    struct cached *entry = value;

    dbmw_check(dw);
    /* Invariant: length and data pointer are both set or both cleared */
    g_assert(!entry->len == !entry->data);

    free_value(dw, entry, TRUE);
    wfree(key, dbmw_keylen(dw, key));
    WFREE(entry);
    return TRUE;
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 *
 * @param dw	the DBM wrapper
 * @param cbr	callback invoked per item; returning TRUE removes the item
 * @param arg	opaque user argument passed to the callback
 */
void
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, gpointer arg)
{
    struct foreach_ctx ctx;
    struct cache_foreach_ctx fctx;

    dbmw_check(dw);

    /*
     * Before iterating we flush the deleted keys we know about in the cache
     * and whose deletion was deferred, so that the underlying map will
     * not have to iterate on them.
     */

    dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

    /*
     * Some values may be present only in the cache. Hence we clear all
     * marks in the cache and each traversed value that happens to be
     * present in the cache will be marked as "traversed".
     *
     * We flushed deleted keys above, but that does not remove them from
     * the cache structure. We don't need to traverse these after iterating
     * on the map, so we make sure they are artificially set to "traversed".
     */

    ctx.u.cbr = cbr;
    ctx.arg = arg;
    ctx.dw = dw;

    map_foreach(dw->values, cache_reset_before_traversal, NULL);
    dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

    /*
     * BUG FIX: zero ``fctx'' before handing it to cache_finish_traversal():
     * that traversal updates counters held in the structure (see the use of
     * fctx.cached / fctx.removed in the counting variant of this routine),
     * which were previously left uninitialized here.
     */
    ZERO(&fctx);
    fctx.removing = TRUE;
    fctx.foreach = &ctx;
    fctx.u.cbr = dbmw_foreach_remove_trampoline;

    /*
     * Continue traversal with all the cached entries that were not traversed
     * already because they do not exist in the underlying map.
     *
     * Any cached entry that needs to be removed will be marked as such
     * and we'll complete processing by discarding from the cache all
     * the entries that have been marked as "removable" during the traversal.
     */

    map_foreach(dw->values, cache_finish_traversal, &fctx);
    map_foreach_remove(dw->values, cache_free_removable, dw);
}
/**
 * Write value to the database file, possibly caching it and deferring write.
 *
 * Any registered value cleanup callback will be invoked right after the value
 * is written to disk (for immediated writes) or removed from the cache (for
 * deferred writes).
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param value the start of the value in memory
 * @param length length of the value
 */
void
dbmw_write(dbmw_t *dw, const void *key, void *value, size_t length)
{
    struct cached *entry;

    dbmw_check(dw);
    g_assert(key);
    g_assert(length <= dw->value_size);
    /* A zero length mandates a NULL value and vice-versa */
    g_assert(length || value == NULL);
    g_assert(length == 0 || value);

    dw->w_access++;		/* Statistics: one more write access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        /* Cached entry: update in place, disk write is deferred */
        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING | DBG_DSF_UPDATE)) {
            dbg_ds_log(dw->dbg, dw, "%s: %s key=%s%s", G_STRFUNC,
                entry->dirty ? "dirty" : "clean",
                dbg_ds_keystr(dw->dbg, key, (size_t) -1),
                entry->absent ? " (was absent)" : "");
        }

        if (entry->dirty)
            dw->w_hits++;
        if (entry->absent)
            dw->cached++;			/* Key exists now, in unflushed status */
        fill_entry(dw, entry, value, length);
        /* Most recently used entries live at the tail of the LRU list */
        hash_list_moveto_tail(dw->keys, key);

    } else if (dw->max_cached > 1) {
        /* Not cached yet, caching enabled: create a deferred entry */
        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING | DBG_DSF_UPDATE)) {
            dbg_ds_log(dw->dbg, dw, "%s: deferring key=%s", G_STRFUNC,
                dbg_ds_keystr(dw->dbg, key, (size_t) -1));
        }

        entry = allocate_entry(dw, key, NULL);
        fill_entry(dw, entry, value, length);
        dw->count_needs_sync = TRUE;	/* Does not know whether key exists */

    } else {
        /* Caching disabled: write through to the dbmap layer now */
        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING | DBG_DSF_UPDATE)) {
            dbg_ds_log(dw->dbg, dw, "%s: writing key=%s", G_STRFUNC,
                dbg_ds_keystr(dw->dbg, key, (size_t) -1));
        }

        write_immediately(dw, key, value, length);
    }
}
/**
 * Delete key from database.
 *
 * @param dw	the DBM wrapper
 * @param key	the key to delete (constant-width, determined at open time)
 */
void
dbmw_delete(dbmw_t *dw, gconstpointer key)
{
    struct cached *entry;

    dbmw_check(dw);
    g_assert(key);

    dw->w_access++;		/* Deletion counts as a write access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        if (entry->dirty)
            dw->w_hits++;
        if (!entry->absent) {
            dw->count_needs_sync = TRUE;	/* Deferred delete */
            /* Mark the entry absent instead of flushing a removal now */
            fill_entry(dw, entry, NULL, 0);
            entry->absent = TRUE;
        }
        hash_list_moveto_tail(dw->keys, key);
    } else {
        dw->ioerr = FALSE;
        dbmap_remove(dw->dm, key);

        if (dbmap_has_ioerr(dw->dm)) {
            dw->ioerr = TRUE;
            dw->error = errno;
            g_warning("DBMW \"%s\" I/O error whilst deleting key: %s",
                dw->name, dbmap_strerror(dw->dm));
        }

        /*
         * If the maximum value length of the DB is 0, then it is used as a
         * "search table" only, meaning there will be no read to get values,
         * only existence checks.
         *
         * Therefore, it makes sense to cache that the key is no longer valid.
         * Otherwise, possibly pushing a value out of the cache to record
         * a deletion is not worth it.
         */

        if (0 == dw->value_size) {
            WALLOC0(entry);
            entry->absent = TRUE;
            (void) allocate_entry(dw, key, entry);
        }
    }
}
/**
 * Is key present in the database?
 *
 * @param dw	the DBM wrapper
 * @param key	the key to check
 *
 * @return TRUE if the key exists; FALSE if it does not or on I/O error
 * (in which case dw->ioerr is set).
 */
gboolean
dbmw_exists(dbmw_t *dw, gconstpointer key)
{
    struct cached *entry;
    gboolean ret;

    dbmw_check(dw);
    g_assert(key);

    dw->r_access++;		/* Statistics: one more read access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        dw->r_hits++;
        return !entry->absent;	/* Answer from cache, no disk access */
    }

    dw->ioerr = FALSE;
    ret = dbmap_contains(dw->dm, key);

    if (dbmap_has_ioerr(dw->dm)) {
        dw->ioerr = TRUE;
        dw->error = errno;
        g_warning("DBMW \"%s\" I/O error whilst checking key existence: %s",
            dw->name, dbmap_strerror(dw->dm));
        return FALSE;
    }

    /*
     * If the maximum value length of the DB is 0, then it is used as a
     * "search table" only, meaning there will be no read to get values,
     * only existence checks.
     *
     * Therefore, it makes sense to cache existence checks. A data read
     * will also correctly return a null item from the cache.
     */

    if (0 == dw->value_size) {
        WALLOC0(entry);
        entry->absent = !ret;
        (void) allocate_entry(dw, key, entry);
    }

    return ret;
}
/**
 * Map iterator to free cached entries that have been marked as removable.
 *
 * @param key	the cached key (shared with the LRU hash list)
 * @param value	the struct cached entry
 * @param data	the dbmw_t wrapper
 *
 * @return TRUE when the entry was freed and must leave the map.
 */
static gboolean
cache_free_removable(gpointer key, gpointer value, gpointer data)
{
    dbmw_t *dw = data;
    struct cached *ce = value;

    dbmw_check(dw);
    g_assert((0 == ce->len) == (NULL == ce->data));

    if (!ce->removable)
        return FALSE;		/* Keep this entry in the cache */

    /* Dispose of the value, then of the shared key and the entry itself */
    free_value(dw, ce, TRUE);
    hash_list_remove(dw->keys, key);
    wfree(key, dbmw_keylen(dw, key));
    WFREE(ce);

    return TRUE;		/* Drop entry from the map */
}
/**
 * Write value to the database file immediately, without caching for write-back
 * nor for future reading. If defined, the registered value cleanup callback
 * is invoked before returning.
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param value the start of the value in memory
 * @param length length of the value
 */
void
dbmw_write_nocache(dbmw_t *dw, const void *key, void *value, size_t length)
{
    dbmw_check(dw);
    g_assert(key);
    g_assert(length <= dw->value_size);
    /* A zero length mandates a NULL value and vice-versa */
    g_assert(length || value == NULL);
    g_assert(length == 0 || value);

    /*
     * The data allocation model of DBMW allows one to issue a dbmw_read(),
     * modify the actual value we got and immediately request a write of that
     * same data.
     *
     * Therefore, we must remove the cached entry only after flushing its value.
     */

    write_immediately(dw, key, value, length);
    (void) remove_entry(dw, key, TRUE, FALSE); /* Discard any cached data */
}
/**
 * Destroy the DBM wrapper, optionally closing the underlying DB map.
 *
 * @param dw		the DBM wrapper to destroy
 * @param close_map	whether to also destroy the underlying DB map
 */
void
dbmw_destroy(dbmw_t *dw, gboolean close_map)
{
    dbmw_check(dw);

    if (common_stats)
        g_debug("DBMW destroying \"%s\" with %s back-end "
            "(read cache hits = %.2f%% on %s request%s, "
            "write cache hits = %.2f%% on %s request%s)",
            dw->name, dbmw_map_type(dw) == DBMAP_SDBM ? "sdbm" : "map",
            dw->r_hits * 100.0 / MAX(1, dw->r_access),
            uint64_to_string(dw->r_access), 1 == dw->r_access ? "" : "s",
            dw->w_hits * 100.0 / MAX(1, dw->w_access),
            uint64_to_string2(dw->w_access), 1 == dw->w_access ? "" : "s");

    /*
     * If we close the map and we're volatile, there's no need to flush
     * the cache as the data is going to be gone soon anyway.
     */

    if (!close_map || !dw->is_volatile) {
        dbmw_sync(dw, DBMW_SYNC_CACHE);
    }

    dbmw_clear_cache(dw);
    hash_list_free(&dw->keys);
    map_destroy(dw->values);

    if (dw->mb)
        pmsg_free(dw->mb);	/* NOTE(review): serialization buffer, presumably */
    bstr_free(&dw->bs);

    if (close_map)
        dbmap_destroy(dw->dm);

    dw->magic = 0;		/* Invalidate magic before freeing the structure */
    WFREE(dw);
}
/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data. These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data, this
 * is managed directly by the DBM wrapper.
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param lenptr if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT gpointer
dbmw_read(dbmw_t *dw, gconstpointer key, size_t *lenptr)
{
    struct cached *entry;
    dbmap_datum_t dval;

    dbmw_check(dw);
    g_assert(key);

    dw->r_access++;		/* Statistics: one more read access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        /* Cache hit: serve the cached (possibly NULL/absent) value */
        dw->r_hits++;
        if (lenptr)
            *lenptr = entry->len;
        return entry->data;
    }

    /*
     * Not cached, must read from DB.
     */

    dw->ioerr = FALSE;
    dval = dbmap_lookup(dw->dm, key);

    if (dbmap_has_ioerr(dw->dm)) {
        dw->ioerr = TRUE;
        dw->error = errno;
        g_warning("DBMW \"%s\" I/O error whilst reading entry: %s",
            dw->name, dbmap_strerror(dw->dm));
        return NULL;
    } else if (NULL == dval.data)
        return NULL;	/* Not found in DB */

    /*
     * Value was found, allocate a cache entry object for it.
     */

    WALLOC0(entry);

    /*
     * Deserialize data if needed.
     */

    if (dw->unpack) {
        /*
         * Allocate cache entry arena to hold the deserialized version.
         */

        entry->data = walloc(dw->value_size);
        entry->len = dw->value_size;

        bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

        if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
            g_carp("DBMW \"%s\" deserialization error in %s(): %s",
                dw->name,
                stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
                bstr_error(dw->bs));

            /* Not calling value free routine on deserialization failures */

            wfree(entry->data, dw->value_size);
            WFREE(entry);
            return NULL;
        }

        if (lenptr)
            *lenptr = dw->value_size;
    } else {
        /* No deserializer: keep a raw copy of the stored bytes */
        g_assert(dw->value_size >= dval.len);

        if (dval.len) {
            entry->len = dval.len;
            entry->data = wcopy(dval.data, dval.len);
        } else {
            entry->data = NULL;
            entry->len = 0;
        }

        if (lenptr)
            *lenptr = dval.len;
    }

    g_assert((entry->len != 0) == (entry->data != NULL));

    /*
     * Insert into cache.
     */

    (void) allocate_entry(dw, key, entry);

    return entry->data;
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 *
 * @param dw	the DBM wrapper
 * @param cbr	callback invoked per item; returning TRUE removes the item
 * @param arg	opaque user argument passed to the callback
 *
 * @return the amount of removed entries.
 */
size_t
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, void *arg)
{
    struct foreach_ctx ctx;
    struct cache_foreach_ctx fctx;
    size_t pruned;

    dbmw_check(dw);

    if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
        dbg_ds_log(dw->dbg, dw, "%s: starting with %s(%p)", G_STRFUNC,
            stacktrace_function_name(cbr), arg);
    }

    /*
     * Before iterating we flush the deleted keys we know about in the cache
     * and whose deletion was deferred, so that the underlying map will
     * not have to iterate on them.
     */

    dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

    /*
     * Some values may be present only in the cache. Hence we clear all
     * marks in the cache and each traversed value that happens to be
     * present in the cache will be marked as "traversed".
     *
     * We flushed deleted keys above, but that does not remove them from
     * the cache structure. We don't need to traverse these after iterating
     * on the map, so we make sure they are artificially set to "traversed".
     */

    ctx.u.cbr = cbr;
    ctx.arg = arg;
    ctx.dw = dw;

    map_foreach(dw->values, cache_reset_before_traversal, NULL);
    pruned = dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

    ZERO(&fctx);	/* Also clears the counters updated during traversal */
    fctx.removing = TRUE;
    fctx.foreach = &ctx;
    fctx.u.cbr = dbmw_foreach_remove_trampoline;

    /*
     * Continue traversal with all the cached entries that were not traversed
     * already because they do not exist in the underlying map.
     *
     * We count these and remember how many there are so that we can determine
     * the correct overall item count after an iteration without having to
     * flush all the dirty values!
     *
     * Any cached entry that needs to be removed will be marked as such
     * and we'll complete processing by discarding from the cache all
     * the entries that have been marked as "removable" during the traversal.
     */

    map_foreach(dw->values, cache_finish_traversal, &fctx);
    map_foreach_remove(dw->values, cache_free_removable, dw);
    dw->cached = fctx.cached;
    dw->count_needs_sync = FALSE;	/* We just counted items the slow way! */

    if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
        dbg_ds_log(dw->dbg, dw, "%s: done with %s(%p): "
            "pruned %zu from dbmap, %zu from cache (%zu total), "
            "has %zu unflushed entr%s in cache", G_STRFUNC,
            stacktrace_function_name(cbr), arg,
            pruned, fctx.removed, pruned + fctx.removed,
            dw->cached, plural_y(dw->cached));
    }

    return pruned + fctx.removed;
}
/**
 * Read value from database file, returning a pointer to the allocated
 * deserialized data. These data can be modified freely and stored back,
 * but their lifetime will not exceed that of the next call to a dbmw
 * operation on the same descriptor.
 *
 * User code does not need to bother with freeing the allocated data, this
 * is managed directly by the DBM wrapper.
 *
 * @param dw the DBM wrapper
 * @param key the key (constant-width, determined at open time)
 * @param lenptr if non-NULL, writes length of (deserialized) value
 *
 * @return pointer to value, or NULL if it was either not found or the
 * deserialization failed.
 */
G_GNUC_HOT void *
dbmw_read(dbmw_t *dw, const void *key, size_t *lenptr)
{
    struct cached *entry;
    dbmap_datum_t dval;

    dbmw_check(dw);
    g_assert(key);

    dw->r_access++;		/* Statistics: one more read access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        /* Cache hit: serve the cached (possibly NULL/absent) value */
        if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING | DBG_DSF_ACCESS)) {
            dbg_ds_log(dw->dbg, dw, "%s: read cache hit on %s key=%s%s",
                G_STRFUNC, entry->dirty ? "dirty" : "clean",
                dbg_ds_keystr(dw->dbg, key, (size_t) -1),
                entry->absent ? " (absent)" : "");
        }

        dw->r_hits++;
        if (lenptr)
            *lenptr = entry->len;
        return entry->data;
    }

    /*
     * Not cached, must read from DB.
     */

    dw->ioerr = FALSE;
    dval = dbmap_lookup(dw->dm, key);

    if (dbmap_has_ioerr(dw->dm)) {
        dw->ioerr = TRUE;
        dw->error = errno;
        s_warning_once_per(LOG_PERIOD_SECOND,
            "DBMW \"%s\" I/O error whilst reading entry: %s",
            dw->name, dbmap_strerror(dw->dm));
        return NULL;
    } else if (NULL == dval.data)
        return NULL;	/* Not found in DB */

    /*
     * Value was found, allocate a cache entry object for it.
     */

    WALLOC0(entry);

    /*
     * Deserialize data if needed.
     */

    if (dw->unpack) {
        /*
         * Allocate cache entry arena to hold the deserialized version.
         */

        entry->data = walloc(dw->value_size);
        entry->len = dw->value_size;

        bstr_reset(dw->bs, dval.data, dval.len, BSTR_F_ERROR);

        if (!dbmw_deserialize(dw, dw->bs, entry->data, dw->value_size)) {
            s_critical("DBMW \"%s\" deserialization error in %s(): %s",
                dw->name,
                stacktrace_function_name(dw->unpack),
                bstr_error(dw->bs));

            /* Not calling value free routine on deserialization failures */

            wfree(entry->data, dw->value_size);
            WFREE(entry);
            return NULL;
        }

        if (lenptr)
            *lenptr = dw->value_size;
    } else {
        /* No deserializer: keep a raw copy of the stored bytes */
        g_assert(dw->value_size >= dval.len);

        if (dval.len) {
            entry->len = dval.len;
            entry->data = wcopy(dval.data, dval.len);
        } else {
            entry->data = NULL;
            entry->len = 0;
        }

        if (lenptr)
            *lenptr = dval.len;
    }

    g_assert((entry->len != 0) == (entry->data != NULL));

    /*
     * Insert into cache.
     */

    (void) allocate_entry(dw, key, entry);

    if (dbg_ds_debugging(dw->dbg, 4, DBG_DSF_CACHING)) {
        dbg_ds_log(dw->dbg, dw, "%s: cached %s key=%s%s",
            G_STRFUNC, entry->dirty ? "dirty" : "clean",
            dbg_ds_keystr(dw->dbg, key, (size_t) -1),
            entry->absent ? " (absent)" : "");
    }

    return entry->data;
}
/**
 * Synchronize dirty values.
 *
 * The ``which'' argument is a bitfield indicating the set of things to
 * synchronize:
 *
 * DBMW_SYNC_CACHE requests that dirty values from the local DBMW cache
 * be flushed to the DB map layer immediately.
 *
 * DBMW_SYNC_MAP requests that the DB map layer be flushed, if it is backed
 * by disk data.
 *
 * If DBMW_DELETED_ONLY is specified along with DBMW_SYNC_CACHE, only the
 * dirty values that are marked as pending deletion are flushed.
 *
 * @param dw	the DBM wrapper
 * @param which	bitfield of DBMW_SYNC_* / DBMW_DELETED_ONLY flags
 *
 * @return amount of value flushes plus amount of sdbm page flushes, -1 if
 * an error occurred.
 */
ssize_t
dbmw_sync(dbmw_t *dw, int which)
{
    ssize_t amount = 0;
    size_t pages = 0, values = 0;
    bool error = FALSE;

    dbmw_check(dw);

    if (which & DBMW_SYNC_CACHE) {
        struct flush_context ctx;

        ctx.dw = dw;
        ctx.error = FALSE;
        ctx.deleted_only = booleanize(which & DBMW_DELETED_ONLY);
        ctx.amount = 0;

        if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING)) {
            dbg_ds_log(dw->dbg, dw, "%s: syncing cache%s", G_STRFUNC,
                ctx.deleted_only ? " (deleted only)" : "");
        }

        map_foreach(dw->values, flush_dirty, &ctx);

        /* A partial (deleted-only) or failed flush leaves counts unsettled */
        if (!ctx.error && !ctx.deleted_only)
            dw->count_needs_sync = FALSE;

        /*
         * We can safely reset the amount of cached entries to 0, regardless
         * of whether we only sync'ed deleted entries: that value is rather
         * meaningless when ``count_needs_sync'' is TRUE anyway since we will
         * come here first, and we'll then reset it to zero.
         */

        dw->cached = 0;		/* No more dirty values */

        amount += ctx.amount;
        values = ctx.amount;
        error = ctx.error;
    }

    if (which & DBMW_SYNC_MAP) {
        ssize_t ret;

        if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING))
            dbg_ds_log(dw->dbg, dw, "%s: syncing map", G_STRFUNC);

        ret = dbmap_sync(dw->dm);
        if (-1 == ret) {
            error = TRUE;	/* Report failure, but keep the values count */
        } else {
            amount += ret;
            pages = ret;
        }
    }

    if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING)) {
        dbg_ds_log(dw->dbg, dw, "%s: %s (flushed %zu value%s, %zu page%s)",
            G_STRFUNC, error ? "FAILED" : "OK",
            values, plural(values), pages, plural(pages));
    }

    return error ? -1 : amount;
}
/**
 * Delete key from database.
 *
 * @param dw	the DBM wrapper
 * @param key	the key to delete (constant-width, determined at open time)
 */
void
dbmw_delete(dbmw_t *dw, const void *key)
{
    struct cached *entry;

    dbmw_check(dw);
    g_assert(key);

    dw->w_access++;		/* Deletion counts as a write access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING | DBG_DSF_DELETE)) {
            dbg_ds_log(dw->dbg, dw, "%s: %s key=%s%s", G_STRFUNC,
                entry->dirty ? "dirty" : "clean",
                dbg_ds_keystr(dw->dbg, key, (size_t) -1),
                entry->absent ? " (was absent)" : "");
        }

        if (entry->dirty)
            dw->w_hits++;

        if (!entry->absent) {
            /*
             * Entry was present but is now deleted.
             *
             * If it was clean, then it was flushed to the database and we now
             * know that there is one less entry in the database than there is
             * physically present in the map.
             *
             * If it was dirty, then we do not know whether it exists in the
             * database or not, and therefore we cannot adjust the amount
             * of cached entries down.
             */

            if (entry->dirty)
                dw->count_needs_sync = TRUE;	/* Deferred delete */
            else
                dw->cached--;			/* One less entry in database */

            fill_entry(dw, entry, NULL, 0);
            entry->absent = TRUE;
        }

        hash_list_moveto_tail(dw->keys, key);

    } else {
        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_DELETE)) {
            dbg_ds_log(dw->dbg, dw, "%s: removing key=%s", G_STRFUNC,
                dbg_ds_keystr(dw->dbg, key, (size_t) -1));
        }

        dw->ioerr = FALSE;
        dbmap_remove(dw->dm, key);

        if (dbmap_has_ioerr(dw->dm)) {
            dw->ioerr = TRUE;
            dw->error = errno;
            s_warning("DBMW \"%s\" I/O error whilst deleting key: %s",
                dw->name, dbmap_strerror(dw->dm));
        }

        /*
         * If the maximum value length of the DB is 0, then it is used as a
         * "search table" only, meaning there will be no read to get values,
         * only existence checks.
         *
         * Therefore, it makes sense to cache that the key is no longer valid.
         * Otherwise, possibly pushing a value out of the cache to record
         * a deletion is not worth it.
         */

        if (0 == dw->value_size) {
            WALLOC0(entry);
            entry->absent = TRUE;
            (void) allocate_entry(dw, key, entry);

            if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING)) {
                dbg_ds_log(dw->dbg, dw, "%s: cached absent key=%s", G_STRFUNC,
                    dbg_ds_keystr(dw->dbg, key, (size_t) -1));
            }
        }
    }
}
/**
 * Common code for dbmw_foreach_trampoline() and
 * dbmw_foreach_remove_trampoline().
 *
 * @param removing	whether this is a removing traversal (uses ctx->u.cbr)
 * @param key		the current key being traversed
 * @param d		the raw (serialized) datum from the underlying map
 * @param arg		the struct foreach_ctx iteration context
 *
 * @return TRUE when the entry must be physically removed from the map.
 */
static gboolean
dbmw_foreach_common(gboolean removing, gpointer key, dbmap_datum_t *d,
    gpointer arg)
{
    struct foreach_ctx *ctx = arg;
    dbmw_t *dw = ctx->dw;
    struct cached *entry;

    dbmw_check(dw);

    entry = map_lookup(dw->values, key);
    if (entry != NULL) {
        /*
         * Key / value pair is present in the cache.
         *
         * This affects us in two ways:
         *
         * - We may already know that the key was deleted, in which case
         * that entry is just skipped: no further access is possible
         * through DBMW until that key is recreated. We still return
         * TRUE to make sure the lower layers will delete the entry
         * physically, since deletion has not been flushed yet (that's
         * the reason we're still iterating on it).
         *
         * - Should the cached key need to be deleted (as determined by
         * the user callback, we make sure we delete the entry in the
         * cache upon callback return).
         */

        entry->traversed = TRUE;	/* Signal we iterated on cached value */

        if (entry->absent)
            return TRUE;	/* Key was already deleted, info cached */

        if (removing) {
            gboolean status;
            status = (*ctx->u.cbr)(key, entry->data, entry->len, ctx->arg);
            if (status) {
                entry->removable = TRUE;	/* Discard it after traversal */
            }
            return status;
        } else {
            (*ctx->u.cb)(key, entry->data, entry->len, ctx->arg);
            return FALSE;
        }
    } else {
        gboolean status = FALSE;
        gpointer data = d->data;
        size_t len = d->len;

        /*
         * Deserialize data if needed, but do not cache this value.
         * Iterating over the map must not disrupt the cache.
         */

        if (dw->unpack) {
            len = dw->value_size;
            data = walloc(len);	/* Temporary buffer, freed after callback */

            bstr_reset(dw->bs, d->data, d->len, BSTR_F_ERROR);

            if (!dbmw_deserialize(dw, dw->bs, data, len)) {
                g_carp("DBMW \"%s\" deserialization error in %s(): %s",
                    dw->name,
                    stacktrace_routine_name(func_to_pointer(dw->unpack), FALSE),
                    bstr_error(dw->bs));

                /* Not calling value free routine on deserialization failures */

                wfree(data, len);
                return FALSE;
            }
        }

        if (removing) {
            status = (*ctx->u.cbr)(key, data, len, ctx->arg);
        } else {
            (*ctx->u.cb)(key, data, len, ctx->arg);
        }

        if (dw->unpack) {
            /* Dispose of the deserialized copy (and its value, if callback set) */
            if (dw->valfree)
                (*dw->valfree)(data, len);
            wfree(data, len);
        }

        return status;
    }
}
/**
 * Is key present in the database?
 *
 * @param dw	the DBM wrapper
 * @param key	the key to check
 *
 * @return TRUE if the key exists; FALSE if it does not or on I/O error
 * (in which case dw->ioerr is set).
 */
bool
dbmw_exists(dbmw_t *dw, const void *key)
{
    struct cached *entry;
    bool ret;

    dbmw_check(dw);
    g_assert(key);

    dw->r_access++;		/* Statistics: one more read access */

    entry = map_lookup(dw->values, key);
    if (entry) {
        /* Answer from cache, no disk access needed */
        if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING | DBG_DSF_ACCESS)) {
            dbg_ds_log(dw->dbg, dw, "%s: read cache hit on %s key=%s%s",
                G_STRFUNC, entry->dirty ? "dirty" : "clean",
                dbg_ds_keystr(dw->dbg, key, (size_t) -1),
                entry->absent ? " (absent)" : "");
        }

        dw->r_hits++;
        return !entry->absent;
    }

    dw->ioerr = FALSE;
    ret = dbmap_contains(dw->dm, key);

    if (dbmap_has_ioerr(dw->dm)) {
        dw->ioerr = TRUE;
        dw->error = errno;
        s_warning("DBMW \"%s\" I/O error whilst checking key existence: %s",
            dw->name, dbmap_strerror(dw->dm));
        return FALSE;
    }

    /*
     * If the maximum value length of the DB is 0, then it is used as a
     * "search table" only, meaning there will be no read to get values,
     * only existence checks.
     *
     * Therefore, it makes sense to cache existence checks. A data read
     * will also correctly return a null item from the cache.
     *
     * If the value length is not 0, we only cache negative lookups (i.e.
     * the value was not found) because we did not get any value so it is
     * possible to record an absent cache entry.
     */

    if (0 == dw->value_size || !ret) {
        WALLOC0(entry);
        entry->absent = !ret;
        (void) allocate_entry(dw, key, entry);

        if (dbg_ds_debugging(dw->dbg, 2, DBG_DSF_CACHING)) {
            dbg_ds_log(dw->dbg, dw, "%s: cached %s key=%s", G_STRFUNC,
                entry->absent ? "absent" : "present",
                dbg_ds_keystr(dw->dbg, key, (size_t) -1));
        }
    }

    return ret;
}