/*
 * Traverse the binary tree in pre-order (node, then left subtree, then
 * right subtree), invoking ``functor'' on each node's content along with
 * the opaque ``data'' argument.
 */
static void map_foreach(t_btree *root, t_simple_functor functor, void *data)
{
	if (root == NULL)
		return;

	functor(root->content, data);
	map_foreach(root->left, functor, data);
	map_foreach(root->right, functor, data);
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 */
void
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, gpointer arg)
{
	struct foreach_ctx ctx;
	/*
	 * BUGFIX: zero-initialize the whole context.  Only ``removing'',
	 * ``foreach'' and ``u.cbr'' are set explicitly below, yet
	 * cache_finish_traversal() presumably updates counters within the
	 * context (the newer variant of this routine reads fctx.removed and
	 * fctx.cached after traversal, and clears the struct with ZERO()).
	 * Updating uninitialized automatic storage is undefined behavior.
	 */
	struct cache_foreach_ctx fctx = { 0 };

	dbmw_check(dw);

	/*
	 * Before iterating we flush the deleted keys we know about in the cache
	 * and whose deletion was deferred, so that the underlying map will
	 * not have to iterate on them.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

	/*
	 * Some values may be present only in the cache.  Hence we clear all
	 * marks in the cache and each traversed value that happens to be
	 * present in the cache will be marked as "traversed".
	 *
	 * We flushed deleted keys above, but that does not remove them from
	 * the cache structure.  We don't need to traverse these after iterating
	 * on the map, so we make sure they are artifically set to "traversed".
	 */

	ctx.u.cbr = cbr;
	ctx.arg = arg;
	ctx.dw = dw;

	map_foreach(dw->values, cache_reset_before_traversal, NULL);
	dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

	fctx.removing = TRUE;
	fctx.foreach = &ctx;
	fctx.u.cbr = dbmw_foreach_remove_trampoline;

	/*
	 * Continue traversal with all the cached entries that were not traversed
	 * already because they do not exist in the underlying map.
	 *
	 * Any cached entry that needs to be removed will be marked as such
	 * and we'll complete processing by discarding from the cache all
	 * the entries that have been marked as "removable" during the traversal.
	 */

	map_foreach(dw->values, cache_finish_traversal, &fctx);
	map_foreach_remove(dw->values, cache_free_removable, dw);
}
/*
 * Dump the whole map to stdout: the entry count first, then every
 * entry via hashmap_print(), followed by a blank line.
 *
 * NOTE(review): the "%lu" specifier assumes map_count() returns an
 * unsigned long -- confirm against its declaration (use "%zu" if it
 * actually returns size_t).
 */
static void print_all(void)
{
	printf("count: %lu\n", map_count(mapc, map));
	map_foreach(mapc, map, hashmap_print, NULL);
	putchar('\n');
}
/**
 * Synchronize dirty values.
 *
 * The ``which'' argument is a bitfield indicating the set of things to
 * synchronize:
 *
 * DBMW_SYNC_CACHE requests that dirty values from the local DBMW cache
 * be flushed to the DB map layer immediately.
 *
 * DBMW_SYNC_MAP requests that the DB map layer be flushed, if it is backed
 * by disk data.
 *
 * If DBMW_DELETED_ONLY is specified along with DBMW_SYNC_CACHE, only the
 * dirty values that are marked as pending deletion are flushed.
 *
 * @return amount of value flushes plus amount of sdbm page flushes, -1 if
 * an error occurred.
 */
ssize_t
dbmw_sync(dbmw_t *dw, int which)
{
	ssize_t amount = 0;
	gboolean error = FALSE;

	if (which & DBMW_SYNC_CACHE) {
		struct flush_context ctx;

		ctx.dw = dw;
		ctx.error = FALSE;
		ctx.deleted_only = booleanize(which & DBMW_DELETED_ONLY);
		ctx.amount = 0;

		map_foreach(dw->values, flush_dirty, &ctx);

		/*
		 * BUGFIX: only clear ``count_needs_sync'' when ALL the dirty
		 * values were flushed: when the flush was restricted to entries
		 * pending deletion, the item count is still out of sync with the
		 * underlying map (the newer revision of this routine adds the
		 * same !ctx.deleted_only guard).
		 */
		if (!ctx.error && !ctx.deleted_only)
			dw->count_needs_sync = FALSE;

		amount += ctx.amount;
		error = ctx.error;
	}

	if (which & DBMW_SYNC_MAP) {
		ssize_t ret = dbmap_sync(dw->dm);

		if (-1 == ret)
			error = TRUE;
		else
			amount += ret;
	}

	return error ? -1 : amount;
}
/*
 * Reconnect thread body: until the global exit flag is raised, walk the
 * reconnect map (self.rcmq) applying reconnect_map_foreach_cb to each
 * entry, then sleep 100 units between passes.
 */
static emc_cb_t EMC_CALL global_reconnect_cb(void *args)
{
	(void) args;	/* unused */

	for (;;) {
		if (self.exit)
			break;
		map_foreach(self.rcmq, reconnect_map_foreach_cb, NULL);
		nsleep(100);
	}
	return (emc_cb_t) 0;
}
/**
 * Sum the getVt() contribution of every force applied to this object.
 *
 * NOTE(review): map_foreach(String,PhysForce,it,forces) is presumably a
 * project macro expanding to a loop over the ``forces'' map with iterator
 * ``it'' (it->second is the PhysForce) -- confirm against its definition.
 */
double PhysObject::getVt() { double vt=0; map_foreach(String,PhysForce,it,forces) vt+=it->second.getVt(); return vt; }
/**
 * Sum the getVl() contribution of every force applied to this object.
 *
 * NOTE(review): relies on the same project map_foreach iteration macro as
 * getVt() above -- confirm the macro's expansion before restructuring.
 */
double PhysObject::getVl() { double vl=0; map_foreach(String,PhysForce,it,forces) vl+=it->second.getVl(); return vl; }
/*
 * Apply ``functor'' with ``data'' to every element of the map: the map
 * is an array of MAP_TREE_SIZE trees, each one walked via map_foreach().
 */
void ft_map_foreach(t_map map, t_simple_functor functor, void *data)
{
	size_t i;

	for (i = 0; i < MAP_TREE_SIZE; i++)
		map_foreach(map[i], functor, data);
}
/*
 * Destroy an aoi_space, releasing everything it owns.
 *
 * Teardown order matters: objects are freed while the object map is still
 * alive, then the map itself, then the pair list and the four working sets;
 * the space structure itself is handed back to the allocator last.
 */
void aoi_release(struct aoi_space *space) {
	/* Free every stored object first, while the map is still usable. */
	map_foreach(space->object, delete_object, space);
	map_delete(space, space->object);
	delete_pair_list(space);
	delete_set(space,space->watcher_static);
	delete_set(space,space->marker_static);
	delete_set(space,space->watcher_move);
	delete_set(space,space->marker_move);
	/*
	 * Presumably the alloc(ud, ptr, size) callback releases ``ptr'' when
	 * given an existing block -- confirm against the allocator contract.
	 */
	space->alloc(space->alloc_ud, space, sizeof(*space));
}
/*
 * Run one AOI round: deliver previously accumulated pairs, rebuild the
 * four working sets from the object map, then generate the new candidate
 * pairs, invoking ``cb''/``ud'' for each event.
 */
void aoi_message(struct aoi_space *space, aoi_Callback cb, void *ud)
{
	/* Deliver the pairs accumulated during the previous round. */
	flush_pair(space, cb, ud);

	/* Empty the four working sets before refilling them. */
	space->marker_static->number = 0;
	space->marker_move->number = 0;
	space->watcher_static->number = 0;
	space->watcher_move->number = 0;

	/* Classify every object into the watcher/marker, static/move sets. */
	map_foreach(space->object, set_push, space);

	/*
	 * Pair the sets; note there is no (static watcher, static marker)
	 * pass -- two static entities can never newly see each other.
	 */
	gen_pair_list(space, space->watcher_static, space->marker_move, cb, ud);
	gen_pair_list(space, space->watcher_move, space->marker_static, cb, ud);
	gen_pair_list(space, space->watcher_move, space->marker_move, cb, ud);
}
/*
 * Exercise the map: create a space and a map, insert a series of test
 * objects, verify them via check(), then tear everything down and report
 * the allocator's peak/current memory accounting.
 */
int main() {
	struct alloc_cookie cookie = { 0, 0, 0 };
	struct map *m;
	int i;

	SPACE = aoi_create(my_alloc, &cookie);
	m = map_new(SPACE);

	for (i = 0; i < 10; i++)
		test(m, 10, i * 10);
	test(m, 100, 100);
	test(m, 200, 200);
	test(m, 500, 500);

	map_foreach(m, check, m);

	map_delete(SPACE, m);
	aoi_release(SPACE);

	/* After full teardown, ``current'' should account for zero leaks. */
	printf("max memory = %d, current memory = %d\n", cookie.max , cookie.current);
	return 0;
}
/**
 * Record security tokens collected during a node lookup from the supplied map.
 *
 * Each entry of the map is handed to record_token(); no extra user data
 * is needed, hence the NULL argument.
 *
 * @param tokens a map containing KUID => lookup_token_t
 */
void
tcache_record(map_t *tokens)
{
	map_foreach(tokens, record_token, NULL);
}
/**
 * Synchronize dirty values.
 *
 * The ``which'' argument is a bitfield indicating the set of things to
 * synchronize:
 *
 * DBMW_SYNC_CACHE requests that dirty values from the local DBMW cache
 * be flushed to the DB map layer immediately.
 *
 * DBMW_SYNC_MAP requests that the DB map layer be flushed, if it is backed
 * by disk data.
 *
 * If DBMW_DELETED_ONLY is specified along with DBMW_SYNC_CACHE, only the
 * dirty values that are marked as pending deletion are flushed.
 *
 * @return amount of value flushes plus amount of sdbm page flushes, -1 if
 * an error occurred.
 */
ssize_t
dbmw_sync(dbmw_t *dw, int which)
{
	ssize_t amount = 0;
	size_t pages = 0, values = 0;	/* separate tallies, for logging only */
	bool error = FALSE;

	dbmw_check(dw);

	if (which & DBMW_SYNC_CACHE) {
		struct flush_context ctx;

		ctx.dw = dw;
		ctx.error = FALSE;
		ctx.deleted_only = booleanize(which & DBMW_DELETED_ONLY);
		ctx.amount = 0;

		if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING)) {
			dbg_ds_log(dw->dbg, dw, "%s: syncing cache%s",
				G_STRFUNC, ctx.deleted_only ? " (deleted only)" : "");
		}

		map_foreach(dw->values, flush_dirty, &ctx);

		/*
		 * Only clear the flag when every dirty value was flushed, not
		 * merely the ones pending deletion.
		 */
		if (!ctx.error && !ctx.deleted_only)
			dw->count_needs_sync = FALSE;

		/*
		 * We can safely reset the amount of cached entries to 0, regardless
		 * of whether we only sync'ed deleted entries: that value is rather
		 * meaningless when ``count_needs_sync'' is TRUE anyway since we will
		 * come here first, and we'll then reset it to zero.
		 */

		dw->cached = 0;		/* No more dirty values */

		amount += ctx.amount;
		values = ctx.amount;
		error = ctx.error;
	}

	if (which & DBMW_SYNC_MAP) {
		ssize_t ret;

		if (dbg_ds_debugging(dw->dbg, 6, DBG_DSF_CACHING))
			dbg_ds_log(dw->dbg, dw, "%s: syncing map", G_STRFUNC);

		ret = dbmap_sync(dw->dm);

		/* -1 signals an I/O error from the map layer; keep going so the
		 * tally stays meaningful, but remember the failure. */
		if (-1 == ret) {
			error = TRUE;
		} else {
			amount += ret;
			pages = ret;
		}
	}

	if (dbg_ds_debugging(dw->dbg, 5, DBG_DSF_CACHING)) {
		dbg_ds_log(dw->dbg, dw, "%s: %s (flushed %zu value%s, %zu page%s)",
			G_STRFUNC, error ? "FAILED" : "OK",
			values, plural(values), pages, plural(pages));
	}

	return error ? -1 : amount;
}
/**
 * Iterate over the DB, invoking the callback on each item along with the
 * supplied argument and removing the item when the callback returns TRUE.
 *
 * @return the amount of removed entries.
 */
size_t
dbmw_foreach_remove(dbmw_t *dw, dbmw_cbr_t cbr, void *arg)
{
	struct foreach_ctx ctx;
	struct cache_foreach_ctx fctx;
	size_t pruned;		/* entries removed from the underlying dbmap */

	dbmw_check(dw);

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: starting with %s(%p)", G_STRFUNC,
			stacktrace_function_name(cbr), arg);
	}

	/*
	 * Before iterating we flush the deleted keys we know about in the cache
	 * and whose deletion was deferred, so that the underlying map will
	 * not have to iterate on them.
	 */

	dbmw_sync(dw, DBMW_SYNC_CACHE | DBMW_DELETED_ONLY);

	/*
	 * Some values may be present only in the cache. Hence we clear all
	 * marks in the cache and each traversed value that happens to be
	 * present in the cache will be marked as "traversed".
	 *
	 * We flushed deleted keys above, but that does not remove them from
	 * the cache structure. We don't need to traverse these after iterating
	 * on the map, so we make sure they are artifically set to "traversed".
	 */

	ctx.u.cbr = cbr;
	ctx.arg = arg;
	ctx.dw = dw;

	map_foreach(dw->values, cache_reset_before_traversal, NULL);
	pruned = dbmap_foreach_remove(dw->dm, dbmw_foreach_remove_trampoline, &ctx);

	/* Clear the whole context: its counters are updated during traversal. */
	ZERO(&fctx);
	fctx.removing = TRUE;
	fctx.foreach = &ctx;
	fctx.u.cbr = dbmw_foreach_remove_trampoline;

	/*
	 * Continue traversal with all the cached entries that were not traversed
	 * already because they do not exist in the underlying map.
	 *
	 * We count these and remember how many there are so that we can determine
	 * the correct overall item count after an iteration without having to
	 * flush all the dirty values!
	 *
	 * Any cached entry that needs to be removed will be marked as such
	 * and we'll complete processing by discarding from the cache all
	 * the entries that have been marked as "removable" during the traversal.
	 */

	map_foreach(dw->values, cache_finish_traversal, &fctx);
	map_foreach_remove(dw->values, cache_free_removable, dw);

	dw->cached = fctx.cached;
	dw->count_needs_sync = FALSE;	/* We just counted items the slow way! */

	if (dbg_ds_debugging(dw->dbg, 1, DBG_DSF_ITERATOR)) {
		dbg_ds_log(dw->dbg, dw, "%s: done with %s(%p): "
			"pruned %zu from dbmap, %zu from cache (%zu total), "
			"has %zu unflushed entr%s in cache",
			G_STRFUNC,
			stacktrace_function_name(cbr), arg,
			pruned, fctx.removed, pruned + fctx.removed,
			dw->cached, plural_y(dw->cached));
	}

	/* Total removals across both the persistent map and the cache. */
	return pruned + fctx.removed;
}