/* dict_cache_delete - remove one cache entry.
 *
 * Returns the dict_del() status (plus the database error code) for a real
 * delete, or immediate success when the entry is instead flagged for
 * delete-behind because a cleanup scan is positioned on it.
 */
int     dict_cache_delete(DICT_CACHE *cp, const char *cache_key)
{
    const char *myname = "dict_cache_delete";
    DICT   *db = cp->db;
    int     del_res;

    /*
     * Deleting the entry that the sequence scan is currently positioned on
     * misbehaves with some databases. In that case, flag the "current"
     * entry for delete-behind instead of deleting it here, and report
     * success to the caller.
     */
    if (DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key) == 0) {
	del_res = dict_del(db, cache_key);
	if (del_res != 0)
	    msg_rate_delay(&cp->del_log_stamp, cp->log_delay, msg_warn,
			   "%s: could not delete entry for %s",
			   cp->name, cache_key);
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: key=%s (%s)", myname, cache_key,
		     del_res == 0 ? "found" :
		     db->error ? "error" : "not found");
	DICT_ERR_VAL_RETURN(cp, db->error, del_res);
    } else {
	DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: key=%s (current entry - schedule for delete-behind)",
		     myname, cache_key);
	DICT_ERR_VAL_RETURN(cp, DICT_ERR_NONE, DICT_STAT_SUCCESS);
    }
}
/* dict_cache_delete - remove one cache entry.
 *
 * Returns zero when the entry was found and deleted (or when it was
 * flagged for delete-behind); returns the non-zero dict_del() result
 * otherwise.
 */
int     dict_cache_delete(DICT_CACHE *cp, const char *cache_key)
{
    const char *myname = "dict_cache_delete";
    int     zero_means_found;

    /*
     * Deleting the current first/next entry of a sequence scan misbehaves
     * with some databases. Flag that entry for delete-behind instead, and
     * report success to the caller.
     */
    if (DC_MATCH_SAVED_CURRENT_KEY(cp, cache_key)) {
	DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: key=%s (current entry - schedule for delete-behind)",
		     myname, cache_key);
	return (0);
    }

    /*
     * Safe to delete: the cleanup scan (if any) is positioned elsewhere.
     */
    zero_means_found = dict_del(cp->db, cache_key);
    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	msg_info("%s: key=%s (%s)", myname, cache_key,
		 zero_means_found == 0 ? "found" : "not found");
    return (zero_means_found);
}
/* dict_cache_clean_event - examine one cache entry per timer event.
 *
 * Incremental cache-cleanup driver. Each invocation inspects at most one
 * entry, then re-arms itself: with a zero delay while a scan is in
 * progress, or with the full cp->exp_interval delay after a scan
 * completes or aborts. Scan-in-progress state lives in cp->saved_curr_key
 * (non-null while a scan is underway -- presumably maintained by
 * dict_cache_sequence(); TODO confirm against that function).
 */
static void dict_cache_clean_event(int unused_event, char *cache_context)
{
    const char *myname = "dict_cache_clean_event";
    DICT_CACHE *cp = (DICT_CACHE *) cache_context;
    const char *cache_key;
    const char *cache_val;
    int     next_interval;
    VSTRING *stamp_buf;
    int     first_next;

    /*
     * We interleave cache cleanup with other processing, so that the
     * application's service remains available, with perhaps increased
     * latency.
     */

    /*
     * Start a new cache cleanup run.
     */
    if (cp->saved_curr_key == 0) {
	/* Reset the per-scan keep/drop counters before the first entry. */
	cp->retained = cp->dropped = 0;
	first_next = DICT_SEQ_FUN_FIRST;
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: start %s cache cleanup", myname, cp->name);
    }

    /*
     * Continue a cache cleanup run in progress.
     */
    else {
	first_next = DICT_SEQ_FUN_NEXT;
    }

    /*
     * Examine one cache entry. A zero dict_cache_sequence() result means
     * an entry was returned; non-zero means the scan ended (see the
     * cp->error test below for the error vs. end-of-scan distinction).
     */
    if (dict_cache_sequence(cp, first_next, &cache_key, &cache_val) == 0) {
	/* Application callback decides: zero result == entry expired. */
	if (cp->exp_validator(cache_key, cache_val, cp->exp_context) == 0) {
	    /* Don't delete under the scan cursor; flag for delete-behind. */
	    DC_SCHEDULE_FOR_DELETE_BEHIND(cp);
	    cp->dropped++;
	    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
		msg_info("%s: drop %s cache entry for %s",
			 myname, cp->name, cache_key);
	} else {
	    cp->retained++;
	    if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
		msg_info("%s: keep %s cache entry for %s",
			 myname, cp->name, cache_key);
	}
	/* Scan still in progress: re-schedule ourselves without delay. */
	next_interval = 0;
    }

    /*
     * Cache cleanup completed. Report vital statistics.
     */
    else if (cp->error != 0) {
	/* Scan aborted on error: log a "partial" scan and back off. */
	msg_warn("%s: cache cleanup scan terminated due to error", cp->name);
	dict_cache_clean_stat_log_reset(cp, "partial");
	next_interval = cp->exp_interval;
    } else {
	if (cp->user_flags & DICT_CACHE_FLAG_VERBOSE)
	    msg_info("%s: done %s cache cleanup scan", myname, cp->name);
	dict_cache_clean_stat_log_reset(cp, "full");
	/* Record the completion time in the cache itself. */
	stamp_buf = vstring_alloc(100);
	vstring_sprintf(stamp_buf, "%ld", (long) event_time());
	dict_put(cp->db, DC_LAST_CACHE_CLEANUP_COMPLETED,
		 vstring_str(stamp_buf));
	vstring_free(stamp_buf);
	next_interval = cp->exp_interval;
    }
    /* Re-arm the timer; this function is its own continuation. */
    event_request_timer(dict_cache_clean_event, cache_context, next_interval);
}