// Remove every expired entry from the cache.
//
// Only the top of m_expiries is ever inspected: the container exposes a
// priority-queue interface (top/pop), presumably ordered soonest-expiry
// first, so once the top entry is still valid we can stop.
void CertificateCache::drop_expired() {
    for (;;) {
        if (m_expiries.empty())
            break;
        const auto& soonest = m_expiries.top();
        if (!is_expired(soonest))
            break;
        m_certificates.erase(soonest.certificate);
        m_expiries.pop();
    }
}
static void cl_periodic_timeout_check(int fd, short event, void *arg) { struct timeval time_now; gettimeofday(&time_now, NULL); // Iterate over the values submitted by this client unsigned int i; client_value_record * iter; for(i = 0; i < concurrent_values; i++) { iter = &values_table[i]; if (is_expired(&iter->expire_time, &time_now)) { submit_old_value(iter); } } //And set a timeout for calling this function again set_timeout_check(); //Makes the compiler happy fd = fd; event = event; arg = arg; }
/*
 * Activity start hook for the CBS screen.
 *
 * Rate-limits RKE update requests: requestRkeUpdate(UR_CBS) is issued at
 * most once per 1000 ms (is_expired() presumably refreshes the static
 * timestamp when the interval has elapsed -- TODO confirm). The CBS
 * button is redrawn on every start regardless.
 */
static int onStart(activity *act)
{
    (void)act; /* unused; kept to satisfy the activity callback signature */

    static struct timespec lastupdatedtime = {0, 0};

    if (is_expired(&lastupdatedtime, 1000))
        getInfoManager()->requestRkeUpdate(UR_CBS);

    update_cbs_button();

    return MKD_OK;
}
// Re-evaluate the expiration state of every cached mastermind structure.
//
// Each cache copy is handed to check_cache_for_expire() together with the
// configured life/warning/expire intervals; that helper decides whether the
// entry has outlived its preferable life time (its exact semantics live
// elsewhere -- presumably it also logs; TODO confirm).
//
// Side effects:
//  * elliptics_remotes may be re-stored with its expire flag set;
//  * each namespaces_states entry may be re-stored with its expire flag set;
//  * cache_is_expired is recomputed: true iff any namespace state is (or
//    becomes) expired.
void mastermind_t::data::cache_expire() {
	auto preferable_life_time = std::chrono::seconds(m_group_info_update_period);

	cache_is_expired = false;

#if 0
	// Disabled handling of cache_groups, kept for reference.
	{
		auto cache = cache_groups.copy();
		if (!cache.is_expired() && check_cache_for_expire("cache_groups", cache
					, preferable_life_time, warning_time, expire_time)) {
			cache.set_expire(true);
			cache_groups.set(cache);
		}
	}
#endif

	{
		// Only transition entries that are not yet flagged: set_expire(true)
		// and the store are skipped for already-expired copies.
		auto cache = elliptics_remotes.copy();
		if (!cache.is_expired() && check_cache_for_expire("elliptics_remotes", cache
					, preferable_life_time, warning_time, expire_time)) {
			cache.set_expire(true);
			elliptics_remotes.set(cache);
		}
	}

	{
		auto cache_map = namespaces_states.copy();

		for (auto it = cache_map.begin(), end = cache_map.end(); it != end; ++it) {
			const auto &name = it->first;
			auto &cache = it->second;

			// An already-expired namespace state marks the whole cache as
			// expired even before the age check below.
			cache_is_expired = cache_is_expired || cache.is_expired();

			if (check_cache_for_expire("namespaces_states:" + name
						, cache, preferable_life_time, warning_time, expire_time)) {
				cache_is_expired = true;

				// Write the flag back only on the first transition to
				// expired; already-flagged entries are left untouched.
				if (!cache.is_expired()) {
					cache.set_expire(true);
					namespaces_states.set(name, std::move(cache));
				}
			}
		}
	}
}
// Push the referenced Lua value onto the stack.
//
// The registry slot holds a wrapper table whose index 1 stores the actual
// value: fetch the wrapper, read index 1, then replace the wrapper with the
// value so only the value remains on the stack.
// Precondition: the reference must still be alive (asserted).
void weak_ref::get(lua_State * L) {
    assert(!is_expired());
    const auto registry_slot = *(m_ref.lock());
    lua_rawgeti(L, LUA_REGISTRYINDEX, registry_slot); // push wrapper table
    lua_pushinteger(L, 1);
    lua_gettable(L, -2);                              // push wrapper[1]
    lua_replace(L, -2);                               // drop wrapper table
}
/*
 * Decide whether a memcached tuple has expired.
 *
 * Field 1 of the tuple is decoded as two consecutive msgpack uints: the
 * expiration time followed by the tuple's timestamp. Both are handed,
 * together with the service-wide flush timestamp, to is_expired().
 *
 * Returns non-zero when the tuple is expired.
 */
int is_expired_tuple(struct memcached_service *p, box_tuple_t *tuple)
{
	const char *field = box_tuple_field(tuple, 1);
	uint64_t exptime = mp_decode_uint(&field);
	uint64_t tuple_time = mp_decode_uint(&field);
	uint64_t flush_time = p->flush;
	return is_expired(exptime, tuple_time, flush_time);
}
// Return the cached list of elliptics remotes.
//
// An expired cache yields an empty vector rather than stale data.
std::vector<std::string> mastermind_t::get_elliptics_remotes() {
	auto cache = m_data->elliptics_remotes.copy();
	return cache.is_expired() ? std::vector<std::string>() : cache.get_value();
}
// Activity start hook for the range screen.
//
// Rate-limits RKE update requests: requestRkeUpdate(UR_RANGE) is issued at
// most once per 1000 ms (is_expired() presumably refreshes the static
// timestamp when the interval has elapsed -- TODO confirm its semantics).
static int onStart() {
	static struct timespec lastupdatedtime = {0, 0};

	if (is_expired(&lastupdatedtime, 1000))
		getInfoManager()->requestRkeUpdate(UR_RANGE);

#if 0
	// Demo animation intentionally disabled.
	startDemoAnimation();
#endif

	return MKD_OK;
}
// Check whether this lease is still valid against the current master NS.
//
// A lease counts as valid when at least one configured name-server slot
// both matches the master's ip:port and has not expired at `now`; scanning
// stops at the first such slot.
bool LeaseManager::has_valid_lease(const time_t now) const {
  DsRuntimeGlobalInformation& ds_info = DsRuntimeGlobalInformation::instance();
  for (int slot = 0; slot < MAX_SINGLE_CLUSTER_NS_NUM; ++slot) {
    if (0 == ns_ip_port_[slot])
      continue;  // unconfigured slot
    if (ns_ip_port_[slot] == ds_info.master_ns_ip_port_ && !is_expired(now, slot))
      return true;
  }
  return false;
}
// Return the free effective space of the couple containing `group`, or 0
// when the fake_groups_info cache has expired or the group is unknown.
uint64_t mastermind_t::free_effective_space_in_couple_by_group(size_t group) {
	auto cache = m_data->fake_groups_info.copy();
	if (cache.is_expired()) {
		return 0;
	}

	// Bind the cached map once. The original called cache.get_value() for
	// find() and again for end(); besides the wasted call, if get_value()
	// returns by value that compares iterators of two distinct containers,
	// which is undefined behavior.
	const auto &groups_info = cache.get_value();
	auto git = groups_info.find(group);
	if (git == groups_info.end()) {
		return 0;
	}
	return git->second.free_effective_space;
}
// Build the couple list for namespace `ns`.
//
// Returns tuples of (group ids, weight, memory), merged from two sources in
// the cached namespace state:
//  1. weights.data(): one entry per weighted item, keyed by its id,
//     carrying (groups, weight, memory);
//  2. couples.couple_info_map: one entry per couple, keyed by the smallest
//     group id of the couple, with weight 0 and the third field holding
//     free_effective_space.
// Both passes insert into a std::map keyed by that id, so an entry from the
// weights pass wins over a couple-info entry with the same key (map::insert
// keeps the existing value); the result comes back ordered by key.
// An expired cache yields an empty vector.
std::vector<std::tuple<std::vector<int>, uint64_t, uint64_t>> mastermind_t::get_couple_list(
		const std::string &ns) {
	auto namespace_states = m_data->namespaces_states.copy(ns);

	if (namespace_states.is_expired()) {
		return std::vector<std::tuple<std::vector<int>, uint64_t, uint64_t>>();
	}

	const auto &weights = namespace_states.get_value().weights.data();

	std::map<int, std::tuple<std::vector<int>, uint64_t, uint64_t>> result_map;

	for (auto it = weights.begin(), end = weights.end(); it != end; ++it) {
		auto weight = it->weight;
		auto memory = it->memory;
		const auto &couple = it->groups;
		auto group_id = it->id;

		result_map.insert(std::make_pair(group_id
					, std::make_tuple(couple, weight, memory)));
	}

	{
		const auto &couples = namespace_states.get_value().couples.couple_info_map;

		for (auto it = couples.begin(), end = couples.end(); it != end; ++it) {
			const auto &couple_info = it->second;
			const auto &groups = couple_info.groups;

			// Key by the smallest group id of the couple.
			auto couple_id = *std::min_element(groups.begin(), groups.end());

			// insert() is a no-op when the id is already present from the
			// weights pass, so weighted entries take precedence.
			result_map.insert(std::make_pair(couple_id
						, std::make_tuple(groups, static_cast<uint64_t>(0)
							, couple_info.free_effective_space)));
		}
	}

	std::vector<std::tuple<std::vector<int>, uint64_t, uint64_t>> result;
	result.reserve(result_map.size());

	for (auto it = result_map.begin(), end = result_map.end(); it != end; ++it) {
		result.emplace_back(std::move(it->second));
	}

	return result;
}
/* Serve one request through its cache slot.
 *
 * Fast path: the slot exists and its key matches -- refresh it first if it
 * has expired (under the slot lock), then print its content. Slow path: the
 * slot is missing or stale-keyed -- regenerate it under the lock; if locking
 * or filling fails, generate the content uncached via slot->fn() and report
 * success anyway (a warning is logged).
 *
 * Returns 0 on success or the error from print_slot()/open_slot().
 */
static int process_slot(struct cache_slot *slot)
{
	int err;

	err = open_slot(slot);
	if (!err && slot->match) {
		if (is_expired(slot)) {
			if (!lock_slot(slot)) {
				/* If the cachefile has been replaced between
				 * `open_slot` and `lock_slot`, we'll just
				 * serve the stale content from the original
				 * cachefile. This way we avoid pruning the
				 * newly generated slot. The same code-path
				 * is chosen if fill_slot() fails for some
				 * reason.
				 *
				 * TODO? check if the new slot contains the
				 * same key as the old one, since we would
				 * prefer to serve the newest content.
				 * This will require us to open yet another
				 * file-descriptor and read and compare the
				 * key from the new file, so for now we're
				 * lazy and just ignore the new file.
				 */
				if (is_modified(slot) || fill_slot(slot)) {
					unlock_slot(slot, 0);
					close_lock(slot);
				} else {
					close_slot(slot);
					unlock_slot(slot, 1);
					slot->cache_fd = slot->lock_fd;
				}
			}
		}
		if ((err = print_slot(slot)) != 0) {
			cache_log("[cgit] error printing cache %s: %s (%d)\n",
				  slot->cache_name,
				  strerror(err),
				  err);
		}
		close_slot(slot);
		return err;
	}

	/* If the cache slot does not exist (or its key doesn't match the
	 * current key), let's try to create a new cache slot for this
	 * request. If this fails (for whatever reason), let's just generate
	 * the content without caching it and fool the caller to believe
	 * everything worked out (but print a warning on stdout).
	 */
	close_slot(slot);
	if ((err = lock_slot(slot)) != 0) {
		cache_log("[cgit] Unable to lock slot %s: %s (%d)\n",
			  slot->lock_name, strerror(err), err);
		slot->fn();
		return 0;
	}

	if ((err = fill_slot(slot)) != 0) {
		cache_log("[cgit] Unable to fill slot %s: %s (%d)\n",
			  slot->lock_name, strerror(err), err);
		unlock_slot(slot, 0);
		close_lock(slot);
		slot->fn();
		return 0;
	}

	/* We've got a valid cache slot in the lock file, which is about to
	 * replace the old cache slot. But if we release the lockfile and
	 * then try to open the new cache slot, we might get a race condition
	 * with a concurrent writer for the same cache slot (with a different
	 * key). Let's avoid such a race by just printing the content of the
	 * lock file.
	 */
	slot->cache_fd = slot->lock_fd;
	unlock_slot(slot, 1);
	if ((err = print_slot(slot)) != 0) {
		cache_log("[cgit] error printing cache %s: %s (%d)\n",
			  slot->cache_name,
			  strerror(err),
			  err);
	}
	close_slot(slot);
	return err;
}
/* Obtain an OAuth2 access token for account `addr`.
 *
 * Flow:
 *  1. Unless DC_REGENERATE is set, return the cached, not-yet-expired
 *     access token from the config table.
 *  2. Otherwise build the provider's token endpoint URL -- the initial
 *     auth-code template when no refresh token matching `code` is stored,
 *     the refresh template when one is -- and POST to it via the
 *     DC_EVENT_HTTP_POST callback.
 *  3. Parse the JSON reply with jsmn, persist the new refresh/access
 *     tokens and the expiry timestamp, and return the access token.
 *
 * Serialized through context->oauth2_critical.
 *
 * Returns a heap-allocated access token string; on any failure a
 * dc_strdup(NULL) result is returned instead (never a bare NULL).
 */
char* dc_get_oauth2_access_token(dc_context_t* context, const char* addr,
                                 const char* code, int flags)
{
	oauth2_t*    oauth2 = NULL;
	char*        access_token = NULL;
	char*        refresh_token = NULL;
	char*        refresh_token_for = NULL; /* the auth `code` the stored refresh token belongs to */
	char*        redirect_uri = NULL;
	int          update_redirect_uri_on_success = 0;
	char*        token_url = NULL;
	time_t       expires_in = 0;
	char*        error = NULL;
	char*        error_description = NULL;
	char*        json = NULL;
	jsmn_parser  parser;
	jsmntok_t    tok[128]; /* we do not expect nor read more tokens */
	int          tok_cnt = 0;
	int          locked = 0;

	if (context==NULL || context->magic!=DC_CONTEXT_MAGIC || code==NULL || code[0]==0) {
		dc_log_warning(context, 0, "Internal OAuth2 error");
		goto cleanup;
	}

	if ((oauth2=get_info(addr))==NULL) {
		dc_log_warning(context, 0, "Internal OAuth2 error: 2");
		goto cleanup;
	}

	pthread_mutex_lock(&context->oauth2_critical);
	locked = 1;

	/* Read a previously generated token, unless the caller forces
	 * regeneration or the stored one has expired. */
	if ( !(flags&DC_REGENERATE) && !is_expired(context) ) {
		access_token = dc_sqlite3_get_config(context->sql, "oauth2_access_token", NULL);
		if (access_token!=NULL) {
			goto cleanup; /* success */
		}
	}

	/* Generate a new token: build & call the auth url. */
	refresh_token = dc_sqlite3_get_config(context->sql, "oauth2_refresh_token", NULL);
	refresh_token_for = dc_sqlite3_get_config(context->sql, "oauth2_refresh_token_for", "unset");
	if (refresh_token==NULL || strcmp(refresh_token_for, code)!=0) {
		/* First round (or the stored refresh token belongs to another
		 * auth code): exchange the authorization code itself. */
		dc_log_info(context, 0, "Generate OAuth2 refresh_token and access_token...");
		redirect_uri = dc_sqlite3_get_config(context->sql, "oauth2_pending_redirect_uri", "unset");
		update_redirect_uri_on_success = 1;
		token_url = dc_strdup(oauth2->init_token);
	}
	else {
		dc_log_info(context, 0, "Regenerate OAuth2 access_token by refresh_token...");
		redirect_uri = dc_sqlite3_get_config(context->sql, "oauth2_redirect_uri", "unset");
		token_url = dc_strdup(oauth2->refresh_token);
	}

	replace_in_uri(&token_url, "$CLIENT_ID", oauth2->client_id);
	replace_in_uri(&token_url, "$REDIRECT_URI", redirect_uri);
	replace_in_uri(&token_url, "$CODE", code);
	replace_in_uri(&token_url, "$REFRESH_TOKEN", refresh_token);

	json = (char*)context->cb(context, DC_EVENT_HTTP_POST, (uintptr_t)token_url, 0);
	if (json==NULL) {
		dc_log_warning(context, 0, "Error calling OAuth2 at %s", token_url);
		goto cleanup;
	}

	/* Parse the returned json. */
	jsmn_init(&parser);
	tok_cnt = jsmn_parse(&parser, json, strlen(json), tok, sizeof(tok)/sizeof(tok[0]));
	if (tok_cnt<2 || tok[0].type!=JSMN_OBJECT) {
		dc_log_warning(context, 0, "Failed to parse OAuth2 json from %s", token_url);
		goto cleanup;
	}

	/* Each recognized key at tok[i] has its value read from tok[i+1]. */
	for (int i = 1; i < tok_cnt; i++) {
		if (access_token==NULL && jsoneq(json, &tok[i], "access_token")==0) {
			access_token = jsondup(json, &tok[i+1]);
		}
		else if (refresh_token==NULL && jsoneq(json, &tok[i], "refresh_token")==0) {
			refresh_token = jsondup(json, &tok[i+1]);
		}
		else if (jsoneq(json, &tok[i], "expires_in")==0) {
			char* expires_in_str = jsondup(json, &tok[i+1]);
			if (expires_in_str) {
				time_t val = atol(expires_in_str);
				/* val should be reasonable, maybe between 20 seconds and
				 * 5 years. If out of range, we re-create when the token
				 * gets invalid, which may create some additional load and
				 * requests wrt threads. */
				if (val>20 && val<(60*60*24*365*5)) {
					expires_in = val;
				}
				free(expires_in_str);
			}
		}
		else if (error==NULL && jsoneq(json, &tok[i], "error")==0) {
			error = jsondup(json, &tok[i+1]);
		}
		else if (error_description==NULL && jsoneq(json, &tok[i], "error_description")==0) {
			error_description = jsondup(json, &tok[i+1]);
		}
	}

	if (error || error_description) {
		dc_log_warning(context, 0, "OAuth error: %s: %s",
			error? error : "unknown",
			error_description? error_description : "no details");
		/* continue; errors do not imply everything went wrong */
	}

	/* Update the refresh token if given -- typically on the first round,
	 * but we update it later as well. */
	if (refresh_token && refresh_token[0]) {
		dc_sqlite3_set_config(context->sql, "oauth2_refresh_token", refresh_token);
		dc_sqlite3_set_config(context->sql, "oauth2_refresh_token_for", code);
	}

	/* After that, save the access token. If it's unset, we may get it in
	 * the next round as we have the refresh_token now. */
	if (access_token==NULL || access_token[0]==0) {
		dc_log_warning(context, 0, "Failed to find OAuth2 access token");
		goto cleanup;
	}

	dc_sqlite3_set_config(context->sql, "oauth2_access_token", access_token);
	dc_sqlite3_set_config_int64(context->sql, "oauth2_timestamp_expires",
		expires_in? time(NULL)+expires_in-5/*refresh a bit before expiry*/ : 0);

	if (update_redirect_uri_on_success) {
		dc_sqlite3_set_config(context->sql, "oauth2_redirect_uri", redirect_uri);
	}

cleanup:
	if (locked) { pthread_mutex_unlock(&context->oauth2_critical); }
	free(refresh_token);
	free(refresh_token_for);
	free(redirect_uri);
	free(token_url);
	free(json);
	free(error);
	free(error_description);
	free(oauth2); /* NOTE(review): assumes get_info() returns heap memory -- confirm */
	return access_token? access_token : dc_strdup(NULL);
}
// Run the xyzsh readline interface inside a curses-hosted terminal emulator.
//
// Spawns a terminal emulator (temulator) running temulator_fun with the
// given command line/arguments, then multiplexes between the emulator's pty
// and keyboard input. The screen is redrawn only when something changed AND
// the throttle deadline `next` has passed (advanced by the global `slice`
// interval). The loop exits when SIGCHLD reports the child is gone;
// SIGWINCH triggers a live resize.
void xyzsh_readline_interface_on_curses(char* cmdline, int cursor_point, char** argv, int argc, BOOL exit_in_spite_ofjob_exist, BOOL welcome_msg)
{
    gSigChld = FALSE;
    gSigWinch = FALSE;
    signal(SIGCHLD, handler);
    signal(SIGWINCH, handler);

    const int maxx = mgetmaxx();
    const int maxy = mgetmaxy();

    int temulator_y = 0;
    int temulator_x = 0;
    int temulator_height = maxy;
    int temulator_width = maxx;

    sTEmulator* temulator = temulator_init(temulator_height, temulator_width);

    struct sTEmulatorFunArg arg;

    arg.cmdline = cmdline;
    arg.cursor_point = cursor_point;
    arg.argv = argv;
    arg.argc = argc;
    arg.exit_in_spite_ofjob_exist = exit_in_spite_ofjob_exist;
    arg.welcome_msg = welcome_msg;

    temulator_open(temulator, temulator_fun, &arg);

    /* curses setup: raw, unbuffered, non-blocking input; no cursor; short
     * ESC delay so escape sequences resolve quickly */
    initscr();
    start_color();
    noecho();
    raw();
    nodelay(stdscr, TRUE);
    keypad(stdscr, TRUE);
    curs_set(0);
    ESCDELAY=50;

    temulator_init_colors();

    WINDOW* term_win = newwin(temulator_height, temulator_width, temulator_y, temulator_x);

    int pty = temulator->mFD;

    fd_set mask, read_ok;

    FD_ZERO(&mask);
    FD_SET(0, &mask);
    FD_SET(pty, &mask);

    int dirty = 0;
    struct timeval next;

    gettimeofday(&next, NULL);

    while(1) {
        /* wait up to 10 ms for output from the emulator's pty */
        struct timeval tv = { 0, 1000 * 1000 / 100 };

        read_ok = mask;

        if(select(pty+1, &read_ok, NULL, NULL, &tv) > 0) {
            if(FD_ISSET(pty, &read_ok)) {
                temulator_read(temulator);
                dirty = 1;
            }
        }

        /* forward all pending keystrokes to the emulator */
        int key;
        while((key = getch()) != ERR) {
            temulator_write(temulator, key);
            dirty = 1;
        }

        /* redraw only when dirty and the throttle deadline has expired;
         * tv is reused here to hold the current time */
        gettimeofday(&tv, NULL);
        if(dirty && is_expired(tv, next)) {
            temulator_draw_on_curses(temulator, term_win, temulator_y, temulator_x);
            wrefresh(term_win);
            dirty = 0;
            next = timeval_add(tv, slice);
        }

        /* child process exited */
        if(gSigChld) {
            gSigChld = FALSE;
            break;
        }

        /* terminal resized: propagate to curses and the emulator
         * (sizes below 10x10 are ignored) */
        if(gSigWinch) {
            gSigWinch = 0;
            temulator_height = mgetmaxy();
            temulator_width = mgetmaxx();
            if(temulator_width >= 10 && temulator_height >= 10) {
                resizeterm(temulator_height, temulator_width);
                wresize(term_win, temulator_height, temulator_width);
                temulator_resize(temulator, temulator_height, temulator_width);
                dirty = 1;
            }
        }
    }

    endwin();
    temulator_final(temulator);
}
/*
 * ZooKeeper master-election client entry point.
 *
 * Usage: prog host:port. Initializes the ZooKeeper session, then:
 *  - THREADED build: sleep-polls until connected, creates the parent
 *    znodes, runs for master, and idles until the session expires.
 *  - single-threaded build: drives the handle manually with
 *    zookeeper_interest()/select()/zookeeper_process(), bootstrapping and
 *    running for master exactly once after the session connects.
 *
 * Returns 0 once the session has expired.
 */
int main (int argc, char * argv[]) {
    LOG_DEBUG(("THREADED defined"));

    if (argc != 2) {
        fprintf(stderr, "USAGE: %s host:port\n", argv[0]);
        exit(1);
    }

    /*
     * Initialize ZooKeeper session
     */
    if(init(argv[1])){
        LOG_ERROR(("Error while initializing the master: ", errno));
    }

#ifdef THREADED
    /*
     * Wait until connected
     */
    while(!is_connected()) {
        sleep(1);
    }

    LOG_DEBUG(("Connected, going to bootstrap and run for master"));

    /*
     * Create parent znodes
     */
    bootstrap();

    /*
     * Run for master
     */
    run_for_master();

    /*
     * Run until session expires
     */
    while(!is_expired()) {
        sleep(1);
    }
#else
    int run = 0;
    fd_set rfds, wfds, efds;

    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    FD_ZERO(&efds);

    while (!is_expired()) {
        int fd = -1;
        int interest = 0;
        int events;
        struct timeval tv;
        int rc;

        /* Ask the client which fd, events and timeout it needs next. */
        zookeeper_interest(zh, &fd, &interest, &tv);
        if (fd != -1) {
            if (interest&ZOOKEEPER_READ) {
                FD_SET(fd, &rfds);
            } else {
                FD_CLR(fd, &rfds);
            }

            if (interest&ZOOKEEPER_WRITE) {
                FD_SET(fd, &wfds);
            } else {
                FD_CLR(fd, &wfds);
            }
        } else {
            fd = 0;
        }

        /*
         * The next if block contains
         * calls to bootstrap the master
         * and run for master. We only
         * get into it when the client
         * has established a session and
         * is_connected is true.
         */
        if(is_connected() && !run) {
            LOG_DEBUG(("Connected, going to bootstrap and run for master"));

            /*
             * Create parent znodes
             */
            bootstrap();

            /*
             * Run for master
             */
            run_for_master();

            run = 1;
        }

        rc = select(fd+1, &rfds, &wfds, &efds, &tv);
        events = 0;
        if (rc > 0) {
            if (FD_ISSET(fd, &rfds)) {
                events |= ZOOKEEPER_READ;
            }
            if (FD_ISSET(fd, &wfds)) {
                events |= ZOOKEEPER_WRITE;
            }
        }

        zookeeper_process(zh, events);
    }
#endif

    return 0;
}
/*
 * Walk every record of the SDBM database and act on it depending on
 * `action` (a bitmask):
 *  - PRINT: dump keys/values (optionally only expired records, optionally
 *    including the parsed ModSecurity variables);
 *  - SHRINK: delete expired or zero-length (inconsistent) records;
 *  - EXTRACT: copy all still-valid records into /tmp/new_db.[pag,dir];
 *  - STATUS: only count and report statistics.
 *
 * Returns 0 on success, -1 on any database operation failure.
 */
static int dump_database(apr_pool_t *pool, apr_sdbm_t *db, int action)
{
	apr_status_t ret;
	apr_sdbm_datum_t key;
	apr_sdbm_datum_t val;
	apr_sdbm_t *db_dest = NULL;
	double elements = 0;
	int bad_datum = 0;
	int expired_datum = 0;
	int removed = 0;
	int progress = 0;
	int fret = 0;

	if (action & PRINT)
		v("Dumping database...\n");
	if (action & SHRINK)
		v("Starting the shrink process...\n");
	if (action & STATUS)
		v("Showing some status about the databases...\n");

	if (action & EXTRACT) {
		v("Exporting valid items to: /tmp/new_db.[pag,dir]...\n");
		ret = apr_sdbm_open(&db_dest, "/tmp/new_db", APR_CREATE | APR_WRITE |
			APR_SHARELOCK, 0x0777, pool);
		if (ret != APR_SUCCESS) {
			/* Bug fix: this path used to print the "first key" error
			 * message and `goto end`, where apr_sdbm_close() was then
			 * called on the never-opened db_dest. Nothing has been
			 * acquired yet, so report the real problem and bail out. */
			v("Failed to open the destination database.\n");
			return -1;
		}
	}

	ret = apr_sdbm_firstkey(db, &key);
	if (ret != APR_SUCCESS) {
		v("Failed to retrieve the first key of the database.\n");
		fret = -1;
		goto end;
	}

	do {
		ret = apr_sdbm_fetch(db, &val, key);
		if (ret != APR_SUCCESS) {
			v("Failed to fetch the value of the key: %s.\n", key.dptr);
			fret = -1;
			goto end;
		}

		elements++;

		if (action & PRINT) {
			/* With PRINT_ONLY_EXPIRED, show only records whose stored
			 * expiry has passed. */
			if ((!(action & PRINT_ONLY_EXPIRED)) ||
			    ((action & PRINT_ONLY_EXPIRED) &&
			     is_expired(pool, (const unsigned char *)val.dptr, val.dsize))) {
				printf("Key: \"%s\", Value len: %d\n", key.dptr, val.dsize);
				if (action & PRINT_MODSEC_VARS) {
					print_modsec_variables(pool,
						(const unsigned char *)val.dptr, val.dsize);
				}
			}
		}

		if (action & SHRINK || action & STATUS || action & EXTRACT) {
			int selected = 0;

			/* Zero-length values count as inconsistent records. */
			if (val.dsize == 0) {
				bad_datum++;
				selected = 1;
			}

			if (is_expired(pool, (const unsigned char *)val.dptr, val.dsize)) {
				expired_datum++;
				selected = 1;
			}

			/* Lightweight progress spinner, updated every 10 records. */
			if ((int)elements % 10 == 0) {
				int p2s = (int) progress++ % 4;
				p(" [%c] %.0f records so far.\r", progress_feedback[p2s],
					elements);
				fflush(stdout);
			}

			if (selected && action & SHRINK) {
				/* Remove the expired/inconsistent record. */
				ret = remote_datum_t(pool, db, &key);
				if (ret != APR_SUCCESS) {
					p("Failed to delete key: \"%s\"\n",
						(const unsigned char *)key.dptr);
				} else {
					removed++;
				}
			}

			if (selected == 0 && action & EXTRACT) {
				ret = apr_sdbm_store(db_dest, key, val, APR_SDBM_INSERT);
				if (ret != APR_SUCCESS) {
					p("Failed to insert key: \"%s\"\n",
						(const unsigned char *)key.dptr);
				}
			}
		}

		ret = apr_sdbm_nextkey(db, &key);
		if (ret != APR_SUCCESS) {
			v("Failed to retrieve the next key.\n");
			fret = -1;
			goto end;
		}
	} while (key.dptr);

end:
	if (action & EXTRACT) {
		/* db_dest is guaranteed open here: an open failure returns early
		 * above without reaching this label. ("valied" typo fixed.) */
		p("New database generated with valid keys at: /tmp/new_db\n");
		apr_sdbm_close(db_dest);
	}

	if (action & SHRINK || action & STATUS) {
		printf("\n");
		printf("Total of %.0f elements processed.\n", elements);
		printf("%d elements removed.\n", removed);
		printf("Expired elements: %d, inconsistent items: %d\n",
			expired_datum, bad_datum);
		if (expired_datum+bad_datum != 0 && elements !=0)
			printf("Fragmentation rate: %2.2f%% of the database is/was dirty " \
				"data.\n", 100*(expired_datum+bad_datum)/elements);
	}

	return fret;
}
// Tracing is complete when the trace is still alive and the graph has
// advanced to the final stage.
bool TracingState::is_complete() const {
	if (is_expired()) {
		return false;
	}
	return graph->stage() == num_stages - 1;
}
// Release the Lua registry reference backing this weak ref.
// Precondition: the reference must still be alive (asserted).
void weak_ref::unref(lua_State * L) {
    assert(!is_expired());
    const auto registry_slot = *(m_ref.lock());
    luaL_unref(L, LUA_REGISTRYINDEX, registry_slot);
}