/**
 * Takes delimiter tags and sets the delimiter for the parser.
 *
 * Matches a tag of the form "<ldelim>=NEW NEW=<rdelim>" against the current
 * global delimiters and, on a match, installs the newly requested left and
 * right delimiters in the globals ldelim/rdelim.
 *
 * Fixes: removed a garbled leftover debug line
 * (puts("free oldl); sdsfree(oldr); sdsfree(re")) and the other debug
 * prints, and plugged the leak of the previous oldl/oldr sds strings when
 * they were reassigned via sdscatprintf(sdsempty(), ...).
 *
 * @param tag The tag (sds string) carrying the delimiter-change request.
 *
 * @retval sdsempty() Empty sds string (a delimiter tag emits no output).
 */
static sds tag_delimiter(sds tag) {
    sds re = sdsempty();
    sds oldl = sdsdup(ldelim), oldr = sdsdup(rdelim);

    /* Capture "LD=new" and "new=RD" using the current delimiters. */
    re = sdscatprintf(re, "(%s=\\S+)\\s+(\\S+=%s)", oldl, oldr);

    struct slre_cap caps[2];
    int i;
    if ((i = slre_match(re, tag, sdslen(tag), caps, 2)) > 0) {
        ldelim = sdscpylen(ldelim, caps[0].ptr, caps[0].len);
        rdelim = sdscpylen(rdelim, caps[1].ptr, caps[1].len);

        /* Build the "LD=" prefix / "=RD" suffix patterns without leaking
         * the previous oldl/oldr allocations. */
        sds tmpl = sdscatprintf(sdsempty(), "%s=", oldl);
        sds tmpr = sdscatprintf(sdsempty(), "=%s", oldr);
        sdsfree(oldl);
        sdsfree(oldr);
        oldl = tmpl;
        oldr = tmpr;

        /* Strip the old-delimiter markers from the captured text. */
        ldelim = sdsreplace(ldelim, oldl, NULL);
        rdelim = sdsreplace(rdelim, oldr, NULL);
    }

    sdsfree(oldl);
    sdsfree(oldr);
    sdsfree(re);
    return sdsempty();
}
/* Dispatch a Lua function call on behalf of the given client.
 *
 * NOTE(review): this block appears truncated in this view -- the function
 * body continues past the visible text (the outer brace is never closed).
 *
 * Name resolution: outside WebServerMode the requested name is used as-is;
 * otherwise non-whitelisted clients -- and whitelisted clients in HTTP
 * mode -- are restricted to the "WL_"-prefixed variant of the function. */
bool luafunc_call(redisClient *c, int argc, robj **argv) {
    sds fname;
    if (server.alc.WebServerMode == -1) {
        fname = sdsdup(argv[1]->ptr);
    } else {
        if (isWhiteListedIp(c)) {
            if (c->http.mode == HTTP_MODE_ON) {
                /* Whitelisted HTTP clients still call the "WL_" variant. */
                fname = sdscatprintf(sdsempty(), "WL_%s", (char *)argv[1]->ptr);
            } else {
                fname = sdsdup(argv[1]->ptr);
            }
        } else {
            fname = sdscatprintf(sdsempty(), "WL_%s", (char *)argv[1]->ptr);
        }
    }
    //printf("luafunc_call: fname: %s\n", fname);
    /* Push the global Lua function onto the stack; fname is no longer
     * needed after the lookup. */
    lua_getglobal(server.lua, fname);
    sdsfree(fname);
    int type = lua_type(server.lua, -1);
    if (type != LUA_TFUNCTION) {
        /* Not a function: pop the looked-up value and report the error in
         * the protocol the client is speaking (HTTP 404 vs redis error). */
        lua_pop(server.lua, 1);
        if (c->http.mode == HTTP_MODE_ON) SEND_404 else LUA_FUNCTION_REDIS_ERR
        return 1;
    }
/* Insert 'value' into the set 'subject'.
 *
 * Returns 1 if the element was added, 0 if it was already a member.
 * An intset-encoded set is upgraded to hash-table encoding either when the
 * value is not integer-encodable or when the intset grows beyond
 * server.set_max_intset_entries. */
int setTypeAdd(robj *subject, sds value) {
    long long intval;

    switch (subject->encoding) {
    case OBJ_ENCODING_HT: {
        dict *d = subject->ptr;
        dictEntry *entry = dictAddRaw(d, value);
        if (entry == NULL) return 0; /* Already a member. */
        dictSetKey(d, entry, sdsdup(value));
        dictSetVal(d, entry, NULL);
        return 1;
    }
    case OBJ_ENCODING_INTSET:
        if (isSdsRepresentableAsLongLong(value, &intval) != C_OK) {
            /* Failed to get integer from object, convert to regular set.
             * The set *was* an intset and this value is not integer
             * encodable, so dictAdd should always work. */
            setTypeConvert(subject, OBJ_ENCODING_HT);
            serverAssert(dictAdd(subject->ptr, sdsdup(value), NULL) == DICT_OK);
            return 1;
        } else {
            uint8_t added = 0;
            subject->ptr = intsetAdd(subject->ptr, intval, &added);
            if (!added) return 0;
            /* Convert to regular set when the intset contains
             * too many entries. */
            if (intsetLen(subject->ptr) > server.set_max_intset_entries)
                setTypeConvert(subject, OBJ_ENCODING_HT);
            return 1;
        }
    default:
        serverPanic("Unknown set encoding");
    }
    return 0;
}
/* Initialise the zoom service: remember the source directory, create the
 * service tmp directory plus its sixteen "0".."f" sub-directories, and
 * record the base offset used for paths built inside it. Exits the process
 * if any directory cannot be created. */
void zoomServiceInit(sds srcDir) {
    zoomSrcDir = sdsdup(srcDir);
    zoomTmpDir = bioPathInTmpDirCharPtr(SERVICE_ZOOM + 1);
    if (utilMkdir(zoomTmpDir)) exit(EXIT_FAILURE);

    /* One sub-directory per hex digit. */
    for (int n = 0; n < 16; n++) {
        sds sub = sdscatprintf(sdsdup(zoomTmpDir), "/%x", n);
        int failed = utilMkdir(sub);
        sdsfree(sub);
        if (failed) exit(EXIT_FAILURE);
    }

    baseoffset = sdslen(zoomTmpDir) + 2;
}
void destor_shutdown() { sds stat_file = sdsdup(destor.working_directory); stat_file = sdscat(stat_file, "/destor.stat"); FILE *fp; if ((fp = fopen(stat_file, "w")) == 0) { destor_log(DESTOR_WARNING, "Fatal error, can not open destor.stat!"); exit(1); } fwrite(&destor.chunk_num, 8, 1, fp); fwrite(&destor.stored_chunk_num, 8, 1, fp); fwrite(&destor.data_size, 8, 1, fp); fwrite(&destor.stored_data_size, 8, 1, fp); fwrite(&destor.zero_chunk_num, 8, 1, fp); fwrite(&destor.zero_chunk_size, 8, 1, fp); fwrite(&destor.rewritten_chunk_num, 8, 1, fp); fwrite(&destor.rewritten_chunk_size, 8, 1, fp); fwrite(&destor.index_memory_footprint, 4, 1, fp); fwrite(&destor.live_container_num, 4, 1, fp); fwrite(&destor.backup_retention_time, 4, 1, fp); fwrite(&destor.simulation_level, 4, 1, fp); fclose(fp); sdsfree(stat_file); }
/* Insert 'key' -> 'val' into the database. The caller is responsible for
 * incrementing the value's reference counter if needed.
 *
 * Aborts (assertion) if the key is already present. */
void dbAdd(redisDb *db, robj *key, robj *val) {
    sds keydup = sdsdup(key->ptr);
    int added = dictAdd(db->dict, keydup, val);

    redisAssertWithInfo(NULL, key, added == REDIS_OK);
    /* A freshly created list key may unblock clients waiting on it. */
    if (val->type == REDIS_LIST) signalListAsReady(db, key);
}
/* Consolidate a GPOS single-adjustment subtable against the font's glyph
 * order: resolve glyph handles to indexes, drop entries whose target glyph
 * is missing, de-duplicate entries mapping the same source glyph (the first
 * mapping wins), and rebuild the subtable sorted by source glyph id.
 *
 * NOTE(review): this block appears truncated in this view -- the HASH_ITER
 * body and the function are not closed in the visible text. */
bool consolidate_gpos_single(otfcc_Font *font, table_OTL *table, otl_Subtable *_subtable,
                             const otfcc_Options *options) {
    subtable_gpos_single *subtable = &(_subtable->gpos_single);
    gpos_single_hash *h = NULL;
    for (glyphid_t k = 0; k < subtable->length; k++) {
        /* Skip entries whose target glyph cannot be resolved. */
        if (!GlyphOrder.consolidateHandle(font->glyph_order, &subtable->items[k].target)) {
            logWarning("[Consolidate] Ignored missing glyph /%s.\n",
                       subtable->items[k].target.name);
            continue;
        }
        gpos_single_hash *s;
        int fromid = subtable->items[k].target.index;
        HASH_FIND_INT(h, &fromid, s);
        if (s) {
            /* Same source glyph seen twice: keep the earlier mapping. */
            logWarning("[Consolidate] Detected glyph double-mapping about /%s.\n",
                       subtable->items[k].target.name);
        } else {
            NEW(s);
            s->fromid = subtable->items[k].target.index;
            s->fromname = sdsdup(subtable->items[k].target.name);
            s->v = subtable->items[k].value;
            HASH_ADD_INT(h, fromid, s);
        }
    }
    HASH_SORT(h, gpos_by_from_id);
    /* Rebuild the subtable from the de-duplicated, sorted hash. */
    iSubtable_gpos_single.clear(subtable);
    gpos_single_hash *s, *tmp;
    HASH_ITER(hh, h, s, tmp) {
        iSubtable_gpos_single.push(subtable,
                                   ((otl_GposSingleEntry){
                                       .target = Handle.fromConsolidated(s->fromid, s->fromname),
                                       .value = s->v,
                                   }));
/* SLAVEOF host port | SLAVEOF NO ONE
 *
 * "NO ONE" turns replication off and promotes this instance to master;
 * any other argument pair records the new master address and schedules a
 * connect. Either way the client gets +OK. */
void slaveofCommand(redisClient *c) {
    int noone = !strcasecmp(c->argv[1]->ptr, "no") &&
                !strcasecmp(c->argv[2]->ptr, "one");

    if (noone) {
        if (server.masterhost) {
            sdsfree(server.masterhost);
            server.masterhost = NULL;
            if (server.master) freeClient(server.master);
            if (server.replstate == REDIS_REPL_TRANSFER)
                replicationAbortSyncTransfer();
            server.replstate = REDIS_REPL_NONE;
            redisLog(REDIS_NOTICE, "MASTER MODE enabled (user request)");
        }
    } else {
        sdsfree(server.masterhost);
        server.masterhost = sdsdup(c->argv[1]->ptr);
        server.masterport = atoi(c->argv[2]->ptr);
        if (server.master) freeClient(server.master);
        if (server.replstate == REDIS_REPL_TRANSFER)
            replicationAbortSyncTransfer();
        /* Actual connection happens later from the replication cron. */
        server.replstate = REDIS_REPL_CONNECT;
        redisLog(REDIS_NOTICE, "SLAVE OF %s:%d enabled (user request)",
                 server.masterhost, server.masterport);
    }
    addReply(c, shared.ok);
}
/* Add the key to the DB. It's up to the caller to increment the reference
 * counter of the value if needed.
 *
 * The program is aborted if the key already exists.
 *
 * NOTE(review): the parameter is declared 'sds *key' (pointer to sds) but
 * is passed directly to sdsdup(), which takes an sds -- one of the two is
 * likely wrong; confirm whether the signature should be 'sds key'. */
void dbAdd(memoryDb *db, sds *key, value_t *val) {
    sds copy = sdsdup(key);
    /* A newly stored value starts from a fresh version. */
    resetValueVersion(val);
    int retval = dictAdd(db->dict, copy, val);
    redisAssertWithInfo(NULL, key, retval == MDB_OK);
}
/* Insert 'key' -> 'val' into the database; the caller must bump the
 * value's reference counter itself when required.
 *
 * Aborts (assertion) when the key is already present. */
void dbAdd(redisDb *db, robj *key, robj *val) {
    sds keycopy = sdsdup(key->ptr);
    int rc = dictAdd(db->dict, keycopy, val);

    redisAssertWithInfo(NULL, key, rc == REDIS_OK);
    /* Keep the cluster's slot -> keys mapping in sync. */
    if (server.cluster_enabled) SlotToKeyAdd(key);
}
/* Build the on-disk path for 'hash' inside the fort: one directory per
 * depth level (each named by 'width' hex-encoded hash bytes), then the full
 * hash string, then an optional trailing filename component.
 * Caller owns and frees the returned sds. */
static sds trie_dir_path(const TF * const fort, const void * const hash,
                         const char * filename) {
    const size_t width = fort->cfg.width;
    const size_t depth = fort->cfg.depth;
    const uint8_t * const bytes = hash;

    char node[(width * 2) + 2]; /* width hex pairs + NUL (with slack). */
    sds path = sdsdup(fort->path);

    for (size_t level = 0; level < depth; level++) {
        path = sdscat(path, "/");
        for (size_t j = 0; j < width; j++) {
            /* Two hex characters per hash byte, written in place. */
            snprintf(&node[j * 2], 3, "%02x", bytes[(level * width) + j]);
        }
        path = sdscat(path, node);
    }

    /* Leaf: the full printable hash. */
    sds shash = mk_hash_str(hash, fort->cfg.hash_len);
    path = sdscat(path, "/");
    path = sdscat(path, shash);
    sdsfree(shash);

    if (NULL != filename) {
        path = sdscat(path, "/");
        path = sdscat(path, filename);
    }

    return path;
}
// returns 1 if hooked, 0 otherwise. // if hooked, reference counts for key and value are decreased int arc_rdb_load_aux_fields_hook (robj * auxkey, robj * auxval, long long *now) { char *p = auxkey->ptr; // fast check if (*p++ != '\001' || *p++ != '\002' || *p++ != '\003') { return 0; } if (!compareStringObjects (auxkey, shared.db_version)) { getLongLongFromObject (auxval, &arc.smr_seqnum); } else if (!compareStringObjects (auxkey, shared.db_smr_mstime)) { getLongLongFromObject (auxval, &arc.smr_ts); if (now) { *now = arc.smr_ts; } } else if (!compareStringObjects (auxkey, shared.db_migrate_slot)) { if (arc.migrate_slot) { sdsfree (arc.migrate_slot); } arc.migrate_slot = sdsdup (auxval->ptr); } else if (!compareStringObjects (auxkey, shared.db_migclear_slot)) { if (arc.migclear_slot) { sdsfree (arc.migclear_slot); } arc.migclear_slot = sdsdup (auxval->ptr); } else { return 0; } decrRefCount (auxkey); decrRefCount (auxval); return 1; }
/* Merge the containers referenced by the current backup job into the
 * on-disk manifest (<working_directory>/manifest).
 *
 * 'monitor' is keyed by pointers to container IDs. Every container in it
 * gets its "last used" time set to the current job id (jcr.id); the merged
 * manifest is then rewritten to disk and destor.live_container_num updated. */
void update_manifest(GHashTable *monitor){
    GHashTable *manifest = g_hash_table_new_full(g_int64_hash, g_int64_equal,
            NULL, free);

    sds fname = sdsdup(destor.working_directory);
    fname = sdscat(fname, "/manifest");

    FILE *fp = NULL;
    if((fp = fopen(fname, "r"))){
        /* file exists. Reconstruct the manifest from the file. */
        struct record tmp;
        while(fscanf(fp, "%lld,%d", &tmp.id, &tmp.time) == 2){
            struct record* rec = (struct record*) malloc(sizeof(struct record));
            rec->id = tmp.id;
            rec->time = tmp.time;
            /* Keyed by the id field embedded in the record itself; the
             * table's value-destroy (free) releases the whole record. */
            g_hash_table_insert(manifest, &rec->id, rec);
        }
        DEBUG("CMA: read %d records.", g_hash_table_size(manifest));
        fclose(fp);
    }

    /* Update the backup times in the manifest. */
    GHashTableIter iter;
    gpointer key, value;
    g_hash_table_iter_init(&iter, monitor);
    while(g_hash_table_iter_next(&iter, &key, &value)){
        /* the key is a pointer to a container ID. */
        struct record *r = g_hash_table_lookup(manifest, key);
        if(!r){
            /* First time this container appears in the manifest. */
            r = (struct record*) malloc(sizeof(struct record));
            r->id = *(containerid*)key;
            g_hash_table_insert(manifest, &r->id, r);
        }
        r->time = jcr.id;
    }

    /* Flush the manifest */
    if((fp = fopen(fname, "w"))){
        /* Update the manifest into the file. */
        g_hash_table_iter_init(&iter, manifest);
        while(g_hash_table_iter_next(&iter, &key, &value)){
            struct record* r = value;
            fprintf(fp, "%lld,%d\n", r->id, r->time);
        }
        DEBUG("CMA: update %d records.", g_hash_table_size(manifest));
        fclose(fp);
    }else{
        WARNING("Cannot create the manifest!");
        exit(1);
    }

    destor.live_container_num = g_hash_table_size(manifest);

    g_hash_table_destroy(manifest);
    sdsfree(fname);
}
/*
 * cetcd_cluster_request tries to request the whole cluster. It round-robin to next server if the request failed
 *
 * For ETCD_MEMBERS requests the result is a cetcd_array of member addresses
 * (NULL if every server fails); for all other requests it is a
 * cetcd_response, whose error is replaced with a cluster-wide
 * error_cluster_failed when no server could be reached.
 */
void *cetcd_cluster_request(cetcd_client *cli, cetcd_request *req) {
    size_t i, count;
    cetcd_string url;
    cetcd_error *err = NULL;
    cetcd_response *resp = NULL;
    cetcd_array *addrs = NULL;
    void *res = NULL;

    count = cetcd_array_size(cli->addresses);

    for(i = 0; i < count; ++i) {
        /* Compose the full URL against the currently picked server. */
        url = sdscatprintf(sdsempty(), "http://%s/%s",
                (cetcd_string)cetcd_array_get(cli->addresses, cli->picked), req->uri);
        req->url = url;
        req->cli = cli;
        res = cetcd_send_request(cli->curl, req);
        sdsfree(url);

        /*api_type == syncCluster, got address, return*/
        if (req->api_type == ETCD_MEMBERS ){
            if ((addrs = res)) {
                if ( cetcd_array_size(addrs)) {
                    return addrs;
                } else {
                    /* Empty member list: treat as failure, try next server. */
                    cetcd_array_destroy(addrs);
                }
            }
        } else {
            if((resp=res) && resp->err && resp->err->ecode == error_send_request_failed) {
                /* Transport-level failure: drop this response and retry,
                 * except on the last attempt where it is kept so the
                 * cluster-failed error below can be attached to it. */
                if (i != count-1) {
                    cetcd_response_release(resp);
                    resp = NULL;
                }
            } else {
                /*got response, return*/
                return resp;
            }
        }
        /*try next*/
        if (i != count-1) {
            cli->picked = (cli->picked + 1) % count;
        }
    }

    /*the whole cluster failed*/
    if (req->api_type == ETCD_MEMBERS) return NULL;
    if (resp) {
        if(resp->err) {
            err = resp->err; /*remember last error*/
        }
        /* Replace the per-server error with a cluster-wide one, appending
         * the last per-server message as context. */
        resp->err = calloc(1, sizeof(cetcd_error));
        resp->err->ecode = error_cluster_failed;
        resp->err->message = sdsnew("etcd_do_request: all cluster servers failed.");
        if (err) {
            resp->err->message = sdscatprintf(resp->err->message, " last error: %s", err->message);
            cetcd_error_release(err);
        }
        resp->err->cause = sdsdup(req->uri);
    }
    return resp;
}
/* Download 'filename' from the asset base URL into 'targetpath'.
 *
 * Returns the par_easycurl_to_file() result directly (its success
 * convention is defined by the curl wrapper -- confirm at the call sites).
 *
 * Fixes: the composed URL sds was leaked (the sibling
 * parg_buffer_from_path() frees its equivalent), and an unreachable
 * 'return 0;' followed the first return statement.
 *
 * NOTE(review): 'baseurl' is deliberately not freed, matching the other
 * callers of parg_asset_baseurl() -- presumably it returns a cached/owned
 * string; confirm against the asset module. */
int parg_asset_download(const char* filename, sds targetpath)
{
    sds baseurl = parg_asset_baseurl();
    sds fullurl = sdscat(sdsdup(baseurl), filename);
    printf("Downloading %s...\n", fullurl);
    int result = par_easycurl_to_file(fullurl, targetpath);
    sdsfree(fullurl);
    return result;
}
/* Insert 'key' -> 'val' into the database; the caller handles the value's
 * reference counting. Aborts (assertion) if the key already exists. */
void dbAdd(redisDb *db, robj *key, robj *val) {
    sds dup = sdsdup(key->ptr);
    int rc = dictAdd(db->dict, dup, val);

    serverAssertWithInfo(NULL, key, rc == C_OK);
    /* A newly created list may unblock clients blocked on it. */
    if (val->type == OBJ_LIST) signalListAsReady(db, key);
    /* Keep the cluster's slot -> key map in sync. */
    if (server.cluster_enabled) slotToKeyAdd(key);
}
/* Look up 'key' in the cache. On a miss, insert a placeholder entry (with
 * a duplicated key and NULL value) and ask the master for fresh data; the
 * placeholder is returned either way. */
cacheEntry *cacheFind(ccache *c, sds key) {
    cacheEntry *entry = dictFetchValue(c->data, key);
    if (entry != NULL) return entry;

    /* Miss: register a pending entry and request it from the master. */
    entry = cacheAdd(c, sdsdup(key), NULL);
    cacheSendMessage(c, entry, CACHE_REQUEST_NEW);
    return entry;
}
/* Store (key, buffer) in the fort under the hash of 'key'.
 *
 * The key's hash is computed into the caller-provided 'hash' (hash_len
 * bytes), the trie directories are created, and key and data are written as
 * "triefort.key" / "triefort.data". Fails if the key is too long, the
 * hasher errors out, or an entry for this hash already exists. */
S triefort_put_with_key(TF * fort,
    const void * const key, const size_t keylen,
    const void * const buffer, const size_t bufferlen,
    void * const hash) {
  NULLCHK(fort);
  NULLCHK(key);
  NULLCHK(buffer);
  NULLCHK(hash);

  if (keylen > fort->cfg.max_key_len) return triefort_err_key_too_long;

  /* Hash the key into the caller's buffer. */
  triefort_hasher_fn * hfn = fort->hcfg->hasher;
  const size_t hashlen = fort->cfg.hash_len;
  if (0 != hfn(hash, hashlen, key, keylen)) return triefort_err_hasher_error;

  sds dir_path = NULL;
  PANIC_IF(triefort_ok != mk_trie_dirs(fort, hash, hashlen, &dir_path));

  sds key_file = sdscat(sdsdup(dir_path), "/triefort.key");
  sds data_file = sdscat(sdsdup(dir_path), "/triefort.data");

  S status;
  if (file_exists(data_file)) {
    status = triefort_err_hash_already_exists;
  } else {
    /* Write the key first; only write the data if that succeeded. */
    status = write_file(key_file, key, keylen);
    if (triefort_ok == status) {
      status = write_file(data_file, buffer, bufferlen);
    }
  }

  sdsfree(key_file);
  sdsfree(data_file);
  sdsfree(dir_path);

  return status;
}
// PURGE PURGE PURGE PURGE PURGE PURGE PURGE PURGE PURGE PURGE PURGE PURGE void DXDB_syncCommand(redisClient *c) { sds ds = sdscatprintf(sdsempty(), "DIRTY %lld\r\n", server.alc.stat_num_dirty_commands); robj *r = createStringObject(ds, sdslen(ds)); addReply(c, r); // SYNC DIRTY NUM decrRefCount(r); c->bindaddr = sdsdup(c->argv[1]->ptr); c->bindport = atoi(c->argv[2]->ptr); }
/* * Be called when users delete backups in FIFO order. * Delete all backups earlier than jobid. * All container IDs with a time smaller than or equal to jobid can be removed. * Return these IDs. */ GHashTable* trunc_manifest(int jobid){ /* The containers we reclaim */ GHashTable *invalid_containers = g_hash_table_new_full(g_int64_hash, g_int64_equal, free, NULL); GHashTable *manifest = g_hash_table_new_full(g_int64_hash, g_int64_equal, NULL, free); sds fname = sdsdup(destor.working_directory); fname = sdscat(fname, "/manifest"); FILE *fp = NULL; if((fp = fopen(fname, "r"))){ /* file exists. Reconstruct the manifest from the file. */ struct record tmp; while(fscanf(fp, "%lld,%d", &tmp.id, &tmp.time) == 2){ struct record* rec = (struct record*) malloc(sizeof(struct record)); if(tmp.time <= jobid){ /* This record can be removed. */ containerid *cid = (containerid*) malloc(sizeof(containerid)); *cid = tmp.id; g_hash_table_insert(invalid_containers, cid, NULL); NOTICE("CMA: container %lld can be reclaimed.", cid); }else{ /* This record remains valid. */ rec->id = tmp.id; rec->time = tmp.time; g_hash_table_insert(manifest, &rec->id, rec); } } NOTICE("CMA: %d of records are valid.", g_hash_table_size(manifest)); NOTICE("CMA: %d of records are going to be reclaimed.", g_hash_table_size(invalid_containers)); fclose(fp); }else{ NOTICE("manifest doesn't exist!"); exit(1); } if((fp = fopen(fname, "w"))){ GHashTableIter iter; gpointer key, value; g_hash_table_iter_init(&iter, manifest); while(g_hash_table_iter_next(&iter, &key, &value)){ struct record* rec = value; fprintf(fp, "%lld,%d\n", rec->id, rec->time); } fclose(fp); }else{ WARNING("CMA: cannot create manifest!"); exit(1); } destor.live_container_num = g_hash_table_size(manifest); g_hash_table_destroy(manifest); return invalid_containers; }
/* Map an arbitrary string to its location inside the zoom tmp directory:
 * <zoomTmpDir>/<hash-of-str>/<url-encoded-str>. Caller frees the result. */
static sds zoomePathInTmpDir(char *str) {
    char *hashdir = mhashFunction((unsigned char*)str, strlen(str));
    char *encoded = fast_url_encode(str);

    sds path = sdsdup(zoomTmpDir);
    path = sdscatprintf(path, "/%s/%s", hashdir, encoded);

    free(hashdir);
    free(encoded);
    return path;
}
/* Initialise the recipe store: record the recipe directory path
 * (<working_directory>/recipes/) and load the persisted backup version
 * counter from backupversion.count when that file exists.
 *
 * Fix: the return value of fread() was ignored; a short read now leaves
 * backup_version_count untouched and logs a warning instead of silently
 * using a partially-read value. */
void init_recipe_store() {
    recipepath = sdsdup(destor.working_directory);
    recipepath = sdscat(recipepath, "/recipes/");

    sds count_fname = sdsdup(recipepath);
    count_fname = sdscat(count_fname, "backupversion.count");

    FILE *fp;
    if ((fp = fopen(count_fname, "r"))) {
        /* Read if exists. */
        if (fread(&backup_version_count, 4, 1, fp) != 1)
            WARNING("Cannot read backupversion.count!");
        fclose(fp);
    }

    sdsfree(count_fname);

    NOTICE("Init recipe store successfully");
}
/* Build a triefort info record for the entry stored at 'path'.
 *
 * On success *info is allocated and populated with the entry's hash, the
 * data file's length, and -- when a key file exists -- a copy of the stored
 * key. Returns triefort_err_hash_does_not_exist when no data file is
 * present at the path. */
static S mk_info_from_path(const TF * const fort, sds path,
    const void * const hash, INFO ** info) {
  sds data_path = sdsdup(path);
  sds key_path = sdsdup(path);

  data_path = sdscat(data_path, "/triefort.data");
  key_path = sdscat(key_path, "/triefort.key");

  S status = triefort_ok;

  if (file_exists(data_path)) {
    struct stat s;
    PANIC_IF(0 != stat(data_path, &s));

    *info = calloc(1, sizeof(**info));
    INFO * inf = *info;

    /* Copy the hash that addressed this entry. */
    inf->hash = calloc(1, fort->cfg.hash_len);
    memcpy(inf->hash, hash, fort->cfg.hash_len);
    inf->hashlen = fort->cfg.hash_len;
    inf->length = s.st_size; /* size of the data file */

    if (file_exists(key_path)) {
      /* Load the stored key alongside the data; note 's' is reused here
       * for the key file's stat.
       * NOTE(review): fread of a zero-length key file would return 0 and
       * trip PANIC_IF -- confirm keys are always non-empty. */
      FILE * kh = fopen(key_path, "rb");
      PANIC_IF(NULL == kh);
      PANIC_IF(0 != fstat(fileno(kh), &s));
      inf->keylen = s.st_size;
      inf->key = calloc(1, s.st_size);
      PANIC_IF(1 != fread(inf->key, s.st_size, 1, kh));
      fclose(kh);
    } else {
      inf->keylen = 0;
      inf->key = NULL;
    }
  } else {
    status = triefort_err_hash_does_not_exist;
  }

  sdsfree(data_path);
  sdsfree(key_path);

  return status;
}
/* Set 'key' to 'val': behaves like dbAdd() when the key is absent,
 * otherwise the existing value is replaced.
 *
 * Returns 1 when the key was newly added, 0 when an existing value was
 * replaced (i.e. the key already existed). */
int dbReplace(redisDb *db, robj *key, robj *val) {
    if (dictFind(db->dict, key->ptr) != NULL) {
        dictReplace(db->dict, key->ptr, val);
        return 0;
    }
    /* New key: the dict takes ownership of a duplicated key string. */
    dictAdd(db->dict, sdsdup(key->ptr), val);
    return 1;
}
/* Load the named asset into a buffer.
 *
 * On Emscripten the asset would be fetched from the base URL (not yet
 * implemented -- returns 0); natively it is read from the executable's
 * directory, downloading it first if it is missing.
 *
 * NOTE(review): 'baseurl' and 'execdir' are not freed here -- presumably
 * the asset module owns/caches them; confirm before changing. */
parg_buffer* parg_buffer_from_path(const char* filename)
{
#if EMSCRIPTEN
    sds baseurl = parg_asset_baseurl();
    sds fullurl = sdscat(sdsdup(baseurl), filename);
    parg_buffer* retval = 0;
    printf("TODO: download %s here\n", fullurl);
    sdsfree(fullurl);
#else
    sds execdir = parg_asset_whereami();
    sds fullpath = sdscat(sdsdup(execdir), filename);
    /* Fetch the asset on first use. */
    if (!parg_asset_fileexists(fullpath)) {
        parg_asset_download(filename, fullpath);
    }
    parg_buffer* retval = parg_buffer_from_file(fullpath);
    sdsfree(fullpath);
#endif
    return retval;
}
/* Perform a single HTTP request against one etcd server using the shared
 * curl handle.
 *
 * For ETCD_MEMBERS requests the response parser fills a cetcd_array of
 * member addresses; for everything else it fills a cetcd_response. On
 * transport failure a send-request error (with the failing URL as cause)
 * is attached to the response. Returns the parser's result object. */
void *cetcd_send_request(CURL *curl, cetcd_request *req) {
    CURLcode res;
    cetcd_response_parser parser;
    cetcd_response *resp = NULL ;
    cetcd_array *addrs = NULL;

    if (req->api_type == ETCD_MEMBERS) {
        addrs = cetcd_array_create(10);
        parser.resp = addrs;
    } else {
        resp = calloc(1, sizeof(cetcd_response));
        parser.resp = resp;
    }

    parser.api_type = req->api_type;
    parser.st = 0; /*0 should be the start state of the state machine*/
    parser.buf = sdsempty();

    curl_easy_setopt(curl, CURLOPT_URL, req->url);
    curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, http_method[req->method]);
    if (req->method == ETCD_HTTP_PUT || req->method == ETCD_HTTP_POST) {
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, req->data);
    } else {
        /* We must clear post fields here:
         * We reuse the curl handle for all HTTP methods.
         * CURLOPT_POSTFIELDS would be set when issue a PUT request.
         * The field pointed to the freed req->data. It would be
         * reused by next request.
         * */
        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "");
    }
    curl_easy_setopt(curl, CURLOPT_HEADER, 1L);
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, &parser);
    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, cetcd_parse_response);
    curl_easy_setopt(curl, CURLOPT_VERBOSE, req->cli->settings.verbose);
    curl_easy_setopt(curl, CURLOPT_CONNECTTIMEOUT, req->cli->settings.connect_timeout);

    res = curl_easy_perform(curl);

    sdsfree(parser.buf);

    if (res != CURLE_OK) {
        /* ETCD_MEMBERS callers detect failure via an empty address array. */
        if (req->api_type == ETCD_MEMBERS) {
            return addrs;
        }
        /* Only attach a transport error if the parser didn't already
         * record a more specific one. */
        if (resp->err == NULL) {
            resp->err = calloc(1, sizeof(cetcd_error));
            resp->err->ecode = error_send_request_failed;
            resp->err->message = sdsnew(curl_easy_strerror(res));
            resp->err->cause = sdsdup(req->url);
        }
        return resp;
    }
    return parser.resp;
}
/* Allocate a counter named 'name' (with its history buffer allocated
 * inline, directly after the struct), register it in server.counters, and
 * return it. Aborts (assertion) if a counter with this name exists. */
counter *counterCreate(sds name) {
    size_t bytes = sizeof(counter) + sizeof(long double) * server.history_size;
    counter *cntr = zcalloc(bytes);

    cntr->name = sdsdup(name);
    cntr->shards = listCreate();
    /* The history array lives immediately after the struct itself. */
    cntr->history = (long double *)(cntr + 1);
    cntr->precision = server.default_precision;

    serverAssert(dictAdd(server.counters, cntr->name, cntr) == DICT_OK);
    return cntr;
}
/* Initialise the master thread: set up its event loop, create and start a
 * listener for every configured bind address (all on cserver->port), and
 * create the call-back list protected by cbsullock.
 *
 * Returns VR_OK on success, VR_ERROR/VR_ENOMEM on failure. Note the 'conf'
 * parameter is unused; configuration is read via conf_server_get/cserver. */
int master_init(vr_conf *conf) {
    rstatus_t status;
    uint32_t j;
    sds *host, listen_str;
    vr_listen **vlisten;
    int threads_num;
    int filelimit;

    master.cbsul = NULL;
    pthread_mutex_init(&master.cbsullock, NULL);

    conf_server_get(CONFIG_SOPN_THREADS,&threads_num);
    /* Two fds per worker thread plus the reserved minimum. */
    filelimit = threads_num*2+CONFIG_MIN_RESERVED_FDS;
    vr_eventloop_init(&master.vel,filelimit);
    master.vel.thread.fun_run = master_thread_run;

    darray_init(&master.listens,darray_n(&cserver->binds),sizeof(vr_listen*));

    /* Build one "host:port" listener per configured bind address. */
    for (j = 0; j < darray_n(&cserver->binds); j ++) {
        host = darray_get(&cserver->binds,j);
        listen_str = sdsdup(*host);
        listen_str = sdscatfmt(listen_str, ":%i", cserver->port);
        vlisten = darray_push(&master.listens);
        *vlisten = vr_listen_create(listen_str);
        if (*vlisten == NULL) {
            darray_pop(&master.listens);
            log_error("Create listen %s failed", listen_str);
            sdsfree(listen_str);
            return VR_ERROR;
        }
        sdsfree(listen_str);
    }

    /* Start listening on every created socket. */
    for (j = 0; j < darray_n(&master.listens); j ++) {
        vlisten = darray_get(&master.listens, j);
        status = vr_listen_begin(*vlisten);
        if (status != VR_OK) {
            log_error("Begin listen to %s failed", (*vlisten)->name);
            return VR_ERROR;
        }
    }

    master.cbsul = dlistCreate();
    if (master.cbsul == NULL) {
        log_error("Create list failed: out of memory");
        return VR_ENOMEM;
    }

    setup_master();

    return VR_OK;
}
/* Add the key to the DB. If the key already exists REDIS_ERR is returned,
 * otherwise REDIS_OK is returned, and the caller should increment the
 * refcount of 'val'. */
int dbAdd(redisDb *db, robj *key, robj *val) {
    /* Look the key up first: we only duplicate it when we will insert. */
    if (dictFind(db->dict, key->ptr) != NULL) return REDIS_ERR;

    dictAdd(db->dict, sdsdup(key->ptr), val);
    return REDIS_OK;
}
/* Remove 'key' from the cache.
 *
 * Master reply by setting val to an object.
 * We do not delete cache entry until the master reply -- entries whose
 * value is still NULL are therefore left in place. */
void cacheDelete(ccache* c, sds key) {
    cacheEntry *entry = dictFetchValue(c->data, key);
    if (entry == NULL || entry->val == NULL) return;

    /* Tell the master this entry is stale; it owns the duplicated key. */
    cacheSendMessage(c, sdsdup(key), CACHE_REQUEST_OLD);
    listDelNode(c->accesslist, entry->ln);
    dictDelete(c->data, key);
}