/**
 * Free the temporary memory used for storing the winner list.
 *
 * Each member is released and its pointer reset to NULL so the structure
 * can safely be freed again or reused.
 */
static void
free_copy_winner_list(ReportCopyWinner_t *copy_winner)
{
    if (copy_winner->winner_head != NULL) {
        plat_free(copy_winner->winner_head);
        copy_winner->winner_head = NULL;
    }
    if (copy_winner->winners != NULL) {
        plat_free(copy_winner->winners);
        /* Fix: the original reset winner_head here instead of winners,
         * leaving a dangling pointer that could be double-freed. */
        copy_winner->winners = NULL;
    }
    if (copy_winner->key_table != NULL) {
        plat_free(copy_winner->key_table);
        copy_winner->key_table = NULL;
    }
    if (copy_winner->snapshot.winner_sorted != NULL) {
        plat_free(copy_winner->snapshot.winner_sorted);
        copy_winner->snapshot.winner_sorted = NULL;
    }
    if (copy_winner->ref_sort != NULL) {
        plat_free(copy_winner->ref_sort);
        copy_winner->ref_sort = NULL;
    }
}
void testremove() { uint64_t cguid = 1; SDF_container_type_t ctype = SDF_BLOCK_CONTAINER; for (int blockNum = 0; blockNum < numBlocks; blockNum++) { local_key_t *lkey = get_local_block_key(blockNum); DirEntry *entry = HomeDir_remove(homedir, cguid, ctype, lkey); // {{ plat_assert_always(entry); fthThread_t *top = reqq_peek(entry->q); if (top) plat_assert_always(top == fthSelf()); fthWaitEl_t *wait = reqq_lock(entry->q); fthThread_t *self = reqq_dequeue(entry->q); if (self) plat_assert_always(self == fthSelf()); reqq_unlock(entry->q, wait); reqq_destroy(entry->q); plat_assert_always(NULL == entry->home); plat_free(entry); // }} free_local_key(lkey); } printf("Thread 1: Removed %d blocks from the directory\n", numBlocks); }
/**
 * Apply a shard_meta event (get/put/delete) at the scheduled time.
 *
 * Consumes state->in and the state structure itself; results land in
 * state->out / state->expires before the state is released.
 */
static void
user_at_event(struct plat_closure_scheduler *context, void *env,
              SDF_status_t status)
{
    struct update_meta_data_at_event_state *state =
        (struct update_meta_data_at_event_state *)env;
    struct cr_shard_meta *meta;

    plat_assert(state != NULL && state->in != NULL);

    switch (state->type) {
    case GET_SHARD_META:
        status = rtfw_get_shard_meta_sync(state->test_framework, state->node,
                                          state->in->persistent.sguid,
                                          state->r_shard_meta, &state->out,
                                          &state->expires);
        break;
    case PUT_SHARD_META:
        /* Advance the meta-data generation before putting it back. */
        meta = state->in;
        meta->persistent.current_home_node = state->node;
        ++meta->persistent.shard_meta_seqno;
        ++meta->persistent.ltime;
        meta->persistent.lease_usecs = state->lease_usecs;
        rtfw_put_shard_meta_sync(state->test_framework, state->node,
                                 state->in, &state->out, &state->expires);
        break;
    default:
        rtfw_delete_shard_meta_sync(state->test_framework, state->node,
                                    state->in, &state->out, &state->expires);
        break;
    }

    cr_shard_meta_free(state->in);
    plat_free(state);
}
static void ra_sched_pts_free(void *pts) { struct sdf_replicator_adapter_thread_state *rats = (struct sdf_replicator_adapter_thread_state *)pts; if (rats) { plat_free(rats); } }
/**
 * Synchronously receive a message.
 *
 * On success, *msg ptr contains the message which was received and
 * *msg_free_closure code which will free it.
 *
 * The wire format is a plat_msg_header (whose len field covers the whole
 * message including the header) followed by the payload.
 *
 * Returns positive (total bytes received) on success, 0 on EOF before any
 * bytes arrive, -errno on failure.
 */
int
plat_recv_msg(int fd, struct plat_msg_header **msg_ptr,
              plat_msg_free_t *msg_free_closure) {
    struct plat_msg_header header, *msg = NULL;
    int got;
    size_t remain;
    int ret;

    /* Phase 1: read and validate the fixed-size header.
     * recv_bytes returning 0 means EOF; ret stays 0 and the later
     * ret > 0 guards skip both the payload read and the success path. */
    ret = recv_bytes(fd, &header, sizeof (header));
    if (!ret) {
    } else if (0 <= ret && ret < sizeof (header)) {
        /* Connection closed mid-header. */
        plat_log_msg(20959, PLAT_LOG_CAT_PLATFORM_MSG,
                     PLAT_LOG_LEVEL_DIAGNOSTIC,
                     "plat_recv_msg short header %d of %u bytes",
                     ret, (unsigned)sizeof (header));
        ret = -PLAT_EEOF;
    } else if (ret == sizeof (header) && header.magic != PLAT_MSG_MAGIC) {
        /* NOTE(review): "magix" looks like a typo for "magic", but the
         * text is keyed by catalogued log id 20960, so it is left as-is. */
        plat_log_msg(20960, PLAT_LOG_CAT_PLATFORM_MSG,
                     PLAT_LOG_LEVEL_DIAGNOSTIC,
                     "plat_recv_msg bad magix %x", header.magic);
        ret = -EILSEQ;
    } else {
        /* Header looks sane; allocate space for header + payload. */
        msg = plat_alloc(header.len);
        if (!msg) {
            ret = -ENOMEM;
        };
    };

    /* Phase 2: copy the header into the buffer and read the payload. */
    if (ret > 0) {
        memcpy(msg, &header, sizeof (header));
        remain = header.len - sizeof (header);
        got = recv_bytes(fd, ((char *)msg) + sizeof (header), remain);
        if (got < 0) {
            ret = got;
        } else if (!got) {
            ret = -PLAT_EEOF;
        } else if (got < remain) {
            /* Connection closed mid-payload. */
            plat_log_msg(20961, PLAT_LOG_CAT_PLATFORM_MSG,
                         PLAT_LOG_LEVEL_DIAGNOSTIC,
                         "plat_recv_msg short payload %d of %u bytes",
                         got, (unsigned)remain);
            ret = -PLAT_EEOF;
        } else {
            /* ret becomes header + payload byte count. */
            ret += got;
        }
    }

    /* Phase 3: hand off ownership on success, otherwise discard.
     * plat_free(NULL) is reached on the EOF/error paths where msg was
     * never allocated — presumably a no-op like free(NULL); confirm. */
    if (ret > 0) {
        *msg_ptr = msg;
        *msg_free_closure =
            plat_msg_free_create(PLAT_CLOSURE_SCHEDULER_SYNCHRONOUS,
                                 &free_msg, NULL);
    } else {
        plat_free(msg);
    }
    return (ret);
}
/**
 * @brief Final teardown once the adapter's closure scheduler has stopped.
 *
 * Runs as the scheduler-stopped callback: releases message bindings,
 * actions, queue pairs, per-peer state, and finally the adapter itself.
 * By this point the replicator must already be gone.
 */
static void
ra_scheduler_stopped(plat_closure_scheduler_t *context, void *env) {
    int i;
    struct sdf_replicator_adapter *ra =
        (struct sdf_replicator_adapter *)env;

    plat_log_msg(21469, LOG_CAT, PLAT_LOG_LEVEL_TRACE,
                 "replicator_adapter %p scheduler stopped", ra);

    ra->internal_sdf_client_scheduler = NULL;

    /* Sanity: the replicator must have been released before the
     * scheduler stopped, and the scheduler pointer was just cleared. */
    plat_assert(!ra->replicator);
    plat_assert(!ra->internal_sdf_client_scheduler);

    /* Bindings must go before the queue pairs they reference. */
    if (ra->replication_msg_binding) {
        sdf_msg_binding_free(ra->replication_msg_binding);
    }
    if (ra->peer_msg_binding) {
        sdf_msg_binding_free(ra->peer_msg_binding);
    }
    if (ra->recv_msg_action) {
        sdf_msg_action_free(ra->recv_msg_action);
    }

    /** XXX use destructors here */
    if (ra->send_queue_pair) {
        sdf_delete_queue_pair(ra->send_queue_pair);
    }
    if (ra->recv_queue_pair) {
        sdf_delete_queue_pair(ra->recv_queue_pair);
    }

    /* Per-node peer state; entries may be sparse (NULL). */
    for (i = 0; i < ra->config.node_count; ++i) {
        if (ra->peers[i]) {
            ra_peer_free(ra->peers[i]);
        }
    }
    plat_free(ra->peers);
    plat_free(ra);
}
/*
 * Tear down a reporter instance.
 *
 * Only succeeds when no dump is in progress (dump_num == 0); the CAS to
 * MAX_DUMP_NUM makes concurrent dumpers back off while we free.
 * Returns 0 on success, -1 when the reporter is busy.
 */
int
hot_key_cleanup(Reporter_t *preporter)
{
    if (!__sync_bool_compare_and_swap(&preporter->dump_num, 0,
                                      MAX_DUMP_NUM)) {
        return -1;
    }
    plat_free(preporter);
    return (0);
}
/** @brief Release one peer's queue pairs and its state structure */
static void
ra_peer_free(struct ra_peer_state *peer)
{
    if (!peer) {
        return;
    }
    if (peer->inbound) {
        sdf_delete_queue_pair(peer->inbound);
    }
    if (peer->outbound) {
        sdf_delete_queue_pair(peer->outbound);
    }
    plat_free(peer);
}
/**
 * Check whether the object named by @a key exists in container @a c.
 *
 * Only object containers are checked; block containers and NULL/invalid
 * inputs report SDF_FALSE.
 *
 * Fixes relative to the original:
 *  - metaData was leaked on every path and fkey on most paths; both are
 *    now freed exactly once before returning.
 *  - key->object_id was dereferenced before the key != NULL check; the
 *    check now happens first.
 *  - plat_alloc results are checked before use.
 */
SDF_boolean_t
object_exists(SDF_internal_ctxt_t *pai, SDF_CONTAINER c, SDF_key_t key)
{
    SDF_boolean_t exists = SDF_FALSE;
    objMetaData_t *metaData;
    char *fkey;
    char *name = NULL;

    if (key == NULL) {
        return (SDF_FALSE);
    }

    metaData = plat_alloc(sizeof(objMetaData_t));
    fkey = plat_alloc(256);
    if (metaData == NULL || fkey == NULL) {
        if (metaData != NULL) {
            plat_free(metaData);
        }
        if (fkey != NULL) {
            plat_free(fkey);
        }
        return (SDF_FALSE);
    }

    name = (char *)ObjectKey_getName(key->object_id);
    metaData->objFlags = 0;
    metaData->expTime = 0;
    metaData->createTime = 0;
    metaData->keyLen = strlen(name);
    metaData->dataLen = 0;
    memcpy(fkey, name, strlen(name));

    if (!isContainerNull(c)) {
        local_SDF_CONTAINER lc = getLocalContainer(&lc, c);
        local_SDF_CONTAINER_PARENT lparent =
            getLocalContainerParent(&lparent, lc->parent);

        /* We only need to do the check for object containers */
        if (lparent->container_type == SDF_OBJECT_CONTAINER) {
            struct shard *shard = NULL;
            if ((shard = get_shard(pai, lc)) == NULL) {
                exists = SDF_FALSE;
                plat_log_msg(21597, PLAT_LOG_CAT_SDF_SHARED,
                             PLAT_LOG_LEVEL_TRACE,
                             "FAILURE: object_exists - failed to get shard");
            } else if (flashGet(shard, metaData, (char *)fkey, NULL)) {
                exists = SDF_TRUE;
            }
        }
        releaseLocalContainer(&lc);
        releaseLocalContainerParent(&lparent);
    }

    plat_free(fkey);
    plat_free(metaData);
    return (exists);
}
/** * @brief synchronized create_shard/write/read/delete/delete_shard operations */ void user_operations_rms_sm_fault_in(uint64_t args) { struct replication_test_framework *test_framework = (struct replication_test_framework *)args; SDF_shardid_t shard_id = 1; struct SDF_shard_meta *shard_meta = NULL; struct sdf_replication_shard_meta *r_shard_meta; /* configuration infomation about shard */ SDF_replication_props_t *replication_props = NULL; struct cr_shard_meta *in; vnode_t node = 0; int failed; #if 0 struct cr_shard_meta *out = NULL; SDF_status_t status = SDF_SUCCESS; struct timeval expires; struct timeval now; #endif /* configures test framework accommodate to RT_TYPE_META_STORAGE */ failed = !(plat_calloc_struct(&replication_props)); plat_assert(!failed); rtfw_set_default_replication_props(&test_framework->config, replication_props); init_meta_data(test_framework, replication_props, &r_shard_meta, node, shard_id, LEASE_USECS, &shard_meta, &in); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "start test_framework"); rtfw_start(test_framework); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "test_framework started\n"); failed = rms_modify_shard_meta_fault_in(test_framework, replication_props); plat_assert(failed != 1); cr_shard_meta_free(in); plat_free(replication_props); /* Shutdown test framework */ plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "\n************************************************************\n" " Test framework shutdown " "\n************************************************************"); rtfw_shutdown_sync(test_framework); /* Terminate scheduler */ fthKill(1); }
/**
 * Write an object to flash under the name carried by @a key.
 *
 * @return SDF_SUCCESS on success, SDF_FAILURE_MEMORY_ALLOC when a buffer
 * cannot be allocated, SDF_FAILURE_STORAGE_WRITE when flashPut fails.
 *
 * Fixes relative to the original:
 *  - metaData was leaked on every path and fkey on the allocation-failure
 *    branch; both are now freed exactly once before returning.
 *  - plat_alloc results for metaData/fkey are checked before use.
 *
 * NOTE(review): `data` is sized 4+size but only `size` bytes are copied,
 * and it is never freed here — presumably flashPut takes ownership and
 * the 4 extra bytes match the on-flash length prefix read by get_object;
 * confirm against the flash layer.
 */
SDF_status_t
put_object(struct shard *shard, SDF_key_t key, void *pbuf, SDF_size_t size,
           SDF_operation_t *opid)
{
    SDF_status_t status = SDF_FAILURE;
    char *data = NULL;
    objMetaData_t *metaData = plat_alloc(sizeof(objMetaData_t));
    char *fkey = plat_alloc(256);
    char *name = NULL;

    if (metaData == NULL || fkey == NULL) {
        if (metaData != NULL) {
            plat_free(metaData);
        }
        if (fkey != NULL) {
            plat_free(fkey);
        }
        return (SDF_FAILURE_MEMORY_ALLOC);
    }

    name = (char *)ObjectKey_getName(key->object_id);
    metaData->objFlags = 0;
    metaData->expTime = 0;
    metaData->createTime = 0;
    metaData->keyLen = strlen(name);
    metaData->dataLen = size;
    memcpy(fkey, name, strlen(name));

    if (pbuf != NULL && (data = plat_alloc(4 + size)) == NULL) {
        status = SDF_FAILURE_MEMORY_ALLOC;
        plat_log_msg(21590, PLAT_LOG_CAT_SDF_SHARED, PLAT_LOG_LEVEL_TRACE,
                     "FAILURE: put_object - memory allocation");
    } else {
        if (pbuf != NULL) {
            memcpy(data, pbuf, size);
        }
        if (!flashPut(shard, metaData, (char *)fkey, data)) {
            status = SDF_FAILURE_STORAGE_WRITE;
            plat_log_msg(21591, PLAT_LOG_CAT_SDF_SHARED,
                         PLAT_LOG_LEVEL_TRACE,
                         "FAILURE: put_object - flashPut");
        } else {
            status = SDF_SUCCESS;
            plat_log_msg(21592, PLAT_LOG_CAT_SDF_SHARED,
                         PLAT_LOG_LEVEL_TRACE,
                         "SUCCESS: put_object - flashPut");
        }
    }

    plat_free(fkey);
    plat_free(metaData);
    return (status);
}
/**
 * Read an object from flash into a newly created cache object.
 *
 * On success *dest holds the cache object, *size its length, and
 * SDF_SUCCESS is returned; on failure SDF_FAILURE (or
 * SDF_FAILURE_MEMORY_ALLOC) is returned and the out-parameters are
 * untouched.
 *
 * Fixes relative to the original:
 *  - metaData was leaked on every call; it is now freed before returning.
 *  - plat_alloc results for metaData/fkey are checked before use.
 *
 * NOTE(review): `data` filled in by flashGet is not freed here —
 * ownership is unclear from this file; confirm whether flashGet hands the
 * buffer to the caller.
 */
SDF_status_t
get_object(struct shard *shard, SDF_key_t key, SDF_CACHE_OBJ *dest,
           SDF_size_t *size, SDF_operation_t *opid)
{
    int code = 0;
    SDF_status_t status = SDF_FAILURE;
    char *data = NULL;
    local_SDF_CACHE_OBJ lo = NULL;
    char *fkey = plat_alloc(256);
    char *name = NULL;
    objMetaData_t *metaData = plat_alloc(sizeof(objMetaData_t));

    if (fkey == NULL || metaData == NULL) {
        if (fkey != NULL) {
            plat_free(fkey);
        }
        if (metaData != NULL) {
            plat_free(metaData);
        }
        return (SDF_FAILURE_MEMORY_ALLOC);
    }

    name = (char *)ObjectKey_getName(key->object_id);
    metaData->objFlags = 0;
    metaData->expTime = 0;
    metaData->createTime = 0;
    metaData->keyLen = strlen(name);
    metaData->dataLen = 0;
    memcpy(fkey, name, strlen(name));

    if ((code = flashGet(shard, metaData, (char *)fkey, &data)) == 0) {
        plat_log_msg(21588, PLAT_LOG_CAT_SDF_SHARED, PLAT_LOG_LEVEL_TRACE,
                     "FAILURE: get_object - flashget");
    } else {
        /* First 4 bytes of the flash buffer carry the payload length. */
        uint32_t dataLen = *((uint32_t *) data);
        *dest = createCacheObject(dataLen);
        plat_assert(!isCacheObjectNull(*dest));
        getLocalCacheObject(&lo, *dest, dataLen);
        memcpy(lo, data, dataLen);
        releaseLocalCacheObject(&lo, dataLen);
        *size = dataLen;
        status = SDF_SUCCESS;
        plat_log_msg(21589, PLAT_LOG_CAT_SDF_SHARED, PLAT_LOG_LEVEL_TRACE,
                     "SUCCESS: get_object - flashget");
    }

    plat_free(fkey);
    plat_free(metaData);
    return (status);
}
/**
 * Cleanup handler run on normal exit or Ctrl+C: closes the log files,
 * clears the writer pids in both shared-memory buffers, flushes the
 * partially filled buffer to the END_FILE marker file, restores the
 * signal mask, and exits.
 */
void interrupt() {
    printf("exit...\n");
    int i = 0;
    /* close all files */
    for (i = 0; i < MAX_FILE_NUMBER; i++) {
        fclose(files[i].fp);
    }
    /* clear the pid flag so a future writer can attach */
    *(shmem_info_A->pid) = 0;
    *(shmem_info_B->pid) = 0;
    /*free the buffer */
    //free(buffer);
    /* get the buffer that is not full */
    struct shmem_buffer *shmem_info = shmem_info_A;
    /* even number for buffer A, and odd number for buffer B */
    if ( file_index%2 == 1)
        shmem_info = shmem_info_B;
    /* flush the left content */
    printf("buffer total size %d\n", (*shmem_info->size));
    /* clear the share memory to the file LOG_END_FILE */
    char file_name[MAX_FILE_NAME];
    sprintf (file_name, "%s/%s", LOG_DIR, END_FILE);
    FILE *fp = fopen(file_name, "wb+");
    /* NOTE(review): when file_index % MAX_FILE_NUMBER == 0 this
     * underflows to a huge value — confirm intended wrap behavior. */
    unsigned long long last_index = file_index % MAX_FILE_NUMBER - 1;
    if (fp) {
        /* NOTE(review): this writes sizeof(char)==1 element of
         * sizeof(int) bytes, i.e. only 4 bytes of the 8-byte
         * last_index, and the size/count arguments look swapped
         * relative to the other fwrite calls.  Left untouched because
         * the reader of END_FILE presumably expects this exact layout
         * — confirm against the consumer before changing. */
        fwrite(&(last_index), sizeof(int), sizeof(char), fp);
        fwrite(&file_index, sizeof(unsigned long long), 1, fp);
        /* size/count also swapped here, though the total byte count
         * is the same either way. */
        fwrite(shmem_info->shm, (*shmem_info->size), sizeof(char), fp);
        fflush(fp);
        fclose(fp);
    } else {
        printf("flush the end file failed\n!");
    }
    /* clean the shmem */
    //memset(shmem_info_A->shm - SHME_HEADER_SIZE, '\0', SHME_MAX_SIZE);
    //memset(shmem_info_B->shm - SHME_HEADER_SIZE, '\0', SHME_MAX_SIZE);
    /* restore the sig mask */
    sigprocmask(SIG_UNBLOCK, &mask, NULL);
    /* free the files */
    plat_free(files);
    /* do exit */
    plat__exit(PLAT_EXIT_OK);
}
int main(int argc, char **argv) { SDF_status_t status; struct replication_test_framework *test_framework = NULL; struct replication_test_config *config = NULL; int failed; struct plat_opts_config_sn_get_put_test opts_config; memset(&opts_config, 0, sizeof (opts_config)); int opts_status = plat_opts_parse_sn_get_put_test(&opts_config, argc, argv); if (opts_status) { plat_opts_usage_sn_get_put_test(); return (1); } failed = !plat_calloc_struct(&config); struct plat_opts_config_replication_test_framework_sm *sm_config; plat_calloc_struct(&sm_config); plat_assert(sm_config); /* start shared memory */ status = framework_sm_init(0, NULL, sm_config); /* start fthread library */ fthInit(); rt_config_init(config, 10 /* hard code iterations here */); config->test_type = RT_TYPE_META_STORAGE; config->nnode = 5; config->num_replicas = NUM_REPLICAS; test_framework = replication_test_framework_alloc(config); if (test_framework) { plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "test_framework %p allocated\n", test_framework); } XResume(fthSpawn(&user_operations_rms_sm_fault_in, 40960), (uint64_t)test_framework); fthSchedulerPthread(0); plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "JOIN"); plat_free(config); framework_sm_destroy(sm_config); return (0); }
/*
 * Caller wants to know the ntop hot clients.  It should provide a string
 * buffer of rpsize bytes; the report is appended after any existing
 * content in rpbuf.  Returns 0 on success, -1 when a dump is already in
 * progress or allocation fails.
 */
int
hot_client_report(Reporter_t *preporter, int ntop, char *rpbuf, int rpsize)
{
    int i = 0;
    ReportSortEntry_t *sort_client = NULL;
    int nlists = CLIENT_BUCKETS * NCLINETS_PER_BUCKET;
    int dump_num = __sync_add_and_fetch(&preporter->dump_num, 1);

    /* Limit the number of concurrent dumpers. */
    if (dump_num > MAX_DUMP_NUM) {
        (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
        return -1;
    }

    sort_client = plat_alloc(sizeof(ReportSortEntry_t) *
                             preporter->instances[0].nwinner_head);
    if (sort_client == NULL) {
        plat_log_msg(100016, LOG_CAT_HOTKEY, PLAT_LOG_LEVEL_ERROR,
                     "Allocate memory for sorted client failed.");
        (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
        return -1;
    }

    /*
     * Fix: the header write was bounded by the total buffer size rpsize
     * even though pos already points past existing content, which could
     * write past the end of rpbuf; bound it by the space actually left
     * and clamp the advance on truncation (snprintf returns the would-be
     * length, not the written length).
     */
    size_t used = strlen(rpbuf);
    char *pos = rpbuf + used;
    size_t remain = ((int)used < rpsize) ? (size_t)rpsize - used : 0;
    int wrote = snprintf(pos, remain, "HOTCLIENT\r\n");
    if (wrote > 0) {
        pos += ((size_t)wrote < remain) ? (size_t)wrote
                                        : (remain ? remain - 1 : 0);
    }

    if (nlists > preporter->instances[0].nwinner_head) {
        nlists = preporter->instances[0].nwinner_head;
    }
    for (i = 0; i < nlists; i++) {
        sort_client[i].index = preporter->client_ref[i].client_ip;
        sort_client[i].refcount = preporter->client_ref[i].refcount;
    }

    /* NOTE(review): dump_hot_client still receives the full rpsize as in
     * the original — confirm whether it expects total or remaining size. */
    dump_hot_client(sort_client, nlists, ntop, rpsize, pos);

    plat_free(sort_client);
    (void)__sync_sub_and_fetch(&preporter->dump_num, 1);
    return (0);
}
/*
 * Store a property, replacing (and freeing) any previous value for the
 * same key.  Returns 0 on success, 1 when the key could not be inserted
 * and no prior value existed to replace.
 */
int
setProperty(const char *key, void *value)
{
#ifdef SDFAPIONLY
    /* SDFSetPropery may be called before loadProperties; lazily create
     * the hash in that case. */
    if (!_sdf_globalPropertiesMap) {
        initializeProperties();
    }
#endif

    if (strcmp(key, "ZS_LOG_LEVEL") == 0) {
        set_log_level(value);
    }

    if (HashMap_put(_sdf_globalPropertiesMap, key, value) == SDF_TRUE) {
        return 0;
    }

    /* Key already present: swap in the new value and release the old. */
    void *previous = HashMap_replace(_sdf_globalPropertiesMap, key, value);
    if (previous == NULL) {
        return 1;
    }
    plat_free(previous);
    return 0;
}
void testremove(uint64_t seq) { uint64_t cguid = 1; SDF_container_type_t ctype = SDF_BLOCK_CONTAINER; for (int i = 0; i < niterator; i++) { local_key_t *lkey = get_local_block_key(0); DirEntry *entry = HomeDir_remove(homedir, cguid, ctype, lkey); if (entry) { printf("fth %d remove block\n", seq); fflush(stdout); (void)__sync_fetch_and_add(&nremoves, 1); } else { free_local_key(lkey); continue; } // {{ plat_assert_always(entry); fthThread_t *top = reqq_peek(entry->q); if (top) plat_assert_always(top == fthSelf()); fthWaitEl_t *wait = reqq_lock(entry->q); fthThread_t *self = reqq_dequeue(entry->q); if (self) plat_assert_always(self == fthSelf()); reqq_unlock(entry->q, wait); reqq_destroy(entry->q); plat_assert_always(NULL == entry->home); plat_free(entry); // }} free_local_key(lkey); } }
/**
 * @brief synchronized create_shard/write/read/delete/delete_shard operations
 *
 * End-to-end cursor-iteration scenario run in an fthread:
 * create a shard, write/read while node 2 is up, crash node 2, write and
 * delete more keys, restart node 2, then fetch its last seqno and walk
 * the iteration cursors, reading every object back by cursor.  Finally
 * shuts the framework down and kills the scheduler.
 */
void
user_operations_cursor_test(uint64_t args) {
    struct replication_test_framework *test_framework =
        (struct replication_test_framework *)args;
    SDF_boolean_t op_ret = SDF_FALSE;
    struct SDF_shard_meta *shard_meta = NULL;
    SDF_replication_props_t *replication_props = NULL;
    int failed = 0;
    uint64_t seqno = 0;
    SDF_shardid_t shard_id = 2;
    vnode_t node_id = 1;
    struct timeval now;
    struct timeval when;
    /* timeval incre */
    struct timeval incre;
    void *data_read;
    size_t data_read_len;
    uint64_t seqno_start, seqno_len, seqno_max;
    int i;
    int ncursors;
    it_cursor_t *pit;
    resume_cursor_t *prc = NULL;
    char skey[1024];
    SDF_time_t exptime;
    SDF_time_t createtime;
    int key_len;
    size_t data_len;
    void *pdata;
    int resume_cursor_size = 0;
    char *pcur;
    /* Claim a fresh shard id atomically (overrides the initializer). */
    shard_id = __sync_add_and_fetch(&test_framework->max_shard_id, 1);
    char *key;
    char *data;
    /* NOTE(review): meta is file-scope, not a local. */
    failed = !plat_calloc_struct(&meta);
    replication_test_meta_init(meta);
    /* Assure test_framework is started?! */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "start test_framework");
    rtfw_start(test_framework);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "test_framework started\n");
    /* Start all nodes */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "start nodes");
    rtfw_start_all_nodes(test_framework);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "nodes started");
    plat_assert(!failed);
    failed = !plat_calloc_struct(&replication_props);
    plat_assert(!failed);
    rtfw_set_default_replication_props(&test_framework->config,
                                       replication_props);
    shard_meta = rtfw_init_shard_meta(&test_framework->config,
                                      1 /* first_node */,
                                      shard_id /* shard_id, in real system generated by generate_shard_ids() */,
                                      replication_props);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " create shard sync "
                 "\n**************************************************");
    op_ret = rtfw_create_shard_sync(test_framework, 1, shard_meta);
    plat_assert(op_ret == SDF_SUCCESS);
    /* - write on node 1, key:google:1, data:Sebstian:1 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " write object sync "
                 "\n**************************************************");
    plat_asprintf(&key, "google:%d", 1);
    plat_asprintf(&data, "Sebstian:%d", 1);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE,
                 "write key:%s, key_len:%u, data:%s, data_len:%u",
                 key, (int)(strlen(key)), data, (int)(strlen(data)));
    op_ret = rtfw_write_sync(test_framework, shard_id /* shard */,
                             1 /* node */, meta /* test_meta */,
                             key, strlen(key)+1, data, strlen(data)+1);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_free(key);
    plat_free(data);
    /* - read on node 1, key:google:1 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " read object sync "
                 "\n**************************************************");
    /* Closure used by the framework to hand read buffers back for free. */
    replication_test_framework_read_data_free_cb_t free_cb =
        replication_test_framework_read_data_free_cb_create(PLAT_CLOSURE_SCHEDULER_ANY_OR_SYNCHRONOUS,
                                                            &rtfw_read_free,
                                                            test_framework);
    plat_asprintf(&key, "google:%d", 1);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "KEY:%s, key_len:%d",
                 key, (int)strlen(key));
    op_ret = rtfw_read_sync(test_framework, shard_id /* shard */,
                            node_id /* node */, key, strlen(key) + 1,
                            &data_read, &data_read_len, &free_cb);
    plat_free(key);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "read data:%s, data_len:%d",
                 (char *)data_read, (int)data_read_len);
    plat_free(data_read);
    /* crash node 2 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " crash node 2 sync "
                 "\n**************************************************");
    rtfw_crash_node_sync(test_framework, 2);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE,
                 "crash node:%"PRIu32" complete", 2);
    /**
     * write on node 1, key2: google:2, data2: Sebstian:2,
     * key3: google:3, data3: Sebstian:3
     */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " write object sync "
                 "\n**************************************************");
    plat_asprintf(&key, "google:%d", 2);
    plat_asprintf(&data, "Sebstian:%d", 2);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE,
                 "write key:%s, key_len:%u, data:%s, data_len:%u",
                 key, (int)(strlen(key)), data, (int)(strlen(data)));
    op_ret = rtfw_write_sync(test_framework, shard_id /* shard */,
                             1 /* node */, meta /* test_meta */,
                             key, strlen(key)+1, data, strlen(data)+1);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_free(key);
    plat_free(data);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " write object sync "
                 "\n**************************************************");
    plat_asprintf(&key, "google:%d", 3);
    plat_asprintf(&data, "Sebstian:%d", 3);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE,
                 "write key:%s, key_len:%u, data:%s, data_len:%u",
                 key, (int)(strlen(key)), data, (int)(strlen(data)));
    op_ret = rtfw_write_sync(test_framework, shard_id /* shard */,
                             1 /* node */, meta /* test_meta */,
                             key, strlen(key)+1, data, strlen(data)+1);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_free(key);
    plat_free(data);
    /* read on node 1, key2: google:2 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n**************************************************\n"
                 " read object sync "
                 "\n**************************************************");
    plat_asprintf(&key, "google:%d", 2);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "KEY:%s, key_len:%d",
                 key, (int)strlen(key));
    op_ret = rtfw_read_sync(test_framework, shard_id /* shard */,
                            1 /* node */, key, strlen(key) + 1,
                            &data_read, &data_read_len, &free_cb);
    plat_free(key);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "read data:%s, data_len:%d",
                 (char *)data_read, (int)data_read_len);
    plat_free(data_read);
    /* delete from node 1, key3: google3 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n***************************************************\n"
                 " delete object sync "
                 "\n***************************************************");
    plat_asprintf(&key, "google:%d", 3);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "KEY:%s, key_len:%d",
                 key, (int)(strlen(key)));
    op_ret = rtfw_delete_sync(test_framework, shard_id /* shard */,
                              1 /* node */, key, strlen(key)+1);
    plat_assert(op_ret == SDF_SUCCESS);
    plat_free(key);
    /* restart node 2 */
    op_ret = rtfw_start_node(test_framework, 2);
    plat_assert(op_ret == SDF_SUCCESS);
    /* block a while so node 2 can catch up */
    now = test_framework->now;
    incre.tv_sec = 10;
    incre.tv_usec = 0;
    timeradd(&now, &incre, &when);
    rtfw_block_until(test_framework, (const struct timeval)when);
    rtfw_sleep_usec(test_framework, SLEEP_US);
    /* get last seqno from node 2 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n************************************************************\n"
                 " get latest seqno from node 2 "
                 "\n************************************************************");
    op_ret = rtfw_get_last_seqno_sync(test_framework, 2, shard_id, &seqno);
    if (op_ret == SDF_SUCCESS) {
        plat_log_msg(LOG_ID, LOG_CAT, LOG_INFO,
                     "get_last_seqno succeeded! (seqno=%"PRIu64")", seqno);
    } else {
        plat_log_msg(LOG_ID, LOG_CAT, LOG_INFO, "get_last_seqno failed!");
    }
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n************************************************************\n"
                 " get iteration cursors "
                 "\n************************************************************");
    /* Walk the cursor iteration: each pass fetches up to seqno_len
     * cursors, then resumes from pit->resume_cursor until none remain. */
    prc = NULL;
    resume_cursor_size = 0;
    while (1) {
        replication_test_framework_read_data_free_cb_t free_cb =
            replication_test_framework_read_data_free_cb_create(PLAT_CLOSURE_SCHEDULER_ANY_OR_SYNCHRONOUS,
                                                                &rtfw_read_free,
                                                                test_framework);
        seqno_start = 0;
        seqno_len = 10;
        seqno_max = UINT64_MAX - 1;
        op_ret = rtfw_get_cursors_sync(test_framework, shard_id, node_id,
                                       seqno_start, seqno_len, seqno_max,
                                       (void *) prc, resume_cursor_size,
                                       (void **) &pit, &data_len, &free_cb);
        if (op_ret != SDF_SUCCESS) {
            plat_log_msg(LOG_ID, LOG_CAT, LOG_INFO,
                         "get_iteration_cursors failed!");
            break;
        } else {
            ncursors = pit->cursor_count;
            if (ncursors == 0) {
                break;
            }
            prc = &(pit->resume_cursor);
            resume_cursor_size = sizeof(resume_cursor_t);
            plat_assert(data_len ==
                        (sizeof(it_cursor_t) + seqno_len*pit->cursor_len));
            plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                         "get_iteration_cursors succeeded (%d cursors returned)!",
                         ncursors);
            /* Fetch each object addressed by a returned cursor. */
            pcur = pit->cursors;
            for (i = 0; i < ncursors; i++) {
                replication_test_framework_read_data_free_cb_t free_cb =
                    replication_test_framework_read_data_free_cb_create(PLAT_CLOSURE_SCHEDULER_ANY_OR_SYNCHRONOUS,
                                                                        &rtfw_read_free,
                                                                        test_framework);
                op_ret = rtfw_get_by_cursor_sync(test_framework, shard_id,
                                                 node_id, (void *) pcur,
                                                 pit->cursor_len, skey,
                                                 1024, &key_len, &exptime,
                                                 &createtime, &seqno,
                                                 &pdata, &data_len,
                                                 &free_cb);
                pcur += pit->cursor_len;
                if (op_ret == SDF_SUCCESS) {
                    plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE,
                                 "get_by_cursor: %s, key_len:%u, data:%s, data_len:%u,"
                                 "seqno: %"PRIu64", exptime:%"PRIu32", createtime:%"PRIu32"",
                                 skey, key_len, (char *)pdata,
                                 (unsigned)data_len, seqno, exptime,
                                 createtime);
                    plat_free(pdata);
                } else {
                    plat_log_msg(LOG_ID, LOG_CAT, LOG_INFO,
                                 "get_by_cursor failed!");
                }
            }
        }
    }
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n************************************************************\n"
                 " Test framework shutdown "
                 "\n************************************************************");
    rtfw_shutdown_sync(test_framework);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "\n************************************************************\n"
                 " Test framework sync summary "
                 "\n************************************************************");
    plat_free(meta);
    plat_free(replication_props);
    plat_free(shard_meta);
    /* Terminate scheduler if idle_thread exit */
    while (test_framework->timer_dispatcher) {
        fthYield(-1);
    }
    plat_free(test_framework);
    fthKill(1);
}
/*
 * Closure callback used by plat_recv_msg's free closure to release a
 * message buffer allocated with plat_alloc; context and ignore are
 * unused.
 */
static void
free_msg(plat_closure_scheduler_t *context, void *ignore,
         struct plat_msg_header *msg) {
    plat_free(msg);
}
static int init_flash(struct sdf_agent_state *state) { char *device_name; int flash_flags = 0; // Allocate space for the Mcd_containers array Mcd_containers = (mcd_container_t *) plat_alloc(sizeof(mcd_container_t) * max_num_containers); memset(Mcd_containers, 0, sizeof(*Mcd_containers) * max_num_containers); /* This initializes the code that redirects * flash API calls to one of several alternative * flash subsystems (see sdf/ssd). */ #ifdef ENABLE_MULTIPLE_FLASH_SUBSYSTEMS ssd_Init(); #endif #ifdef FLASH_RECOVERY flash_flags = FLASH_OPEN_PERSISTENCE_AVAILABLE; #endif switch(state->config.system_recovery) { case SYS_FLASH_RECOVERY: default: flash_flags |= FLASH_OPEN_NORMAL_RECOVERY; break; case SYS_FLASH_REFORMAT: flash_flags |= FLASH_OPEN_REFORMAT_DEVICE; break; } #ifdef MULTIPLE_FLASH_DEV_ENABLED int ii; for(ii=0; ii < state->config.numFlashDevs; ii++) { if (plat_asprintf(&device_name, "%s%d", state->config.flashDevName, ii) > 0) { state->flash_dev[ii] = (flashDev_t *)NULL; state->flash_dev[ii] = flashOpen(device_name, &state->flash_settings, flash_flags); plat_assert(state->flash_dev[ii]); plat_free(device_name); } } /* All the /dev/flash names have been converted to /dev/flash0 even in signle flashcard machines. So no need to handle single card case separately */ #else state->flash_dev = NULL; if (strstr(state->config.flashDevName, "%")) { if (plat_asprintf(&device_name, state->config.flashDevName, (int)state->rank) > 0) { state->flash_dev = flashOpen(device_name, &state->flash_settings, flash_flags); plat_assert(state->flash_dev); plat_free(device_name); } } else { if (plat_asprintf(&device_name, "%s%d", state->config.flashDevName, 0) > 0) { state->flash_dev = flashOpen(device_name, &state->flash_settings, flash_flags); plat_assert(state->flash_dev); plat_free(device_name); } } #endif // MULTI_FLASH_DEV_ENABLED state->flash_dev_count = state->config.numFlashDevs; return (state->flash_dev != NULL); }
/**
 * Load key=value properties from the file at path_arg into the global
 * properties map, skipping blank lines and '#' comments.
 *
 * @return 0 when nothing was loaded or all entries parsed, -1 when the
 * file cannot be opened, otherwise the negated count of entries that
 * failed to insert.
 */
int loadProperties(const char *path_arg) {
#ifndef SDFAPIONLY
    /* In SDF library the hash can be initialized already by SDFSetPropery
     * API; loading twice is skipped. */
    if (NULL != _sdf_globalPropertiesMap) {
        return 0;
    }
#endif
    int ret = 0;
    const char *path = NULL;
    path = path_arg;
    if (!path)
        return 0;
    FILE *fp = fopen(path, "r");
    if (!fp) {
        plat_log_msg(21756, PLAT_LOG_CAT_PRINT_ARGS, PLAT_LOG_LEVEL_ERROR,
                     "Reading properties file '%s' has an error!\n", path);
        return -1;
    }
    if (!_sdf_globalPropertiesMap) {
        initializeProperties();
    }
    /* NOTE(review): line is used unchecked; on OOM fgets would receive
     * NULL — confirm plat_alloc's failure policy here. */
    char *line = (char *) plat_alloc(2048), *beg, *str, *key, *val;
    while (fgets(line, 2048, fp)) { // aah... really needed Boost here
        beg = line;
        while (' ' == *beg) { // trim beginning
            beg++;
        }
        if ('#' == *beg || '\0' == *beg || '\n' == *beg) { // comment/blank line
            continue;
        }
        /* Scan the key token up to '=', space, newline, or end. */
        str = beg;
        while ('=' != *str && '\0' != *str && ' ' != *str && '\n' != *str) {
            str++;
        }
        if (str-beg) {
            key = strndup(beg, str-beg);
        } else {
            continue;
        }
        /* Skip the separator run (spaces and '=') before the value. */
        beg = str++;
        while (' ' == *beg || '=' == *beg) {
            beg++;
        }
        /* Scan the value token with the same delimiters. */
        str = beg;
        while ('=' != *str && '\0' != *str && ' ' != *str && '\n' != *str) {
            str++;
        }
        if (str-beg) {
            val = strndup(beg, str-beg);
        } else {
            free(key); // no value: discard the key and move on
            continue;
        }
#ifdef SDFAPIONLY
        /* in SDF library properties from file override properties set in
         * runtime using SDFSetProperty */
        setProperty(key, val);
#else
        if (0 != insertProperty(key, val)) {
            ret--;
            plat_log_msg(21757, PLAT_LOG_CAT_PRINT_ARGS,
                         PLAT_LOG_LEVEL_ERROR,
                         "Parsed property error (ret:%d)('%s', '%s')",
                         ret, key, val);
        }
#endif
        /**
         * XXX: drew 2008-12-17 It would be better to log at the point of
         * use so we can output whether the default value was being used;
         * but this will get us the current settings in Patrick's
         * memcached runs.
         *
         * NOTE(review): key/val are strdup'd and not freed here —
         * presumably the map takes ownership; confirm insertProperty /
         * setProperty semantics.
         */
        if (ZS_log_level <= PLAT_LOG_LEVEL_TRACE_LOW) {
            plat_log_msg(21758, PLAT_LOG_CAT_PRINT_ARGS,
                         PLAT_LOG_LEVEL_TRACE_LOW,
                         "Parsed property ('%s', '%s')", key, val);
        }
    }
    if (ZS_log_level <= PLAT_LOG_LEVEL_TRACE_LOW) {
        plat_log_msg(70124, PLAT_LOG_CAT_PRINT_ARGS,
                     PLAT_LOG_LEVEL_TRACE_LOW,
                     "Read from properties file '%s'", path);
    }
    fclose(fp);
    plat_free(line);
    return (ret);
}
/** Release a uthread-specific attribute structure; NULL is tolerated. */
void
plat_attr_uthread_free(struct plat_attr_uthread_specific *spec)
{
    if (!spec) {
        return;
    }
    plat_free(spec);
}
/** * @brief synchronized create_shard/write/read/delete/delete_shard operations */ void rt_mix_write_delete_entry(uint64_t args) { struct replication_test_framework *test_framework = (struct replication_test_framework *)args; SDF_boolean_t op_ret; struct SDF_shard_meta *shard_meta = NULL; SDF_replication_props_t *replication_props = NULL; int failed; SDF_shardid_t shard_id; replication_test_framework_read_data_free_cb_t free_cb; shard_id = __sync_add_and_fetch(&test_framework->max_shard_id, 1); char key1[] = "key1"; char key2[] = "key2"; char *key; size_t key_len; char *data; void *data_out; size_t data_len_out; int data_generation; failed = !plat_calloc_struct(&meta); plat_assert(!failed); replication_test_meta_init(meta); /* Assure test_framework is started?! */ plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "start test_framework"); rtfw_start(test_framework); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "test_framework started\n"); /* Start all nodes */ plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "start nodes"); rtfw_start_all_nodes(test_framework); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "nodes started"); failed = !plat_calloc_struct(&replication_props); plat_assert(!failed); rtfw_set_default_replication_props(&test_framework->config, replication_props); shard_meta = rtfw_init_shard_meta(&test_framework->config, 1 /* first */, shard_id /* shard_id, in real system generated by generate_shard_ids() */, replication_props); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 1"); op_ret = rtfw_create_shard_sync(test_framework, 1, shard_meta); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 1 complete"); plat_assert(op_ret == SDF_SUCCESS); data_generation = 0; plat_asprintf(&data, "data_%s_%d", key1, data_generation); key = key1; key_len = strlen(key) + 1; plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "write on node 1 key:%s, key_len:%u, data:%s, data_len:%u", key, (int)(strlen(key)), data, (int)(strlen(data))); op_ret = rtfw_write_sync(test_framework, shard_id /* shard */, 1 /* 
node */, meta /* test_meta */, key, key_len, data, strlen(data)+1); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "write on node 1 complete"); plat_assert(op_ret == SDF_SUCCESS); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "read on node 1"); op_ret = rtfw_read_sync(test_framework, shard_id, 1 /* node */, key, key_len, &data_out, &data_len_out, &free_cb); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "read on node 1 complete"); plat_assert(op_ret == SDF_SUCCESS); plat_assert(strcmp(data_out, data) == 0); plat_closure_apply(replication_test_framework_read_data_free_cb, &free_cb, data_out, data_len_out); ++ data_generation; plat_asprintf(&data, "data_%s_%d", key2, data_generation); key = key2; key_len = strlen(key) + 1; plat_log_msg(LOG_ID, LOG_CAT, LOG_TRACE, "write on node 1 key:%s, key_len:%u, data:%s, data_len:%u", key, (int)(strlen(key)), data, (int)(strlen(data))); op_ret = rtfw_write_sync(test_framework, shard_id /* shard */, 1 /* node */, meta /* test_meta */, key, key_len, data, strlen(data)+1); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "write on node 1 complete"); plat_assert(op_ret == SDF_SUCCESS); key = key2; key_len = strlen(key) + 1; op_ret = rtfw_delete_sync(test_framework, shard_id, 1, key, key_len); plat_assert(op_ret == SDF_SUCCESS); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "crash node 1"); op_ret = rtfw_crash_node_sync(test_framework, 1); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "crash node 1 complete"); plat_assert(op_ret == SDF_SUCCESS); /* Sleep through the lease until switchover happens */ rtfw_sleep_usec(test_framework, test_framework->config.replicator_config.lease_usecs * 2); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "shutdown"); rtfw_shutdown_sync(test_framework); plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "shutdown complete"); plat_free(data); plat_free(meta); plat_free(replication_props); plat_free(shard_meta); /* Terminate scheduler if idle_thread exit */ while (test_framework->timer_dispatcher) { fthYield(-1); } plat_free(test_framework); fthKill(1); }
/**
 * @brief Verify simple lease-expiration behavior of shard meta-data.
 *
 * Creates shard meta on node 0, then performs two gets (node 0, then node 1)
 * separated by sleeps, asserting that the remaining lease shrinks and that a
 * get does not renew the lease.
 *
 * @return 0 on success, 1 on failure (based on the last status).
 *
 * Fixes: shard_id was passed to init_meta_data() uninitialized (undefined
 * behavior); the cr_shard_meta "in" was never freed (leak — the sibling
 * rms_* tests do free it).
 */
static int
hs_expire_simple(struct replication_test_framework *test_framework,
                 SDF_replication_props_t *replication_props) {
    SDF_status_t status;
    struct SDF_shard_meta *shard_meta;
    struct sdf_replication_shard_meta *r_shard_meta;
    /* FIX: was uninitialized; 0 chosen to match node 0 — confirm against
     * init_meta_data()'s expectations. */
    SDF_shardid_t shard_id = 0;
    vnode_t node = 0;
    struct cr_shard_meta *in;
    struct cr_shard_meta *out;
    struct timeval expires;

    /* init cr_shard_meta and shard_meta */
    init_meta_data(test_framework, replication_props, &r_shard_meta, node,
                   shard_id, LEASE_USECS, &shard_meta, &in);
    plat_assert(r_shard_meta);
    plat_assert(shard_meta);
    plat_assert(in);

    /* put meta on node 0 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 0");
    status = rtfw_create_shard_meta_sync(test_framework, node, in, &out,
                                         &expires);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 0 complete");
    /* perhaps shard has been created in other unit test */
    plat_assert(status == SDF_SUCCESS || status == SDF_FAILURE_STORAGE_WRITE);
    if (status == SDF_SUCCESS) {
        plat_assert(out);
        hs_print_current_lease(in);
        hs_print_current_lease(out);
        /* Leases differ by elapsed time; normalize before comparing. */
        out->persistent.lease_usecs = in->persistent.lease_usecs;
        plat_assert(0 == cr_shard_meta_cmp(in, out));
        cr_shard_meta_free(out);
    }

    /*
     * XXX: drew 2009-05-09 sleep and validate that less time remains on the
     * lease.
     */
    rtfw_sleep_usec(test_framework, 2000000);

    /* get meta on node0 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "get on node 0");
    status = rtfw_get_shard_meta_sync(test_framework, node,
                                      in->persistent.sguid, r_shard_meta,
                                      &out, &expires);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "get on node 0 complete");
    plat_assert(status == SDF_SUCCESS);
    plat_assert(out);
    hs_print_current_lease(in);
    hs_print_current_lease(out);
    /* Remaining lease must have shrunk while we slept. */
    plat_assert(in->persistent.lease_usecs > out->persistent.lease_usecs);
    out->persistent.lease_usecs = in->persistent.lease_usecs;
    plat_assert(0 == cr_shard_meta_cmp(in, out));
    cr_shard_meta_free(out);

    /* verify get shard meta does not renew lease */
    rtfw_sleep_usec(test_framework, 2000000);

    /* get meta on node1 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "first get on node 1");
    status = rtfw_get_shard_meta_sync(test_framework, 1,
                                      in->persistent.sguid, r_shard_meta,
                                      &out, &expires);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "first get on node 1 complete");
    plat_assert(status == SDF_SUCCESS);
    plat_assert(out);
    hs_print_current_lease(in);
    hs_print_current_lease(out);
    out->persistent.lease_usecs = in->persistent.lease_usecs;
    plat_assert(0 == cr_shard_meta_cmp(in, out));
    cr_shard_meta_free(out);

    /* FIX: "in" came from init_meta_data() and was leaked. */
    cr_shard_meta_free(in);
    plat_free(shard_meta);
    plat_free(r_shard_meta);
    return ((status == SDF_SUCCESS)? 0 : 1);
}
/**
 * @brief Look up @a inKey in a "key = value" properties file.
 *
 * Skips blank lines and '#' comments; key and value each end at the first
 * '=', space, newline, or NUL.  On a match the value is copied into
 * @a outVal and scanning stops.
 *
 * @param prop_file  path of the properties file (NULL/empty => return 0)
 * @param inKey      key to search for (exact match via strcmp)
 * @param outVal     destination buffer for the value
 *                   NOTE(review): copied with an unbounded strcpy — caller
 *                   must supply a buffer large enough for any value line.
 * @return 0 on success or key-not-found, -1 if the file (or the line
 *         buffer) could not be obtained.
 *
 * Fixes: the open-failure message logged the unrelated global
 * propertiesDefaultFile instead of prop_file; the plat_alloc() result was
 * used without a NULL check.
 */
int getPropertyFromFile(const char *prop_file, char *inKey, char *outVal) {
    int ret = 0;
    if (!prop_file || !prop_file[0])
        return 0;
    FILE *fp = fopen(prop_file, "r");
    if (!fp) {
        /* FIX: report the file we actually tried to open. */
        plat_log_msg(21756, PLAT_LOG_CAT_PRINT_ARGS, PLAT_LOG_LEVEL_ERROR,
                     "Reading properties file '%s' has an error!\n",
                     prop_file);
        return -1;
    }
    char *line = (char *) plat_alloc(2048), *beg, *str, *key, *val;
    if (!line) {
        /* FIX: allocation failure previously fell through into fgets(NULL). */
        fclose(fp);
        return -1;
    }
    while (fgets(line, 2048, fp)) {
        beg = line;
        while (' ' == *beg) {             // trim beginning
            beg++;
        }
        if ('#' == *beg || '\0' == *beg || '\n' == *beg) { // search for comment
            continue;
        }
        str = beg;
        while ('=' != *str && '\0' != *str && ' ' != *str && '\n' != *str) {
            // get key
            str++;
        }
        if (str-beg) {
            key = strndup(beg, str-beg);
        } else {
            continue;
        }
        beg = str++;
        while (' ' == *beg || '=' == *beg) { // trim beginning
            beg++;
        }
        str = beg;
        while ('=' != *str && '\0' != *str && ' ' != *str && '\n' != *str) {
            // get value
            str++;
        }
        if (str - beg) {
            val = strndup(beg, str-beg);
        } else {
            free(key);                    // key without value: skip line
            continue;
        }
        if (strcmp(inKey, key) == 0) {
            strcpy(outVal, val);          // see NOTE(review) above
            free(key);
            free(val);
            break;
        } else {
            free(key);
            free(val);
        }
    }
    fclose(fp);
    plat_free(line);
    return (ret);
}
/**
 * @brief Open the FIFO flash device and initialize its aio state.
 *
 * Copies the caller's settings into the global flash_settings, allocates a
 * flashDev, zeroes its per-scheduler stats, brings up the aio subsystem on
 * @a devName, then delegates to Ssd_fifo_ops.flashOpen (aborting if that
 * hook is not wired up).
 *
 * @return the new device, or NULL on allocation/aio-init failure.
 */
struct flashDev *
fifo_flashOpen(char *devName, flash_settings_t *flash_settings_in, int flags)
{
    struct flashDev *dev;
    int sched;
    int status;

    plat_log_msg(21691, PLAT_LOG_CAT_SDF_APP_MEMCACHED, PLAT_LOG_LEVEL_DEBUG,
                 "ENTERING, devName=%s", devName);

    /* Global settings for aio and flash subsystems */
    flash_settings = *flash_settings_in;

    dev = plat_alloc(sizeof(struct flashDev));
    if (dev == NULL) {
        plat_log_msg(21692, PLAT_LOG_CAT_FLASH, PLAT_LOG_LEVEL_ERROR,
                     "failed to alloc dev");
        return NULL;
    }

    /* Start every scheduler's op counters from zero. */
    for (sched = 0; sched < FTH_MAX_SCHEDS; sched++) {
        dev->stats[sched].flashOpCount = 0;
        dev->stats[sched].flashReadOpCount = 0;
        dev->stats[sched].flashBytesTransferred = 0;
    }
    dev->shardList = NULL;
    InitLock(dev->lock);

    /*
     * initialize the aio subsystem
     */
    dev->paio_state = plat_alloc(sizeof(struct ssdaio_state));
    if (dev->paio_state == NULL) {
        plat_log_msg(21693, PLAT_LOG_CAT_FLASH, PLAT_LOG_LEVEL_ERROR,
                     "failed to alloc aio state");
        plat_free(dev);
        return NULL;
    }

    status = ssdaio_init(dev->paio_state, devName);
    if (status != 0) {
        plat_log_msg(21694, PLAT_LOG_CAT_FLASH, PLAT_LOG_LEVEL_ERROR,
                     "failed to init aio");
        plat_free(dev->paio_state);
        plat_free(dev);
        return NULL;
    }

    dev->size = dev->paio_state->size;
    dev->used = 0;
    plat_log_msg(21695, PLAT_LOG_CAT_SDF_APP_MEMCACHED, PLAT_LOG_LEVEL_DEBUG,
                 "dev size is %lu", dev->size);

    if (NULL == Ssd_fifo_ops.flashOpen) {
        plat_log_msg(21696, PLAT_LOG_CAT_SDF_APP_MEMCACHED,
                     PLAT_LOG_LEVEL_FATAL, "fifo_flashOpen not implemented!");
        plat_abort();
    }
    Ssd_fifo_ops.flashOpen(devName, flash_settings_in, flags);

    return dev;
}
/**
 * Benchmark driver: measures fthGetTime throughput.
 *
 * Parses options, brings up shared memory, spawns one fth worker per
 * configured pthread, arms an alarm that ends the run, joins the pthreads,
 * sums their per-thread counts and prints operations per second.
 *
 * Returns 0 on success, 1 on shmem failure, 2 on bad options.
 */
int main(int argc, char **argv) {
    int ret = 0;
    int shmem_attached = 0;
    struct plat_opts_config_fthGetTimeSpeed config;
    int status;
    int i;
    struct state *state;
    struct thread_state *thread_state;
    long count;

    /* Defaults, then command-line overrides. */
    memset(&config, 0, sizeof(config));
    plat_shmem_config_init(&config.shmem);
    config.npthread = DEFAULT_NPTHREAD;
    config.secs = DEFAULT_LIMIT_SECS;
    config.multiq = DEFAULT_MULTIQ;
    config.mode = DEFAULT_MODE;
    if (plat_opts_parse_fthGetTimeSpeed(&config, argc, argv)) {
        ret = 2;
    }

    /* Shared-memory bring-up: init then attach; any failure aborts the run. */
    if (!ret) {
        status = plat_shmem_prototype_init(&config.shmem);
        if (status) {
            plat_log_msg(20876, LOG_CAT, PLAT_LOG_LEVEL_FATAL,
                         "shmem init failure: %s", plat_strerror(-status));
            ret = 1;
        }
    }
    if (!ret) {
        status = plat_shmem_attach(plat_shmem_config_get_path(&config.shmem));
        if (status) {
            plat_log_msg(20877, LOG_CAT, PLAT_LOG_LEVEL_FATAL,
                         "shmem attach failure: %s", plat_strerror(-status));
            ret = 1;
        } else {
            shmem_attached = 1;
        }
    }

    if (!ret) {
        /* fth scheduler: multi-queue or single-queue per config. */
        if (config.multiq) {
            fthInitMultiQ(1, config.npthread);
        } else {
            fthInit();
        }
        plat_calloc_struct(&state);
        plat_assert_always(state);
        state->config = &config;
        state->ts = plat_calloc(config.npthread, sizeof (state->ts[0]));
        plat_assert_always(state->ts);

        /* Spawn one fth worker per pthread; worker records its count in
         * thread_state. */
        for (i = 0; i < state->config->npthread; ++i) {
            plat_calloc_struct(&thread_state);
            plat_assert_always(thread_state);
            thread_state->state = state;
            thread_state->index = i;
            state->ts[i] = thread_state;
            XResume(fthSpawn(&time_main, 40960), (uint64_t)thread_state);
        }

        /* Run ends on SIGALRM (or SIGINT for an early stop). */
        signal(SIGALRM, alarm_handler);
        signal(SIGINT, alarm_handler);
        alarm(state->config->secs);

        /* Back each fth scheduler with an OS thread. */
        for (i = 0; i < state->config->npthread; ++i) {
            status = pthread_create(&state->ts[i]->pthread, NULL,
                                    &pthread_main, NULL);
            plat_assert_always(!status);
        }

        /* Join, accumulate per-thread counts, free per-thread state. */
        count = 0;
        for (i = 0; i < state->config->npthread; ++i) {
            status = pthread_join(state->ts[i]->pthread, NULL);
            plat_assert_always(!status);
            count += state->ts[i]->count;
            plat_free(state->ts[i]);
        }
        plat_free(state->ts);
        plat_free(state);

        /* Report throughput: total calls divided by wall-clock seconds. */
        printf("%ld\n", count/config.secs);
    }

    if (shmem_attached) {
        status = plat_shmem_detach();
        if (status) {
            plat_log_msg(20880, LOG_CAT, PLAT_LOG_LEVEL_FATAL,
                         "shmem detach failure: %s", plat_strerror(-status));
            ret = 1;
        }
    }
    plat_shmem_config_destroy(&config.shmem);
    return (ret);
}
/**
 * fth test thread: exercises the four hot-key reporter "handle" modes
 * (0x00..0x03) and checks whether get and update reports are merged or
 * separated as each mode requires.  Failures increment the global nfailed;
 * sets the global done flag on exit.
 *
 * NOTE(review): buf[0..3], recv and cmp_recv are allocated here; recv and
 * cmp_recv are never freed and no hot_key_cleanup() is called — acceptable
 * for a process-exit test, but a leak if reused.
 */
void threadTest(uint64_t arg) {
    int maxtop = 16;
    int top = 16;
    int nbuckets = hashsize(16);
    // uint64_t updates = 160 * nbuckets;
    uint64_t updates = 16;
    int i, t, cmd_type;   /* NOTE(review): outer i is shadowed by the loop below */
    int mm_size[4];
    void* buf[4];
    Reporter_t *rpt[4];
    /* 300 bytes of report text per winner slot. */
    char *recv = plat_alloc(300*top);
    char *cmp_recv = plat_alloc(300*top);

    /* One reporter instance per handle mode 0..3. */
    for (int i = 0; i < 4; i++) {
        mm_size[i] = calc_hotkey_memory(maxtop, nbuckets, i);
        buf[i] = plat_alloc(mm_size[i]);
        rpt[i] = hot_key_init(buf[i], mm_size[i], nbuckets, maxtop, i);
    }
    init_key(4);

    // case 0: 0x00 handler
    // keys are get/update, randomized ip, result should be
    // merged keys and 0 for ip
    // 16 keys with different ip
    for (i = 0; i < updates*2; i++) {
        t = i % KEY_NUM;
        int ip = rnd_ip(i);
        cmd_type = rnd_cmd(i % 2);
        printf("key_t=%s\n", keys[t].key_str);
        hot_key_update(rpt[0], keys[t].key_str, KEY_LEN+1, keys[t].syndrome,
                       keys[t].bucket, cmd_type, ip);
        /* cmd_type <= 2 is treated as a get, anything else as an update. */
        cmd_type <= 2 ? (keys[t].get_count++): (keys[t].update_count++);
        keys[t].count++;
    }
    recv[0] = '\0';
    hot_key_report(rpt[0], top, recv, 300*top, 1, 0);
    printf("handle 0x00:get\n%s", recv);
    strcpy(cmp_recv, recv);
    recv[0] = '\0';
    hot_key_report(rpt[0], top, recv, 300*top, 2, 0);
    printf("handle 0x00:update\n%s", recv);
    /* Mode 0x00 merges get and update traffic: both reports must match. */
    if (strcmp(cmp_recv, recv)) {
        printf("error: get/update should be both access for 0x00 reporter\n");
        nfailed++;
    }
    recv[0] = '\0';
    hot_client_report(rpt[0], top, recv, 300*top);
    printf("handle 0x00: client\n%s", recv);
    hot_key_reset(rpt[0]);

    // case 1: 0x01 handler
    // keys are get/update, randomized ip, result should be
    // seperate keys and 0 for ip
    // each instance has 16 keys with different ip
    for (i = 0; i < updates*2; i++) {
        t = i % KEY_NUM;
        /* Feed each key once as get and once as update. */
        for (int j = 0; j < 2; j++) {
            int ip = rnd_ip(i);
            cmd_type = rnd_cmd(j);
            hot_key_update(rpt[1], keys[t].key_str, KEY_LEN+1,
                           keys[t].syndrome, keys[t].bucket, cmd_type, ip);
            cmd_type <= 2 ? (keys[t].get_count++): (keys[t].update_count++);
            keys[t].count++;
        }
    }
    recv[0] = '\0';
    hot_key_report(rpt[1], top, recv, 300*top, 1, 0);
    printf("handle 0x01:get\n%s", recv);
    strcpy(cmp_recv, recv);
    recv[0] = '\0';
    hot_key_report(rpt[1], top, recv, 300*top, 3, 0);
    printf("handle 0x01:update\n%s", recv);
    /* Symmetric traffic: get and update reports should agree here. */
    if (strcmp(cmp_recv, recv) != 0) {
        printf("error: get/update should be the same for 0x01 reporter\n");
        nfailed++;
    }
    recv[0] = '\0';
    hot_client_report(rpt[1], top, recv, 300*top);
    printf("handle 0x01: client\n%s", recv);
    hot_key_reset(rpt[1]);

    // case 2: 0x02 handler
    // keys are get/update, randomized ip, result should be
    // merged keys keys for get/set while different ip
    // 8 sets, and 8 gets, the winner is filled with 16 entries
    // with the same key but different ip
    for (i = 0; i < updates*4; i++) {
        int ip = rnd_ip(i);
        for (int j = 0; j < 2; j++) {
            t = j;
            int cmd_type = rnd_cmd(j);
            hot_key_update(rpt[2], keys[t].key_str, KEY_LEN+1,
                           keys[t].syndrome, keys[t].bucket, cmd_type, ip);
        }
    }
    recv[0] = '\0';
    hot_key_report(rpt[2], top, recv, 300*top, 1, 0);
    printf("handle 0x02:get\n%s", recv);
    strcpy(cmp_recv, recv);
    recv[0] = '\0';
    hot_key_report(rpt[2], top, recv, 300*top, 3, 0);
    printf("handle 0x02:update\n%s", recv);
    if (strcmp(cmp_recv, recv)) {
        printf("error: get/update should be same for 0x02 reporter\n");
        nfailed++;
    }
    recv[0] = '\0';
    hot_client_report(rpt[2], top, recv, 300*top);
    printf("handle 0x02: client\n%s", recv);
    hot_key_reset(rpt[2]);

    // case 3: 0x03 handler
    // keys are get/update, randomized ip, result should be
    // separated keys keys for get/set while different ip
    // 16 sets, and 16 gets, the winner is filled with 16 entries
    // with the same key but different ip
    for (i = 0; i < updates*4; i++) {
        t = 0;
        int ip = rnd_ip(i);
        for (int j = 0; j < 2; j++) {
            int cmd_type = rnd_cmd(j);
            hot_key_update(rpt[3], keys[t].key_str, KEY_LEN+1,
                           keys[t].syndrome, keys[t].bucket, cmd_type, ip);
        }
    }
    recv[0] = '\0';
    hot_key_report(rpt[3], top, recv, 300*top, 1, 0);
    printf("handle 0x03: get\n%s", recv);
    strcpy(cmp_recv, recv);
    recv[0] = '\0';
    hot_key_report(rpt[3], top, recv, 300*top, 3, 0);
    printf("handle 0x03: update\n%s", recv);
    if (strcmp(cmp_recv, recv) != 0) {
        printf("error: get/update should be the same for 0x03 reporter\n");
        nfailed++;
    }
    recv[0] = '\0';
    hot_client_report(rpt[3], top, recv, 300*top);
    printf("handle 0x03: client\n%s", recv);
    hot_key_reset(rpt[3]);

    plat_free(recv);
    plat_free(cmp_recv);
    printf("nfailed = %d\n", nfailed);
    done = 1;
}
/**
 * Fault-injection test for replicated meta-data storage: puts shard meta
 * with (1) an illegal ltime, (2) a skipped shard_meta_seqno, and (3) an
 * over-long lease, asserting the store's accept/reject behavior after each.
 * Steps are order-dependent: each mutation of "in" builds on the previous
 * one, and "temp" always buffers the last accepted cr_shard_meta.
 *
 * Returns 0 on success, 1 on failure (based on the final status).
 */
static int
rms_modify_shard_meta_fault_in(struct replication_test_framework *test_framework,
                               SDF_replication_props_t *replication_props) {
    SDF_status_t status;
    struct SDF_shard_meta *shard_meta;
    struct sdf_replication_shard_meta *r_shard_meta;
    SDF_shardid_t shard_id = 1;
    vnode_t node = 1;
    struct cr_shard_meta *in;
    struct cr_shard_meta *out;
    struct cr_shard_meta *temp;
    struct timeval expires;

    /* init cr_shard_meta and shard_meta */
    init_meta_data(test_framework, replication_props, &r_shard_meta, node,
                   shard_id, LEASE_USECS, &shard_meta, &in);
    plat_assert(r_shard_meta);
    plat_assert(shard_meta);
    plat_assert(in);

    /* put meta on node 1 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 1");
    status = rtfw_create_shard_meta_sync(test_framework, node, in, &out,
                                         &expires);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "create on node 1 complete");
    /* perhaps shard has been created in other unit test since currently we
     * can't delete shard meta */
    plat_assert(status == SDF_SUCCESS || status == SDF_FAILURE_STORAGE_WRITE);
    if (status == SDF_SUCCESS) {
        plat_assert(out);
        hs_print_current_lease(in);
        hs_print_current_lease(out);
        /* Leases differ by elapsed time; normalize before comparing. */
        out->persistent.lease_usecs = in->persistent.lease_usecs;
        plat_assert(0 == cr_shard_meta_cmp(in, out));
        cr_shard_meta_free(out);
    }

    /* get meta on node0 */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "get on node 0");
    status = rtfw_get_shard_meta_sync(test_framework, node,
                                      in->persistent.sguid, r_shard_meta,
                                      &out, &expires);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG, "get on node 0 complete");
    plat_assert(status == SDF_SUCCESS);
    plat_assert(out);
    hs_print_current_lease(in);
    hs_print_current_lease(out);
    /* buffer an cr_shard_meta here */
    temp = cr_shard_meta_dup(out);
    plat_assert(temp);
    out->persistent.lease_usecs = in->persistent.lease_usecs;
    plat_assert(0 == cr_shard_meta_cmp(in, out));
    cr_shard_meta_free(out);

    /* modify meta data before expire */
    /* fault injections */

    /* (1) put meta data with incorrect ltime non home node */
    in->persistent.ltime = -1;
    ++in->persistent.shard_meta_seqno;
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime");
    status = rtfw_put_shard_meta_sync(test_framework, 1, in, &out, &expires);
    /* The illegal ltime is currently accepted — see Fixme below. */
    plat_assert(status == SDF_SUCCESS);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime complete");
    /**
     * Fixme: zhenwei, shard meta with incorrect ltime can be set to any node
     */
    temp->persistent.lease_usecs = out->persistent.lease_usecs;
    plat_assert(0 != cr_shard_meta_cmp(temp, out));
    /* buffer the latest cr_shard_meta */
    cr_shard_meta_free(temp);
    temp = cr_shard_meta_dup(out);
    cr_shard_meta_free(out);

    /* (2) put meta data with incorrect seqno */
    in->persistent.shard_meta_seqno += 2;
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime");
    status = rtfw_put_shard_meta_sync(test_framework, 1, in, &out, &expires);
    /* Skipping a seqno must be rejected. */
    plat_assert(status != SDF_SUCCESS);
    status = SDF_SUCCESS;   /* reset so the final return reflects step (3) */
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime complete");
    /* Rejected put must leave the stored meta untouched. */
    plat_assert(temp->persistent.ltime == out->persistent.ltime &&
                temp->persistent.shard_meta_seqno ==
                out->persistent.shard_meta_seqno);
    /* buffer the latest cr_shard_meta */
    cr_shard_meta_free(temp);
    temp = cr_shard_meta_dup(out);
    cr_shard_meta_free(out);

    /* (3) put meta data with a lease than LEASE_USECS */
    in->persistent.lease_usecs = LEASE_USECS * 2;
    /* skip since (2) +2 for it */
    /* ++in->persistent.shard_meta_seqno; */
    --in->persistent.shard_meta_seqno;
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime");
    status = rtfw_put_shard_meta_sync(test_framework, 1, in, &out, &expires);
    plat_assert(status == SDF_SUCCESS);
    out->persistent.lease_usecs = in->persistent.lease_usecs;
    plat_assert(0 == cr_shard_meta_cmp(in, out));
    cr_shard_meta_free(out);
    cr_shard_meta_free(temp);
    cr_shard_meta_free(in);
    plat_log_msg(LOG_ID, LOG_CAT, LOG_DBG,
                 "put shard meta on node 1 with illegal ltime");
    plat_free(shard_meta);
    plat_free(r_shard_meta);
    return (status == SDF_SUCCESS ? 0 : 1);
}
/**
 * fth test thread: exercises the hot-key winner-list threshold logic.
 * Seeds NCANDIDATES keys twice each, then verifies that (a) the hottest
 * NKEY_PER_WINNER_LIST keys appear in the report, (b) a low-ref-count key
 * is kept out of the winner list until its count exceeds the threshold,
 * and (c) a former winner gets evicted.  Failures bump the global nfailed;
 * sets the global done flag on exit.
 *
 * NOTE(review): buf (backing store handed to hot_key_init) is not freed
 * here; whether hot_key_cleanup() releases it cannot be told from this
 * file — confirm against the reporter implementation.
 */
void threadTest(uint64_t arg) {
    int maxtop = 16;
    int top = 16;
    int nbuckets = hashsize(4);
    int loops = NCANDIDATES;
    int mm_size;
    void *buf;
    struct _key keys[NCANDIDATES];
    struct _key big_key, small_key;
    /* 300 bytes of report text per winner slot. */
    char *recv = (char *) plat_alloc(300*top);
    if (recv == NULL) {
        perror("failed to alloc");
    }
    mm_size = calc_hotkey_memory(maxtop, nbuckets, 0);
    buf = plat_alloc(mm_size);
    Reporter_t *rpt;

    /* Candidate keys with increasing syndromes, spread over 4 buckets. */
    for (int i = 0; i < NCANDIDATES; i++) {
        gen_str(keys[i].key_str);
        keys[i].syndrome = 10000 + i;
        keys[i].bucket = i % 4;
    }
    gen_str(big_key.key_str);
    big_key.syndrome = 10000 + NCANDIDATES - 1;
    big_key.bucket = (NCANDIDATES - 1) % 4;
    /* small_key: distinct syndrome, same bucket as the last candidate. */
    gen_str(small_key.key_str);
    small_key.syndrome = 10000 + 65536;
    small_key.bucket = (NCANDIDATES - 1) % 4;

    rpt = hot_key_init(buf, mm_size, nbuckets, maxtop, 0);
    int ip = 127001;

    /* Two rounds: every candidate ends up with ref-count 2. */
    for (int i = 0; i < 2; i++) {
        loops = NCANDIDATES;
        while (loops--) {
            hot_key_update(rpt, keys[loops].key_str, KEY_LEN + 1,
                           keys[loops].syndrome, keys[loops].bucket,
                           1 + random()%11, ip);
        }
    }
    hot_key_report(rpt, top, recv, 300*top, 1, 0);
    printf("keys:\n%s", recv);

    /* The last NKEY_PER_WINNER_LIST candidates must all be reported. */
    loops = NCANDIDATES;
    while (loops--) {
        if (!strstr(recv, keys[loops].key_str) &&
            loops >= NCANDIDATES - NKEY_PER_WINNER_LIST) {
            printf("fail to set key %s\n", keys[loops].key_str);
            nfailed++;
        }
    }

    // at this moment, winner list's threshold is updated and it should
    // forbiddn lower ref-count keys to be winner
    loops = 2;
    while (loops--) {
        hot_key_update(rpt, small_key.key_str, KEY_LEN+1, small_key.syndrome,
                       small_key.bucket, 1+random()%11, ip);
    }
    recv[0] = '\0';
    printf("keys:\n");
    hot_key_report(rpt, top, recv, 300*top, 1, 0);
    /* ref-count 2 equals but does not exceed the threshold: not a winner. */
    if (strstr(recv, small_key.key_str)) {
        printf("small key should not be hot keys %s\n", small_key.key_str);
        nfailed++;
    }

    // continue with this key, it should be winner if
    // ref-count is larger than threshold
    hot_key_update(rpt, small_key.key_str, KEY_LEN + 1, small_key.syndrome,
                   small_key.bucket, 1 + random()%11, ip);
    recv[0] = '\0';
    printf("keys after small key increased:\n");
    hot_key_report(rpt, top, recv, 300*top, 1, 0);
    if (!strstr(recv, small_key.key_str)) {
        printf("small key should be hot keys as it's no longer small %s\n",
               small_key.key_str);
        nfailed++;
    }

    /* Re-point small_key at the candidate that should have been evicted to
     * make room for it. */
    // small_key = keys[NCANDIDATES - 1];
    small_key.syndrome = keys[NCANDIDATES - NKEY_PER_WINNER_LIST].syndrome;
    small_key.bucket = keys[NCANDIDATES - NKEY_PER_WINNER_LIST].bucket;
    strcpy(small_key.key_str,
           keys[NCANDIDATES - NKEY_PER_WINNER_LIST].key_str);
    if (strstr(recv, small_key.key_str)) {
        printf("winner should be evicted out:%s\n", small_key.key_str);
        nfailed++;
    }

    recv[0] = '\0';
    hot_client_report(rpt, top, recv, 300*top);
    printf("clients:\n%s", recv);
    hot_key_reset(rpt);
    hot_key_cleanup(rpt);
    plat_free(recv);
    printf("failed = %d\n", nfailed);
    done = 1;
}