/*
 * iterate_root_squash_wildcards()
 *
 * Decides whether root squashing applies to the given client address
 * for this file system.  The exception list is consulted first; a
 * client matched there is never squashed, regardless of the squash
 * list contents.
 *
 * returns 1 if the client should be root-squashed, 0 otherwise
 */
static int iterate_root_squash_wildcards(struct filesystem_configuration_s *fsconfig,
                                         PVFS_BMI_addr_t client_addr)
{
    int n;

    /* a hit in the exception list overrides any squash entry */
    for (n = 0; n < fsconfig->root_squash_exceptions_count; n++)
    {
        gossip_debug(GOSSIP_SERVER_DEBUG,
                     "BMI_query_addr_range %lld, %s, netmask: %i\n",
                     lld(client_addr),
                     fsconfig->root_squash_exceptions_hosts[n],
                     fsconfig->root_squash_exceptions_netmasks[n]);
        if (BMI_query_addr_range(client_addr,
                                 fsconfig->root_squash_exceptions_hosts[n],
                                 fsconfig->root_squash_exceptions_netmasks[n]) == 1)
        {
            /* in the exception list, do not squash */
            return 0;
        }
    }

    /* now look for a squash entry covering this client */
    for (n = 0; n < fsconfig->root_squash_count; n++)
    {
        gossip_debug(GOSSIP_SERVER_DEBUG,
                     "BMI_query_addr_range %lld, %s, netmask: %i\n",
                     lld(client_addr),
                     fsconfig->root_squash_hosts[n],
                     fsconfig->root_squash_netmasks[n]);
        if (BMI_query_addr_range(client_addr,
                                 fsconfig->root_squash_hosts[n],
                                 fsconfig->root_squash_netmasks[n]) == 1)
        {
            return 1;
        }
    }
    return 0;
}
/*
 * test_util_get_io_perfs()
 *
 * Fetches one round of performance-monitor history from every server
 * via PVFS_mgmt_perf_mon_list() and prints each valid sample on its
 * own line (count, start time, bytes written, bytes read).
 *
 * returns 0 on success, -1 on failure
 */
int test_util_get_io_perfs(PVFS_fs_id cur_fs, PVFS_credentials creds, int count)
{
    int ret;
    int srv, slot;

    ret = PVFS_mgmt_perf_mon_list(cur_fs, &creds, perf_matrix,
                                  end_time_ms_array, addr_array,
                                  next_id_array, count, HISTORY,
                                  NULL, NULL);
    if (ret < 0)
    {
        PVFS_perror("PVFS_mgmt_perf_mon_list", ret);
        return -1;
    }

    for (srv = 0; srv < count; srv++)
    {
        for (slot = 0; slot < HISTORY; ++slot)
        {
            /* samples after the first invalid slot carry no data */
            if (!perf_matrix[srv][slot].valid_flag)
            {
                break;
            }
            printf("%d\t%llu\t%lld\t%lld\n", count,
                   llu(perf_matrix[srv][slot].start_time_ms),
                   lld(perf_matrix[srv][slot].write),
                   lld(perf_matrix[srv][slot].read));
        }
    }
    return 0;
}
/* prtseg()
 *
 * Debug helper: prints the segment list held in a request result,
 * prefixed by the caller-supplied label string.  Does nothing unless
 * the global longflag is set.
 */
void prtseg(PINT_Request_result *seg, char *s)
{
    int n;

    if (!longflag)
    {
        return;
    }
    printf("%s\n", s);
    printf("%d segments with %lld bytes\n", seg->segs, lld(seg->bytes));
    /* never read past segmax even if segs claims more */
    for (n = 0; n < seg->segs && n < seg->segmax; n++)
    {
        printf(" segment %d: offset: %lld size: %lld\n", n,
               lld(seg->offset_array[n]), lld(seg->size_array[n]));
    }
}
/* print_enum()
 *
 * Emits a reflection Enum table to stdout as one JSON object:
 * name, the value list (each with optional "value" and "object"
 * members), an optional "is_union" flag, and the underlying type.
 * No trailing newline is printed.
 */
void print_enum(reflection_Enum_table_t E)
{
    reflection_EnumVal_vec_t vals = reflection_Enum_values(E);
    size_t n;

    printf("{\"name\":\"%s\"", reflection_Enum_name(E));
    printf(",\"values\":[");
    for (n = 0; n < reflection_Enum_vec_len(vals); ++n)
    {
        reflection_EnumVal_table_t ev = reflection_EnumVal_vec_at(vals, n);

        if (n > 0)
        {
            printf(",");
        }
        printf("{\"name\":\"%s\"", reflection_EnumVal_name(ev));
        if (reflection_EnumVal_value_is_present(ev))
        {
            printf(",\"value\":%lld", lld(reflection_EnumVal_value(ev)));
        }
        if (reflection_EnumVal_object_is_present(ev))
        {
            printf(",\"object\":");
            print_object(reflection_EnumVal_object(ev));
        }
        printf("}");
    }
    printf("]");
    if (reflection_Enum_is_union_is_present(E))
    {
        printf(",\"is_union\":%s",
               reflection_Enum_is_union(E) ? "true" : "false");
    }
    printf(",\"underlying_type\":");
    print_type(reflection_Enum_underlying_type(E));
    printf("}");
}
// Runs one step of gradient descent for a single (input, output) training
// pair: forward pass, squared-error cost, back-propagated deltas, then
// in-place bias and weight updates.
//
// NOTE(review): indexing convention appears to be that layer 0 is the input
// layer, so Weights[0]/Biases[0] are presumably unused placeholders (the
// weight-update loop below deliberately starts at i = 1) — confirm against
// the FFANN constructor / FeedForward().
//
// Returns the scalar cost 0.5 * ||target - prediction||^2 computed BEFORE
// the update.
double FFANN::TrainWithBackPropagation(Matrix input, Matrix output, double learning_rate)
{
    // outputs[i] holds the activation vector of layer i after the forward pass
    std::vector<Matrix> outputs = FeedForward(input);
    std::vector<Matrix> temp_deltas; //layer deltas stored backwards in order

    // calculate cost function: 0.5 * sum((target - prediction)^2)
    double cost = 0.0f;
    Matrix partial_cost_matrix(Dimensions[Num_Layers - 1], 1);
    partial_cost_matrix = output + (outputs[outputs.size() - 1] * -1);
    for (int i = 0; i < partial_cost_matrix.Elements.size(); i++)
    {
        cost += 0.5f * partial_cost_matrix.Elements[i] * partial_cost_matrix.Elements[i];
    }

    // calculate last layer deltas: (prediction - target) elementwise-scaled
    // by the activation derivative a * (1 - a) (sigmoid — presumably; confirm)
    Matrix lld(Dimensions[Num_Layers - 1], 1);
    lld = outputs[outputs.size() - 1] + (output * -1);
    for (int i = 0; i < lld.Dimensions[0]; i++)
    {
        double a = outputs[outputs.size() - 1].Elements[i];
        lld.Elements[i] *= a * (1 - a); //derivative of activation function
    }
    temp_deltas.push_back(lld);

    // calculate the rest of the deltas through back propagation;
    // each layer's delta is the next layer's weights applied to the next
    // layer's delta, scaled by this layer's activation derivative
    int j = 0; //this keeps track of the index for the next layer's delta
    for (int i = Num_Layers - 2; i >= 0; i--) //start at the second to last layer
    {
        Matrix delta(Dimensions[i], 1);
        delta = Weights[i + 1] * temp_deltas[j];
        j++;
        for (int k = 0; k < delta.Dimensions[0]; k++)
        {
            double a = outputs[i].Elements[k];
            delta.Elements[k] *= a * (1 - a); //derivative of activation function
        }
        temp_deltas.push_back(delta);
    }

    // put the deltas into a new vector object in the correct (forward) order
    std::vector<Matrix> deltas;
    for (int i = (int)temp_deltas.size() - 1; i >= 0; i--)
    {
        deltas.push_back(temp_deltas[i]);
    }

    // update biases: b_i -= learning_rate * delta_i
    for (int i = 0; i < Biases.size(); i++)
    {
        Biases[i] = Biases[i] + deltas[i] * (-1.0f * learning_rate);
    }

    // update weights: W_i -= learning_rate * a_{i-1} * delta_i^T
    // (starts at 1: Weights[0] feeds the input layer and is presumably
    // never used — TODO confirm)
    for (int i = 1; i < Weights.size(); i++)
    {
        Weights[i] = Weights[i] + ((outputs[i - 1] * deltas[i].Transpose()) * (-1.0f * learning_rate));
    }
    return cost;
}
/* PINT_serv_msgpairarray_resolve_addrs()
 *
 * fills in the BMI address of the server for every entry in the
 * msgpair array, based on each entry's handle and fsid
 *
 * returns 0 on success, -PVFS_error on failure
 */
int PINT_serv_msgpairarray_resolve_addrs(PINT_sm_msgarray_op *mop)
{
    int n;
    int ret = -PVFS_EINVAL;

    /* an empty or missing array is an invalid argument */
    if ((mop->count <= 0) || !mop->msgarray)
    {
        return ret;
    }

    for (n = 0; n < mop->count; n++)
    {
        PINT_sm_msgpair_state *msg_p = &mop->msgarray[n];
        assert(msg_p);

        ret = PINT_cached_config_map_to_server(&msg_p->svr_addr,
                                               msg_p->handle,
                                               msg_p->fs_id);
        if (ret != 0)
        {
            gossip_err("Failed to map server address to handle\n");
            return ret;
        }
        gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                     " mapped handle %llu to server %lld\n",
                     llu(msg_p->handle), lld(msg_p->svr_addr));
    }
    return ret;
}
std::vector<Matrix> RNN::CalculateInitialDeltas(Matrix output, std::vector<Matrix> outputs) { std::vector<Matrix> zerorecurrence; for (int j = 0; j < Num_Layers; j++) { Matrix zr(InputVectorSize, 1); zerorecurrence.push_back(zr); } std::vector<Matrix> temp_deltas; //layer deltas stored backwards in order //calculate cost function double cost = 0.0f; Matrix partial_cost_matrix(InputVectorSize, 1); partial_cost_matrix = output + (outputs[outputs.size() - 1] * -1); for (int i = 0; i < partial_cost_matrix.Elements.size(); i++) { cost += 0.5f * partial_cost_matrix.Elements[i] * partial_cost_matrix.Elements[i]; } //calculate last layer deltas Matrix lld(InputVectorSize, 1); lld = outputs[outputs.size() - 1] + (output * -1); for (int i = 0; i < lld.Dimensions[0]; i++) { double a = outputs[outputs.size() - 1].Elements[i]; lld.Elements[i] *= a * (1 - a); //derivative of activation function } temp_deltas.push_back(lld); //calculate the rest of the deltas through back propagation int j = 0; //this keeps track of the index for the next layer's delta for (int i = Num_Layers - 2; i >= 0; i--) //start at the second to last layer { Matrix delta(InputVectorSize, 1); delta = Weights[i + 1] * temp_deltas[j]; j++; for (int k = 0; k < delta.Dimensions[0]; k++) { double a = outputs[i].Elements[k]; delta.Elements[k] *= a * (1 - a); //derivative of activation function } temp_deltas.push_back(delta); } //put the deltas into a new vector object in the correct order std::vector<Matrix> deltas; for (int i = (int)temp_deltas.size() - 1; i >= 0; i--) { deltas.push_back(temp_deltas[i]); } return deltas; }
/** Finds the state machine registered under op_id and releases all
 *  resources associated with it.  Unknown ids are silently ignored,
 *  since the operation may already have been released.
 */
void PINT_sys_release(PVFS_sys_op_id op_id)
{
    PINT_smcb *smcb;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "%s: id %lld\n", __func__, lld(op_id));

    smcb = PINT_id_gen_safe_lookup(op_id);
    if (!smcb)
    {
        /* nothing registered under this id */
        return;
    }

    PINT_id_gen_safe_unregister(op_id);
    PINT_sys_release_smcb(smcb);
}
/*
 * iterate_ro_wildcards()
 *
 * Scans the read-only export list of this file system for an entry
 * (hostname wildcard plus netmask) that covers the given client.
 *
 * returns 1 on a match, 0 if no entry covers the client
 */
static int iterate_ro_wildcards(struct filesystem_configuration_s *fsconfig,
                                PVFS_BMI_addr_t client_addr)
{
    int n;

    for (n = 0; n < fsconfig->ro_count; n++)
    {
        gossip_debug(GOSSIP_SERVER_DEBUG, "BMI_query_addr_range %lld, %s\n",
                     lld(client_addr), fsconfig->ro_hosts[n]);

        /* Does the client address match the wildcard specification
         * and/or the netmask specification?
         */
        if (BMI_query_addr_range(client_addr,
                                 fsconfig->ro_hosts[n],
                                 fsconfig->ro_netmasks[n]) == 1)
        {
            return 1;
        }
    }
    return 0;
}
/*
 * iterate_all_squash_wildcards()
 *
 * Scans the all-squash list of this file system for an entry
 * (hostname wildcard plus netmask) covering the given client.
 *
 * returns 1 if the client should be all-squashed, 0 otherwise
 */
static int iterate_all_squash_wildcards(struct filesystem_configuration_s *fsconfig,
                                        PVFS_BMI_addr_t client_addr)
{
    int n;

    for (n = 0; n < fsconfig->all_squash_count; n++)
    {
        gossip_debug(GOSSIP_SERVER_DEBUG, "BMI_query_addr_range %lld, %s\n",
                     lld(client_addr), fsconfig->all_squash_hosts[n]);

        if (BMI_query_addr_range(client_addr,
                                 fsconfig->all_squash_hosts[n],
                                 fsconfig->all_squash_netmasks[n]) == 1)
        {
            return 1;
        }
    }
    return 0;
}
/* shrink the LRU cache list by discarding some extents from the list.
 * The expected number of extents discarded is "expected", while the
 * real number of discarded extents is "shrinked".
 *
 * Walks the active list from the tail (least recently used end) toward
 * the head.  Extents with pending I/O get their request checked first;
 * only extents deemed discardable are moved onto the free list.
 *
 * returns 0 on success (even if fewer than "expected" extents could be
 * discarded), or a negative NCAC error code on failure.
 */
int LRU_shrink_cache(struct cache_stack *cache, unsigned int expected, unsigned int *shrinked)
{
    struct list_head *lru_head, *lru_tail;
    struct extent *victim;
    int ret = 0;

    /* NOTE(review): unconditional stderr trace — looks like leftover
     * debugging; consider converting to the NCAC debug facility. */
    fprintf(stderr, "%s: expected:%d\n", __FUNCTION__, expected);

    *shrinked = 0;
    lru_head = &cache->active_list;
    lru_tail = lru_head->prev;  /* tail = coldest entry */

    /* stop when enough extents were freed or the list wraps back to its head */
    while (*shrinked < expected && lru_tail != (& cache->active_list) ){
        victim = list_entry(lru_tail, struct extent, lru);

        /* every extent on the active list must carry the LRU flag */
        if ( !PageLRU(victim) ){
            NCAC_error("extent flag is wrong. LRU flag is expected\n");
            ret = NCAC_INVAL_FLAGS;
            break;
        }

        /* advance before potentially unlinking the current entry */
        lru_tail = lru_tail->prev;

        if (PageReadPending(victim) || PageWritePending(victim)){
            /* poll the outstanding I/O request on this extent */
            ret = NCAC_check_ioreq(victim);
            if (ret < 0){
                NCAC_error("NCAC_check_ioreq error: index=%ld, ioreq=%lld\n", victim->index, lld(victim->ioreq));
                break;
            }
            if (ret) { /* completion */
                list_set_clean_page(victim);
            }
        }

        /* only clean, unreferenced extents may be reclaimed */
        if ( is_extent_discardable(victim) ){
            LRU_remove_cache_item(cache, victim);
            list_add_tail(&victim->list, &cache->free_extent_list);
            (*shrinked)++;
        }
    }
    return ret;
}
/* print_object()
 *
 * Emits a reflection Object table to stdout as one JSON object:
 * name, the field list (each with type and optional id / defaults /
 * required / key members), plus optional is_struct, minalign and
 * bytesize members.  No trailing newline is printed.
 */
void print_object(reflection_Object_table_t O)
{
    reflection_Field_vec_t Flds;
    reflection_Field_table_t F;
    size_t i;

    Flds = reflection_Object_fields(O);
    printf("{\"name\":\"%s\"", reflection_Object_name(O));
    printf(",\"fields\":[");
    for (i = 0; i < reflection_Field_vec_len(Flds); ++i) {
        if (i > 0) {
            printf(",");
        }
        F = reflection_Field_vec_at(Flds, i);
        printf("{\"name\":\"%s\",\"type\":", reflection_Field_name(F));
        print_type(reflection_Field_type(F));
        if (reflection_Field_id_is_present(F)) {
            printf(",\"id\":%hu", reflection_Field_id(F));
        }
        if (reflection_Field_default_integer_is_present(F)) {
            printf(",\"default_integer\":%lld", lld(reflection_Field_default_integer(F)));
        }
        if (reflection_Field_default_real_is_present(F)) {
            /* BUG FIX: this branch previously emitted the key
             * "default_integer" for the real-valued default, producing
             * invalid/ambiguous JSON for fields with a float default. */
            printf(",\"default_real\":%lf", reflection_Field_default_real(F));
        }
        if (reflection_Field_required_is_present(F)) {
            printf(",\"required\":%s", reflection_Field_required(F) ? "true" : "false");
        }
        if (reflection_Field_key_is_present(F)) {
            printf(",\"key\":%s", reflection_Field_key(F) ? "true" : "false");
        }
        printf("}");
    }
    printf("]");
    if (reflection_Object_is_struct_is_present(O)) {
        printf(",\"is_struct\":%s", reflection_Object_is_struct(O) ? "true" : "false");
    }
    if (reflection_Object_minalign_is_present(O)) {
        printf(",\"minalign\":%d", reflection_Object_minalign(O));
    }
    if (reflection_Object_bytesize_is_present(O)) {
        printf(",\"bytesize\":%d", reflection_Object_bytesize(O));
    }
    printf("}");
}
static int readdir_msg_comp_fn(void *v_p, struct PVFS_server_resp *resp_p, int index) { PINT_smcb *smcb = v_p; PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_MSGPAIR_PARENT_SM); gossip_debug(GOSSIP_CLIENT_DEBUG, "readdir_msg_comp_fn\n"); assert(resp_p->op == PVFS_SERV_READDIR); if (resp_p->status != 0) { return resp_p->status; } /* convert servresp_readdir response to a sysresp_readdir obj */ *(sm_p->readdir.token) = resp_p->u.readdir.token; *(sm_p->readdir.directory_version) = resp_p->u.readdir.directory_version; *(sm_p->readdir.dirent_outcount) = resp_p->u.readdir.dirent_count; if (*(sm_p->readdir.dirent_outcount) > 0) { int dirent_array_len = (sizeof(PVFS_dirent) * *(sm_p->readdir.dirent_outcount)); /* this dirent_array MUST be freed by caller */ *(sm_p->readdir.dirent_array) = (PVFS_dirent *) malloc(dirent_array_len); assert(*(sm_p->readdir.dirent_array)); memcpy(*(sm_p->readdir.dirent_array), resp_p->u.readdir.dirent_array, dirent_array_len); } gossip_debug(GOSSIP_READDIR_DEBUG, "*** Got %d directory entries " "[version %lld]\n", *(sm_p->readdir.dirent_outcount), lld(*(sm_p->readdir.directory_version))); return 0; }
/* print_dspace()
 *
 * Fetches the dspace attributes of one handle (busy-waiting on the
 * trove test loop until the posted getattr completes) and prints a
 * one-line summary; optionally dumps the handle's keyval spaces too.
 *
 * returns 0 on success, -1 on failure
 */
static int print_dspace(TROVE_coll_id coll_id, TROVE_handle handle,
                        TROVE_context_id trove_context)
{
    TROVE_ds_attributes_s ds_attr;
    TROVE_op_id op_id;
    TROVE_ds_state state;
    int opcount;
    int ret;

    ret = trove_dspace_getattr(coll_id, handle, &ds_attr,
                               0 /* flags */, NULL /* user ptr */,
                               trove_context, &op_id, NULL);

    /* spin until the posted operation reports completion */
    while (ret == 0)
    {
        ret = trove_dspace_test(coll_id, op_id, trove_context, &opcount,
                                NULL, NULL, &state,
                                TROVE_DEFAULT_TEST_TIMEOUT);
    }
    if (ret != 1)
    {
        return -1;
    }

    fprintf(stdout,
            "\t0x%08llx (dspace_getattr output: type = %s, b_size = %lld)\n",
            llu(handle), type_to_string(ds_attr.type),
            lld(ds_attr.u.datafile.b_size));

    if (print_keyvals)
    {
        if (print_dspace_keyvals(coll_id, handle, trove_context,
                                 ds_attr.type) != 0)
        {
            return -1;
        }
    }
    return 0;
}
/** Checks for completion of a specific state machine.
 *
 *  If the specific state machine has not completed, progress is made on
 *  all posted state machines.
 *
 *  \param op_id      id of the state machine to test.
 *  \param error_code out: the machine's error code once it completes.
 *
 *  \return 0 on success, -PVFS_error (notably -PVFS_EINVAL for a bad
 *          id or NULL error_code) on failure.
 */
PVFS_error PINT_client_state_machine_test(
    PVFS_sys_op_id op_id,
    int *error_code)
{
    int i = 0, job_count = 0;
    PVFS_error ret = -PVFS_EINVAL;
    PINT_smcb *smcb, *tmp_smcb = NULL;
    PINT_client_sm *sm_p = NULL;
    job_id_t job_id_array[MAX_RETURNED_JOBS];
    job_status_s job_status_array[MAX_RETURNED_JOBS];
    void *smcb_p_array[MAX_RETURNED_JOBS] = {NULL};

    gossip_debug(GOSSIP_STATE_MACHINE_DEBUG,
                 "PINT_client_state_machine_test id %lld\n",lld(op_id));

    gen_mutex_lock(&test_mutex);

    CLIENT_SM_ASSERT_INITIALIZED();

    job_count = MAX_RETURNED_JOBS;

    if (!error_code)
    {
        gen_mutex_unlock(&test_mutex);
        return ret;
    }

    smcb = PINT_id_gen_safe_lookup(op_id);
    if (!smcb)
    {
        gen_mutex_unlock(&test_mutex);
        return ret;
    }

    /* fast path: the requested machine already finished */
    if (PINT_smcb_complete(smcb))
    {
        sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
        *error_code = sm_p->error_code;
        conditional_remove_sm_if_in_completion_list(smcb);
        gen_mutex_unlock(&test_mutex);
        return 0;
    }

    ret = job_testcontext(job_id_array,
                          &job_count, /* in/out parameter */
                          smcb_p_array,
                          job_status_array,
                          10,
                          pint_client_sm_context);
    assert(ret > -1);

    /* do as much as we can on every job that has completed */
    for(i = 0; i < job_count; i++)
    {
        tmp_smcb = (PINT_smcb *)smcb_p_array[i];
        assert(tmp_smcb);

        if (PINT_smcb_invalid_op(tmp_smcb))
        {
            gossip_err("Invalid sm control block op %d\n",
                       PINT_smcb_op(tmp_smcb));
            continue;
        }
        gossip_debug(GOSSIP_CLIENT_DEBUG, "sm control op %d\n",
                     PINT_smcb_op(tmp_smcb));

        if (!PINT_smcb_complete(tmp_smcb))
        {
            ret = PINT_state_machine_continue(tmp_smcb,
                                              &job_status_array[i]);

            /* BUG FIX: a stray ';' after this condition previously made
             * it an empty statement, so the following block executed
             * unconditionally.  (It happened to be harmless because the
             * block is a trailing "continue", but it was a latent trap
             * for anyone adding code after it.)
             */
            if (ret != SM_ACTION_DEFERRED &&
                ret != SM_ACTION_TERMINATE) /* ret == 0 */
            {
                continue;
            }
        }
    }

    /* the requested machine may have completed during the progress pass */
    if (PINT_smcb_complete(smcb))
    {
        sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
        *error_code = sm_p->error_code;
        conditional_remove_sm_if_in_completion_list(smcb);
    }
    gen_mutex_unlock(&test_mutex);
    return 0;
}
/*
 * Function: io_send_completion_ack()
 *
 * Params:   server_op *s_op,
 *           job_status_s* js_p
 *
 * Pre:      flow is completed so that we can report its status
 *
 * Post:     if this is a write, response has been sent to client
 *           if this is a read, do nothing
 *
 * Returns:  int
 *
 * Synopsis: fills in a response to the I/O request, encodes it,
 *           and sends it to the client via BMI.  Note that it may
 *           send either positive or negative acknowledgements.
 */
static PINT_sm_action io_send_completion_ack(
    struct PINT_smcb *smcb,
    job_status_s *js_p)
{
    struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int err = -PVFS_EIO;
    job_id_t tmp_id;
    struct server_configuration_s *user_opts = get_server_config_struct();

    gossip_debug(GOSSIP_SERVER_DEBUG, "send completion ack 1 :%lld\n",
                 lld(s_op->u.migposter.flow_desc->total_transferred));

    /* release encoding of the first ack that we sent */
    PINT_encode_release(&s_op->encoded, PINT_ENCODE_RESP);

    /* zero size for safety */
    s_op->encoded.total_size = 0;

    /* fill in response -- status field is the only generic one we
     * should have to set
     */
    s_op->resp.op = PVFS_SERV_WRITE_COMPLETION; /* not IO */
    s_op->resp.status = js_p->error_code;
    /* BUG FIX: report the actual number of bytes transferred by the
     * flow; a hard-coded debug value (1111) had been left here with
     * the real expression commented out.
     */
    s_op->resp.u.write_completion.total_completed =
        s_op->u.migposter.flow_desc->total_transferred;

    gossip_debug(GOSSIP_LB_DEBUG,
                 "Server->send flow completion ack :%lld\n",
                 lld(s_op->resp.u.write_completion.total_completed));

    err = PINT_encode(&s_op->resp,
                      PINT_ENCODE_RESP,
                      &(s_op->encoded),
                      s_op->addr,
                      s_op->decoded.enc_type);
    if (err < 0)
    {
        gossip_lerr("Server: IO SM: PINT_encode() failure.\n");
        js_p->error_code = err;
        return SM_ACTION_COMPLETE;
    }

    gossip_debug(GOSSIP_SERVER_DEBUG, "send completion ack 3 :%lld\n",
                 lld(s_op->u.migposter.flow_desc->total_transferred));

    err = job_bmi_send_list(
        s_op->addr, s_op->encoded.buffer_list, s_op->encoded.size_list,
        s_op->encoded.list_count, s_op->encoded.total_size, 5,
        s_op->encoded.buffer_type, 0, smcb, 0, js_p, &tmp_id,
        server_job_context, user_opts->client_job_bmi_timeout, NULL);

    gossip_debug(GOSSIP_SERVER_DEBUG, "job_bmi_send_list: err=%d\n", err);

    return err;
}
/*
 * Function: io_start_flow()
 *
 * Params:   server_op *s_op,
 *           job_status_s* js_p
 *
 * Pre:      all of the previous steps have succeeded, so that we
 *           are ready to actually perform the I/O
 *
 * Post:     I/O has been carried out
 *
 * Returns:  int
 *
 * Synopsis: this is the most important part of the state machine.
 *           we setup the flow descriptor (BMI source endpoint ->
 *           TROVE destination, i.e. a client-to-disk write used by
 *           migration) and post it in order to carry out the data
 *           transfer
 */
static PINT_sm_action io_start_flow(
    struct PINT_smcb *smcb,
    job_status_s *js_p)
{
    struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int err = -PVFS_EIO;
    job_id_t tmp_id;
    struct server_configuration_s *user_opts = get_server_config_struct();
    struct filesystem_configuration_s *fs_conf;

    // gossip_debug(GOSSIP_LB_DEBUG, "\n\n Receive mig request: set mig_state = %d\n\n", mig_state);
    gossip_debug(GOSSIP_SERVER_DEBUG, "IO start flow\n");

    s_op->u.migposter.flow_desc = PINT_flow_alloc();
    if (!s_op->u.migposter.flow_desc)
    {
        js_p->error_code = -PVFS_ENOMEM;
        return SM_ACTION_COMPLETE;
    }

    /* we still have the file size stored in the response structure
     * that we sent in the previous state, other details come from
     * request */
    s_op->u.migposter.flow_desc->file_data.fsize = 0;
    /* single-server layout: one stripe, this server holds it all */
    s_op->u.migposter.flow_desc->file_data.dist = PINT_dist_create("simple_stripe");
    s_op->u.migposter.flow_desc->file_data.server_nr = 0;
    s_op->u.migposter.flow_desc->file_data.server_ct = 1;

    /* on writes, we allow the bstream to be extended at EOF */
    gossip_debug(GOSSIP_SERVER_DEBUG, "io_start_flow() issuing flow to "
                 "write data.\n");
    s_op->u.migposter.flow_desc->file_data.extend_flag = 1;

    s_op->u.migposter.flow_desc->file_req = PVFS_BYTE;
    s_op->u.migposter.flow_desc->file_req_offset = 0;
    s_op->u.migposter.flow_desc->mem_req = NULL;
    s_op->u.migposter.flow_desc->aggregate_size = s_op->req->u.migposter.dfsize;
    s_op->u.migposter.flow_desc->tag = 0;
    s_op->u.migposter.flow_desc->user_ptr = NULL;
    s_op->u.migposter.flow_desc->type = FLOWPROTO_MULTIQUEUE;

    /* NOTE(review): reads the fs_id from the "io" branch of the request
     * union while every other access here uses "migposter" — presumably
     * the two fields overlay at the same offset; confirm, or switch to
     * s_op->req->u.migposter.fs_id for consistency. */
    fs_conf = PINT_config_find_fs_id(user_opts, s_op->req->u.io.fs_id);
    if(fs_conf)
    {
        /* pick up any buffer settings overrides from fs conf */
        s_op->u.migposter.flow_desc->buffer_size = fs_conf->fp_buffer_size;
        s_op->u.migposter.flow_desc->buffers_per_flow = fs_conf->fp_buffers_per_flow;
    }

    gossip_debug(GOSSIP_SERVER_DEBUG, "flow: fsize: %lld, "
                 "server_nr: %d, server_ct: %d\n",
                 lld(s_op->u.migposter.flow_desc->file_data.fsize),
                 (int)s_op->u.migposter.flow_desc->file_data.server_nr,
                 (int)s_op->u.migposter.flow_desc->file_data.server_ct);
    gossip_debug(GOSSIP_SERVER_DEBUG, "file_req_offset: %lld,"
                 "aggregate_size: %lld, handle: %llu\n",
                 lld(s_op->u.migposter.flow_desc->file_req_offset),
                 lld(s_op->u.migposter.flow_desc->aggregate_size),
                 llu(s_op->resp.u.migposter.handle));

    /* set endpoints depending on type of io requested */
    s_op->u.migposter.flow_desc->src.endpoint_id = BMI_ENDPOINT;
    s_op->u.migposter.flow_desc->src.u.bmi.address = s_op->addr;
    s_op->u.migposter.flow_desc->dest.endpoint_id = TROVE_ENDPOINT;
    s_op->u.migposter.flow_desc->dest.u.trove.handle = s_op->resp.u.migposter.handle;
    s_op->u.migposter.flow_desc->dest.u.trove.coll_id = s_op->req->u.migposter.fs_id;

    err = job_flow(s_op->u.migposter.flow_desc, smcb, 0, js_p, &tmp_id,
                   server_job_context, user_opts->server_job_flow_timeout,
                   NULL);

    return err;
}
/** Cancels in progress I/O operations.
 *
 * Looks up the state machine for the given op id and, if it is a
 * still-running (non-small) PVFS_SYS_IO operation, marks it cancelled
 * and posts cancellation requests for every outstanding job (BMI
 * send/recv, flow, write ack) in each of its I/O contexts.  A count of
 * posted cancellations is accumulated in total_cancellations_remaining
 * so the state machine can wait for them to drain.
 *
 * \return 0 on success (including the already-completed / not-found
 *         cases, which are not errors), -PVFS_error on failure.
 */
PVFS_error PINT_client_io_cancel(PVFS_sys_op_id id)
{
    int i = 0;
    PVFS_error ret = -PVFS_EINVAL;
    PINT_smcb *smcb = NULL;
    PINT_client_sm *sm_p = NULL;
    PINT_client_sm *sm_base_p = NULL;

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PINT_client_io_cancel id %lld\n",lld(id));

    smcb = PINT_id_gen_safe_lookup(id);
    if (!smcb)
    {
        /* if we can't find it, it may have already completed */
        return 0;
    }
    sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    if (!sm_p)
    {
        /* if we can't find it, it may have already completed */
        return 0;
    }

    /* we can't cancel any arbitrary operation */
    assert(PINT_smcb_op(smcb) == PVFS_SYS_IO);

    if (PINT_smcb_complete(smcb))
    {
        /* op already completed; nothing to cancel. */
        return 0;
    }

    /* We also don't cancel small I/O operations as posted by
     * sys-small-io.sm. Check the corresponding flag. We have
     * to jump to the base frame rather than the current frame for this
     * information because small-io may have pushed a msgpairarray.
     */
    sm_base_p = PINT_sm_frame(smcb, (-(smcb->frame_count -1)));
    if(sm_base_p->u.io.small_io)
    {
        gossip_debug(GOSSIP_CANCEL_DEBUG,
                     "skipping cancellation of small I/O operation.\n");
        return(0);
    }

    /* if we fall to here, the I/O operation is still in flight */

    /* first, set a flag informing the sys_io state machine that the
     * operation has been cancelled so it doesn't post any new jobs
     */
    PINT_smcb_set_cancelled(smcb);

    /* don't return an error if nothing is cancelled, because everything
     * may have completed already
     */
    ret = 0;

    /* now run through and cancel the outstanding jobs */
    for(i = 0; i < sm_p->u.io.context_count; i++)
    {
        PINT_client_io_ctx *cur_ctx = &sm_p->u.io.contexts[i];
        assert(cur_ctx);

        if (cur_ctx->msg_send_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Send "
                         "(Request)\n",i);
            ret = job_bmi_cancel(cur_ctx->msg.send_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->msg_recv_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Recv "
                         "(Response)\n",i);
            ret = job_bmi_cancel(cur_ctx->msg.recv_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->flow_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG,
                         "[%d] Posting cancellation of type: FLOW\n",i);
            ret = job_flow_cancel(cur_ctx->flow_job_id,
                                  pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_flow_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->write_ack_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Recv "
                         "(Write Ack)\n",i);
            ret = job_bmi_cancel(cur_ctx->write_ack.recv_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }
    }

    gossip_debug(GOSSIP_CANCEL_DEBUG, "(%p) Total cancellations "
                 "remaining: %d\n", sm_p,
                 sm_p->u.io.total_cancellations_remaining);
    return ret;
}
/* This is not an exhaustive test. */
/* test_schema()
 *
 * Loads a binary FlatBuffers schema (.bfbs) from disk and spot-checks
 * it via the reflection reader API: locates the Monster table, checks
 * the "mana" field's default and type, and verifies the "enemy" and
 * "testarrayoftables" fields reference the Monster object index.
 *
 * returns 0 on success, -1 on any failed check (a diagnostic is
 * printed to stdout in that case).
 */
int test_schema(const char *monster_bfbs)
{
    void *buffer;
    size_t size;
    int ret = -1;
    reflection_Schema_table_t S;
    reflection_Object_vec_t Objs;
    reflection_Object_table_t Obj;
    reflection_Field_vec_t Flds;
    reflection_Field_table_t F;
    reflection_Type_table_t T;
    size_t k, monster_index;

    /* 10000 bytes is the max schema size accepted here */
    buffer = readfile(monster_bfbs, 10000, &size);
    if (!buffer) {
        printf("failed to load binary schema\n");
        goto done;
    }
    S = reflection_Schema_as_root(buffer);
    Objs = reflection_Schema_objects(S);
    /* debug dump of every object name in the schema */
    for (k = 0; k < reflection_Object_vec_len(Objs); ++k) {
        printf("dbg: obj #%d : %s\n", (int)k,
               reflection_Object_name(reflection_Object_vec_at(Objs, k)));
    }
    k = reflection_Object_vec_find(Objs, "MyGame.Example.Monster");
    if (k == flatbuffers_not_found) {
        printf("Could not find monster in schema\n");
        goto done;
    }
    /* remember the vector index: object references use it below */
    monster_index = k;
    Obj = reflection_Object_vec_at(Objs, k);
    if (strcmp(reflection_Object_name(Obj), "MyGame.Example.Monster")) {
        printf("Found wrong object in schema\n");
        goto done;
    }
    Flds = reflection_Object_fields(Obj);
    k = reflection_Field_vec_find(Flds, "mana");
    if (k == flatbuffers_not_found) {
        printf("Did not find mana field in Monster schema\n");
        goto done;
    }
    F = reflection_Field_vec_at(Flds, k);
    /* mana defaults to 150 in the monster test schema */
    if (reflection_Field_default_integer(F) != 150) {
        printf("mana field has wrong default value\n");
        printf("field name: %s\n", reflection_Field_name(F));
        printf("%lld\n", lld(reflection_Field_default_integer(F)));
        goto done;
    }
    T = reflection_Field_type(F);
    if (reflection_Type_base_type(T) != reflection_BaseType_Short) {
        printf("mana field has wrong type\n");
        goto done;
    }
    /* "enemy" must be a table reference back to Monster itself */
    k = reflection_Field_vec_find(Flds, "enemy");
    if (k == flatbuffers_not_found) {
        printf("enemy field not found\n");
        goto done;
    }
    T = reflection_Field_type(reflection_Field_vec_at(Flds, k));
    if (reflection_Type_base_type(T) != reflection_BaseType_Obj) {
        printf("enemy is not an object\n");
        goto done;
    }
    if (reflection_Type_index(T) != (int32_t)monster_index) {
        printf("enemy is not a monster\n");
        goto done;
    }
    /* "testarrayoftables" must be [Monster] */
    k = reflection_Field_vec_find(Flds, "testarrayoftables");
    if (k == flatbuffers_not_found) {
        printf("array of tables not found\n");
        goto done;
    }
    T = reflection_Field_type(reflection_Field_vec_at(Flds, k));
    if (reflection_Type_base_type(T) != reflection_BaseType_Vector) {
        printf("array of tables is not of vector type\n");
        goto done;
    }
    if (reflection_Type_element(T) != reflection_BaseType_Obj) {
        printf("array of tables is not a vector of table type\n");
        goto done;
    }
    if (reflection_Type_index(T) != (int32_t)monster_index) {
        printf("array of tables is not a monster vector\n");
        goto done;
    }
    ret = 0;
done:
    if (buffer) {
        free(buffer);
    }
    return ret;
}
/* print_entry_attr()
 *
 * Prints one directory entry in `ls -l` style: optional inode column,
 * type/permission string, owner, group, size, mtime and name (with
 * "-> target" appended for symlinks in long mode).  Honors the
 * relevant opts flags (list_all, list_inode, list_human_readable,
 * list_no_owner/group, list_numeric_uid_gid, list_use_si_units,
 * list_long).  Entries starting with '.' are skipped unless list_all.
 */
void print_entry_attr(
    PVFS_handle handle,
    char *entry_name,
    PVFS_sys_attr *attr,
    struct options *opts)
{
    char buf[128] = {0}, *formatted_size = NULL;
    char *formatted_owner = NULL, *formatted_group = NULL;
    struct group *grp = NULL;
    struct passwd *pwd = NULL;
    char *empty_str = "";
    char *owner = empty_str, *group = empty_str;
    char *inode = empty_str;
    time_t mtime;
    struct tm *time;
    PVFS_size size = 0;
    char scratch_owner[16] = {0}, scratch_group[16] = {0};
    char scratch_size[16] = {0}, scratch_inode[16] = {0};
    char f_type = '-';
    char group_x_char = '-';

    /* hide dotfiles unless -a was given */
    if (!opts->list_all && (entry_name[0] == '.'))
    {
        return;
    }
    if (attr == NULL)
    {
        return;
    }

    mtime = (time_t)attr->mtime;
    time = localtime(&mtime);

    /* numeric fallbacks; replaced by names below unless -n was given */
    snprintf(scratch_owner,16,"%d",(int)attr->owner);
    snprintf(scratch_group,16,"%d",(int)attr->group);

    if (opts->list_inode)
    {
        snprintf(scratch_inode,16,"%llu ",llu(handle));
        inode = scratch_inode;
    }

    /* pick a display size: real size for files, link-target length for
     * symlinks, a fixed 4096 for directories */
    if ((attr->objtype == PVFS_TYPE_METAFILE) &&
        (attr->mask & PVFS_ATTR_SYS_SIZE))
    {
        size = attr->size;
    }
    else if ((attr->objtype == PVFS_TYPE_SYMLINK) &&
             (attr->link_target))
    {
        size = (PVFS_size)strlen(attr->link_target);
    }
    else if (attr->objtype == PVFS_TYPE_DIRECTORY)
    {
        size = (PVFS_size)4096;
    }

    if (opts->list_human_readable)
    {
        PVFS_util_make_size_human_readable(
            size,scratch_size,16,opts->list_use_si_units);
    }
    else
    {
        snprintf(scratch_size,16, "%lld", lld(size));
    }
    /* right-align the size into an 11-char column */
    format_size_string(scratch_size,11,&formatted_size,1,1);

    if (!opts->list_no_owner)
    {
        owner = scratch_owner;
    }
    if (!opts->list_no_group)
    {
        group = scratch_group;
    }

    if (!opts->list_numeric_uid_gid)
    {
        if (!opts->list_no_owner)
        {
            pwd = getpwuid((uid_t)attr->owner);
            owner = (pwd ? pwd->pw_name : scratch_owner);
        }
        if (!opts->list_no_group)
        {
            grp = getgrgid((gid_t)attr->group);
            group = (grp ? grp->gr_name : scratch_group);
        }
    }

    /* for owner and group allow the fields to grow larger than 8 if
     * necessary (set hard_limit to 0), but pad anything smaller to
     * take up 8 spaces.
     */
    format_size_string(owner,8,&formatted_owner,0,0);
    format_size_string(group,8,&formatted_group,0,0);

    if (attr->objtype == PVFS_TYPE_DIRECTORY)
    {
        f_type =  'd';
    }
    else if (attr->objtype == PVFS_TYPE_SYMLINK)
    {
        f_type =  'l';
    }

    /* special case to set setgid display for groups if needed */
    if(attr->perms & PVFS_G_SGID)
    {
        group_x_char = ((attr->perms & PVFS_G_EXECUTE) ? 's' : 'S');
    }
    else
    {
        group_x_char = ((attr->perms & PVFS_G_EXECUTE) ? 'x' : '-');
    }

    snprintf(buf,128,"%s%c%c%c%c%c%c%c%c%c%c    1 %s %s %s "
             "%.4d-%.2d-%.2d %.2d:%.2d %s",
             inode,
             f_type,
             ((attr->perms & PVFS_U_READ) ? 'r' : '-'),
             ((attr->perms & PVFS_U_WRITE) ? 'w' : '-'),
             ((attr->perms & PVFS_U_EXECUTE) ? 'x' : '-'),
             ((attr->perms & PVFS_G_READ) ? 'r' : '-'),
             ((attr->perms & PVFS_G_WRITE) ? 'w' : '-'),
             group_x_char,
             ((attr->perms & PVFS_O_READ) ? 'r' : '-'),
             ((attr->perms & PVFS_O_WRITE) ? 'w' : '-'),
             ((attr->perms & PVFS_O_EXECUTE) ? 'x' : '-'),
             formatted_owner,
             formatted_group,
             formatted_size,
             (time->tm_year + 1900),
             (time->tm_mon + 1),
             time->tm_mday,
             (time->tm_hour),
             (time->tm_min),
             entry_name);

    if (formatted_size)
    {
        free(formatted_size);
    }
    if (formatted_owner)
    {
        free(formatted_owner);
    }
    if (formatted_group)
    {
        free(formatted_group);
    }

    if (attr->objtype == PVFS_TYPE_SYMLINK)
    {
        assert(attr->link_target);

        if (opts->list_long)
        {
            printf("%s -> %s\n", buf, attr->link_target);
        }
        else
        {
            printf("%s\n",buf);
        }
    }
    else
    {
        printf("%s\n",buf);
    }
}
/** lebf_encode_req()
 *
 * encodes a request structure
 *
 * Writes the common request header followed by the op-specific body
 * into a buffer sized by max_size_array[req->op].req (allocated by
 * encode_common()), then records the total encoded size.  The CASE
 * macro below simply dispatches each op to its generated
 * encode_PVFS_servreq_* function.
 *
 * returns 0 on success, -errno on failure
 */
static int lebf_encode_req(
    struct PVFS_server_req *req,
    struct PINT_encoded_msg *target_msg)
{
    int ret = 0;
    char **p;

    gossip_debug(GOSSIP_ENDECODE_DEBUG,"Executing lebf_encode_req...\n");
    gossip_debug(GOSSIP_ENDECODE_DEBUG,"\treq->op:%d\n",req->op);

    ret = encode_common(target_msg, max_size_array[req->op].req);
    if (ret)
        goto out;
    gossip_debug(GOSSIP_ENDECODE_DEBUG,"lebf_encode_req\n");

    /** every request has these fields */
    p = &target_msg->ptr_current;
    encode_PVFS_server_req(p, req);

#define CASE(tag,var) \
    case tag: encode_PVFS_servreq_##var(p,&req->u.var); break

    switch (req->op) {

        /** call standard function defined in headers */
        CASE(PVFS_SERV_LOOKUP_PATH, lookup_path);
        CASE(PVFS_SERV_CREATE, create);
        CASE(PVFS_SERV_MIRROR, mirror);
        CASE(PVFS_SERV_UNSTUFF, unstuff);
        CASE(PVFS_SERV_BATCH_CREATE, batch_create);
        CASE(PVFS_SERV_BATCH_REMOVE, batch_remove);
        CASE(PVFS_SERV_REMOVE, remove);
        CASE(PVFS_SERV_MGMT_REMOVE_OBJECT, mgmt_remove_object);
        CASE(PVFS_SERV_MGMT_REMOVE_DIRENT, mgmt_remove_dirent);
        CASE(PVFS_SERV_TREE_REMOVE, tree_remove);
        CASE(PVFS_SERV_TREE_GET_FILE_SIZE, tree_get_file_size);
        CASE(PVFS_SERV_MGMT_GET_DIRDATA_HANDLE, mgmt_get_dirdata_handle);
        CASE(PVFS_SERV_IO, io);
        CASE(PVFS_SERV_SMALL_IO, small_io);
        CASE(PVFS_SERV_GETATTR, getattr);
        CASE(PVFS_SERV_SETATTR, setattr);
        CASE(PVFS_SERV_CRDIRENT, crdirent);
        CASE(PVFS_SERV_RMDIRENT, rmdirent);
        CASE(PVFS_SERV_CHDIRENT, chdirent);
        CASE(PVFS_SERV_TRUNCATE, truncate);
        CASE(PVFS_SERV_MKDIR, mkdir);
        CASE(PVFS_SERV_READDIR, readdir);
        CASE(PVFS_SERV_FLUSH, flush);
        CASE(PVFS_SERV_STATFS, statfs);
        CASE(PVFS_SERV_MGMT_SETPARAM, mgmt_setparam);
        CASE(PVFS_SERV_MGMT_PERF_MON, mgmt_perf_mon);
        CASE(PVFS_SERV_MGMT_ITERATE_HANDLES, mgmt_iterate_handles);
        CASE(PVFS_SERV_MGMT_DSPACE_INFO_LIST, mgmt_dspace_info_list);
        CASE(PVFS_SERV_MGMT_EVENT_MON, mgmt_event_mon);
        CASE(PVFS_SERV_GETEATTR, geteattr);
        CASE(PVFS_SERV_SETEATTR, seteattr);
        CASE(PVFS_SERV_DELEATTR, deleattr);
        CASE(PVFS_SERV_LISTEATTR, listeattr);
        CASE(PVFS_SERV_LISTATTR, listattr);
        CASE(PVFS_SERV_MGMT_GET_UID, mgmt_get_uid);

        case PVFS_SERV_GETCONFIG:
        case PVFS_SERV_MGMT_NOOP:
        case PVFS_SERV_PROTO_ERROR:
        case PVFS_SERV_IMM_COPIES:
            /** nothing else -- header-only requests */
            break;

        case PVFS_SERV_INVALID:
        case PVFS_SERV_WRITE_COMPLETION:
        case PVFS_SERV_PERF_UPDATE:
        case PVFS_SERV_PRECREATE_POOL_REFILLER:
        case PVFS_SERV_JOB_TIMER:
        case PVFS_SERV_NUM_OPS:  /** sentinel */
            /* these ops are never encoded as client requests */
            gossip_err("%s: invalid operation %d\n", __func__, req->op);
            ret = -PVFS_ENOSYS;
            break;
    }

#undef CASE

    /** although much more may have been allocated */
    target_msg->total_size = target_msg->ptr_current
        - (char *) target_msg->buffer_list[0];
    target_msg->size_list[0] = target_msg->total_size;

    /* overflow check: the encoder must never write past the
     * preallocated maximum for this op */
    if (target_msg->total_size > max_size_array[req->op].req) {
        ret = -PVFS_ENOMEM;
        gossip_err("%s: op %d needed %lld bytes but alloced only %d\n",
                   __func__, req->op, lld(target_msg->total_size),
                   max_size_array[req->op].req);
    }

  out:
    return ret;
}
/* print_stats()
 *
 * Pretty-prints a PVFS object's attributes to stdout.  Each attribute is
 * printed only if its bit is set in attr->mask; owner/group ids are
 * resolved to names via getpwuid()/getgrgid() when possible.
 *
 * ref             object reference (fs id + handle) being described
 * pszName         absolute name to display
 * pszRelativeName relative name to display
 * attr            attributes previously fetched for the object
 */
void print_stats(const PVFS_object_ref * ref,
                 const char           * pszName,
                 const char           * pszRelativeName,
                 const PVFS_sys_attr  * attr)
{
    char a_time[100] = "", m_time[100] = "",  c_time[100] = "";
    struct passwd * user;
    struct group  * group;

    fprintf(stdout, "-------------------------------------------------------\n");
    fprintf(stdout, "  File Name     : %s\n", pszName);
    fprintf(stdout, "  Relative Name : %s\n", pszRelativeName);
    fprintf(stdout, "  fs ID         : %d\n", ref->fs_id);
    fprintf(stdout, "  Handle        : %llu\n", llu(ref->handle));
    fprintf(stdout, "  Mask          : %o\n", attr->mask);

    if(attr->mask & PVFS_ATTR_SYS_PERM)
    {
        fprintf(stdout, "  Permissions   : %o\n", attr->perms);
    }

    /* Print the type of object */
    if(attr->mask & PVFS_ATTR_SYS_TYPE)
    {
        if(attr->objtype & PVFS_TYPE_METAFILE)
        {
            fprintf(stdout, "  Type          : Regular File\n");
        }
        else if(attr->objtype & PVFS_TYPE_DIRECTORY)
        {
            fprintf(stdout, "  Type          : Directory\n");
        }
        else if(attr->objtype & PVFS_TYPE_SYMLINK)
        {
            fprintf(stdout, "  Type          : Symbolic Link\n");
            if(attr->mask & PVFS_ATTR_SYS_LNK_TARGET)
            {
                fprintf(stdout, "  Link Target   : %s\n", attr->link_target);
            }
        }
    }

    if(attr->mask & PVFS_ATTR_SYS_SIZE)
    {
        /* If the size of a directory object is zero, let's default the size to
         * 4096. This is what the kernel module does, and is the default directory
         * size on an EXT3 system
         */
        if( (attr->size == 0) &&
            (attr->objtype & PVFS_TYPE_DIRECTORY))
        {
            fprintf(stdout, "  Size          : 4096\n");
        }
        else
        {
            fprintf(stdout, "  Size          : %lld\n", lld(attr->size));
        }
    }

    if(attr->mask & PVFS_ATTR_SYS_UID)
    {
        /* user may be NULL if the uid has no local passwd entry */
        user  = getpwuid(attr->owner);
        fprintf(stdout, "  Owner         : %d (%s)\n",
                attr->owner,
                (user ? user->pw_name : "UNKNOWN"));
    }

    if(attr->mask & PVFS_ATTR_SYS_GID)
    {
        group = getgrgid(attr->group);
        fprintf(stdout, "  Group         : %d (%s)\n",
                attr->group,
                (group ? group->gr_name : "UNKNOWN"));
    }

    if(attr->mask & PVFS_ATTR_SYS_ATIME)
    {
        /* copy to a local time_t: attr->atime may be a wider PVFS type */
        time_t a_tmp = attr->atime;
        sprintf(a_time, "%s", ctime((const time_t *)&a_tmp));
        a_time[strlen(a_time)-1] = 0;  /* strip ctime()'s trailing newline */
        fprintf(stdout, "  atime         : %llu (%s)\n",
                llu(attr->atime), a_time);
    }

    if(attr->mask & PVFS_ATTR_SYS_MTIME)
    {
        time_t m_tmp = attr->mtime;
        sprintf(m_time, "%s", ctime((const time_t *)&m_tmp));
        m_time[strlen(m_time)-1] = 0;
        fprintf(stdout, "  mtime         : %llu (%s)\n",
                llu(attr->mtime), m_time);
    }

    if(attr->mask & PVFS_ATTR_SYS_CTIME)
    {
        time_t c_tmp = attr->ctime;
        sprintf(c_time, "%s", ctime((const time_t *)&c_tmp));
        c_time[strlen(c_time)-1] = 0;
        fprintf(stdout, "  ctime         : %llu (%s)\n",
                llu(attr->ctime), c_time);
    }

    /* dfile_count is only valid for a file. For a given file, it tells how many
     * datafiles there are
     */
    if( (attr->mask & PVFS_ATTR_SYS_DFILE_COUNT) &&
        (attr->objtype == PVFS_TYPE_METAFILE))
    {
        fprintf(stdout, "  datafiles     : %d\n", attr->dfile_count);
    }

    if( (attr->mask & PVFS_ATTR_SYS_BLKSIZE) &&
        (attr->objtype == PVFS_TYPE_METAFILE))
    {
        fprintf(stdout, "  blksize       : %lld\n", lld(attr->blksize));
    }

    /* dirent_count is only valid on directories */
    if( (attr->mask & PVFS_ATTR_SYS_DIRENT_COUNT) &&
        (attr->objtype == PVFS_TYPE_DIRECTORY))
    {
        fprintf(stdout, "  dir entries   : %llu\n", llu(attr->dirent_count));
    }

    /* flags are only reported for regular files */
    if ((attr->mask & PVFS_ATTR_SYS_TYPE) &&
        (attr->objtype & PVFS_TYPE_METAFILE))
    {
        if (attr->flags == 0)
            fprintf(stdout, "  flags         : none");
        else
            fprintf(stdout, "  flags         : ");
        if (attr->flags & PVFS_IMMUTABLE_FL)
            fprintf(stdout, "immutable, ");
        if (attr->flags & PVFS_APPEND_FL)
            fprintf(stdout, "append-only, ");
        if (attr->flags & PVFS_NOATIME_FL)
            fprintf(stdout, "noatime ");
        fprintf(stdout, "\n");
    }
}
int main(int argc, char **argv) { int ret = -1; char str_buf[256] = {0}; char *filename = (char *)0; PVFS_fs_id cur_fs; PVFS_sysresp_symlink resp_sym; char* entry_name = NULL; char *target = NULL; PVFS_object_ref parent_refn; PVFS_sys_attr attr; PVFS_credentials credentials; if (argc != 3) { fprintf(stderr,"Usage: %s filename target\n",argv[0]); return ret; } filename = argv[1]; target = argv[2]; ret = PVFS_util_init_defaults(); if (ret < 0) { PVFS_perror("PVFS_util_init_defaults", ret); return (-1); } ret = PVFS_util_get_default_fsid(&cur_fs); if (ret < 0) { PVFS_perror("PVFS_util_get_default_fsid", ret); return (-1); } if (PINT_remove_base_dir(filename,str_buf,256)) { if (filename[0] != '/') { printf("You forgot the leading '/'\n"); } printf("Cannot retrieve link name for creation on %s\n", filename); return(-1); } printf("Link to be created is %s\n",str_buf); memset(&resp_sym, 0, sizeof(PVFS_sysresp_symlink)); PVFS_util_gen_credentials(&credentials); entry_name = str_buf; attr.mask = PVFS_ATTR_SYS_ALL_SETABLE; attr.owner = credentials.uid; attr.group = credentials.gid; attr.perms = 1877; attr.atime = attr.ctime = attr.mtime = time(NULL); ret = PINT_lookup_parent(filename, cur_fs, &credentials, &parent_refn.handle); if(ret < 0) { PVFS_perror("PVFS_util_lookup_parent", ret); return(-1); } parent_refn.fs_id = cur_fs; ret = PVFS_sys_symlink(entry_name, parent_refn, target, attr, &credentials, &resp_sym, NULL); if (ret < 0) { printf("symlink failed with errcode = %d\n", ret); return(-1); } printf("--symlink--\n"); printf("Handle: %lld\n", lld(resp_sym.ref.handle)); ret = PVFS_sys_finalize(); if (ret < 0) { printf("finalizing sysint failed with errcode = %d\n", ret); return (-1); } return(0); }
/* start_delayed_ops_if_any()
 *
 * If any bstream I/O operation was previously delayed (because too many
 * were already in flight), dequeue one and post its aio requests now.
 * Called with dec_first non-zero when the caller has just finished an
 * operation, so the in-progress counter is decremented before checking
 * the ready queue.  All counter/queue manipulation happens under
 * s_dbpf_io_mutex.
 */
static void start_delayed_ops_if_any(int dec_first)
{
    int ret = 0;
    dbpf_queued_op_t *cur_op = NULL;
    int i = 0, aiocb_inuse_count = 0;
    struct aiocb *aiocbs = NULL, *aiocb_ptr_array[AIOCB_ARRAY_SZ] = {0};

    gen_mutex_lock(&s_dbpf_io_mutex);
    if (dec_first)
    {
        /* the caller just completed one I/O; free up a slot */
        s_dbpf_ios_in_progress--;
    }
    gossip_debug(GOSSIP_TROVE_DEBUG, "DBPF I/O ops in progress: %d\n",
                 s_dbpf_ios_in_progress);
    if (s_dbpf_io_ready_queue == NULL)
    {
        s_dbpf_io_ready_queue = dbpf_op_queue_new();
    }
    assert(s_dbpf_io_ready_queue);

    if (!dbpf_op_queue_empty(s_dbpf_io_ready_queue))
    {
        /* take the oldest delayed op; only bstream read/write ops are
         * ever placed on this queue */
        cur_op = dbpf_op_queue_shownext(s_dbpf_io_ready_queue);
        assert(cur_op);
#ifndef __PVFS2_TROVE_AIO_THREADED__
        assert(cur_op->op.state == OP_INTERNALLY_DELAYED);
#endif
        assert((cur_op->op.type == BSTREAM_READ_AT) ||
               (cur_op->op.type == BSTREAM_READ_LIST) ||
               (cur_op->op.type == BSTREAM_WRITE_AT) ||
               (cur_op->op.type == BSTREAM_WRITE_LIST));
        dbpf_op_queue_remove(cur_op);

        gossip_debug(GOSSIP_TROVE_DEBUG, "starting delayed I/O "
                     "operation %p (%d in progress)\n", cur_op,
                     s_dbpf_ios_in_progress);

        aiocbs = cur_op->op.u.b_rw_list.aiocb_array;
        assert(aiocbs);

        /* count the live entries; unused slots were marked LIO_NOP when
         * the op was converted (see dbpf_bstream_rw_list) */
        for(i = 0; i < AIOCB_ARRAY_SZ; i++)
        {
            if (aiocbs[i].aio_lio_opcode != LIO_NOP)
            {
                aiocb_inuse_count++;
            }
        }
        /* NOTE(review): this assumes the in-use aiocbs are packed at the
         * front of the array -- confirm against the convert routine */
        for(i = 0; i < aiocb_inuse_count; i++)
        {
            aiocb_ptr_array[i] = &aiocbs[i];
        }

        if(gossip_debug_enabled(GOSSIP_TROVE_DEBUG))
        {
            gossip_debug(GOSSIP_TROVE_DEBUG,
                         "lio_listio called with %d following aiocbs:\n",
                         aiocb_inuse_count);
            for(i=0; i<aiocb_inuse_count; i++)
            {
                gossip_debug(
                    GOSSIP_TROVE_DEBUG,
                    "aiocb_ptr_array[%d]: fd: %d, off: %lld, "
                    "bytes: %d, buf: %p, type: %d\n",
                    i,
                    aiocb_ptr_array[i]->aio_fildes,
                    lld(aiocb_ptr_array[i]->aio_offset),
                    (int)aiocb_ptr_array[i]->aio_nbytes,
                    aiocb_ptr_array[i]->aio_buf,
                    (int)aiocb_ptr_array[i]->aio_lio_opcode);
            }
        }

        /* post asynchronously; completion is reported via the op's sigev */
        ret = cur_op->op.u.b_rw_list.aio_ops->lio_listio(
            LIO_NOWAIT, aiocb_ptr_array,
            aiocb_inuse_count,
            &cur_op->op.u.b_rw_list.sigev);
        if (ret != 0)
        {
            gossip_lerr("lio_listio() returned %d\n", ret);
            dbpf_open_cache_put(&cur_op->op.u.b_rw_list.open_ref);
            goto error_exit;
        }
        s_dbpf_ios_in_progress++;

        gossip_debug(GOSSIP_TROVE_DEBUG, "%s: lio_listio posted %p "
                     "(handle %llu, ret %d))\n", __func__, cur_op,
                     llu(cur_op->op.handle), ret);

#ifndef __PVFS2_TROVE_AIO_THREADED__
        /* to continue making progress on this previously delayed I/O
         * operation, we need to re-add it back to the normal dbpf
         * operation queue so that the calling thread can continue to
         * call the service method (state flag is updated as well)
         */
        dbpf_queued_op_queue_nolock(cur_op);
#endif
    }
error_exit:
    gen_mutex_unlock(&s_dbpf_io_mutex);
}
/** lebf_encode_resp()
 *
 * encodes a response structure
 *
 * Mirror of lebf_encode_req() for the response direction: sets up the
 * encode buffer, writes the common response header, then encodes the
 * per-op payload -- but only when resp->status == 0, since a failed
 * operation's payload fields may be uninitialized or garbage.
 *
 * \param resp       response to encode (resp->op selects the encoder)
 * \param target_msg output: encoded message buffer and size bookkeeping
 *
 * returns 0 on success, -errno on failure
 */
static int lebf_encode_resp(
    struct PVFS_server_resp *resp,
    struct PINT_encoded_msg *target_msg)
{
    int ret;
    char **p;

    ret = encode_common(target_msg, max_size_array[resp->op].resp);
    if (ret)
        goto out;

    gossip_debug(GOSSIP_ENDECODE_DEBUG,"lebf_encode_resp\n");

    /** every response has these fields */
    p = &target_msg->ptr_current;
    encode_PVFS_server_resp(p, resp);

/* expands to "case tag: encode the matching member of resp->u and break" */
#define CASE(tag,var) \
    case tag: encode_PVFS_servresp_##var(p,&resp->u.var); break

    /** we stand a good chance of segfaulting if we try to encode the response
     * after something bad happened reading data from disk. */
    if (resp->status == 0)
    {
        /** extra encoding rules for particular responses */
        switch (resp->op) {

            /** call standard function defined in headers */
            CASE(PVFS_SERV_GETCONFIG, getconfig);
            CASE(PVFS_SERV_LOOKUP_PATH, lookup_path);
            CASE(PVFS_SERV_CREATE, create);
            CASE(PVFS_SERV_MIRROR, mirror);
            CASE(PVFS_SERV_UNSTUFF, unstuff);
            CASE(PVFS_SERV_BATCH_CREATE, batch_create);
            CASE(PVFS_SERV_IO, io);
            CASE(PVFS_SERV_SMALL_IO, small_io);
            CASE(PVFS_SERV_GETATTR, getattr);
            CASE(PVFS_SERV_RMDIRENT, rmdirent);
            CASE(PVFS_SERV_CHDIRENT, chdirent);
            CASE(PVFS_SERV_MKDIR, mkdir);
            CASE(PVFS_SERV_READDIR, readdir);
            CASE(PVFS_SERV_STATFS, statfs);
            CASE(PVFS_SERV_MGMT_PERF_MON, mgmt_perf_mon);
            CASE(PVFS_SERV_MGMT_ITERATE_HANDLES, mgmt_iterate_handles);
            CASE(PVFS_SERV_MGMT_DSPACE_INFO_LIST, mgmt_dspace_info_list);
            CASE(PVFS_SERV_MGMT_EVENT_MON, mgmt_event_mon);
            CASE(PVFS_SERV_WRITE_COMPLETION, write_completion);
            CASE(PVFS_SERV_MGMT_GET_DIRDATA_HANDLE, mgmt_get_dirdata_handle);
            CASE(PVFS_SERV_GETEATTR, geteattr);
            CASE(PVFS_SERV_LISTEATTR, listeattr);
            CASE(PVFS_SERV_LISTATTR, listattr);
            CASE(PVFS_SERV_TREE_GET_FILE_SIZE, tree_get_file_size);
            CASE(PVFS_SERV_MGMT_GET_UID, mgmt_get_uid);

            /* these ops carry no response body beyond the common fields */
            case PVFS_SERV_REMOVE:
            case PVFS_SERV_MGMT_REMOVE_OBJECT:
            case PVFS_SERV_MGMT_REMOVE_DIRENT:
            case PVFS_SERV_TREE_REMOVE:
            case PVFS_SERV_SETATTR:
            case PVFS_SERV_SETEATTR:
            case PVFS_SERV_DELEATTR:
            case PVFS_SERV_CRDIRENT:
            case PVFS_SERV_TRUNCATE:
            case PVFS_SERV_FLUSH:
            case PVFS_SERV_MGMT_NOOP:
            case PVFS_SERV_BATCH_REMOVE:
            case PVFS_SERV_PROTO_ERROR:
            case PVFS_SERV_IMM_COPIES:
            case PVFS_SERV_MGMT_SETPARAM:
                /** nothing else */
                break;

            /* server-internal ops that must never be encoded onto the wire */
            case PVFS_SERV_INVALID:
            case PVFS_SERV_PERF_UPDATE:
            case PVFS_SERV_PRECREATE_POOL_REFILLER:
            case PVFS_SERV_JOB_TIMER:
            case PVFS_SERV_NUM_OPS:  /** sentinel */
                gossip_err("%s: invalid operation %d\n", __func__, resp->op);
                ret = -PVFS_ENOSYS;
                break;
        }
    }

#undef CASE

    /** although much more may have been allocated */
    /* actual encoded size = distance written into the first buffer */
    target_msg->total_size = target_msg->ptr_current
        - (char *) target_msg->buffer_list[0];
    target_msg->size_list[0] = target_msg->total_size;

    /* overrun check against the per-op sizing estimate */
    if (target_msg->total_size > max_size_array[resp->op].resp) {
        ret = -PVFS_ENOMEM;
        gossip_err("%s: op %d needed %lld bytes but alloced only %d\n",
                   __func__, resp->op, lld(target_msg->total_size),
                   max_size_array[resp->op].resp);
    }

  out:
    return ret;
}
/* issue_or_delay_io_operation()
 *
 * Posts the given bstream op's aio requests via lio_listio() if fewer
 * than TROVE_max_concurrent_io operations are in flight; otherwise parks
 * the op on s_dbpf_io_ready_queue to be started later by
 * start_delayed_ops_if_any().
 *
 * cur_op            queued op owning the aiocb array
 * aiocb_ptr_array   pointers to the in-use aiocbs to post
 * aiocb_inuse_count number of entries in aiocb_ptr_array
 * sig               sigevent used for aio completion notification
 * dec_first         non-zero if the caller just finished an op, so the
 *                   in-progress counter is decremented before the check
 *
 * Returns 0 on success (issued or delayed), -TROVE_* error otherwise.
 */
static int issue_or_delay_io_operation(
    dbpf_queued_op_t *cur_op,
    struct aiocb **aiocb_ptr_array,
    int aiocb_inuse_count,
    struct sigevent *sig,
    int dec_first)
{
    int ret = -TROVE_EINVAL, op_delayed = 0;
    int i;
    assert(cur_op);

    gen_mutex_lock(&s_dbpf_io_mutex);
    if (dec_first)
    {
        s_dbpf_ios_in_progress--;
    }
    if (s_dbpf_ios_in_progress < TROVE_max_concurrent_io)
    {
        /* slot available: claim it and issue below */
        s_dbpf_ios_in_progress++;
    }
    else
    {
        if (s_dbpf_io_ready_queue == NULL)
        {
            s_dbpf_io_ready_queue = dbpf_op_queue_new();
            if (!s_dbpf_io_ready_queue)
            {
                /* bug fix: release s_dbpf_io_mutex before returning;
                 * the original returned with the mutex still held,
                 * deadlocking all subsequent I/O */
                gen_mutex_unlock(&s_dbpf_io_mutex);
                return -TROVE_ENOMEM;
            }
        }
        assert(s_dbpf_io_ready_queue);
        dbpf_op_queue_add(s_dbpf_io_ready_queue, cur_op);

        op_delayed = 1;
#ifndef __PVFS2_TROVE_AIO_THREADED__
        /*
          setting this state flag tells the caller not to re-add this
          operation to the normal dbpf-op queue because it will be
          started automatically (internally) on completion of other
          I/O operations
        */
        gen_mutex_lock(&cur_op->mutex);
        cur_op->op.state = OP_INTERNALLY_DELAYED;
        gen_mutex_unlock(&cur_op->mutex);
#endif

        gossip_debug(GOSSIP_TROVE_DEBUG, "delayed I/O operation %p "
                     "(%d already in progress)\n",
                     cur_op, s_dbpf_ios_in_progress);
    }
    gossip_debug(GOSSIP_TROVE_DEBUG, "DBPF I/O ops in progress: %d\n",
                 s_dbpf_ios_in_progress);
    gen_mutex_unlock(&s_dbpf_io_mutex);

    if (!op_delayed)
    {
        if(gossip_debug_enabled(GOSSIP_TROVE_DEBUG))
        {
            gossip_debug(GOSSIP_TROVE_DEBUG,
                         "lio_listio called with the following aiocbs:\n");
            for(i=0; i<aiocb_inuse_count; i++)
            {
                gossip_debug(GOSSIP_TROVE_DEBUG,
                             "aiocb_ptr_array[%d]: fd: %d, "
                             "off: %lld, bytes: %d, buf: %p, type: %d\n",
                             i, aiocb_ptr_array[i]->aio_fildes,
                             lld(aiocb_ptr_array[i]->aio_offset),
                             (int)aiocb_ptr_array[i]->aio_nbytes,
                             aiocb_ptr_array[i]->aio_buf,
                             (int)aiocb_ptr_array[i]->aio_lio_opcode);
            }
        }

        /* post asynchronously; completion arrives through sig */
        ret = cur_op->op.u.b_rw_list.aio_ops->lio_listio(
            LIO_NOWAIT, aiocb_ptr_array,
            aiocb_inuse_count, sig);
        if (ret != 0)
        {
            /* failed to post: give the claimed slot back and drop the
             * open-cache reference */
            s_dbpf_ios_in_progress--;
            gossip_lerr("lio_listio() returned %d\n", ret);
            dbpf_open_cache_put(&cur_op->op.u.b_rw_list.open_ref);
            return -trove_errno_to_trove_error(errno);
        }
        gossip_debug(GOSSIP_TROVE_DEBUG, "%s: lio_listio posted %p "
                     "(handle %llu, ret %d)\n", __func__, cur_op,
                     llu(cur_op->op.handle), ret);
    }
    return 0;
}
/* Test driver: processes a single hindexed file request against a
 * simple-stripe distribution, first in CLIENT mode (with a contiguous
 * memory request) and then in SERVER mode, printing the offset/size
 * segments produced by each PINT_process_request() pass.
 *
 * Returns 0 on success, -1 if request processing fails.
 */
int main(int argc, char **argv)
{
    int i;
    PINT_Request *r1;
    PINT_Request *r2;
    PINT_Request_state *rs1;
    PINT_Request_state *rs2;
    PINT_request_file_data rf1;
    PINT_Request_result seg1;

    /* PVFS_Process_request arguments */
    int retval;

    int32_t blksz[] = {4};
    PVFS_size disps[] = {32};

    /* set up file type request */
    /* one block of 4 ints at file displacement 32 */
    PVFS_Request_hindexed(1, blksz, disps, PVFS_INT, &r1);
    rs1 = PINT_new_request_state(r1);

    /* set up memory request */
    PVFS_Request_contiguous(96, PVFS_BYTE, &r2);
    rs2 = PINT_new_request_state(r2);

    /* set up file data for request */
    /* server 0 of 4, 6000-byte file, simple_stripe distribution */
    PINT_dist_initialize(NULL);
    rf1.server_nr = 0;
    rf1.server_ct = 4;
    rf1.fsize = 6000;
    rf1.dist = PINT_dist_create("simple_stripe");
    rf1.extend_flag = 0;
    PINT_dist_lookup(rf1.dist);

    /* set up result struct */
    seg1.offset_array = (int64_t *)malloc(SEGMAX * sizeof(int64_t));
    seg1.size_array = (int64_t *)malloc(SEGMAX * sizeof(int64_t));
    seg1.bytemax = BYTEMAX;
    seg1.segmax = SEGMAX;
    seg1.bytes = 0;
    seg1.segs = 0;

    /* skip into the file datatype */
    /*PINT_REQUEST_STATE_SET_TARGET(rs1, 500);*/
    PINT_REQUEST_STATE_SET_FINAL(rs1,96);

   /* Turn on debugging */
    // gossip_enable_stderr();
    // gossip_set_debug_mask(1,GOSSIP_REQUEST_DEBUG);

    /* skipping logical bytes */
    // PINT_REQUEST_STATE_SET_TARGET(rs1,(3 * 1024) + 512);
    // PINT_REQUEST_STATE_SET_FINAL(rs1,(6 * 1024) + 512);

    printf("\n************************************\n");
    printf("One request in CLIENT mode server 0 of 4\n");
    printf("Simple stripe, default stripe size (64K)\n");
    printf("Offset 0, file size 6000, no extend flag\n");
    printf("MemReq size 96 coniguous\n");
    printf("\n************************************\n");
    PINT_REQUEST_STATE_RESET(rs1);
    PINT_REQUEST_STATE_RESET(rs2);

    /* loop until the whole request has been mapped to segments; each
     * pass fills seg1 with up to SEGMAX offset/size pairs */
    do
    {
        int r = 0;

        seg1.bytes = 0;
        seg1.segs = 0;

        /* process request */
        retval = PINT_process_request(rs1, rs2, &rf1, &seg1, PINT_CLIENT);

        if(retval >= 0)
        {
            printf("results of PINT_process_request():\n");
            printf("%d segments with %lld bytes\n",
                   seg1.segs, lld(seg1.bytes));
            for(i=0; i<seg1.segs; i++, r++)
            {
                printf("  segment %d: offset: %d size: %d\n",
                       i, (int)seg1.offset_array[i],
                       (int)seg1.size_array[i]);
            }
        }

    } while(!PINT_REQUEST_DONE(rs1) && retval >= 0);

    if(retval < 0)
    {
        fprintf(stderr, "Error: PINT_process_request() failure.\n");
        return(-1);
    }
    if(PINT_REQUEST_DONE(rs1))
    {
        printf("**** request done.\n");
    }

    printf("\n************************************\n");
    printf("One request in SERVER mode server 0 of 4\n");
    printf("Simple stripe, default stripe size (64K)\n");
    printf("Offset 32, file size 6000, no extend flag\n");
    printf("MemReq size 96 coniguous\n");
    printf("\n************************************\n");
    /* SERVER mode ignores the memory request, so rs2 is passed as NULL */
    PINT_REQUEST_STATE_RESET(rs1);

    do
    {
        int r = 0;

        seg1.bytes = 0;
        seg1.segs = 0;

        /* process request */
        retval = PINT_process_request(rs1, NULL, &rf1, &seg1, PINT_SERVER);

        if(retval >= 0)
        {
            printf("results of PINT_process_request():\n");
            printf("%d segments with %lld bytes\n",
                   seg1.segs, lld(seg1.bytes));
            for(i=0; i<seg1.segs; i++, r++)
            {
                printf("  segment %d: offset: %d size: %d\n",
                       i, (int)seg1.offset_array[i],
                       (int)seg1.size_array[i]);
            }
        }

    } while(!PINT_REQUEST_DONE(rs1) && retval >= 0);

    if(retval < 0)
    {
        fprintf(stderr, "Error: PINT_process_request() failure.\n");
        return(-1);
    }
    if(PINT_REQUEST_DONE(rs1))
    {
        printf("**** request done.\n");
    }

    return 0;
}
/* dbpf_bstream_rw_list()
 *
 * Handles queueing of both read and write list operations
 *
 * opcode parameter should be LIO_READ or LIO_WRITE
 *
 * Builds a queued op describing a list I/O against the bstream for
 * (coll_id, handle).  In the threaded-AIO build the op's aiocbs are
 * converted and issued (or delayed) immediately here; otherwise the op
 * is pushed onto the normal dbpf op queue and serviced later by
 * dbpf_bstream_rw_list_op_svc().
 *
 * Returns 0 on success, -TROVE_* error code on failure; the op id is
 * returned through out_op_id_p.
 */
inline int dbpf_bstream_rw_list(TROVE_coll_id coll_id,
                                TROVE_handle handle,
                                char **mem_offset_array,
                                TROVE_size *mem_size_array,
                                int mem_count,
                                TROVE_offset *stream_offset_array,
                                TROVE_size *stream_size_array,
                                int stream_count,
                                TROVE_size *out_size_p,
                                TROVE_ds_flags flags,
                                TROVE_vtag_s *vtag,
                                void *user_ptr,
                                TROVE_context_id context_id,
                                TROVE_op_id *out_op_id_p,
                                int opcode,
                                struct dbpf_aio_ops * aio_ops,
                                PVFS_hint hints)
{
    //gossip_debug(GOSSIP_WORMUP_DEBUG, "Here we enter into dbpf_bstream_rw_list\n");
    int ret = -TROVE_EINVAL;
    dbpf_queued_op_t *q_op_p = NULL;
    struct dbpf_collection *coll_p = NULL;
    enum dbpf_op_type tmp_type;
    PINT_event_type event_type;
    int i;
    PVFS_size count_mem;
#ifdef __PVFS2_TROVE_AIO_THREADED__
    struct dbpf_op *op_p = NULL;
    int aiocb_inuse_count = 0;
    struct aiocb *aiocb_p = NULL, *aiocb_ptr_array[AIOCB_ARRAY_SZ] = {0};
#endif

    coll_p = dbpf_collection_find_registered(coll_id);
    if (coll_p == NULL)
    {
        return -TROVE_EINVAL;
    }

    q_op_p = dbpf_queued_op_alloc();
    if (q_op_p == NULL)
    {
        return -TROVE_ENOMEM;
    }

    /* pick op type and the matching trace event for read vs. write */
    if (opcode == LIO_READ)
    {
        tmp_type = BSTREAM_READ_LIST;
        event_type = trove_dbpf_read_event_id;
    }
    else
    {
        tmp_type = BSTREAM_WRITE_LIST;
        event_type = trove_dbpf_write_event_id;
    }

    /* initialize all the common members */
    /* in the threaded build there is no service function: the op is
     * driven by the aio completion callback instead */
    dbpf_queued_op_init(q_op_p,
                        tmp_type,
                        handle,
                        coll_p,
#ifdef __PVFS2_TROVE_AIO_THREADED__
                        NULL,
#else
                        dbpf_bstream_rw_list_op_svc,
#endif
                        user_ptr,
                        flags,
                        context_id);

    if(PINT_EVENT_ENABLED)
    {
        /* total bytes in the memory list, reported with the start event */
        count_mem = 0;
        for(i = 0; i < mem_count; ++i)
        {
            count_mem += mem_size_array[i];
        }
    }

    q_op_p->event_type = event_type;

    PINT_EVENT_START(event_type, dbpf_pid, NULL, &q_op_p->event_id,
                     PINT_HINT_GET_CLIENT_ID(hints),
                     PINT_HINT_GET_REQUEST_ID(hints),
                     PINT_HINT_GET_RANK(hints),
                     PINT_HINT_GET_HANDLE(hints),
                     handle,
                     PINT_HINT_GET_OP_ID(hints),
                     count_mem);

    if(gossip_debug_enabled(GOSSIP_TROVE_DEBUG))
    {
        /* debug-only sanity pass: the memory list and stream list should
         * describe the same number of total bytes */
        PVFS_size count_stream = 0;
        count_mem = 0;
        gossip_debug(GOSSIP_TROVE_DEBUG,
                     "dbpf_bstream_rw_list: mem_count: %d, stream_count: %d\n",
                     mem_count,
                     stream_count);
        for(i = 0; i < mem_count; ++i)
        {
            gossip_debug(
                GOSSIP_TROVE_DEBUG,
                "dbpf_bstream_rw_list: mem_offset: %p, mem_size: %Ld\n",
                mem_offset_array[i], lld(mem_size_array[i]));
            count_mem += mem_size_array[i];
        }

        for(i = 0; i < stream_count; ++i)
        {
            gossip_debug(GOSSIP_TROVE_DEBUG,
                         "dbpf_bstream_rw_list: "
                         "stream_offset: %Ld, stream_size: %Ld\n",
                         lld(stream_offset_array[i]),
                         lld(stream_size_array[i]));
            count_stream += stream_size_array[i];
        }

        if(count_mem != count_stream)
        {
            gossip_debug(GOSSIP_TROVE_DEBUG,
                         "dbpf_bstream_rw_list: "
                         "mem_count: %Ld != stream_count: %Ld\n",
                         lld(count_mem), lld(count_stream));
        }
    }

    /* initialize op-specific members */
    q_op_p->op.u.b_rw_list.fd = -1;
    q_op_p->op.u.b_rw_list.opcode = opcode;
    q_op_p->op.u.b_rw_list.mem_array_count = mem_count;
    q_op_p->op.u.b_rw_list.mem_offset_array = mem_offset_array;
    q_op_p->op.u.b_rw_list.mem_size_array = mem_size_array;
    q_op_p->op.u.b_rw_list.stream_array_count = stream_count;
    q_op_p->op.u.b_rw_list.stream_offset_array = stream_offset_array;
    q_op_p->op.u.b_rw_list.stream_size_array = stream_size_array;
    q_op_p->op.hints = hints;
    q_op_p->op.u.b_rw_list.aio_ops = aio_ops;

    /* initialize the out size to 0 */
    *out_size_p = 0;
    q_op_p->op.u.b_rw_list.out_size_p = out_size_p;
    q_op_p->op.u.b_rw_list.aiocb_array_count = 0;
    q_op_p->op.u.b_rw_list.aiocb_array = NULL;
#ifndef __PVFS2_TROVE_AIO_THREADED__
    q_op_p->op.u.b_rw_list.queued_op_ptr = (void *)q_op_p;
#endif

    /* initialize list processing state (more op-specific members) */
    q_op_p->op.u.b_rw_list.lio_state.mem_ct = 0;
    q_op_p->op.u.b_rw_list.lio_state.stream_ct = 0;
    q_op_p->op.u.b_rw_list.lio_state.cur_mem_size = mem_size_array[0];
    q_op_p->op.u.b_rw_list.lio_state.cur_mem_off = mem_offset_array[0];
    q_op_p->op.u.b_rw_list.lio_state.cur_stream_size = stream_size_array[0];
    q_op_p->op.u.b_rw_list.lio_state.cur_stream_off = stream_offset_array[0];

    q_op_p->op.u.b_rw_list.list_proc_state = LIST_PROC_INITIALIZED;

    /* take a cached fd reference for the bstream; released on error or
     * when the op completes */
    ret = dbpf_open_cache_get(
        coll_id, handle,
        (opcode == LIO_WRITE) ? DBPF_FD_BUFFERED_WRITE : DBPF_FD_BUFFERED_READ,
        &q_op_p->op.u.b_rw_list.open_ref);
    if (ret < 0)
    {
        dbpf_queued_op_free(q_op_p);
        gossip_ldebug(GOSSIP_TROVE_DEBUG,
                      "warning: useless error value: %d\n", ret);
        return ret;
    }
    q_op_p->op.u.b_rw_list.fd = q_op_p->op.u.b_rw_list.open_ref.fd;

    /*
      if we're doing an i/o write, remove the cached attribute for
      this handle if it's present
    */
    if (opcode == LIO_WRITE)
    {
        TROVE_object_ref ref = {handle, coll_id};
        gen_mutex_lock(&dbpf_attr_cache_mutex);
        dbpf_attr_cache_remove(ref);
        gen_mutex_unlock(&dbpf_attr_cache_mutex);
    }

#ifndef __PVFS2_TROVE_AIO_THREADED__
    /* non-threaded build: hand off to the normal dbpf op queue; the
     * service function converts and posts the aiocbs later */
    *out_op_id_p = dbpf_queued_op_queue(q_op_p);
#else
    op_p = &q_op_p->op;

    /*
      instead of queueing the op like most other trove operations,
      we're going to issue the system aio calls here to begin being
      serviced immediately.  We'll check progress in the
      aio_progress_notification callback method; this array is freed
      in dbpf-op.c:dbpf_queued_op_free
    */
    aiocb_p = (struct aiocb *)malloc(
        (AIOCB_ARRAY_SZ * sizeof(struct aiocb)));
    if (aiocb_p == NULL)
    {
        /* NOTE(review): q_op_p is not freed on this path (only the open
         * ref is dropped) -- looks like a leak; confirm against
         * dbpf_queued_op_free ownership rules */
        dbpf_open_cache_put(&q_op_p->op.u.b_rw_list.open_ref);
        return -TROVE_ENOMEM;
    }

    memset(aiocb_p, 0, (AIOCB_ARRAY_SZ * sizeof(struct aiocb)));
    for(i = 0; i < AIOCB_ARRAY_SZ; i++)
    {
        aiocb_p[i].aio_lio_opcode = LIO_NOP;
        aiocb_p[i].aio_sigevent.sigev_notify = SIGEV_NONE;
    }

    op_p->u.b_rw_list.aiocb_array_count = AIOCB_ARRAY_SZ;
    op_p->u.b_rw_list.aiocb_array = aiocb_p;
    op_p->u.b_rw_list.list_proc_state = LIST_PROC_INPROGRESS;

    /* convert listio arguments into aiocb structures */
    /* aiocb_inuse_count is in/out: capacity in, entries used out; a
     * return of 1 means the whole mem/stream list fit in this batch */
    aiocb_inuse_count = op_p->u.b_rw_list.aiocb_array_count;
    ret = dbpf_bstream_listio_convert(
        op_p->u.b_rw_list.fd,
        op_p->u.b_rw_list.opcode,
        op_p->u.b_rw_list.mem_offset_array,
        op_p->u.b_rw_list.mem_size_array,
        op_p->u.b_rw_list.mem_array_count,
        op_p->u.b_rw_list.stream_offset_array,
        op_p->u.b_rw_list.stream_size_array,
        op_p->u.b_rw_list.stream_array_count,
        aiocb_p,
        &aiocb_inuse_count,
        &op_p->u.b_rw_list.lio_state);

    if (ret == 1)
    {
        op_p->u.b_rw_list.list_proc_state = LIST_PROC_ALLCONVERTED;
    }

    /* completion is delivered by running aio_progress_notification on a
     * thread, with the queued op as its argument */
    op_p->u.b_rw_list.sigev.sigev_notify = SIGEV_THREAD;
    op_p->u.b_rw_list.sigev.sigev_notify_attributes = NULL;
    op_p->u.b_rw_list.sigev.sigev_notify_function =
        aio_progress_notification;
    op_p->u.b_rw_list.sigev.sigev_value.sival_ptr = (void *)q_op_p;

    /* mark unused with LIO_NOPs */
    for(i = aiocb_inuse_count;
        i < op_p->u.b_rw_list.aiocb_array_count; i++)
    {
        aiocb_p[i].aio_lio_opcode = LIO_NOP;
    }

    for(i = 0; i < aiocb_inuse_count; i++)
    {
        aiocb_ptr_array[i] = &aiocb_p[i];
    }

    assert(q_op_p == op_p->u.b_rw_list.sigev.sigev_value.sival_ptr);

    if (op_p->u.b_rw_list.list_proc_state == LIST_PROC_ALLCONVERTED)
    {
        op_p->u.b_rw_list.list_proc_state = LIST_PROC_ALLPOSTED;
    }

    /* mark in service and register the id before issuing, since the
     * completion callback may fire immediately after posting */
    gen_mutex_lock(&q_op_p->mutex);
    q_op_p->op.state = OP_IN_SERVICE;
    gen_mutex_unlock(&q_op_p->mutex);

    id_gen_fast_register(&q_op_p->op.id, q_op_p);
    *out_op_id_p = q_op_p->op.id;

    ret = issue_or_delay_io_operation(
        q_op_p, aiocb_ptr_array, aiocb_inuse_count,
        &op_p->u.b_rw_list.sigev, 0);

    if (ret)
    {
        return ret;
    }
#endif
    return 0;
}
//---
// Writes a dot rpf file for entry to output directory.
//
// NOTES:
//
// 1) All coordinates written out in AREA or edge to edge format.
// 2) Throws ossimException on error.
//---
void ossimRpfUtil::writeDotRpfFile( const ossimRpfToc* toc,
                                    const ossimRpfTocEntry* tocEntry,
                                    const ossimFilename& outputDir,
                                    ossim_uint32 entry)
{
   static const char MODULE[] = "ossimRpfUtil::writeDotRpfFile";

   if ( traceDebug() )
   {
      ossimNotify(ossimNotifyLevel_DEBUG)
         << MODULE << " entered..."
         << "\noutput directory: " << outputDir
         << "\nentry: " << entry << "\n";
   }

   // Validate inputs before any work.
   if ( !toc )
   {
      std::string errMsg = MODULE;
      errMsg += " ERROR toc pointer null!";
      throw ossimException(errMsg);
   }

   if ( !tocEntry )
   {
      std::string errMsg = MODULE;
      errMsg += " ERROR toc entry pointer null!";
      throw ossimException(errMsg);
   }

   // Get the file name.  If outputDir is a directory, derive the .rpf
   // name for this entry; otherwise treat outputDir itself as the file.
   ossimFilename outFile;
   if ( outputDir.expand().isDir() )
   {
      getDotRfpFilenameForEntry(outputDir, entry, outFile);
   }
   else
   {
      outFile = outputDir;
   }

   // Open the file to write.
   std::ofstream os;
   os.open(outFile.c_str(), ios::out);
   if ( os.good() == false )
   {
      std::string errMsg = MODULE;
      errMsg += "ERROR could not open: ";
      errMsg += outFile.string();
      throw ossimException(errMsg);
   }

   // Set up the output stream fix with full precision for ground points.
   os << setiosflags(std::ios_base::fixed) << setprecision(15);

   //---
   // Overall TOC entry bounds:
   //
   // Write the first line which is the bounding box of the entry in the form of:
   // "89.9850464205332,23.9892538162654|90.5085823882692,24.5002602501599|1"
   //      lr-lon            lr-lat           ul-lon           ul-lat
   //---
   ossimRefPtr<ossimImageGeometry> geom = tocEntry->getImageGeometry();
   if( geom.valid() == false)
   {
      std::string errMsg = "ERROR could not get geometry.";
      errMsg += outFile.string();
      throw ossimException(errMsg);
   }

   // Rectangle in image space.
   ossimIrect outputRect;
   tocEntry->getBoundingRect(outputRect);

   // bands:
   ossim_uint32 bands = tocEntry->getNumberOfBands();

   // scale:
   ossimDpt scale;
   tocEntry->getDecimalDegreesPerPixel(scale);
   // Half a pixel, used to expand center-of-pixel coords to edge (AREA).
   ossimDpt halfPix = scale / 2.0;

   ossimGpt llg;
   ossimGpt urg;
   geom->localToWorld(outputRect.ur(), urg);
   geom->localToWorld(outputRect.ll(), llg);

   if ( traceDebug() )
   {
      ossimNotify(ossimNotifyLevel_DEBUG)
         << "outputRect: " << outputRect
         << "\nbands: " << bands
         << "\nscale: " << scale
         << "\nllg:   " << llg
         << "\nurg:   " << urg
         << std::endl;
   }

   // Expand coordinates to edge:
   llg.lon -= halfPix.x;
   llg.lat -= halfPix.y;
   urg.lon += halfPix.x;
   urg.lat += halfPix.y;

   // Test for 360 degrees apart.
   checkLongitude(llg, urg);

   os << llg.lon << ","        // lower left longitude
      << llg.lat << "|"        // lower left latitude
      << urg.lon << ","        // upper right longitude
      << urg.lat << "|"        // upper right latitude
      << bands << "\n";

   // Frame loop:
   const ossim_int32 FRAMESIZE = 1536;
   const ossim_int32 ROWS =
      static_cast<ossim_int32>(tocEntry->getNumberOfFramesVertical());
   if( ROWS == 0 )
   {
      std::string errMsg = MODULE;
      errMsg += " ERROR no rows!";
      throw ossimException(errMsg);
   }
   const ossim_int32 COLS =
      static_cast<ossim_int32>(tocEntry->getNumberOfFramesHorizontal());
   if( COLS == 0 )
   {
      std::string errMsg = MODULE;
      errMsg += " ERROR no columns!";
      throw ossimException(errMsg);
   }

   // Set the initial lower left and upper right image points for
   // localToWorld call; advanced frame by frame in the loops below.
   //ossimDpt urd( ( (ROWS-1)*FRAMESIZE) -1, 0.0);
   //ossimDpt lld(0.0, (ROWS*FRAMESIZE)-1);
   ossimDpt urd( FRAMESIZE-1, 0.0);
   ossimDpt lld(0.0, FRAMESIZE-1);

   // Rows are walked from the bottom of the image upward.
   for (ossim_int32 row = ROWS-1; row > -1; --row)
   {
      for (ossim_int32 col = 0; col < COLS; ++col)
      {
         //---
         // Example format (only with 15 digit precision):
         // /data/spadac/rpf/world/cb01/ng467a1/0xslpk1a.i41|90.0448,24.3621|90.0598,24.3750
         //---

         // Get the path to the frame.
         ossimFilename path;
         toc->getRootDirectory(path);
         path = path.dirCat( toc->getRelativeFramePath(entry, row, col) );

         // Not sure if this is backwards:
         geom->localToWorld(urd, urg);
         geom->localToWorld(lld, llg);

         // Expand coordinates to edge:
         llg.lon -= halfPix.x;
         llg.lat -= halfPix.y;
         urg.lon += halfPix.x;
         urg.lat += halfPix.y;

         // Test for 360 degrees apart.
         checkLongitude(llg, urg);

         os << path.c_str() << "|"
            << llg.lon << ","        // lower left longitude
            << llg.lat << "|"        // lower left latitude
            << urg.lon << ","        // upper right longitude
            << urg.lat               // upper right latitude
            << "\n";

         if ( traceDebug() )
         {
            ossimNotify(ossimNotifyLevel_DEBUG)
               << "row[" << row << "]col[" << col << "]path: " << path
               << "\nlld: " << lld
               << "\nllg: " << llg
               << "\nurd: " << urd
               << "\nurg: " << urg
               << std::endl;
         }

         // Go to next col.
         urd.x += FRAMESIZE;
         lld.x += FRAMESIZE;

      } // End column loop.

      // Go to next row: step both points down a frame, reset columns.
      urd.y += FRAMESIZE;
      urd.x = FRAMESIZE-1;
      lld.y += FRAMESIZE;
      lld.x = 0;

   } // End row loop.

   // Close the file.
   os.close();

   ossimNotify(ossimNotifyLevel_DEBUG)
      << "wrote file: " << outFile << std::endl;

} // End: ossimRpfUtil::writeDotRpfFile
/** Adds a state machine into the list of machines that are being
 * actively serviced.
 *
 * \param smcb     state machine control block to post; must be non-NULL
 * \param op_id    optional output: the system operation id (set to the
 *                 registered id when the op defers, -1 when it runs to
 *                 termination immediately)
 * \param user_ptr opaque caller context stored on the state machine
 *
 * \return the job's error code on a started machine, -PVFS_EINVAL for a
 *         NULL smcb, or the state machine failure code.
 */
PVFS_error PINT_client_state_machine_post(
    PINT_smcb *smcb,
    PVFS_sys_op_id *op_id,
    void *user_ptr /* in */)
{
    PINT_sm_action sm_ret;
    PVFS_error ret = -PVFS_EINVAL;
    job_status_s js;
    int pvfs_sys_op;
    PINT_client_sm *sm_p;

    CLIENT_SM_ASSERT_INITIALIZED();

    /* bug fix: validate smcb before touching it -- the original code
     * called PINT_smcb_op(smcb), PINT_sm_frame(smcb, ...) and read
     * smcb->op before its NULL check, so the check could never help */
    if (!smcb)
    {
        return ret;
    }

    pvfs_sys_op = PINT_smcb_op(smcb);
    sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);

    /* record the operation id as a hint for event tracing */
    PVFS_hint_add_internal(&sm_p->hints, PINT_HINT_OP_ID,
                           sizeof(pvfs_sys_op), &pvfs_sys_op);

    PINT_EVENT_START(PINT_client_sys_event_id, pint_client_pid, NULL,
                     &sm_p->event_id,
                     PINT_HINT_GET_CLIENT_ID(sm_p->hints),
                     PINT_HINT_GET_RANK(sm_p->hints),
                     PINT_HINT_GET_REQUEST_ID(sm_p->hints),
                     PINT_HINT_GET_HANDLE(sm_p->hints),
                     pvfs_sys_op);

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PINT_client_state_machine_post smcb %p, op: %s\n",
                 smcb, PINT_client_get_name_str(smcb->op));

    memset(&js, 0, sizeof(js));

    /* save operation type; mark operation as unfinished */
    sm_p->user_ptr = user_ptr;

    gen_mutex_lock(&test_mutex);
    /*
      start state machine and continue advancing while we're getting
      immediate completions
    */
    sm_ret = PINT_state_machine_start(smcb, &js);
    assert(SM_ACTION_ISVALID(sm_ret));

    if(sm_ret < 0)
    {
        /* state machine code failed */
        gen_mutex_unlock(&test_mutex);
        return sm_ret;
    }

    if (PINT_smcb_complete(smcb))
    {
        /* the machine ran to termination synchronously */
        assert(sm_ret == SM_ACTION_TERMINATE);

        PINT_EVENT_END(PINT_client_sys_event_id, pint_client_pid, NULL,
                       sm_p->event_id, 0);

        /* bug fix: op_id is optional (guarded elsewhere with
         * "op_id ? ..."), so don't dereference it unconditionally */
        if (op_id)
        {
            *op_id = -1;
        }

        /* free the smcb and any other extra data allocated there */
        PINT_sys_release_smcb(smcb);

        gossip_debug(
            GOSSIP_CLIENT_DEBUG,
            "Posted %s (%llu) "
            "(ran to termination)(%d)\n",
            PINT_client_get_name_str(pvfs_sys_op),
            llu((op_id ? *op_id : -1)),
            js.error_code);
    }
    else
    {
        /* the machine deferred; register it so it can be tested later */
        assert(sm_ret == SM_ACTION_DEFERRED);

        PINT_id_gen_safe_register(&sm_p->sys_op_id, (void *)smcb);
        if (op_id)
        {
            *op_id = sm_p->sys_op_id;
        }

        gossip_debug(
            GOSSIP_CLIENT_DEBUG,
            "Posted %s (%lld) "
            "(waiting for test)(%d)\n",
            PINT_client_get_name_str(pvfs_sys_op),
            lld((op_id ? *op_id : -1)),
            ret);
    }
    gen_mutex_unlock(&test_mutex);
    return js.error_code;
}