/** Remove a specific file system object. */
PVFS_error PVFS_mgmt_remove_object(
    PVFS_object_ref object_ref,
    PVFS_credentials *credentials,
    PVFS_hint hints)
{
    PVFS_error ret = -PVFS_EINVAL, error = 0;
    PVFS_sys_op_id op_id;

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PVFS_mgmt_remove_object entered\n");

    ret = PVFS_imgmt_remove_object(object_ref, credentials, &op_id,
                                   NULL, hints);
    if (ret)
    {
        PVFS_perror_gossip("PVFS_imgmt_remove_object call", ret);
        error = ret;
    }
    else
    {
        ret = PVFS_mgmt_wait(op_id, "remove_object", &error);
        if (ret)
        {
            PVFS_perror_gossip("PVFS_mgmt_wait call", ret);
            error = ret;
        }
    }

    PINT_mgmt_release(op_id);
    return error;
}
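/*
 * Hedged usage sketch (not part of the library): a management tool might
 * remove a specific object by handle as shown below.  The remove_orphan()
 * helper name is hypothetical; PVFS_object_ref, PVFS_util_gen_credentials(),
 * and the blocking PVFS_mgmt_remove_object() wrapper above come from the
 * surrounding code and public headers.
 */
static int remove_orphan(PVFS_fs_id fs_id, PVFS_handle handle)
{
    PVFS_object_ref ref;
    PVFS_credentials creds;

    PVFS_util_gen_credentials(&creds);

    ref.fs_id = fs_id;
    ref.handle = handle;

    /* posts the remove state machine and waits for it to complete */
    return PVFS_mgmt_remove_object(ref, &creds, NULL);
}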
/** Modify the attributes of a single object. */
PVFS_error PVFS_sys_setattr(
    PVFS_object_ref ref,
    PVFS_sys_attr attr,
    const PVFS_credentials *credentials,
    PVFS_hint hints)
{
    PVFS_error ret = -PVFS_EINVAL, error = 0;
    PVFS_sys_op_id op_id;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "PVFS_sys_setattr entered\n");

    ret = PVFS_isys_setattr(ref, attr, credentials, &op_id, hints, NULL);
    if (ret)
    {
        PVFS_perror_gossip("PVFS_isys_setattr call", ret);
        error = ret;
    }
    else
    {
        ret = PVFS_sys_wait(op_id, "setattr", &error);
        if (ret)
        {
            PVFS_perror_gossip("PVFS_sys_wait call", ret);
            error = ret;
        }
    }

    PINT_sys_release(op_id);
    return error;
}
/** Read entries from a directory.
 *
 *  \param token opaque value used to track position in directory
 *         when more than one read is required.
 *  \param pvfs_dirent_incount maximum number of entries to read, if
 *         available, starting from token.
 */
PVFS_error PVFS_sys_readdir(
    PVFS_object_ref ref,
    PVFS_ds_position token,
    int32_t pvfs_dirent_incount,
    const PVFS_credentials *credentials,
    PVFS_sysresp_readdir *resp,
    PVFS_hint hints)
{
    PVFS_error ret = -PVFS_EINVAL, error = 0;
    PVFS_sys_op_id op_id;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "PVFS_sys_readdir entered\n");

    ret = PVFS_isys_readdir(ref, token, pvfs_dirent_incount,
                            credentials, resp, &op_id, hints, NULL);
    if (ret)
    {
        PVFS_perror_gossip("PVFS_isys_readdir call", ret);
        error = ret;
    }
    else
    {
        ret = PVFS_sys_wait(op_id, "readdir", &error);
        if (ret)
        {
            PVFS_perror_gossip("PVFS_sys_wait call", ret);
            error = ret;
        }
    }

    PINT_sys_release(op_id);
    return error;
}
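/*
 * Hedged usage sketch: reading an entire directory by repeatedly calling
 * PVFS_sys_readdir() and feeding the token from each response into the next
 * call.  This assumes the conventional PVFS_sysresp_readdir fields (token,
 * pvfs_dirent_outcount, dirent_array), the PVFS_READDIR_START starting
 * token, that the system interface allocates dirent_array for the caller to
 * free, and that the usual C and PVFS headers are already included.  The
 * list_directory() name and batch size of 32 are illustrative.
 */
static int list_directory(PVFS_object_ref dir_ref,
                          const PVFS_credentials *creds)
{
    PVFS_sysresp_readdir resp;
    PVFS_ds_position token = PVFS_READDIR_START;
    int ret, i;

    do
    {
        memset(&resp, 0, sizeof(resp));

        ret = PVFS_sys_readdir(dir_ref, token, 32, creds, &resp, NULL);
        if (ret < 0)
        {
            return ret;
        }

        for (i = 0; i < resp.pvfs_dirent_outcount; i++)
        {
            printf("%s\n", resp.dirent_array[i].d_name);
        }

        /* remember where this read stopped before releasing the entries */
        token = resp.token;
        if (resp.dirent_array)
        {
            free(resp.dirent_array);
        }
    } while (resp.pvfs_dirent_outcount == 32);

    return 0;
}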
static PINT_sm_action chdirent_update_directory_attr(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_server_op *s_op = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int ret = -1;
    job_id_t j_id;
    PVFS_object_attr tmp_attr, *tmp_attr_ptr = &tmp_attr;
    PVFS_object_attr *dspace_attr = NULL;
    PVFS_ds_attributes *ds_attr = NULL;

    if (js_p->error_code != UPDATE_DIR_ATTR_REQUIRED)
    {
        PVFS_perror_gossip("previous keyval write failed",
                           js_p->error_code);
        return SM_ACTION_COMPLETE;
    }

    memset(&tmp_attr, 0, sizeof(PVFS_object_attr));
    dspace_attr = &s_op->attr;
    dspace_attr->mask |= (PVFS_ATTR_COMMON_ATIME | PVFS_ATTR_COMMON_MTIME |
                          PVFS_ATTR_COMMON_CTIME);

    PVFS_object_attr_overwrite_setable(tmp_attr_ptr, dspace_attr);

    ds_attr = &(s_op->ds_attr);
    PVFS_object_attr_to_ds_attr(tmp_attr_ptr, ds_attr);

    ret = job_trove_dspace_setattr(
        s_op->req->u.chdirent.fs_id,
        s_op->req->u.chdirent.handle,
        ds_attr,
        TROVE_SYNC | 0,
        smcb, 0, js_p, &j_id, server_job_context, s_op->req->hints);

    return ret;
}
/** Continually test on a specific state machine until it completes.
 *
 *  This is what is called when PVFS_sys_wait or PVFS_mgmt_wait is used.
 */
PVFS_error PINT_client_wait_internal(
    PVFS_sys_op_id op_id,
    const char *in_op_str,
    int *out_error,
    const char *in_class_str)
{
    PVFS_error ret = -PVFS_EINVAL;
    PINT_smcb *smcb = NULL;
    PINT_client_sm *sm_p;

    if (in_op_str && out_error && in_class_str)
    {
        smcb = PINT_id_gen_safe_lookup(op_id);
        assert(smcb);

        do
        {
            /*
            gossip_debug(GOSSIP_CLIENT_DEBUG,
                         "%s: PVFS_i%s_%s calling test()\n",
                         __func__, in_class_str, in_op_str);
            */
            ret = PINT_client_state_machine_test(op_id, out_error);
        } while (!PINT_smcb_complete(smcb) && (ret == 0));

        sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);

        if (ret)
        {
            PVFS_perror_gossip("PINT_client_state_machine_test()", ret);
        }
        else
        {
            *out_error = sm_p->error_code;
        }
    }
    return ret;
}
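/*
 * Hedged reference sketch: the PVFS_sys_wait()/PVFS_mgmt_wait() calls used
 * by the blocking wrappers above are assumed to be thin macros over
 * PINT_client_wait_internal(), roughly along these lines.  These are
 * illustrative only (kept under #if 0); the real definitions live in the
 * client state machine header and may differ.
 */
#if 0
#define PVFS_sys_wait(op_id, in_op_str, out_error) \
    PINT_client_wait_internal(op_id, in_op_str, out_error, "sys")

#define PVFS_mgmt_wait(op_id, in_op_str, out_error) \
    PINT_client_wait_internal(op_id, in_op_str, out_error, "mgmt")
#endif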
int main(int argc, char *argv[])
{
    FILE *f;
    int ret;
    PVFS_fs_id curfs;
    PVFS_Request file_req;
    PVFS_Request mem_req;
    int count;
    char line[255];
    int size;
    PVFS_offset offset = 0;
    PVFS_credentials creds;
    PVFS_sysresp_create create_resp;
    PVFS_sysresp_io io_resp;
    PVFS_sysresp_lookup lookup_resp;
    PVFS_sys_attr attr;
    const char *filename = "test-accesses-file";
    int j = 0, i = 0;
    char *membuff;
    char errormsg[255];

    if (argc < 2)
    {
        fprintf(stderr, "test-accesses <sizes file>\n");
        exit(1);
    }

    f = fopen(argv[1], "r");
    if (!f)
    {
        fprintf(stderr, "error opening file\n");
        return errno;
    }

    if (fgets(line, 255, f) == NULL)
    {
        fprintf(stderr, "error in file\n");
        exit(1);
    }

    if (sscanf(line, "%d", &count) < 1)
    {
        fprintf(stderr, "error in file\n");
        exit(1);
    }

    ret = PVFS_util_init_defaults();
    if (ret < 0)
        goto error;

    ret = PVFS_util_get_default_fsid(&curfs);
    if (ret < 0)
        goto error;

    /* generate the credentials before they are used by any system
     * interface calls below */
    PVFS_util_gen_credentials(&creds);

    ret = PVFS_sys_lookup(curfs, "/", &creds, &lookup_resp, 0, NULL);
    if (ret < 0)
        goto error;

    attr.mask = PVFS_ATTR_SYS_ALL_SETABLE;
    attr.owner = creds.uid;
    attr.group = creds.gid;
    attr.perms = 0644;
    attr.atime = attr.ctime = attr.mtime = time(NULL);

    ret = PVFS_sys_create((char *)filename, lookup_resp.ref, attr,
                          &creds, NULL, &create_resp, NULL, NULL);
    if (ret < 0)
        goto error;

    for (; i < count; ++i)
    {
        if (fgets(line, 255, f) == NULL)
        {
            fprintf(stderr, "error in file\n");
            exit(1);
        }

        if (sscanf(line, "%d", &size) < 1)
        {
            fprintf(stderr, "error in file\n");
            exit(1);
        }

        membuff = malloc(size);
        assert(membuff);

        for (j = 0; j < size; ++j)
        {
            membuff[j] = j;
        }

        ret = PVFS_Request_contiguous(size, PVFS_BYTE, &file_req);
        if (ret < 0)
            goto error;

        ret = PVFS_Request_contiguous(size, PVFS_BYTE, &mem_req);
        if (ret < 0)
            goto error;

        printf("Performing Write: offset: %llu, size: %d\n",
               llu(offset), size);

        ret = PVFS_sys_io(create_resp.ref, file_req, offset, membuff,
                          mem_req, &creds, &io_resp, PVFS_IO_WRITE, NULL);
        if (ret < 0)
            goto error;

        printf("Write response: size: %llu\n",
               llu(io_resp.total_completed));

        offset += size;

        PVFS_Request_free(&mem_req);
        PVFS_Request_free(&file_req);
        free(membuff);
    }

    fclose(f);
    return 0;

error:
    fclose(f);
    PVFS_sys_remove((char *)filename, lookup_resp.ref, &creds, NULL);

    /* render a human readable error string for reporting */
    PVFS_strerror_r(ret, errormsg, sizeof(errormsg));
    PVFS_perror_gossip("test-accesses", ret);
    fprintf(stderr, "%s\n", errormsg);

    return PVFS_get_errno_mapping(ret);
}
/* given mount information, retrieve the server's configuration by
 * issuing a getconfig operation.  on successful response, we parse the
 * configuration and fill in the config object specified.
 *
 * returns 0 on success, -errno on error
 */
int PVFS_mgmt_get_config(
    const PVFS_fs_id *fsid,
    PVFS_BMI_addr_t *addr,
    char *fs_buf,
    int fs_buf_size)
{
    int ret = -PVFS_EINVAL;
    PINT_smcb *smcb = NULL;
    PINT_client_sm *sm_p = NULL;
    PVFS_error error = 0;
    PVFS_credentials creds;
    struct filesystem_configuration_s *cur_fs = NULL;
    PVFS_sys_op_id op_id;
    struct server_configuration_s *config = NULL;
    struct PVFS_sys_mntent mntent;
    int server_type = 0;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "PVFS_mgmt_get_config entered\n");

    PVFS_util_gen_credentials(&creds);

    PINT_smcb_alloc(&smcb, PVFS_SERVER_GET_CONFIG,
                    sizeof(struct PINT_client_sm),
                    client_op_state_get_machine,
                    client_state_machine_terminate,
                    pint_client_sm_context);
    if (smcb == NULL)
    {
        return -PVFS_ENOMEM;
    }
    sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);

    sm_p->u.get_config.persist_config_buffers = 1;

    PINT_init_msgarray_params(sm_p, *fsid);

    PINT_init_sysint_credentials(sm_p->cred_p, &creds);

    config = PINT_get_server_config_struct(*fsid);

    mntent.the_pvfs_config_server =
        (char *)PINT_cached_config_map_addr(*fsid, *addr, &server_type);

    PINT_put_server_config_struct(config);

    cur_fs = PINT_config_find_fs_id(config, *fsid);

    mntent.encoding = cur_fs->encoding;
    mntent.flowproto = cur_fs->flowproto;
    mntent.fs_id = *fsid;
    mntent.pvfs_fs_name = cur_fs->file_system_name;

    sm_p->u.get_config.config = config;

    sm_p->msgarray_op.msgpair.enc_type = cur_fs->encoding;

    sm_p->u.get_config.mntent = &mntent;

    PINT_msgpair_init(&sm_p->msgarray_op);

    ret = PINT_client_state_machine_post(smcb, &op_id, NULL);
    if (ret)
    {
        PVFS_perror_gossip("PINT_client_state_machine_post call", ret);
        error = ret;
    }
    else
    {
        ret = PVFS_mgmt_wait(op_id, "X-get_config", &error);
        if (ret)
        {
            PVFS_perror_gossip("PVFS_mgmt_wait call", ret);
            error = ret;
        }
    }

    if (error)
    {
        goto exit_path;
    }

    gossip_debug(GOSSIP_CLIENT_DEBUG, "PVFS_mgmt_get_config completed\n");

    /* make sure strings will be null terminated after strncpy */
    fs_buf[fs_buf_size - 1] = '\0';

    /* The following copies the retrieved configuration buffers
     * into the return buffers */
    strncpy(fs_buf, sm_p->u.get_config.fs_config_buf, (fs_buf_size - 1));

exit_path:

    if (sm_p && sm_p->u.get_config.persist_config_buffers)
    {
        free(sm_p->u.get_config.fs_config_buf);
        sm_p->u.get_config.fs_config_buf = NULL;
    }

    PINT_mgmt_release(op_id);
    return error;
}
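/*
 * Hedged usage sketch: a management tool might fetch the file system
 * configuration text from one server like this.  The dump_fs_config()
 * helper name and the 64 KiB buffer size are hypothetical, and the caller
 * is assumed to already hold a server address (for example from the cached
 * configuration or a server list); PVFS_mgmt_get_config() is the function
 * above and PVFS_perror() is the public error-reporting helper.
 */
static int dump_fs_config(PVFS_fs_id fs_id, PVFS_BMI_addr_t server_addr)
{
    int fs_buf_size = 64 * 1024;   /* assumed large enough for fs.conf */
    char *fs_buf = malloc(fs_buf_size);
    int ret;

    if (!fs_buf)
    {
        return -PVFS_ENOMEM;
    }

    ret = PVFS_mgmt_get_config(&fs_id, &server_addr, fs_buf, fs_buf_size);
    if (ret == 0)
    {
        /* fs_buf now holds the null terminated configuration text */
        printf("%s", fs_buf);
    }
    else
    {
        PVFS_perror("PVFS_mgmt_get_config", ret);
    }

    free(fs_buf);
    return ret;
}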
static PINT_sm_action msgpairarray_completion_fn(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    PINT_sm_msgarray_op *mop = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int ret = -PVFS_EINVAL, i = 0;
    int need_retry = 0;
    struct PINT_decoded_msg decoded_resp;
    const char *server_string = NULL;
    int server_type;

    /* response structure (decoded) */
    struct PVFS_server_resp *resp_p = NULL;

    js_p->error_code = 0;

    gossip_debug(GOSSIP_MSGPAIR_DEBUG, "(%p) msgpairarray state: "
                 "completion_fn\n", smcb);

    for (i = 0; i < mop->count; i++)
    {
        PINT_sm_msgpair_state *msg_p = &mop->msgarray[i];
        assert(msg_p);

        /*
         * Can take multiple trips through this function as we retry
         * ones that failed.
         */
        if (msg_p->complete)
            continue;

        if (msg_p->op_status != 0)
        {
            char s[1024];

            PVFS_strerror_r(msg_p->op_status, s, sizeof(s));
            server_string = PINT_cached_config_map_addr(
                msg_p->fs_id, msg_p->svr_addr, &server_type);
            if (!server_string)
            {
                server_string = BMI_addr_rev_lookup(msg_p->svr_addr);
            }
            gossip_err("Warning: msgpair failed to %s, will retry: %s\n",
                       server_string, s);
            ++need_retry;
            continue;
        }

        ret = PINT_serv_decode_resp(msg_p->fs_id,
                                    msg_p->encoded_resp_p,
                                    &decoded_resp,
                                    &msg_p->svr_addr,
                                    msg_p->recv_status.actual_size,
                                    &resp_p);
        if (ret != 0)
        {
            PVFS_perror_gossip("msgpairarray decode error", ret);
            msg_p->op_status = ret;
        }
        else
        {
            /* if we've made it this far, the server response status is
             * meaningful, so we save it.
             */
            msg_p->op_status = resp_p->status;
        }

        /* NOTE: we call the function associated with each message,
         * not just the one from the first array element.  so
         * there could in theory be different functions for each
         * message (to handle different types of messages all in
         * the same array).
         */
        if (msg_p->comp_fn != NULL)
        {
            /* If we call the completion function, store the result on
             * a per message pair basis.  Also store some non-zero
             * (failure) value in js_p->error_code if we see one.
             */
            msg_p->op_status = msg_p->comp_fn(smcb, resp_p, i);
            if (msg_p->op_status != 0)
            {
                js_p->error_code = msg_p->op_status;
            }
            /* even if we see a failure, continue to process with the
             * completion function. -- RobR
             */
        }
        else if (resp_p->status != 0)
        {
            /* no comp_fn specified and status non-zero */
            gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                         "notice: msgpairarray_complete: error %d "
                         "from server %d\n", resp_p->status, i);

            /* save a non-zero status to return if we see one */
            js_p->error_code = resp_p->status;

            /* If we don't have a completion function, there is no point
             * in continuing to process after seeing a failure.
             */
            if (js_p->error_code)
            {
                break;
            }
        }

        /* free all the resources that we used to send and receive. */
        ret = PINT_serv_free_msgpair_resources(&msg_p->encoded_req,
                                               msg_p->encoded_resp_p,
                                               &decoded_resp,
                                               &msg_p->svr_addr,
                                               msg_p->max_resp_sz);
        if (ret)
        {
            PVFS_perror_gossip("Failed to free msgpair resources", ret);
            js_p->error_code = ret;
            return SM_ACTION_COMPLETE;
        }
        msg_p->encoded_resp_p = NULL;
        msg_p->max_resp_sz = 0;

        /* mark that this msgpair has been completed and should not be
         * retried in the case of possible future retries
         */
        msg_p->complete = 1;
        gossip_debug(GOSSIP_MSGPAIR_DEBUG, "%s: sm %p msgpair %d "
                     "marked complete\n", __func__, smcb, i);
    }

    if (need_retry)
    {
        /*
         * We only retry msgpairs that are not yet complete.  Factor
         * of two since they are pairs.  If over the count, do not
         * retry, just return one of the error codes.
         */
        mop->params.comp_ct = 0;
        js_p->error_code = 0;

        for (i = 0; i < mop->count; i++)
        {
            PINT_sm_msgpair_state *msg_p = &mop->msgarray[i];

            if (msg_p->complete)
                continue;

            if (msg_p->retry_flag == PVFS_MSGPAIR_RETRY &&
                PVFS_ERROR_CLASS(-msg_p->op_status) == PVFS_ERROR_BMI &&
                msg_p->retry_count < mop->params.retry_limit)
            {
                ++msg_p->retry_count;
                mop->params.comp_ct += 2;
                gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                             "*** %s: msgpair %d failed, retry %d\n",
                             __func__, i, msg_p->retry_count);

                if (msg_p->op_status == -BMI_ECANCEL)
                {
                    /* if the error code indicates cancel, then skip the
                     * delay.  We have probably already been waiting a
                     * while
                     */
                    gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                                 "*** %s: msgpair skipping retry delay.\n",
                                 __func__);
                    js_p->error_code = MSGPAIRS_RETRY_NODELAY;
                }
                else
                {
                    gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                                 "*** %s: msgpair retrying after delay.\n",
                                 __func__);
                    js_p->error_code = MSGPAIRS_RETRY;
                }
            }
            else
            {
                char s[1024];

                server_string = PINT_cached_config_map_addr(
                    msg_p->fs_id, msg_p->svr_addr, &server_type);
                if (!server_string)
                {
                    server_string = "[UNKNOWN]";
                }
                PVFS_strerror_r(msg_p->op_status, s, sizeof(s));
                gossip_err_unless_quiet(
                    "*** %s: msgpair to server %s failed: %s\n",
                    __func__, server_string, s);

                if (msg_p->retry_flag != PVFS_MSGPAIR_RETRY)
                {
                    gossip_err_unless_quiet("*** No retries requested.\n");
                }
                else if (PVFS_ERROR_CLASS(-msg_p->op_status) !=
                         PVFS_ERROR_BMI)
                {
                    gossip_err_unless_quiet("*** Non-BMI failure.\n");
                }
                else
                {
                    gossip_err_unless_quiet("*** Out of retries.\n");
                }

                if (js_p->error_code == 0)
                    js_p->error_code = msg_p->op_status;
            }
        }
    }

    return SM_ACTION_COMPLETE;
}
/* msgpairarray_post()
 *
 * The following elements of the PINT_sm_msgpair_state
 * should be valid prior to this state (for each msgpair in array):
 * - req (unencoded request)
 * - svr_addr of each element in msg array
 *
 * This state performs the following operations for each msgpair,
 * one at a time:
 * (1) encodes request
 * (2) calculates maximum response size
 * (3) allocates BMI memory for response data (encoded)
 * (4) gets a session tag for the pair of messages
 * (5) posts the receive of the response
 * (6) posts the send of the request
 * (7) stores job ids for later matching
 *
 */
static PINT_sm_action msgpairarray_post(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    PINT_sm_msgarray_op *mop = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int ret = -PVFS_EINVAL, i = 0, tmp = 0;
    struct server_configuration_s *server_config = NULL;
    PVFS_msg_tag_t session_tag;
    PINT_sm_msgpair_state *msg_p = NULL;
    struct filesystem_configuration_s *cur_fs = NULL;
    int must_loop_encodings = 0;
    int local_enc_and_alloc = 0;

    gossip_debug(GOSSIP_MSGPAIR_DEBUG, "%s: sm %p "
                 "%d total message(s) with %d incomplete\n",
                 __func__, smcb, mop->count * 2, mop->params.comp_ct);

    js_p->error_code = 0;

    assert(mop->count > 0);
    assert(mop->params.comp_ct >= 2);

    for (i = 0; i < mop->count; i++)
    {
        msg_p = &mop->msgarray[i];
        assert(msg_p);

        /* here we skip over the msgs that have already completed in
         * the case of being in the retry code path when it's ok
         */
        if (msg_p->complete)
        {
            continue;
        }
        msg_p->op_status = 0;

        if (msg_p->encoded_resp_p == NULL)
        {
            if (msg_p->fs_id != PVFS_FS_ID_NULL)
            {
                server_config = PINT_server_config_mgr_get_config(
                    msg_p->fs_id);
                assert(server_config);

                cur_fs = PINT_config_find_fs_id(server_config,
                                                msg_p->fs_id);
                PINT_server_config_mgr_put_config(server_config);
                assert(cur_fs);

                msg_p->enc_type = cur_fs->encoding;
            }

            if (!ENCODING_IS_VALID(msg_p->enc_type))
            {
                PRINT_ENCODING_ERROR("valid", msg_p->enc_type);
                must_loop_encodings = 1;
                msg_p->enc_type = (ENCODING_INVALID_MIN + 1);
            }
            else if (!ENCODING_IS_SUPPORTED(msg_p->enc_type))
            {
                PRINT_ENCODING_ERROR("supported", msg_p->enc_type);
                must_loop_encodings = 1;
                msg_p->enc_type = ENCODING_SUPPORTED_MIN;
            }

          try_next_encoding:
            assert(ENCODING_IS_VALID(msg_p->enc_type));
            ret = PINT_encode(&msg_p->req, PINT_ENCODE_REQ,
                              &msg_p->encoded_req, msg_p->svr_addr,
                              msg_p->enc_type);
            if (ret != 0)
            {
                if (must_loop_encodings)
                {
                    gossip_debug(GOSSIP_MSGPAIR_DEBUG, "Looping through "
                                 "encodings [%d/%d]\n", msg_p->enc_type,
                                 ENCODING_INVALID_MAX);

                    msg_p->enc_type++;
                    if (ENCODING_IS_VALID(msg_p->enc_type))
                    {
                        goto try_next_encoding;
                    }
                }
                gossip_lerr("msgpairarray_post: PINT_encode failed\n");
                js_p->error_code = ret;
                return SM_ACTION_COMPLETE;
            }

            /* calculate max response msg size and allocate space */
            msg_p->max_resp_sz = PINT_encode_calc_max_size(
                PINT_ENCODE_RESP, msg_p->req.op, msg_p->enc_type);
            msg_p->encoded_resp_p = BMI_memalloc(msg_p->svr_addr,
                                                 msg_p->max_resp_sz,
                                                 BMI_RECV);
            if (msg_p->encoded_resp_p == NULL)
            {
                js_p->error_code = -PVFS_ENOMEM;
                return SM_ACTION_COMPLETE;
            }
            local_enc_and_alloc = 1;
        }

        session_tag = PINT_util_get_next_tag();

        gossip_debug(GOSSIP_MSGPAIR_DEBUG, "%s: sm %p msgpair %d: "
                     "posting recv\n", __func__, smcb, i);

        /* post receive of response; job_id stored in recv_id */
        ret = job_bmi_recv(msg_p->svr_addr,
                           msg_p->encoded_resp_p,
                           msg_p->max_resp_sz,
                           session_tag,
                           BMI_PRE_ALLOC,
                           smcb,
                           i,
                           &msg_p->recv_status,
                           &msg_p->recv_id,
                           mop->params.job_context,
                           mop->params.job_timeout,
                           msg_p->req.hints);
        if (ret == 0)
        {
            /* perform a quick test to see if the recv failed before
             * posting the send; if it reports an error quickly then we
             * can save the confusion of sending a request for which we
             * can't recv a response
             */
            ret = job_test(msg_p->recv_id, &tmp, NULL,
                           &msg_p->recv_status, 0,
                           mop->params.job_context);
        }

        if ((ret < 0) || (ret == 1))
        {
            /* it is impossible for this recv to complete at this point
             * without errors; we haven't sent the request yet!
             */
            assert(ret < 0 || msg_p->recv_status.error_code != 0);

            if (ret < 0)
            {
                PVFS_perror_gossip("Post of receive failed", ret);
            }
            else
            {
                PVFS_perror_gossip("Receive immediately failed",
                                   msg_p->recv_status.error_code);
            }

            msg_p->recv_id = 0;
            msg_p->send_id = 0;

            /* mark send as bad too and don't post it */
            msg_p->send_status.error_code = msg_p->recv_status.error_code;
            msg_p->op_status = msg_p->recv_status.error_code;

            mop->params.comp_ct -= 2;

            if (local_enc_and_alloc)
            {
                PINT_encode_release(&msg_p->encoded_req, PINT_ENCODE_REQ);
                BMI_memfree(msg_p->svr_addr, msg_p->encoded_resp_p,
                            msg_p->max_resp_sz, BMI_RECV);
                msg_p->encoded_resp_p = NULL;
                local_enc_and_alloc = 0;
            }

            /* continue to send other array entries if possible */
            continue;
        }

        /* if we reach here, the recv has been posted without failure,
         * but has not completed yet
         */
        assert(ret == 0);

        gossip_debug(GOSSIP_MSGPAIR_DEBUG, "%s: sm %p msgpair %d: "
                     "posting send\n", __func__, smcb, i);

        /* post send of request; job_id stored in send_id */
        ret = job_bmi_send_list(msg_p->encoded_req.dest,
                                msg_p->encoded_req.buffer_list,
                                msg_p->encoded_req.size_list,
                                msg_p->encoded_req.list_count,
                                msg_p->encoded_req.total_size,
                                session_tag,
                                msg_p->encoded_req.buffer_type,
                                1,
                                smcb,
                                mop->count + i,
                                &msg_p->send_status,
                                &msg_p->send_id,
                                mop->params.job_context,
                                mop->params.job_timeout,
                                msg_p->req.hints);
        if ((ret < 0) ||
            ((ret == 1) && (msg_p->send_status.error_code != 0)))
        {
            if (ret < 0)
            {
                PVFS_perror_gossip("Post of send failed", ret);
            }
            else
            {
                PVFS_perror_gossip("Send immediately failed",
                                   msg_p->send_status.error_code);
            }

            gossip_err_unless_quiet("Send error: cancelling recv.\n");
            job_bmi_cancel(msg_p->recv_id, mop->params.job_context);

            /* we still have to wait for recv completion, so just
             * decrement comp_ct by one and keep going
             */
            msg_p->op_status = msg_p->send_status.error_code;
            msg_p->send_id = 0;
            mop->params.comp_ct--;
        }
        else if (ret == 1)
        {
            /* immediate completion */
            msg_p->send_id = 0;

            /* decrement our count, since send is already done. */
            mop->params.comp_ct--;
        }
        /* else: successful post, no immediate completion */
    }

    if (mop->params.comp_ct == 0)
    {
        /* everything is completed already (could happen in some
         * failure cases); jump straight to final completion function.
         */
        js_p->error_code = MSGPAIRS_COMPLETE;
        return SM_ACTION_COMPLETE;
    }

    /* we are still waiting on operations to complete, next state
     * transition will handle them
     */
    return SM_ACTION_DEFERRED;
}
/** Checks completion of one or more state machines.
 *
 *  If none of the state machines listed in op_id_array have completed,
 *  then progress is made on all posted state machines.
 */
PVFS_error PINT_client_state_machine_testsome(
    PVFS_sys_op_id *op_id_array,
    int *op_count, /* in/out */
    void **user_ptr_array,
    int *error_code_array,
    int timeout_ms)
{
    PVFS_error ret = -PVFS_EINVAL;
    int i = 0, limit = 0, job_count = 0;
    PINT_smcb *smcb = NULL;
    job_id_t job_id_array[MAX_RETURNED_JOBS];
    job_status_s job_status_array[MAX_RETURNED_JOBS];
    void *smcb_p_array[MAX_RETURNED_JOBS] = {NULL};

    gen_mutex_lock(&test_mutex);
    CLIENT_SM_ASSERT_INITIALIZED();

    if (!op_id_array || !op_count || !error_code_array)
    {
        PVFS_perror_gossip("PINT_client_state_machine_testsome", ret);
        gen_mutex_unlock(&test_mutex);
        return ret;
    }

    if ((*op_count < 1) || (*op_count > MAX_RETURNED_JOBS))
    {
        PVFS_perror_gossip("testsome() got invalid op_count", ret);
        gen_mutex_unlock(&test_mutex);
        return ret;
    }

    job_count = MAX_RETURNED_JOBS;
    limit = *op_count;
    *op_count = 0;

    /* check for requests completed previously */
    ret = completion_list_retrieve_completed(
        op_id_array, user_ptr_array, error_code_array, limit, op_count);

    /* return them if found */
    if ((ret == 0) && (*op_count > 0))
    {
        gen_mutex_unlock(&test_mutex);
        return ret;
    }

    /* see if there are requests ready to make progress */
    ret = job_testcontext(job_id_array,
                          &job_count, /* in/out parameter */
                          smcb_p_array,
                          job_status_array,
                          timeout_ms,
                          pint_client_sm_context);
    assert(ret > -1);

    /* do as much as we can on every job that has completed */
    for (i = 0; i < job_count; i++)
    {
        smcb = (PINT_smcb *)smcb_p_array[i];
        assert(smcb);

        if (!PINT_smcb_complete(smcb))
        {
            ret = PINT_state_machine_continue(smcb, &job_status_array[i]);

            /* (ret < 0) indicates a problem from the job system
             * itself; the return value of the underlying operation is
             * kept in the job status structure.
             */
            if (ret != SM_ACTION_DEFERRED &&
                ret != SM_ACTION_TERMINATE)
            {
                continue;
            }
        }
    }

    /* terminated SMs have added themselves to the completion list */
    ret = completion_list_retrieve_completed(
        op_id_array, user_ptr_array, error_code_array, limit, op_count);

    gen_mutex_unlock(&test_mutex);
    return(ret);
}
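/*
 * Hedged usage sketch: draining a known number of previously posted
 * operations through the testsome() call above.  The drain_completions()
 * name, the 10 ms timeout, and the assumption that a NULL user_ptr_array is
 * acceptable are illustrative; op_id_array is treated as an output buffer of
 * capacity *op_count, per the code above, and each returned id is assumed to
 * still require a PINT_sys_release()/PINT_mgmt_release() by the caller.
 */
static int drain_completions(int expected)
{
    PVFS_sys_op_id done[MAX_RETURNED_JOBS];
    int error_codes[MAX_RETURNED_JOBS];
    int completed = 0;

    while (completed < expected)
    {
        int count = MAX_RETURNED_JOBS; /* in: capacity, out: number returned */
        int ret = PINT_client_state_machine_testsome(
            done, &count, NULL, error_codes, 10 /* ms */);
        if (ret < 0)
        {
            return ret;
        }

        /* done[0..count-1] and error_codes[0..count-1] describe the
         * operations that finished during this pass */
        completed += count;
    }
    return 0;
}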
/** Cancels in progress I/O operations.
 *
 *  \return 0 on success, -PVFS_error on failure.
 */
PVFS_error PINT_client_io_cancel(PVFS_sys_op_id id)
{
    int i = 0;
    PVFS_error ret = -PVFS_EINVAL;
    PINT_smcb *smcb = NULL;
    PINT_client_sm *sm_p = NULL;
    PINT_client_sm *sm_base_p = NULL;

    gossip_debug(GOSSIP_CLIENT_DEBUG,
                 "PINT_client_io_cancel id %lld\n", lld(id));

    smcb = PINT_id_gen_safe_lookup(id);
    if (!smcb)
    {
        /* if we can't find it, it may have already completed */
        return 0;
    }
    sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    if (!sm_p)
    {
        /* if we can't find it, it may have already completed */
        return 0;
    }

    /* we can't cancel any arbitrary operation */
    assert(PINT_smcb_op(smcb) == PVFS_SYS_IO);

    if (PINT_smcb_complete(smcb))
    {
        /* op already completed; nothing to cancel. */
        return 0;
    }

    /* We also don't cancel small I/O operations as posted by
     * sys-small-io.sm.  Check the corresponding flag.  We have
     * to jump to the base frame rather than the current frame for this
     * information because small-io may have pushed a msgpairarray.
     */
    sm_base_p = PINT_sm_frame(smcb, (-(smcb->frame_count - 1)));
    if (sm_base_p->u.io.small_io)
    {
        gossip_debug(GOSSIP_CANCEL_DEBUG,
                     "skipping cancellation of small I/O operation.\n");
        return(0);
    }

    /* if we fall to here, the I/O operation is still in flight */

    /* first, set a flag informing the sys_io state machine that the
     * operation has been cancelled so it doesn't post any new jobs
     */
    PINT_smcb_set_cancelled(smcb);

    /* don't return an error if nothing is cancelled, because everything
     * may have completed already
     */
    ret = 0;

    /* now run through and cancel the outstanding jobs */
    for (i = 0; i < sm_p->u.io.context_count; i++)
    {
        PINT_client_io_ctx *cur_ctx = &sm_p->u.io.contexts[i];
        assert(cur_ctx);

        if (cur_ctx->msg_send_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Send "
                         "(Request)\n", i);

            ret = job_bmi_cancel(cur_ctx->msg.send_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->msg_recv_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Recv "
                         "(Response)\n", i);

            ret = job_bmi_cancel(cur_ctx->msg.recv_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->flow_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG,
                         "[%d] Posting cancellation of type: FLOW\n", i);

            ret = job_flow_cancel(cur_ctx->flow_job_id,
                                  pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_flow_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }

        if (cur_ctx->write_ack_in_progress)
        {
            gossip_debug(GOSSIP_CANCEL_DEBUG, "[%d] Posting "
                         "cancellation of type: BMI Recv "
                         "(Write Ack)\n", i);

            ret = job_bmi_cancel(cur_ctx->write_ack.recv_id,
                                 pint_client_sm_context);
            if (ret < 0)
            {
                PVFS_perror_gossip("job_bmi_cancel failed", ret);
                break;
            }
            sm_p->u.io.total_cancellations_remaining++;
        }
    }

    gossip_debug(GOSSIP_CANCEL_DEBUG, "(%p) Total cancellations "
                 "remaining: %d\n", sm_p,
                 sm_p->u.io.total_cancellations_remaining);

    return ret;
}