/* PINT_serv_msgpair_array_resolve_addrs()
 *
 * fills in BMI address of server for each entry in the msgpair array,
 * based on the handle and fsid
 *
 * returns 0 on success, -PVFS_error on failure
 */
int PINT_serv_msgpairarray_resolve_addrs(PINT_sm_msgarray_op *mop)
{
    int idx;
    int ret = -PVFS_EINVAL;

    /* guard: nothing to resolve -> report invalid argument */
    if ((mop->msgarray == NULL) || (mop->count <= 0))
    {
        return ret;
    }

    for (idx = 0; idx < mop->count; idx++)
    {
        PINT_sm_msgpair_state *pair = &mop->msgarray[idx];

        assert(pair);

        /* look up the BMI address of the server owning this handle */
        ret = PINT_cached_config_map_to_server(
            &pair->svr_addr, pair->handle, pair->fs_id);
        if (ret != 0)
        {
            gossip_err("Failed to map server address to handle\n");
            break;
        }

        gossip_debug(GOSSIP_MSGPAIR_DEBUG,
                     " mapped handle %llu to server %lld\n",
                     llu(pair->handle), lld(pair->svr_addr));
    }
    return ret;
}
/* readdir_msg_setup_msgpair()
 *
 * Builds the msgpair carrying a READDIR request to the server that owns
 * the directory object, then pushes the msgarray frame for the nested
 * msgpair state machine.
 *
 * BUGFIX: the debug statement previously read the position token and
 * dirent limit via sm_p->readdir.* while the request itself is filled
 * from sm_p->u.readdir.*; both now consistently use the union member
 * that the request actually sends.
 */
static PINT_sm_action readdir_msg_setup_msgpair(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    int ret = -PVFS_EINVAL;
    PINT_sm_msgpair_state *msg_p = NULL;

    gossip_debug(GOSSIP_CLIENT_DEBUG, "readdir state: "
                 "readdir_msg_setup_msgpair\n");

    /* a prior state already failed; propagate without doing any work */
    if (js_p->error_code)
    {
        return SM_ACTION_COMPLETE;
    }
    js_p->error_code = 0;

    gossip_debug(GOSSIP_READDIR_DEBUG," readdir: posting readdir req\n");
    gossip_debug(
        GOSSIP_READDIR_DEBUG, "%llu|%d | token is %llu | limit is %d\n",
        llu(sm_p->object_ref.handle),
        sm_p->object_ref.fs_id,
        llu(sm_p->u.readdir.pos_token),     /* was sm_p->readdir.pos_token */
        sm_p->u.readdir.dirent_limit);      /* was sm_p->readdir.dirent_limit */

    PINT_msgpair_init(&sm_p->msgarray_op);
    msg_p = &sm_p->msgarray_op.msgpair;

    PINT_SERVREQ_READDIR_FILL(
        msg_p->req,
        *sm_p->cred_p,
        sm_p->object_ref.fs_id,
        sm_p->object_ref.handle,
        sm_p->u.readdir.pos_token,
        sm_p->u.readdir.dirent_limit,
        sm_p->hints);

    msg_p->fs_id = sm_p->object_ref.fs_id;
    msg_p->handle = sm_p->object_ref.handle;
    msg_p->retry_flag = PVFS_MSGPAIR_RETRY;
    msg_p->comp_fn = readdir_msg_comp_fn;

    /* resolve the server address; on failure record the error so the
     * state machine can bail out, but still push the frame as before */
    ret = PINT_cached_config_map_to_server(
        &msg_p->svr_addr,
        sm_p->object_ref.handle,
        sm_p->object_ref.fs_id);
    if (ret)
    {
        gossip_err("Failed to map meta server address\n");
        js_p->error_code = ret;
    }

    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
    return SM_ACTION_COMPLETE;
}
/* setattr_msg_setup_msgpair()
 *
 * Prepares the single msgpair used to send a SETATTR request to the
 * server that owns the target object, then pushes the msgarray frame
 * for the nested msgpair state machine.
 */
static PINT_sm_action setattr_msg_setup_msgpair(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    PINT_sm_msgpair_state *pair;
    PVFS_ds_type obj_type;
    int ret;

    js_p->error_code = 0;

    gossip_debug(GOSSIP_CLIENT_DEBUG," setattr: posting setattr req\n");

    PINT_msgpair_init(&sm_p->msgarray_op);
    pair = &sm_p->msgarray_op.msgpair;

    /* only pass a real object type if the caller's mask includes one */
    if (sm_p->u.setattr.sys_attr.mask & PVFS_ATTR_SYS_TYPE)
    {
        obj_type = sm_p->u.setattr.sys_attr.objtype;
    }
    else
    {
        obj_type = PVFS_TYPE_NONE;
    }

    PINT_SERVREQ_SETATTR_FILL(
        pair->req,
        *sm_p->cred_p,
        sm_p->object_ref.fs_id,
        sm_p->object_ref.handle,
        obj_type,
        sm_p->u.setattr.sys_attr,
        0,
        sm_p->hints);

    /* clients should not be able to mess with dfile and distribution
     * information here. Those parameters should only be set at create time.
     * Maybe at some point we'll have a utility to adjust those attributes. At
     * this time if they somehow get changed we'll have garbage on disk */

    pair->fs_id = sm_p->object_ref.fs_id;
    pair->handle = sm_p->object_ref.handle;
    pair->retry_flag = PVFS_MSGPAIR_RETRY;
    pair->comp_fn = setattr_msg_comp_fn;

    gossip_debug(
        GOSSIP_CLIENT_DEBUG, "setattr attr mask sent to server: 0x%x\n",
        (int)sm_p->u.setattr.sys_attr.mask);

    /* resolve the owning server's BMI address; record any failure in
     * the job status so the state machine can react */
    ret = PINT_cached_config_map_to_server(
        &pair->svr_addr, pair->handle, pair->fs_id);
    if (ret)
    {
        gossip_err("Failed to map meta server address\n");
        js_p->error_code = ret;
    }

    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
    return SM_ACTION_COMPLETE;
}
/* mgmt_remove_object_setup_msgpair()
 *
 * Fills in the msgpair for a MGMT_REMOVE_OBJECT request against the
 * target object and pushes the msgarray frame so the nested msgpair
 * machine can post it.
 */
static PINT_sm_action mgmt_remove_object_setup_msgpair(
    struct PINT_smcb *smcb, job_status_s *js_p)
{
    struct PINT_client_sm *sm_p = PINT_sm_frame(smcb, PINT_FRAME_CURRENT);
    PINT_sm_msgpair_state *pair;
    int ret;

    js_p->error_code = 0;

    PINT_msgpair_init(&sm_p->msgarray_op);
    pair = &sm_p->msgarray_op.msgpair;

    PINT_SERVREQ_MGMT_REMOVE_OBJECT_FILL(
        pair->req,
        *sm_p->cred_p,
        sm_p->object_ref.fs_id,
        sm_p->object_ref.handle,
        sm_p->hints);

    gossip_debug(GOSSIP_REMOVE_DEBUG, "- doing MGMT_REMOVE_OBJECT on "
                 "%llu,%d\n", llu(sm_p->object_ref.handle),
                 sm_p->object_ref.fs_id);

    pair->fs_id = sm_p->object_ref.fs_id;
    pair->handle = sm_p->object_ref.handle;
    /* no automatic retry for this op (PVFS_MSGPAIR_NO_RETRY) */
    pair->retry_flag = PVFS_MSGPAIR_NO_RETRY;
    pair->comp_fn = mgmt_remove_object_comp_fn;

    /* resolve the owning server's address; record failure in the job
     * status so the state machine can react */
    ret = PINT_cached_config_map_to_server(
        &pair->svr_addr, pair->handle, pair->fs_id);
    if (ret)
    {
        gossip_err("Failed to map server address\n");
        js_p->error_code = ret;
    }

    PINT_sm_push_frame(smcb, 0, &sm_p->msgarray_op);
    return SM_ACTION_COMPLETE;
}
/* build_handlelist()
 *
 * Queries every server for the set of handles it has in use on the given
 * file system and collects them into a handlelist, then strips out any
 * reserved handles (which may be reported by any server, not just the
 * owner).  On most failure paths the servers are switched back to
 * PVFS_SERVER_NORMAL_MODE, matching the previous behavior.
 *
 * returns the populated handlelist on success, NULL on failure.
 *
 * BUGFIXES vs. previous version:
 *  - the handle_matrix row-pointer array was allocated with
 *    sizeof(PVFS_handle) instead of the pointer size (worked only where
 *    sizeof(PVFS_handle) >= sizeof(PVFS_handle *));
 *  - every early-exit path leaked all previously allocated arrays;
 *  - the first iterate loop never reset hcount_array[] back to
 *    HANDLE_BATCH between rounds (the reserved-handle loop below did),
 *    so the usable batch capacity shrank after every round.
 */
struct handlelist *build_handlelist(PVFS_fs_id cur_fs,
                                    PVFS_BMI_addr_t *addr_array,
                                    int server_count,
                                    PVFS_credentials *creds)
{
    int ret, i, more_flag;
    unsigned long j;
    PVFS_handle **handle_matrix = NULL;
    int *hcount_array = NULL;
    unsigned long *handle_count_array = NULL;
    unsigned long *total_count_array = NULL;
    PVFS_ds_position *position_array = NULL;
    struct PVFS_mgmt_server_stat *stat_array = NULL;
    struct handlelist *hl = NULL;
    struct PVFS_mgmt_setparam_value param_value;

    /* find out how many handles are in use on each server */
    stat_array = malloc(server_count * sizeof(*stat_array));
    if (stat_array == NULL)
    {
        param_value.type = PVFS_MGMT_PARAM_TYPE_UINT64;
        param_value.u.value = PVFS_SERVER_NORMAL_MODE;
        PVFS_mgmt_setparam_list(cur_fs, creds, PVFS_SERV_PARAM_MODE,
                                &param_value, addr_array, server_count,
                                NULL, NULL);
        return NULL;
    }

    ret = PVFS_mgmt_statfs_list(cur_fs, creds, stat_array, addr_array,
                                server_count, NULL /* details */, NULL);
    if (ret != 0)
    {
        param_value.type = PVFS_MGMT_PARAM_TYPE_UINT64;
        param_value.u.value = PVFS_SERVER_NORMAL_MODE;
        PVFS_perror("PVFS_mgmt_statfs_list", ret);
        PVFS_mgmt_setparam_list(cur_fs, creds, PVFS_SERV_PARAM_MODE,
                                &param_value, addr_array, server_count,
                                NULL, NULL);
        goto error;
    }

    /* allocate a 2 dimensional array for handles from mgmt fn.
     * calloc zeroes the row pointers so cleanup can free unconditionally */
    handle_matrix = calloc(server_count, sizeof(*handle_matrix));
    if (handle_matrix == NULL)
    {
        perror("malloc");
        goto error;
    }
    for (i = 0; i < server_count; i++)
    {
        handle_matrix[i] = calloc(HANDLE_BATCH, sizeof(PVFS_handle));
        if (handle_matrix[i] == NULL)
        {
            perror("malloc");
            goto error;
        }
    }

    /* allocate some arrays to keep up with state */
    handle_count_array = calloc(server_count, sizeof(*handle_count_array));
    if (handle_count_array == NULL)
    {
        perror("malloc");
        goto error;
    }
    position_array = calloc(server_count, sizeof(*position_array));
    if (position_array == NULL)
    {
        perror("malloc");
        goto error;
    }
    total_count_array = calloc(server_count, sizeof(*total_count_array));
    if (total_count_array == NULL)
    {
        perror("malloc");
        goto error;
    }
    hcount_array = calloc(server_count, sizeof(*hcount_array));
    if (hcount_array == NULL)
    {
        perror("malloc:");
        goto error;
    }

    /* in-use count per server = total minus still-available */
    for (i = 0; i < server_count; i++)
    {
        handle_count_array[i] = stat_array[i].handles_total_count -
            stat_array[i].handles_available_count;
        total_count_array[i] = 0;
    }

    hl = handlelist_initialize(handle_count_array, server_count);
    if (hl == NULL)
    {
        goto error;
    }

    for (i = 0; i < server_count; i++)
    {
        hcount_array[i] = HANDLE_BATCH;
        position_array[i] = PVFS_ITERATE_START;
    }

    /* iterate until we have retrieved all in-use handles */
    more_flag = 1;
    while (more_flag)
    {
        ret = PVFS_mgmt_iterate_handles_list(cur_fs, creds, handle_matrix,
                                             hcount_array, position_array,
                                             addr_array, server_count, 0,
                                             NULL /* details */,
                                             NULL /* hints */);
        if (ret < 0)
        {
            param_value.type = PVFS_MGMT_PARAM_TYPE_UINT64;
            param_value.u.value = PVFS_SERVER_NORMAL_MODE;
            PVFS_perror("PVFS_mgmt_iterate_handles_list", ret);
            PVFS_mgmt_setparam_list(cur_fs, creds, PVFS_SERV_PARAM_MODE,
                                    &param_value, addr_array, server_count,
                                    NULL, NULL);
            goto error;
        }

        for (i = 0; i < server_count; i++)
        {
            total_count_array[i] += hcount_array[i];
            for (j = 0; j < (unsigned long)hcount_array[i]; j++)
            {
                PVFS_BMI_addr_t tmp_addr;
                /* verify that handles are within valid ranges for the
                 * given server here */
                ret = PINT_cached_config_map_to_server(&tmp_addr,
                                                       handle_matrix[i][j],
                                                       cur_fs);
                if (ret || tmp_addr != addr_array[i])
                {
                    fprintf(stderr, "Ugh! handle does not seem to be owned by the server!\n");
                    goto error;
                }
            }
            handlelist_add_handles(hl, handle_matrix[i],
                                   hcount_array[i], i);
        }

        /* find out if any servers have more handles to dump; reset the
         * batch capacity for those that do (hcount_array is in/out) */
        more_flag = 0;
        for (i = 0; i < server_count; i++)
        {
            if (position_array[i] != PVFS_ITERATE_END)
            {
                more_flag = 1;
                hcount_array[i] = HANDLE_BATCH;
            }
        }
    }

    /* sanity check: each server must have reported exactly the number
     * of handles statfs said were in use */
    for (i = 0; i < server_count; i++)
    {
        unsigned long used_handles = handle_count_array[i];
        if (total_count_array[i] != used_handles)
        {
            fprintf(stderr,
                    "Ugh! Server %d, Received %ld total handles instead of %ld\n",
                    i, total_count_array[i], used_handles);
            goto error;
        }
    }

    handlelist_finished_adding_handles(hl); /* sanity check */

    /* now look for reserved handles */
    for (i = 0; i < server_count; i++)
    {
        hcount_array[i] = HANDLE_BATCH;
        position_array[i] = PVFS_ITERATE_START;
    }

    more_flag = 1;
    while (more_flag)
    {
        ret = PVFS_mgmt_iterate_handles_list(cur_fs, creds, handle_matrix,
                                             hcount_array, position_array,
                                             addr_array, server_count,
                                             PVFS_MGMT_RESERVED,
                                             NULL /* details */,
                                             NULL /* hints */);
        if (ret < 0)
        {
            PVFS_perror("PVFS_mgmt_iterate_handles_list", ret);
            param_value.type = PVFS_MGMT_PARAM_TYPE_UINT64;
            param_value.u.value = PVFS_SERVER_NORMAL_MODE;
            PVFS_mgmt_setparam_list(cur_fs, creds, PVFS_SERV_PARAM_MODE,
                                    &param_value, addr_array, server_count,
                                    NULL, NULL);
            goto error;
        }

        for (i = 0; i < server_count; i++)
        {
            /* remove any reserved handles from the handlelist.  These
             * will not show up in normal objects when we walk the file
             * system tree.  We don't know the server index: reserved
             * handles can be reported by any server, not just the
             * server that actually owns that handle. */
            for (j = 0; j < (unsigned long)hcount_array[i]; j++)
            {
                handlelist_remove_handle_no_idx(hl, handle_matrix[i][j]);
            }
        }

        /* find out if any servers have more handles to dump */
        more_flag = 0;
        for (i = 0; i < server_count; i++)
        {
            if (position_array[i] != PVFS_ITERATE_END)
            {
                more_flag = 1;
                hcount_array[i] = HANDLE_BATCH;
            }
        }
    }

    goto cleanup;

error:
    /* NOTE(review): a partially built handlelist is not released here;
     * no handlelist destructor is visible in this chunk -- TODO free hl
     * if such a routine exists. */
    hl = NULL;

cleanup:
    if (handle_matrix != NULL)
    {
        for (i = 0; i < server_count; i++)
        {
            free(handle_matrix[i]);
        }
        free(handle_matrix);
    }
    free(handle_count_array);
    free(hcount_array);
    free(total_count_array);
    free(position_array);
    free(stat_array);

    return hl;
}