int ompi_fetch_opal_pointer_array_item(mqs_process *proc, mqs_taddr_t addr,
                                       mpi_process_info *p_info, int index,
                                       mqs_taddr_t *item)
{
    mqs_image *image = mqs_get_image(proc);
    mpi_image_info *i_info = (mpi_image_info *) mqs_get_image_info(image);
    int size, lowest_free, number_free;
    mqs_taddr_t base;

    if (index < 0) {
        return mqs_no_information;
    }

    /* Bounds-check the index against the array's current size */
    ompi_fetch_opal_pointer_array_info(proc, addr, p_info, &size,
                                       &lowest_free, &number_free);
    if (index >= size) {
        return mqs_no_information;
    }

    /* Fetch the base address of the array's backing store, then the
       index'th pointer-sized entry within it */
    base = ompi_fetch_pointer(proc,
                              addr + i_info->opal_pointer_array_t.offset.addr,
                              p_info);
    *item = ompi_fetch_pointer(proc,
                               base + index * p_info->sizes.pointer_size,
                               p_info);

    return mqs_ok;
}
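/*
 * Illustrative sketch (not part of the original file): one plausible way
 * a caller could walk every live entry of an opal_pointer_array_t in the
 * target image using the accessor above.  The helper name and the idea of
 * skipping NULL slots are assumptions for illustration only, not part of
 * the mqs/mpidbg interface.
 */
static void sketch_walk_pointer_array(mqs_process *proc,
                                      mpi_process_info *p_info,
                                      mqs_taddr_t array_addr)
{
    int i, size, lowest_free, number_free;
    mqs_taddr_t item;

    /* Find out how many slots the array currently has */
    ompi_fetch_opal_pointer_array_info(proc, array_addr, p_info,
                                       &size, &lowest_free, &number_free);
    for (i = 0; i < size; ++i) {
        if (mqs_ok == ompi_fetch_opal_pointer_array_item(proc, array_addr,
                                                         p_info, i, &item) &&
            0 != item) {
            /* "item" now holds the target-process address of entry i */
        }
    }
}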
int mpidbg_comm_query(mqs_image *image, mqs_image_info *image_info,
                      mqs_process *process, mqs_process_info *process_info,
                      mqs_taddr_t c_comm, struct mpidbg_comm_info_t **info)
{
    int flags;
    mpi_image_info *i_info = (mpi_image_info*) image_info;
    mpi_process_info *p_info = (mpi_process_info*) process_info;
    mqs_taddr_t group, topo, keyhash;

    /* Allocate and zero the return struct */
    *info = mqs_malloc(sizeof(struct mpidbg_comm_info_t));
    if (NULL == *info) {
        return MPIDBG_ERR_NO_MEM;
    }
    /* JMS temporarily zero everything out.  Remove this when we fill
       in all the fields */
    memset(*info, 0, sizeof(struct mpidbg_comm_info_t));
    (*info)->comm_c_handle = c_comm;

    printf("mpidbg_comm_query: %p\n", (void*) c_comm);

    /* Get the comm name */
    mqs_fetch_data(process,
                   c_comm + i_info->ompi_communicator_t.offset.c_name,
                   MPIDBG_MAX_OBJECT_NAME, (*info)->comm_name);

    /* Get this process' rank in the comm */
    (*info)->comm_rank =
        ompi_fetch_int(process,
                       c_comm + i_info->ompi_communicator_t.offset.c_my_rank,
                       p_info);

    /* Analyze the flags on the comm */
    flags = ompi_fetch_int(process,
                           c_comm + i_info->ompi_communicator_t.offset.c_flags,
                           p_info);
    (*info)->comm_bitflags = 0;
    if (MPI_PROC_NULL == (*info)->comm_rank) {
        /* This communicator is MPI_COMM_NULL */
        (*info)->comm_rank = (*info)->comm_size = 0;
        (*info)->comm_bitflags |= MPIDBG_COMM_INFO_COMM_NULL;
    } else if (0 != (flags & OMPI_COMM_INTER)) {
        (*info)->comm_bitflags |= MPIDBG_COMM_INFO_INTERCOMM;
    } else {
        if (0 != (flags & OMPI_COMM_CART)) {
            (*info)->comm_bitflags |= MPIDBG_COMM_INFO_CARTESIAN;
        } else if (0 != (flags & OMPI_COMM_GRAPH)) {
            (*info)->comm_bitflags |= MPIDBG_COMM_INFO_GRAPH;
        }
    }
    if (0 != (flags & OMPI_COMM_ISFREED)) {
        (*info)->comm_bitflags |= MPIDBG_COMM_INFO_FREED_HANDLE;
    }
    if (0 != (flags & OMPI_COMM_INTRINSIC)) {
        (*info)->comm_bitflags |= MPIDBG_COMM_INFO_PREDEFINED;
    }
    if (0 != (flags & OMPI_COMM_INVALID)) {
        (*info)->comm_bitflags |= MPIDBG_COMM_INFO_FREED_OBJECT;
    }

    /* Look up the local group */
    group = ompi_fetch_pointer(process,
                               c_comm + i_info->ompi_communicator_t.offset.c_local_group,
                               p_info);
    (*info)->comm_rank =
        ompi_fetch_int(process,
                       group + i_info->ompi_group_t.offset.grp_my_rank,
                       p_info);
    (*info)->comm_num_local_procs =
        ompi_fetch_int(process,
                       group + i_info->ompi_group_t.offset.grp_proc_count,
                       p_info);
    /* Fill in comm_size with the size of the local group.  We'll
       override below if this is an intercommunicator. */
    (*info)->comm_size = (*info)->comm_num_local_procs;

    /* JMS fill this in: waiting to decide between mpidbg_process_t
       and mqs_process_location */
    (*info)->comm_local_procs = NULL;

    /* Look up the remote group (if relevant) */
    if (0 != (flags & OMPI_COMM_INTER)) {
        group = ompi_fetch_pointer(process,
                                   c_comm + i_info->ompi_communicator_t.offset.c_remote_group,
                                   p_info);
        (*info)->comm_num_remote_procs =
            ompi_fetch_int(process,
                           group + i_info->ompi_group_t.offset.grp_proc_count,
                           p_info);
        (*info)->comm_size = (*info)->comm_num_remote_procs;

        /* JMS fill this in: waiting to decide between mpidbg_process_t
           and mqs_process_location */
        (*info)->comm_remote_procs = NULL;
    } else {
        (*info)->comm_num_remote_procs = 0;
        (*info)->comm_remote_procs = NULL;
    }

    /* Fill in cartesian/graph info, if relevant.  The cartesian and
       graph data is just slightly different from each other; it's
       [slightly] easier (and less confusing!) to have separate
       retrieval code blocks.
    */
    topo = ompi_fetch_pointer(process,
                              c_comm + i_info->ompi_communicator_t.offset.c_topo,
                              p_info);
    if (0 != topo &&
        0 != ((*info)->comm_bitflags & MPIDBG_COMM_INFO_CARTESIAN)) {
        int i, ndims, tmp;
        mqs_taddr_t dims, periods;

        /* Alloc space for copying arrays */
        (*info)->comm_cart_num_dims = ndims =
            ompi_fetch_int(process,
                           topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.cart.ndims,
                           p_info);
        (*info)->comm_cart_dims = mqs_malloc(ndims * sizeof(int));
        if (NULL == (*info)->comm_cart_dims) {
            return MPIDBG_ERR_NO_MEM;
        }
        (*info)->comm_cart_periods = mqs_malloc(ndims * sizeof(int8_t));
        if (NULL == (*info)->comm_cart_periods) {
            mqs_free((*info)->comm_cart_dims);
            (*info)->comm_cart_dims = NULL;
            return MPIDBG_ERR_NO_MEM;
        }

        /* Retrieve the dimension and periodic description data from
           the two arrays on the image's communicator */
        dims = ompi_fetch_pointer(process,
                                  topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.cart.dims,
                                  p_info);
        periods = ompi_fetch_pointer(process,
                                     topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.cart.periods,
                                     p_info);
        for (i = 0; i < ndims; ++i) {
            (*info)->comm_cart_dims[i] =
                ompi_fetch_int(process, dims + (sizeof(int) * i), p_info);
            tmp = ompi_fetch_int(process, periods + (sizeof(int) * i), p_info);
            (*info)->comm_cart_periods[i] = (int8_t) tmp;
            printf("mpidbg: cart comm: dimension %d: (length %d, periodic: %d)\n",
                   i, (*info)->comm_cart_dims[i], tmp);
        }
    } else if (0 != topo &&
               0 != ((*info)->comm_bitflags & MPIDBG_COMM_INFO_GRAPH)) {
        int i, nnodes;
        mqs_taddr_t index, edges;

        /* Alloc space for copying the indexes */
        (*info)->comm_graph_num_nodes = nnodes =
            ompi_fetch_int(process,
                           topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.graph.nnodes,
                           p_info);
        (*info)->comm_graph_index = mqs_malloc(nnodes * sizeof(int));
        if (NULL == (*info)->comm_graph_index) {
            return MPIDBG_ERR_NO_MEM;
        }

        /* Retrieve the index data */
        index = ompi_fetch_pointer(process,
                                   topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.graph.index,
                                   p_info);
        for (i = 0; i < nnodes; ++i) {
            (*info)->comm_graph_index[i] =
                ompi_fetch_int(process, index + (sizeof(int) * i), p_info);
        }

        /* Allocate space for the edges; the last entry in the index
           array is the total number of edges */
        (*info)->comm_graph_edges =
            mqs_malloc((*info)->comm_graph_index[nnodes - 1] * sizeof(int));
        if (NULL == (*info)->comm_graph_edges) {
            mqs_free((*info)->comm_graph_index);
            (*info)->comm_graph_index = NULL;
            return MPIDBG_ERR_NO_MEM;
        }

        /* Retrieve the edge data */
        edges = ompi_fetch_pointer(process,
                                   topo + i_info->ompi_mca_topo_base_comm_1_0_0_t.offset.mtc.graph.edges,
                                   p_info);
        for (i = 0; i < (*info)->comm_graph_index[nnodes - 1]; ++i) {
            (*info)->comm_graph_edges[i] =
                ompi_fetch_int(process, edges + (sizeof(int) * i), p_info);
        }
    } else if (0 != topo &&
               0 != ((*info)->comm_bitflags & MPIDBG_COMM_INFO_DIST_GRAPH)) {
        /* TODO: Complete the info if the communicator has a
           distributed graph topology */
    }

    /* Fortran handle */
    (*info)->comm_fortran_handle =
        ompi_fetch_int(process,
                       c_comm + i_info->ompi_communicator_t.offset.c_f_to_c_index,
                       p_info);
    printf("mpidbg: comm fortran handle: %d\n", (*info)->comm_fortran_handle);

    /* Fill in attributes */
    keyhash = ompi_fetch_pointer(process,
                                 c_comm + i_info->ompi_communicator_t.offset.c_keyhash,
                                 p_info);
    fill_attributes(&((*info)->comm_num_attrs), &((*info)->comm_attrs),
                    keyhash);

    /* JMS temporary */
    (*info)->comm_num_pending_requests = MPIDBG_ERR_NOT_SUPPORTED;
    (*info)->comm_pending_requests = NULL;
    (*info)->comm_num_derived_windows = MPIDBG_ERR_NOT_SUPPORTED;
    (*info)->comm_derived_windows = NULL;
    (*info)->comm_num_derived_files = MPIDBG_ERR_NOT_SUPPORTED;
    (*info)->comm_derived_files = NULL;

    return MPIDBG_SUCCESS;
}
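/*
 * Illustrative sketch (not part of the original file): one plausible way
 * a debugger front-end might consume mpidbg_comm_query() and release what
 * it allocated.  The helper name is an assumption for illustration; an
 * actual mpidbg interface may well provide its own dedicated release
 * entry point instead of this manual cleanup.
 */
static void sketch_query_and_release(mqs_image *image,
                                     mqs_image_info *image_info,
                                     mqs_process *process,
                                     mqs_process_info *process_info,
                                     mqs_taddr_t c_comm)
{
    struct mpidbg_comm_info_t *info = NULL;

    if (MPIDBG_SUCCESS == mpidbg_comm_query(image, image_info, process,
                                            process_info, c_comm, &info)) {
        /* ...inspect info->comm_name, info->comm_rank, info->comm_size,
           info->comm_bitflags, etc. here... */

        /* Release the arrays mpidbg_comm_query() may have allocated;
           the struct is zeroed on allocation, so unset fields are NULL */
        if (NULL != info->comm_cart_dims) {
            mqs_free(info->comm_cart_dims);
        }
        if (NULL != info->comm_cart_periods) {
            mqs_free(info->comm_cart_periods);
        }
        if (NULL != info->comm_graph_index) {
            mqs_free(info->comm_graph_index);
        }
        if (NULL != info->comm_graph_edges) {
            mqs_free(info->comm_graph_edges);
        }
        mqs_free(info);
    }
}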