/****************************************************************************** * SPLATT-CPD *****************************************************************************/ int splatt_cpd_cmd( int argc, char ** argv) { /* assign defaults and parse arguments */ cpd_cmd_args args; default_cpd_opts(&args); argp_parse(&cpd_argp, argc, argv, ARGP_IN_ORDER, 0, &args); srand(args.opts[SPLATT_OPTION_RANDSEED]); sptensor_t * tt = NULL; print_header(); tt = tt_read(args.ifname); if(tt == NULL) { return SPLATT_ERROR_BADINPUT; } /* print basic tensor stats? */ splatt_verbosity_type which_verb = args.opts[SPLATT_OPTION_VERBOSITY]; if(which_verb >= SPLATT_VERBOSITY_LOW) { stats_tt(tt, args.ifname, STATS_BASIC, 0, NULL); } splatt_csf * csf = splatt_csf_alloc(tt, args.opts); idx_t nmodes = tt->nmodes; tt_free(tt); /* print CPD stats? */ if(which_verb >= SPLATT_VERBOSITY_LOW) { cpd_stats(csf, args.nfactors, args.opts); } splatt_kruskal factored; /* do the factorization! */ int ret = splatt_cpd_als(csf, args.nfactors, args.opts, &factored); if(ret != SPLATT_SUCCESS) { fprintf(stderr, "splatt_cpd_als returned %d. 
Aborting.\n", ret); return ret; } printf("Final fit: %0.5"SPLATT_PF_VAL"\n", factored.fit); /* write output */ if(args.write == 1) { char * lambda_name = NULL; if(args.stem) { asprintf(&lambda_name, "%s.lambda.mat", args.stem); } else { asprintf(&lambda_name, "lambda.mat"); } vec_write(factored.lambda, args.nfactors, lambda_name); free(lambda_name); for(idx_t m=0; m < nmodes; ++m) { char * matfname = NULL; if(args.stem) { asprintf(&matfname, "%s.mode%"SPLATT_PF_IDX".mat", args.stem, m+1); } else { asprintf(&matfname, "mode%"SPLATT_PF_IDX".mat", m+1); } matrix_t tmpmat; tmpmat.rowmajor = 1; tmpmat.I = csf->dims[m]; tmpmat.J = args.nfactors; tmpmat.vals = factored.factors[m]; mat_write(&tmpmat, matfname); free(matfname); } } /* cleanup */ splatt_csf_free(csf, args.opts); free_cpd_args(&args); /* free factor matrix allocations */ splatt_free_kruskal(&factored); return EXIT_SUCCESS; }
/**
* Gather the distributed factor matrices onto the root rank and write each
* mode's matrix to disk as '<basename><mode>.mat'.
*
* Rank 0 assembles each global matrix: it first copies its own local rows
* into position, then receives every other rank's rows (together with the
* layer offset and inverse permutation needed to place them) and writes the
* result. Non-root ranks only send; the four MPI_Send calls below must stay
* paired, in order, with the four MPI_Recv calls in the root branch.
*
* @param mats     Local factor matrices, one per mode (all ranks).
* @param perm     Tensor permutation; iperms[m] maps a local row back to its
*                 pre-permutation index within the rank's layer.
* @param rinfo    MPI rank / communicator / data-layout information.
* @param basename Prefix for the output filenames.
* @param nmodes   Number of modes (factor matrices) to write.
*/
void mpi_write_mats(
  matrix_t ** mats,
  permutation_t const * const perm,
  rank_info const * const rinfo,
  char const * const basename,
  idx_t const nmodes)
{
  char * fname;
  idx_t const nfactors = mats[0]->J;

  MPI_Status status;

  idx_t maxdim = 0;
  idx_t maxlocaldim = 0;
  matrix_t * matbuf = NULL;
  val_t * vbuf = NULL;
  idx_t * loc_iperm = NULL;

  /* largest global dim sizes the root's receive buffers; it bounds any
   * rank's local row count */
  for(idx_t m=0; m < nmodes; ++m) {
    maxdim = SS_MAX(maxdim, rinfo->global_dims[m]);
    maxlocaldim = SS_MAX(maxlocaldim, mats[m]->I);
  }

  /* get the largest local dim */
  /* NOTE(review): maxlocaldim is reduced here but never read afterward --
   * the buffers below are sized by maxdim instead; confirm whether this
   * collective is still needed. */
  if(rinfo->rank == 0) {
    MPI_Reduce(MPI_IN_PLACE, &maxlocaldim, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  } else {
    MPI_Reduce(&maxlocaldim, NULL, 1, SPLATT_MPI_IDX, MPI_MAX, 0,
        rinfo->comm_3d);
  }

  /* only the root needs assembly/receive scratch space */
  if(rinfo->rank == 0) {
    matbuf = mat_alloc(maxdim, nfactors);
    loc_iperm = (idx_t *) splatt_malloc(maxdim * sizeof(idx_t));
    vbuf = (val_t *) splatt_malloc(maxdim * nfactors * sizeof(val_t));
  }

  for(idx_t m=0; m < nmodes; ++m) {
    /* root handles the writing */
    if(rinfo->rank == 0) {
      /* NOTE(review): asprintf return value is unchecked; fname is
       * undefined if it fails. */
      asprintf(&fname, "%s%"SPLATT_PF_IDX".mat", basename, m+1);
      matbuf->I = rinfo->global_dims[m];

      /* copy root's matrix to buffer */
      for(idx_t i=0; i < mats[m]->I; ++i) {
        /* gi = global (pre-permutation) row index of local row i */
        idx_t const gi = rinfo->layer_starts[m] + perm->iperms[m][i];
        for(idx_t f=0; f < nfactors; ++f) {
          matbuf->vals[f + (gi*nfactors)] = mats[m]->vals[f+(i*nfactors)];
        }
      }

      /* receive matrix from each rank */
      for(int p=1; p < rinfo->npes; ++p) {
        idx_t layerstart;
        idx_t nrows;
        MPI_Recv(&layerstart, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d,
            &status);
        MPI_Recv(&nrows, 1, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d, &status);
        MPI_Recv(vbuf, nrows * nfactors, SPLATT_MPI_VAL, p, 0, rinfo->comm_3d,
            &status);
        MPI_Recv(loc_iperm, nrows, SPLATT_MPI_IDX, p, 0, rinfo->comm_3d,
            &status);

        /* permute buffer and copy into matbuf */
        for(idx_t i=0; i < nrows; ++i) {
          idx_t const gi = layerstart + loc_iperm[i];
          for(idx_t f=0; f < nfactors; ++f) {
            matbuf->vals[f + (gi*nfactors)] = vbuf[f+(i*nfactors)];
          }
        }
      }

      /* write the factor matrix to disk */
      mat_write(matbuf, fname);

      /* clean up */
      free(fname);
    } else {
      /* send matrix to root (order must match root's MPI_Recv sequence) */
      MPI_Send(&(rinfo->layer_starts[m]), 1, SPLATT_MPI_IDX, 0, 0,
          rinfo->comm_3d);
      MPI_Send(&(mats[m]->I), 1, SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
      MPI_Send(mats[m]->vals, mats[m]->I * mats[m]->J, SPLATT_MPI_VAL, 0, 0,
          rinfo->comm_3d);
      MPI_Send(perm->iperms[m] + rinfo->mat_start[m], mats[m]->I,
          SPLATT_MPI_IDX, 0, 0, rinfo->comm_3d);
    }
  } /* foreach mode */

  if(rinfo->rank == 0) {
    mat_free(matbuf);
    free(vbuf);
    free(loc_iperm);
  }
}
int main() { unsigned r1, c1, r2, c2, ch; mat m1, m2, t; puts("Enter row and column numbers of matrices 1 and 2:"); scanf(" %u %u %u %u%*c", &r1, &c1, &r2, &c2); putchar('\n'); m1=mat_alloc(r1, c1); m2=mat_alloc(r2, c2); printf("Enter value of Matrix 1 (%ux%u):\n", r1, c1); mat_read(m1); putchar('\n'); printf("Enter value of Matrix 2 (%ux%u):\n", r2, c2); mat_read(m2); putchar('\n'); do{ puts("What would you like to do?"); puts(" ( 0) Exit"); puts(" ( 1) Display"); puts(" ( 2) Transpose"); puts(" ( 3) Sum"); puts(" ( 4) Difference"); puts(" ( 5) Product"); puts(" ( 6) Greatest Element of Rows"); puts(" ( 7) Sum of Major Diagonal"); puts(" ( 8) Sum of Minor Diagonal"); puts(" ( 9) Check Symmetricity"); puts(" (10) Upper-Triangular Matrix"); puts(" (11) Lower-Triangular Matrix"); scanf(" %u%*c", &ch); switch(ch){ case 0: puts("Bye!"); break; case 1: puts("Matrix 1:"); mat_write(m1); putchar('\n'); puts("Matrix 2:"); mat_write(m2); break; case 2: t=mat_trn(m1); mat_write(t); mat_free(t); break; case 3: if((t=mat_add(m1, m2)).r){ mat_write(t); mat_free(t); } else puts("Not Possible"); break; case 4: if((t=mat_sub(m1, m2)).r){ mat_write(t); mat_free(t); } else puts("Not Possible"); break; case 5: if((t=mat_mul(m1, m2)).r){ mat_write(t); mat_free(t); } else puts("Not Possible"); break; case 6: row_great(m1); break; case 7: add_major(m1); break; case 8: add_minor(m1); break; case 9: if(issymm(m1)) puts("Symmetric"); else puts("Unsymmetric"); break; case 10: up_tri(m1); break; case 11: lo_tri(m1); break; default: puts("Incorrect Choice!"); break; } putchar('\n'); } while(ch); mat_free(m1); mat_free(m2); return 0; }