void print_item(string tag)
{
    string type, *tags, *tp;
    int *dims;

    type = get_type(instr, tag);
    if (streq(type, SetType)) {
        /* A set: print its header, recurse into every tagged item,
           then print its trailer. */
        get_set(instr, tag);
        print_set(tag);
        tags = list_tags(instr);
        for (tp = tags; *tp != NULL; tp++)
            print_item(*tp);
        get_tes(instr, tag);
        print_tes(tag);
        for (tp = tags; *tp != NULL; tp++)
            free(*tp);
        free((char *) tags);
    } else {
        /* A plain item: print its header and data using its dimensions. */
        dims = get_dims(instr, tag);
        print_header(tag, type, dims);
        (void) outstr(" ");
        print_data(tag, type, dims);
        end_line();
        if (dims != NULL)
            free((char *) dims);
    }
    free(type);
}
void weights::set_value(DataType value, std::vector<int> pos) {

  // Get tensor dimensions
  const auto& dims = get_dims();

#ifdef LBANN_DEBUG
  // Check that tensor position is valid
  bool valid = dims.size() == pos.size();
  for (size_t i = 0; i < dims.size(); ++i) {
    valid = valid && pos[i] >= 0 && pos[i] < dims[i];
  }
  if (!valid) {
    std::stringstream err;
    err << "attempted to set value in "
        << "weights \"" << get_name() << "\" "
        << "at position (";
    for (size_t i = 0; i < pos.size(); ++i) {
      err << (i > 0 ? "x" : "") << pos[i];
    }
    err << ") in a tensor with dimensions ";
    for (size_t i = 0; i < dims.size(); ++i) {
      err << (i > 0 ? "x" : "") << dims[i];
    }
    LBANN_ERROR(err.str());
  }
#endif // LBANN_DEBUG

  // Get index of weight value and set
  int index = 0;
  for (size_t i = 0; i < dims.size(); ++i) {
    index = index * dims[i] + pos[i];
  }
  set_value(value, index);

}
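For reference, here is a minimal standalone sketch of the same row-major flattening that set_value performs above; flatten_index is a hypothetical helper introduced only for illustration and is not part of the LBANN API.

#include <cassert>
#include <vector>

// Illustrative helper (not LBANN API): flatten a multi-dimensional position
// into a linear index using the same row-major scheme as set_value above.
int flatten_index(const std::vector<int>& dims, const std::vector<int>& pos) {
  assert(dims.size() == pos.size());
  int index = 0;
  for (size_t i = 0; i < dims.size(); ++i) {
    index = index * dims[i] + pos[i];
  }
  return index;
}

// Example: position (1,2,3) in a 4x5x6 tensor maps to 1*(5*6) + 2*6 + 3 = 45.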
void weights::write_proto(lbann_data::WeightsData* proto) const {

  // Set proto properties
  proto->Clear();
  proto->set_name(m_name);
  for (const auto& d : get_dims()) {
    proto->mutable_shape()->add_dim(d);
  }
  proto->set_height(get_matrix_height());
  proto->set_width(get_matrix_width());

  // Write weight values to prototext on world master process
  CircMat<El::Device::CPU> values = *m_values; /// @todo What if weights are on GPU?
  values.SetRoot(0); /// @todo What if world master is not process 0?
  if (m_comm->am_world_master()) {
    const auto& local_values = values.LockedMatrix();
    const El::Int height = local_values.Height();
    const El::Int width = local_values.Width();
    /// @todo OpenMP parallelization
    /** @todo Our matrices are column-major while Numpy expects
     *  row-major matrices. This row-wise iteration is fine for
     *  matrices and column vectors, but it can mess up the order of
     *  the weights if a high-dimensional tensor is represented as a
     *  matrix. This is what we need for quantization on convolution
     *  kernel weights.
     */
    for (El::Int i = 0; i < height; ++i) {
      for (El::Int j = 0; j < width; ++j) {
        proto->add_data(local_values(i,j));
      }
    }
  }

}
description weights::get_description() const {
  std::stringstream ss;

  // Construct description object
  description desc(get_name());

  // Dimensions
  const auto& dims = get_dims();
  ss.str(std::string{});
  ss.clear();
  for (size_t i = 0; i < dims.size(); ++i) {
    ss << (i > 0 ? "x" : "") << dims[i];
  }
  desc.add("Dimensions", ss.str());

  // Optimizer
  if (m_optimizer != nullptr) {
    desc.add(m_optimizer->get_description());
  }

  // Initializer
  if (m_initializer != nullptr) {
    desc.add(m_initializer->get_description());
  }

  // Freeze state
  if (is_frozen()) {
    desc.add("Frozen");
  }

  return desc;
}
explicit RMatrix( SEXP args )
    : Rmat( coerceVector(args, RGetData<T>::Conv) ),
      data( RGetData<T>::Cast(Rmat) ),
      mcol(0)
{
    get_dims();
}
int main(int argc, char ** argv)
{
    int isinverse = 0;
    int isreal = 0;
    FILE *fin = stdin;
    FILE *fout = stdout;
    int ndims = 1;
    int dims[32];
    dims[0] = 1024; /*default fft size*/

    while (1) {
        int c = getopt(argc, argv, "n:iR");
        if (c == -1)
            break;
        switch (c) {
        case 'n':
            ndims = get_dims(optarg, dims);
            break;
        case 'i':
            isinverse = 1;
            break;
        case 'R':
            isreal = 1;
            break;
        case '?':
            fprintf(stderr, "usage options:\n"
                    "\t-n d1[,d2,d3...]: fft dimension(s)\n"
                    "\t-i : inverse\n"
                    "\t-R : real input samples, not complex\n");
            exit(1);
        default:
            fprintf(stderr, "bad %c\n", c);
            break;
        }
    }

    if (optind < argc) {
        if (strcmp("-", argv[optind]) != 0)
            fin = fopen(argv[optind], "rb");
        ++optind;
    }
    if (optind < argc) {
        if (strcmp("-", argv[optind]) != 0)
            fout = fopen(argv[optind], "wb");
        ++optind;
    }

    if (ndims == 1) {
        if (isreal)
            fft_file_real(fin, fout, dims[0], isinverse);
        else
            fft_file(fin, fout, dims[0], isinverse);
    } else {
        if (isreal)
            fft_filend_real(fin, fout, dims, ndims, isinverse);
        else
            fft_filend(fin, fout, dims, ndims, isinverse);
    }

    if (fout != stdout)
        fclose(fout);
    if (fin != stdin)
        fclose(fin);

    return 0;
}
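The -n handler above relies on get_dims to parse a comma-separated dimension list into dims[]. Its implementation is not shown here, so the following is only a plausible sketch of such a parser (parse_dims is a hypothetical name); the tool's actual get_dims may differ.

#include <cstdlib>
#include <cstring>

// Hypothetical sketch of a comma-separated dimension parser with the same
// shape as the get_dims call above: fills dims[] from a string such as
// "64,32,16" and returns the number of dimensions parsed.
static int parse_dims(char *arg, int *dims)
{
    int ndims = 0;
    for (char *tok = std::strtok(arg, ","); tok != nullptr;
         tok = std::strtok(nullptr, ","))
        dims[ndims++] = std::atoi(tok);
    return ndims;
}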
//initialize a map
void load_map(Game* game, TMX_map* map){
    fprintf(fpLog, "Loading map...\n");
    int i, j;
    FILE* fp;
    char dir[128] = "";

    //get size of map
    sprintf(dir, "res/maps/%s/map.map", game->active_map);
    fp = fopen(dir, "r");
    if(game->debug && fp == NULL){
        fprintf(fpLog, "File not opened map.map\n");
    }
    get_dims(fp, &map->w, &map->l);

    //allocate memory for map (array of row pointers, then each row)
    map->tmx = (int**) malloc(map->l * sizeof(int*));
    for(i = 0; i < map->l; ++i){
        map->tmx[i] = (int*) malloc(map->w * sizeof(int));
        for(j = 0; j < map->w; ++j){
            //scan in map to location
            fscanf(fp, "%d", &map->tmx[i][j]);
        }
    }
    fclose(fp);

    //get tilemap
    sprintf(dir, "res/maps/%s/tilemap.bmp", game->active_map);
    if ((map->tilemap = load_bitmap(dir, NULL)) == NULL){
        fprintf(fpLog, "Failed to load tilemap\n");
    }

    map->blocker_values = NULL;
    sprintf(dir, "res/maps/%s/settings.map", game->active_map);
    int temp;
    int cR, cG, cB;
    fp = fopen(dir, "r");
    if(game->debug && fp == NULL){
        fprintf(fpLog, "File not opened settings.map\n");
    }

    //get and set default background color
    fscanf(fp, "%d %d %d", &cR, &cG, &cB);
    map->bgCol = makecol(cR, cG, cB);

    //read blocker tile values until end of file
    i = 0;
    while(fscanf(fp, "%d", &temp) != EOF){
        map->blocker_values = (int*) realloc(map->blocker_values, (++i) * sizeof(int));
        map->blocker_values[i-1] = temp;
    }
    map->blocker_val_count = i;
    fclose(fp);

    //generate background
    map->map = NULL;
    gen_map(map);
}
END_OF_FUNCTION(load_map);
explicit RMatrix( size_t r, size_t c )
    : Rmat( allocMatrix( RGetData<T>::Conv, r, c) ),
      data( RGetData<T>::Cast(Rmat) ),
      mcol(0)
{
    PROTECT(Rmat);
    set_R();
    get_dims();
}
static void draw_init()
{
    initscr();
    noecho();
    curs_set(0);
    nodelay(stdscr, TRUE);
    leaveok(stdscr, TRUE);
    scrollok(stdscr, FALSE);
    get_dims();
}
constexpr bool operator==( matrix< M1, C1, R1 > const& m1, matrix< M2, C2, R2 > const& m2 ){
    auto size = get_dims(m2, m1);
    for(size_t y = 0; y < size_t(size.rows()); ++y){
        for(size_t x = 0; x < size_t(size.cols()); ++x){
            if(m1(x, y) != m2(x, y)) return false;
        }
    }
    return true;
}
int main()
{
    int i = 0, l = 0;

    get_dims();
    clearscreen();

    indent = (width - COLS) / 2;

    for (i = 0, l = (height - ROWS) / 2; i < l; i++) {
        newline();
    }

    textos(OS);
    newline();
    textline(CONTINUE);
    newline();
    textline(ENTER);
    newline();
    textline(CTRLALTDEL);
    textline(UNSAVED);
    newline();
    textline(ERROR);
    newline();
    textline(PRESSKEY);

    for (i = 0, l = ((height - ROWS) / 2) - 2; i < l; i++) {
        newline();
    }

    cursor(0);
    while (getchar() != '\n') {
    }
    cursor(1);

    return 0;
}
int weights::get_size() const {
  const auto& dims = get_dims();
  return std::accumulate(dims.begin(), dims.end(), 1,
                         std::multiplies<int>());
}