Example #1
File: weights.cpp Project: LLNL/lbann
void weights::write_proto(lbann_data::WeightsData* proto) const {

  // Set proto properties
  proto->Clear();
  proto->set_name(m_name);
  for (const auto& d : get_dims()) {
    proto->mutable_shape()->add_dim(d);
  }
  proto->set_height(get_matrix_height());
  proto->set_width(get_matrix_width());

  // Write weight values to prototext on world master process
  CircMat<El::Device::CPU> values = *m_values; /// @todo What if weights are on GPU?
  values.SetRoot(0); /// @todo What if world master is not process 0?
  if (m_comm->am_world_master()) {
    const auto& local_values = values.LockedMatrix();
    const El::Int height = local_values.Height();
    const El::Int width = local_values.Width();
    /// @todo OpenMP parallelization
    /** @todo Our matrices are column-major while Numpy expects
     *  row-major matrices. This row-wise iteration is fine for
     *  matrices and column vectors, but it can mess up the order of
     *  the weights if a high-dimensional tensor is represented as a
     *  matrix. This is what we need for quantization on convolution
     *  kernel weights.
     */
    for (El::Int i = 0; i < height; ++i) {
      for (El::Int j = 0; j < width; ++j) {
        proto->add_data(local_values(i,j));
      }
    }
  }

}
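Note: the @todo above flags that Elemental stores matrices column-major while the loop emits entries row-wise. A minimal standalone sketch of that emission order (plain C++, no Elemental or protobuf; the 2x3 values are invented for illustration):

#include <cstdio>
#include <vector>

int main() {
  // The 2x3 matrix
  //   [1 2 3]
  //   [4 5 6]
  // stored column-major, as Elemental stores local matrices:
  const int height = 2, width = 3;
  const std::vector<double> col_major = {1, 4, 2, 5, 3, 6};
  // Row-wise iteration, as in write_proto above, prints 1 2 3 4 5 6,
  // i.e. the row-major order NumPy expects for a plain 2-D array.
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; ++j) {
      std::printf("%g ", col_major[j * height + i]);
    }
  }
  std::printf("\n");
  return 0;
}

For a genuine 2-D matrix this ordering is exactly what NumPy expects; the concern in the @todo only arises when a higher-dimensional tensor (e.g. a convolution kernel) has been flattened into the matrix.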
Example #2
File: weights.cpp Project: LLNL/lbann
void weights::set_dims(std::vector<int> matrix_height_dims,
                       std::vector<int> matrix_width_dims) {
  m_matrix_height_dims = matrix_height_dims;
  m_matrix_width_dims = matrix_width_dims;
  if (m_values != nullptr) {
    const auto& height = get_matrix_height();
    const auto& width = get_matrix_width();
    if (m_values->Height() != height || m_values->Width() != width) {
      std::stringstream err;
      err << "attempted to set weights \"" << get_name() << "\" "
          << "with dimensions "
          << get_dims_string(matrix_height_dims, matrix_width_dims) << ", "
          << "but it is already setup with a "
          << m_values->Height() << " x " << m_values->Width() << " "
          << "weights matrix";
      LBANN_ERROR(err.str());
    }
  }
}
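Note: the height and width compared above come from get_matrix_height()/get_matrix_width(). As a point of orientation only, here is a minimal sketch of one plausible reduction from dimension vectors to a single matrix extent, assuming the extent is simply the product of the dimensions (an assumption for illustration, not code taken from LBANN):

#include <functional>
#include <iostream>
#include <numeric>
#include <vector>

int main() {
  // Hypothetical dimension vectors (values invented for illustration).
  const std::vector<int> matrix_height_dims = {64, 3, 3};
  const std::vector<int> matrix_width_dims  = {16};
  // Reduce each vector to a single extent by taking the product.
  const int height = std::accumulate(matrix_height_dims.begin(),
                                     matrix_height_dims.end(),
                                     1, std::multiplies<int>());
  const int width  = std::accumulate(matrix_width_dims.begin(),
                                     matrix_width_dims.end(),
                                     1, std::multiplies<int>());
  std::cout << height << " x " << width << " weights matrix\n";  // 576 x 16
  return 0;
}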
Example #3
File: weights.cpp Project: LLNL/lbann
void weights::setup() {

  // Check that tensor dimensions are valid
  const auto& is_nonpositive = [] (int d) { return d <= 0; };
  if (std::any_of(m_matrix_height_dims.begin(),
                  m_matrix_height_dims.end(),
                  is_nonpositive)
      || std::any_of(m_matrix_width_dims.begin(),
                     m_matrix_width_dims.end(),
                     is_nonpositive)) {
    std::stringstream err;
    err << "attempted to setup weights \"" << get_name() << "\" with a "
        << get_dims_string(m_matrix_height_dims, m_matrix_width_dims) << " "
        << "weights matrix";
    LBANN_ERROR(err.str());
  }

  // Construct weights matrix
  m_values.reset(AbsDistMat::Instantiate(*m_matrix_dist.grid,
                                         m_matrix_dist.root,
                                         m_matrix_dist.colDist,
                                         m_matrix_dist.rowDist,
                                         (m_matrix_dist.blockHeight == 1
                                          && m_matrix_dist.blockWidth == 1 ?
                                          El::ELEMENT : El::BLOCK),
                                         m_matrix_dist.device));
  m_values->AlignWith(m_matrix_dist);
  m_values->Resize(get_matrix_height(), get_matrix_width());
  if (m_initializer != nullptr) {
    m_initializer->fill(*m_values);
  } else {
    El::Zero(*m_values);
  }

  // Setup optimizer
  if (m_optimizer != nullptr) {
    m_optimizer->setup(*this);
  }

}
Example #4
File: matrix.c Project: lukhio/matC
matrix *parse_matrix_from_text (const char *text_matrix) {
    matrix * mat;
    char *token, *end;
    float **parsed_data;
    // strtok() modifies the buffer it tokenizes, so work on a mutable copy
    // rather than the caller's (possibly read-only) string
    char *raw_data = strdup(text_matrix);
    const char *delim = "}";
    int height        = get_matrix_height(text_matrix);
    int width         = get_matrix_width(text_matrix);
    int i             = 0;

    mat               = new_matrix(height, width);
    parsed_data       = malloc(height*sizeof(float*));

    for (int row = 0; row < height; ++row)
        parsed_data[row] = malloc(width*sizeof(float));

    token = strtok(raw_data, delim);

    while(token != NULL) {
        // move to the first digit of this row, without running past the end
        while (*token != '\0' && !isdigit((unsigned char)*token)) {
            token++;
        }

        for (int j = 0; j < width; ++j) {
            float f = strtof(token, &end);
            parsed_data[i][j] = f;
            token = end;
            token++;
        }

        token = strtok(NULL, delim);
        i++;
    }

    set_matrix_values(mat, parsed_data);
    free(raw_data); // release the strdup'd copy to avoid a leak

    return mat;
}
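Note: the following self-contained C++ sketch reproduces the parsing idea above (split on '}' to get one token per row, skip ahead to the first digit, then read one float per column with strtof()). The "{1 2 3}{4 5 6}" input format is an assumption inferred from the delimiter and digit-skipping logic, not from the matC project:

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <string>

int main() {
  // Mutable copy, since strtok() writes into the buffer it tokenizes.
  std::string buffer = "{1 2 3}{4 5 6}";
  const int width = 3;
  char *token = std::strtok(&buffer[0], "}");
  while (token != nullptr) {
    // Skip the leading '{' and anything else up to the first digit.
    while (*token != '\0' && !std::isdigit(static_cast<unsigned char>(*token))) {
      ++token;
    }
    char *end = nullptr;
    for (int j = 0; j < width; ++j) {
      const float value = std::strtof(token, &end);  // strtof skips leading spaces
      std::printf("%g ", value);
      token = end;
    }
    std::printf("\n");
    token = std::strtok(nullptr, "}");
  }
  return 0;
}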
Example #5
File: weights.cpp Project: LLNL/lbann
void weights::set_value(DataType value, int row, int col) {

#ifdef LBANN_DEBUG
  // Check that matrix entry is valid
  const auto& height = get_matrix_height();
  const auto& width = get_matrix_width();
  if (row < 0 || row >= height || col < 0 || col >= width) {
    std::stringstream err;
    err << "attempted to set weights value "
        << "in weights \"" << get_name() << "\""
        << "at entry (" << row << "," << col << ") "
        << "in a " << height << "x" << width << " matrix";
    LBANN_ERROR(err.str());
  }
#endif // LBANN_DEBUG

  // Set value if it is local
  auto& values = get_values();
  if (values.IsLocal(row, col)) {
    values.SetLocal(values.LocalRow(row), values.LocalCol(col), value);
  }

}
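Note: set_value only writes the entry on the process that owns it. A minimal sketch of that "write only if local" pattern for a simple 1-D cyclic distribution (a deliberate simplification of Elemental's 2-D element distributions; ranks and sizes invented for illustration):

#include <cstdio>
#include <vector>

int main() {
  const int nprocs = 4, my_rank = 1, global_size = 10;
  // This rank owns global indices my_rank, my_rank + nprocs, ...
  std::vector<double> local((global_size - my_rank + nprocs - 1) / nprocs, 0.0);
  const int g = 5;                 // global index to set
  const double value = 3.14;
  if (g % nprocs == my_rank) {     // analogous to values.IsLocal(row, col)
    const int l = g / nprocs;      // analogous to values.LocalRow(row)
    local[l] = value;
  }
  std::printf("rank %d stores %zu entries locally\n", my_rank, local.size());
  return 0;
}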