// Copy-constructs the attribute list from an existing vector of attributes,
// then computes each attribute's byte offset within a vertex.
//
// @param attVec attributes to copy, in the order they appear in a vertex.
APG::VertexAttributeList::VertexAttributeList(std::vector<VertexAttribute> &attVec) {
	for (const auto &att : attVec) {
		// Pass att directly: wrapping it in an explicit VertexAttribute(att)
		// temporary forced an extra copy before emplace_back copied again.
		attributes.emplace_back(att);
	}

	calculateOffsets();
}
void MLRModel::train() { if (descriptor_matrix_.cols() == 0) { throw Exception::InconsistentUsage(__FILE__, __LINE__, "Data must be read into the model before training!"); } if (Y_.cols() == 0) { throw Exception::InconsistentUsage(__FILE__, __LINE__, "No response values have been read! Model can not be trained!"); } if (descriptor_matrix_.cols() >= descriptor_matrix_.rows()) { throw Exception::SingularMatrixError(__FILE__, __LINE__, "For MLR model, matrix must have more rows than columns in order to be invertible!!"); //training_result_.ReSize(0, 0); //return; } Eigen::MatrixXd m = descriptor_matrix_.transpose()*descriptor_matrix_; try { training_result_ = m.colPivHouseholderQr().solve(descriptor_matrix_.transpose()*Y_); } catch(BALL::Exception::GeneralException e) { training_result_.resize(0, 0); throw Exception::SingularMatrixError(__FILE__, __LINE__, "Matrix for MLR training is singular!! Check that descriptor_matrix_ does not contain empty columns!"); return; } calculateOffsets(); }
// Constructs the attribute list from a braced initializer list of
// attributes, then computes each attribute's byte offset within a vertex.
//
// @param initList attributes in the order they appear in a vertex.
APG::VertexAttributeList::VertexAttributeList(std::initializer_list<VertexAttribute> initList) {
	for (const auto &att : initList) {
		// Pass att directly: wrapping it in an explicit VertexAttribute(att)
		// temporary forced an extra copy before emplace_back copied again.
		attributes.emplace_back(att);
	}

	calculateOffsets();
}
// Constructs a Minix file-system instance on top of the given device:
// configures the device's block size, computes bitmap/table offsets, builds
// the i-node table, allocates the zone bitmap, and initializes the root
// directory i-node.
//
// @param device        block device holding the file system (forwarded to
//                      FileSystemUnix; ownership not visible in this chunk)
// @param mount_flags   mount options forwarded to FileSystemUnix
// @param super_block   Minix superblock, already read from disk
// @param minix_version on-disk i-node format, forwarded to createInodeTable()
// @param filename_len  maximum file-name length; must be 14 or 30
FileSystemMinix::FileSystemMinix(FsDevice* device, uint32 mount_flags,
                                 minix_super_block super_block,
                                 uint16 minix_version, uint16 filename_len) :
    FileSystemUnix(device, mount_flags),
    superblock_(super_block),
    zone_size_(0),
    FILENAME_LEN_(filename_len),
    // directory entry = file name plus 2 extra bytes (presumably the
    // on-disk i-node number — NOTE(review): confirm against dentry layout)
    DIR_ENTRY_SIZE_(FILENAME_LEN_+2),
    zone_bitmap_(NULL)
{
  // Minix only defines 14- or 30-character file-name variants.
  assert( FILENAME_LEN_ == 14 || FILENAME_LEN_ == 30 );

  // setting Minix Blocks size on FsDevice
  device_->setBlockSize(getBlockSize());

  // calculate the bitmap and table offsets
  calculateOffsets();

  // creating the i-node table
  createInodeTable(minix_version);

  // create the zone-bitmap covering sectors
  // [zone_bitmap_sector_, zone_bitmap_sector_ + zone_bitmap_size_ - 1]
  zone_bitmap_ = new FsBitmap(this, getVolumeManager(), zone_bitmap_sector_,
                              zone_bitmap_sector_+zone_bitmap_size_-1,
                              superblock_.s_nzones);

  debug(FS_MINIX, "FileSystemMinix() - FsBitmap Zone successfully created\n");

  // init root-directory
  initRootInode();
}
// Recalculates the projection matrix. This uses a method that needs to // know the distance in the screen plane from the origin (determined by the // normal to the plane throught the origin) to the edges of the screen. // This method can be used for any rectangular planar screen. By adjusting // the wall rotation matrix, this method can be used for the general case // of a rectangular screen in 3-space void SurfaceProjection::calcViewMatrix(const gmtl::Point3f& eyePos, const float scaleFactor) { calculateOffsets(); calcViewFrustum(eyePos, scaleFactor); // Need to post translate to get the view matrix at the position of the // eye. mViewMat = m_surface_M_base * gmtl::makeTrans<gmtl::Matrix44f>(-eyePos); }
/** Constructor, sets the vertex attributes in a specific order */
VertexAttributes::VertexAttributes(const VertexAttribute* attributes, int attributesLength)
{
	// An attribute set must contain at least one attribute.
	if (attributesLength < 1)
		throw GdxRuntimeException("attributes must be >= 1");

	m_attributesLength = attributesLength;
	m_attributes = new VertexAttribute[attributesLength];

	// Take our own copy of the caller's attribute array.
	int idx = 0;
	while (idx < attributesLength)
	{
		m_attributes[idx] = attributes[idx];
		++idx;
	}

	// Validate the set, then derive the per-vertex byte size from the
	// accumulated attribute offsets.
	checkValidity();
	m_vertexSize = calculateOffsets();
}
// // Wad::writeWad // void Wad::writeWad(std::FILE *out) { calculateOffsets(); std::fputs(option_iwad.data ? "IWAD" : "PWAD", out); Lump::write_32(out, count); Lump::write_32(out, 16); Lump::write_32(out, 0); for(Lump *lump = head; lump; lump = lump->next) lump->writeHead(out); for(Lump *lump = head; lump; lump = lump->next) lump->writeData(out); }
void TrackedSurfaceProjection::updateSurfaceParams(const float scaleFactor) { /* Here we rotate the original screen corners, and then have SurfaceProjection::calculateOffsets * handle the calculation of mOriginToScreen, etc. */ // Get the tracking data for the surface offset gmtl::Matrix44f trackerMatrix = mTracker->getData(scaleFactor); gmtl::Vec3f trans=gmtl::makeTrans<gmtl::Vec3f>(trackerMatrix); trans=trans/scaleFactor; gmtl::setTrans(trackerMatrix,trans); mLLCorner=trackerMatrix * mOriginalLLCorner; mLRCorner=trackerMatrix * mOriginalLRCorner; mURCorner=trackerMatrix * mOriginalURCorner; mULCorner=trackerMatrix * mOriginalULCorner; calculateOffsets(); }
// Trains the kernel partial-least-squares model: builds the kernel matrix
// K_ from the descriptors, then extracts latent components one at a time
// with an iterative inner loop (refining w, t, c, u until u converges),
// deflating K_ after each component. Finally assembles training_result_
// from the loadings and weights.
//
// @throws Exception::InconsistentUsage if no descriptor data has been read.
void KPLSModel::train()
{
	if (descriptor_matrix_.cols() == 0)
	{
		throw Exception::InconsistentUsage(__FILE__, __LINE__, "Data must be read into the model before training!");
	}
// 	if (descriptor_matrix_.cols() < no_components_)
// 	{
// 		throw Exception::TooManyPLSComponents(__FILE__, __LINE__, no_components_, descriptor_matrix_.cols());
// 	}

	kernel->calculateKernelMatrix(descriptor_matrix_, K_);

	Eigen::MatrixXd P;  // Matrix P saves all vectors p
	int cols = K_.cols();

	// determine the number of components that are to be created.
	// no_components_ contains the number of components desired by the user,
	// but obviously we cannot calculate more PLS components than features
	int features = descriptor_matrix_.cols();
	unsigned int components_to_create = no_components_;
	if (features < no_components_) components_to_create = features;

	U_.resize(K_.rows(), components_to_create);
	loadings_.resize(cols, components_to_create);
	weights_.resize(Y_.cols(), components_to_create);
	latent_variables_.resize(K_.rows(), components_to_create);
	P.resize(cols, components_to_create);

	Eigen::VectorXd w;
	Eigen::VectorXd t;
	Eigen::VectorXd c;
	Eigen::VectorXd u = Y_.col(0);  // initial score vector: first response column
	Eigen::VectorXd u_old;

	for (unsigned int j = 0; j < components_to_create; j++)
	{
		// Inner refinement loop, capped at 10000 iterations in case the
		// relative change in u never drops below the 1e-7 threshold.
		for (int i = 0; i < 10000 ; i++)
		{
			w = K_.transpose()*u / Statistics::scalarProduct(u);
			w = w / Statistics::euclNorm(w);
			t = K_*w;
			c = Y_.transpose()*t / Statistics::scalarProduct(t);
			u_old = u;
			u = Y_*c / Statistics::scalarProduct(c);

			if (Statistics::euclDistance(u, u_old)/Statistics::euclNorm(u) > 0.0000001)
			{
				continue;
			}
			else // if u has converged
			{
				break;
			}
		}

		// Deflate K_ by the contribution of this component before the next
		// component is extracted.
		Eigen::VectorXd p = K_.transpose() * t / Statistics::scalarProduct(t);
		K_ -= t * p.transpose();

		// Store this component's vectors as column j.
		U_.col(j) = u;
		loadings_.col(j) = w;
		weights_.col(j) = c;
		P.col(j) = p;
		latent_variables_.col(j) = t;
	}

// 	try // p's are not orthogonal to each other, so that in rare cases P.t()*loadings_ is not invertible
// 	{
// 		loadings_ = loadings_*(P.t()*loadings_).i();
// 	}
// 	catch(BALL::Exception::MatrixIsSingular e)
// 	{
// 		Eigen::MatrixXd I; I.setToIdentity(P.cols());
// 		I *= 0.001;
// 		loadings_ = loadings_*(P.t()*loadings_+I).i();
// 	}

	// Solve (P^T * loadings_) x = weights_^T instead of inverting explicitly.
	Eigen::MatrixXd m = P.transpose()*loadings_;
	training_result_ = loadings_*m.colPivHouseholderQr().solve(weights_.transpose());

	calculateOffsets();
}
// // Wad::sizeWad // std::size_t Wad::sizeWad() { calculateOffsets(); return size; }