void lincg(PyramidT& pyramid, PyramidT& pC, const Array2Df& b, Array2Df& x, const int itmax, const float tol, Progress &ph) { float rdotr_curr; float rdotr_prev; float rdotr_best; float alpha; float beta; const size_t rows = pyramid.getRows(); const size_t cols = pyramid.getCols(); const size_t n = rows*cols; const float tol2 = tol*tol; Array2Df x_best(cols, rows); Array2Df r(cols, rows); Array2Df p(cols, rows); Array2Df Ap(cols, rows); // bnrm2 = ||b|| const float bnrm2 = utils::dotProduct(b.data(), n); // r = b - Ax multiplyA(pyramid, pC, x, r); // r = A x utils::vsub(b.data(), r.data(), r.data(), n); // r = b - r // rdotr = r.r rdotr_best = rdotr_curr = utils::dotProduct(r.data(), n); // Setup initial vector std::copy(r.begin(), r.end(), p.begin()); // p = r std::copy(x.begin(), x.end(), x_best.begin()); // x_best = x const float irdotr = rdotr_curr; const float percent_sf = 100.0f/std::log(tol2*bnrm2/irdotr); int iter = 0; int num_backwards = 0; for (; iter < itmax; ++iter) { // TEST ph.setValue( static_cast<int>(std::log(rdotr_curr/irdotr)*percent_sf) ); // User requested abort if ( ph.canceled() && iter > 0 ) { break; } // Ap = A p multiplyA(pyramid, pC, p, Ap); // alpha = r.r / (p . Ap) alpha = rdotr_curr / utils::dotProduct(p.data(), Ap.data(), n); // r = r - alpha Ap utils::vsubs(r.data(), alpha, Ap.data(), r.data(), n); // rdotr = r.r rdotr_prev = rdotr_curr; rdotr_curr = utils::dotProduct(r.data(), n); // Have we gone unstable? if (rdotr_curr > rdotr_prev) { // Save where we've got to if (num_backwards == 0 && rdotr_prev < rdotr_best) { rdotr_best = rdotr_prev; std::copy(x.begin(), x.end(), x_best.begin()); } num_backwards++; } else { num_backwards = 0; } // x = x + alpha * p utils::vadds(x.data(), alpha, p.data(), x.data(), n); // Exit if we're done // fprintf(stderr, "iter:%d err:%f\n", iter+1, sqrtf(rdotr/bnrm2)); if (rdotr_curr/bnrm2 < tol2) break; if (num_backwards > NUM_BACKWARDS_CEILING) { // Reset num_backwards = 0; std::copy(x_best.begin(), x_best.end(), x.begin()); // r = Ax multiplyA(pyramid, pC, x, r); // r = b - r utils::vsub(b.data(), r.data(), r.data(), n); // rdotr = r.r rdotr_best = rdotr_curr = utils::dotProduct(r.data(), r.size()); // p = r std::copy(r.begin(), r.end(), p.begin()); } else { // p = r + beta * p beta = rdotr_curr/rdotr_prev; utils::vadds(r.data(), beta, p.data(), p.data(), n); } } // Use the best version we found if (rdotr_curr > rdotr_best) { rdotr_curr = rdotr_best; std::copy(x_best.begin(), x_best.end(), x.begin()); } if (rdotr_curr/bnrm2 > tol2) { // Not converged ph.setValue( static_cast<int>(std::log(rdotr_curr/irdotr)*percent_sf) ); if (iter == itmax) { std::cerr << std::endl << "pfstmo_mantiuk06: Warning: Not "\ "converged (hit maximum iterations), error = " << std::sqrt(rdotr_curr/bnrm2) << " (should be below " << tol <<")" << std::endl; } else { std::cerr << std::endl << "pfstmo_mantiuk06: Warning: Not converged "\ "(going unstable), error = " << std::sqrt(rdotr_curr/bnrm2) << " (should be below " << tol << ")" << std::endl; } } else { ph.setValue( itmax ); } }
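// NOTE: The following standalone sketch is not part of the original source. It shows, under
// illustrative assumptions (a small dense 3x3 SPD system instead of the pyramid operator
// multiplyA(), no restarts or best-solution tracking), the core conjugate-gradient iteration
// that lincg() above is built around: alpha = r.r/(p.Ap), x += alpha*p, r -= alpha*Ap,
// beta = r_new.r_new/r.r, p = r + beta*p.
#include <array>
#include <cstdio>

int main() {
    const std::array<std::array<double, 3>, 3> A = {{{4, 1, 0}, {1, 3, 1}, {0, 1, 2}}};
    const std::array<double, 3> b = {1, 2, 3};
    std::array<double, 3> x = {0, 0, 0};

    // r = b - A x, p = r
    std::array<double, 3> r = b;
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j) r[i] -= A[i][j] * x[j];
    std::array<double, 3> p = r;

    double rdotr = r[0] * r[0] + r[1] * r[1] + r[2] * r[2];
    for (int iter = 0; iter < 100 && rdotr > 1e-12; ++iter) {
        std::array<double, 3> Ap = {0, 0, 0};
        for (int i = 0; i < 3; ++i)
            for (int j = 0; j < 3; ++j) Ap[i] += A[i][j] * p[j];

        const double pAp = p[0] * Ap[0] + p[1] * Ap[1] + p[2] * Ap[2];
        const double alpha = rdotr / pAp;                     // alpha = r.r / (p.Ap)
        for (int i = 0; i < 3; ++i) x[i] += alpha * p[i];     // x = x + alpha p
        for (int i = 0; i < 3; ++i) r[i] -= alpha * Ap[i];    // r = r - alpha Ap

        const double rdotr_new = r[0] * r[0] + r[1] * r[1] + r[2] * r[2];
        const double beta = rdotr_new / rdotr;                // beta = r_new.r_new / r.r
        for (int i = 0; i < 3; ++i) p[i] = r[i] + beta * p[i];// p = r + beta p
        rdotr = rdotr_new;
    }
    std::printf("x = %f %f %f\n", x[0], x[1], x[2]);
    return 0;
}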
static int HEXDump_FindCallBack(FindData &fd, HDFDCB_Data *pUserParam) { if (!fd.fileMatched) return 0; int argc(pUserParam->argc); TCHAR **argv(pUserParam->argv); BinaryFind &bf(pUserParam->bf); FILE *fp = NULL; _tfopen_s(&fp, fd.fullPath.c_str(), _T("rb")); if (fp != NULL) { int nMatches(0); bf.SetFindBuffer(); _tprintf(_T("%s\n"), fd.fullPath.c_str()); pUserParam->nFiles++; Progress prog; const TCHAR *argStr = FindArgValue(argc, argv, _T("-o=")); if (argStr != NULL) { long long offset(StringUtils::getLLfromStr(argStr)); _fseeki64(fp, offset, offset >= 0 ? SEEK_SET : SEEK_END); } long long fileOffset(_ftelli64(fp)); long long sizeToRead(fd.GetFileSize()); const long long fileSize(sizeToRead); argStr = FindArgValue(argc, argv, _T("-s=")); if (argStr != NULL) { long long szRead = StringUtils::getLLfromStr(argStr); if (fileOffset + szRead > sizeToRead) sizeToRead = sizeToRead - fileOffset; else sizeToRead = szRead; } size_t findDumpSize(0), findDumpOffset(-16); if (bf.HasFindPattern()) { argStr = FindArgValue(argc, argv, _T("-d")); if (argStr != NULL) { if (*argStr == '=') { findDumpSize = StringUtils::getLLfromStr(argStr + 1); STR_SKIP_TILL_CHAR(argStr, ';'); if (*argStr) findDumpOffset = StringUtils::getLLfromStr(argStr + 1); } if (findDumpSize <= 0) findDumpSize = 48; } } prog.SetTask(sizeToRead); BinaryData buffer(NULL, 4 * 1024 * 1024); while (sizeToRead > 0) { buffer.ReadFromFile(fp); if (buffer.DataSize() <= 0) break; sizeToRead -= buffer.DataSize(); if (bf.HasFindPattern()) { bf.SetFindBuffer(buffer); while (true) { long long findPos = bf.FindNext(); if (findPos >= 0) { _tprintf(_T("%08llX=-%08llX\n"), fileOffset + findPos, fileSize - (fileOffset + findPos)); if (findDumpSize > 0) { const long long curPos(_ftelli64(fp)); long long newPos(fileOffset + findPos + findDumpOffset); if (newPos < 0) newPos = 0; newPos &= ~0xf; BinaryData bd(NULL, findDumpSize); bd.ReadFromFile(fp, 0, newPos); HexDump(bd, newPos); _fseeki64(fp, curPos, SEEK_SET); } ++nMatches; } else break; } } else fileOffset = HexDump(buffer, fileOffset); if (prog.UpdateProgress(prog.GetCurrentDone() + buffer.DataSize())) _tprintf(_T("\r%02.02f%%\r"), prog.GetCurrentPercentageDone()); } _tprintf(_T("\r \r")); fclose(fp); if (nMatches > 0) { pUserParam->nFound++; if (nMatches > pUserParam->nMaxMatchPerFile) pUserParam->nMaxMatchPerFile = nMatches; if (nMatches > 1) _tprintf(_T("%d matches\n"), nMatches); } } return 0; }
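// NOTE: Illustrative sketch, not part of the original source. It isolates the address arithmetic
// used above when dumping context around a match: start a little before the hit (findDumpOffset)
// and round the start address down to a 16-byte boundary (newPos &= ~0xf) so hex-dump rows stay
// aligned. The sample offsets are assumptions for demonstration only.
#include <cstdio>

int main() {
    const long long fileOffset = 0x1200;   // offset of the current buffer in the file
    const long long findPos = 0x37;        // match position inside the buffer
    const long long dumpOffset = -16;      // show 16 bytes of leading context

    long long start = fileOffset + findPos + dumpOffset;
    if (start < 0) start = 0;
    start &= ~0xfLL;                       // align down to a 16-byte boundary

    std::printf("match at 0x%llX, dump starts at 0x%llX\n", fileOffset + findPos, start);
    return 0;
}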
bool HIGHPASS::execute(PlugInArgList* pInArgList, PlugInArgList* pOutArgList)
{
   StepResource pStep("Tutorial 5", "app", "219F1882-A59F-4835-BE2A-E83C0C8111EB");
   if (pInArgList == NULL || pOutArgList == NULL)
   {
      return false;
   }
   Progress* pProgress = pInArgList->getPlugInArgValue<Progress>(Executable::ProgressArg());
   RasterElement* pCube = pInArgList->getPlugInArgValue<RasterElement>(Executable::DataElementArg());
   if (pCube == NULL)
   {
      std::string msg = "A raster cube must be specified.";
      pStep->finalize(Message::Failure, msg);
      if (pProgress != NULL)
      {
         pProgress->updateProgress(msg, 0, ERRORS);
      }
      return false;
   }
   RasterDataDescriptor* pDesc = static_cast<RasterDataDescriptor*>(pCube->getDataDescriptor());
   VERIFY(pDesc != NULL);
   FactoryResource<DataRequest> pRequest;
   pRequest->setInterleaveFormat(BSQ);
   DataAccessor pSrcAcc = pCube->getDataAccessor(pRequest.release());
   ModelResource<RasterElement> pResultCube(RasterUtilities::createRasterElement(pCube->getName() + "DResult",
      pDesc->getRowCount(), pDesc->getColumnCount(), pDesc->getDataType()));
   if (pResultCube.get() == NULL)
   {
      std::string msg = "A raster cube could not be created.";
      pStep->finalize(Message::Failure, msg);
      if (pProgress != NULL)
      {
         pProgress->updateProgress(msg, 0, ERRORS);
      }
      return false;
   }
   FactoryResource<DataRequest> pResultRequest;
   pResultRequest->setWritable(true);
   DataAccessor pDestAcc = pResultCube->getDataAccessor(pResultRequest.release());
   int rowSize = pDesc->getRowCount();
   int colSize = pDesc->getColumnCount();
   int zero = 0;
   int prevCol = 0;
   int prevRow = 0;
   int nextCol = 0;
   int nextRow = 0;
   int prevCol1 = 0;
   int prevRow1 = 0;
   int nextCol1 = 0;
   int nextRow1 = 0;
   for (unsigned int row = 0; row < pDesc->getRowCount(); ++row)
   {
      if (pProgress != NULL)
      {
         pProgress->updateProgress("Calculating result", row * 100 / pDesc->getRowCount(), NORMAL);
      }
      if (isAborted())
      {
         std::string msg = getName() + " has been aborted.";
         pStep->finalize(Message::Abort, msg);
         if (pProgress != NULL)
         {
            pProgress->updateProgress(msg, 0, ABORT);
         }
         return false;
      }
      if (!pDestAcc.isValid())
      {
         std::string msg = "Unable to access the cube data.";
         pStep->finalize(Message::Failure, msg);
         if (pProgress != NULL)
         {
            pProgress->updateProgress(msg, 0, ERRORS);
         }
         return false;
      }
      for (unsigned int col = 0; col < pDesc->getColumnCount(); ++col)
      {
         double value = edgeDetection7(pSrcAcc, row, col, pDesc->getRowCount(), pDesc->getColumnCount());
         switchOnEncoding(pDesc->getDataType(), conversion, pDestAcc->getColumn(), value);
         pDestAcc->nextColumn();
      }
      pDestAcc->nextRow();
   }
   if (!isBatch())
   {
      Service<DesktopServices> pDesktop;
      SpatialDataWindow* pWindow = static_cast<SpatialDataWindow*>(pDesktop->createWindow(pResultCube->getName(),
         SPATIAL_DATA_WINDOW));
      SpatialDataView* pView = (pWindow == NULL) ? NULL : pWindow->getSpatialDataView();
      if (pView == NULL)
      {
         std::string msg = "Unable to create view.";
         pStep->finalize(Message::Failure, msg);
         if (pProgress != NULL)
         {
            pProgress->updateProgress(msg, 0, ERRORS);
         }
         return false;
      }
      pView->setPrimaryRasterElement(pResultCube.get());
      pView->createLayer(RASTER, pResultCube.get());
   }
   if (pProgress != NULL)
   {
      pProgress->updateProgress("HighPass is complete.", 100, NORMAL);
   }
   pOutArgList->setPlugInArgValue("Result", pResultCube.release());
   pStep->finalize();
   return true;
}
void PreviewWidget::setCurrentDataset(ImportDescriptor* pDataset) { ImportDescriptor* pActiveDataset = getCurrentDataset(); if (pDataset == pActiveDataset) { return; } // Do nothing if the new current data set is not a data set in the current file if (pDataset != NULL) { QMap<QString, vector<ImportDescriptor*> >::const_iterator iter = mDatasets.find(mCurrentFile); if (iter != mDatasets.end()) { vector<ImportDescriptor*> fileDatasets = iter.value(); if (std::find(fileDatasets.begin(), fileDatasets.end(), pDataset) == fileDatasets.end()) { return; } } } mpCurrentDataset = pDataset; // Delete the current preview destroyPreview(); // Activate the label indicating that no data set preview is available mpDatasetStack->setCurrentIndex(0); // Check for no active data set if (mpCurrentDataset == NULL) { emit currentDatasetChanged(mpCurrentDataset); return; } // Only show the preview if the widget is visible or if the data set is imported if ((isVisible() == false) || (mpCurrentDataset->isImported() == false)) { if (pActiveDataset != NULL) { emit currentDatasetChanged(NULL); } return; } if (mpImporter != NULL) { Service<PlugInManagerServices> pManager; Progress* pProgress = pManager->getProgress(dynamic_cast<PlugIn*>(mpImporter)); if (pProgress != NULL) { pProgress->attach(SIGNAL_NAME(Subject, Modified), Slot(this, &PreviewWidget::progressUpdated)); pProgress->updateProgress("Getting preview...", 0, NORMAL); } // Activate the progress bar mpDatasetStack->setCurrentIndex(1); // Update the data set label QMap<QString, vector<ImportDescriptor*> >::const_iterator iter = mDatasets.find(mCurrentFile); VERIFYNRV(iter != mDatasets.end()); vector<ImportDescriptor*> fileDatasets = iter.value(); unsigned int iIndex = 0; unsigned int numDatasets = fileDatasets.size(); for (iIndex = 0; iIndex < numDatasets; ++iIndex) { ImportDescriptor* pCurrentDataset = fileDatasets[iIndex]; if (pCurrentDataset == pDataset) { break; } } VERIFYNRV(iIndex < numDatasets); const DataDescriptor* pDescriptor = mpCurrentDataset->getDataDescriptor(); VERIFYNRV(pDescriptor != NULL); mpDatasetLabel->setText("<b>Data Set (" + QString::number(iIndex + 1) + " of " + QString::number(numDatasets) + "):</b> " + QString::fromStdString(pDescriptor->getName())); // Process events to erase the no preview available page qApp->processEvents(); // Get the preview from the importer mpImporterWidget = mpImporter->getPreview(pDescriptor, pProgress); if (mpImporterWidget != NULL) { // Display the preview widget SpatialDataView* pView = dynamic_cast<SpatialDataView*>(mpImporterWidget); if (pView != NULL) { ChippingWidget* pChippingWidget = new ChippingWidget(pView, dynamic_cast<const RasterDataDescriptor*>(pDescriptor), mpPreview); VERIFYNRV(pChippingWidget != NULL); VERIFYNR(connect(pChippingWidget, SIGNAL(chipChanged()), this, SLOT(updateCurrentDataset()))); mpImporterWidget = pChippingWidget; } else { mpImporterWidget->setParent(mpPreview); } QGridLayout* pGrid = dynamic_cast<QGridLayout*>(mpPreview->layout()); if (pGrid != NULL) { pGrid->addWidget(mpImporterWidget, 0, 0); } // Activate the preview widget mpDatasetStack->setCurrentIndex(2); // Notify of changes emit currentDatasetChanged(mpCurrentDataset); } else { mpDatasetStack->setCurrentIndex(0); if (pActiveDataset != NULL) { emit currentDatasetChanged(NULL); } } if (pProgress != NULL) { pProgress->detach(SIGNAL_NAME(Subject, Modified), Slot(this, &PreviewWidget::progressUpdated)); } } }
bool CgmImporter::execute(PlugInArgList* pInArgList, PlugInArgList* pOutArgList) { Progress* pProgress = NULL; DataElement* pElement = NULL; StepResource pStep("Import cgm element", "app", "8D5522FE-4A89-44cb-9735-6920A3BFC903"); // get input arguments and log some useful info about them { // scope the MessageResource MessageResource pMsg("Input arguments", "app", "A1735AC7-C182-45e6-826F-690DBA15D84A"); pProgress = pInArgList->getPlugInArgValue<Progress>(Executable::ProgressArg()); pMsg->addBooleanProperty("Progress Present", (pProgress != NULL)); pElement = pInArgList->getPlugInArgValue<DataElement>(Importer::ImportElementArg()); if (pElement == NULL) { if (pProgress != NULL) { pProgress->updateProgress("No data element", 0, ERRORS); } pStep->finalize(Message::Failure, "No data element"); return false; } pMsg->addProperty("Element name", pElement->getName()); } if (pProgress != NULL) { pProgress->updateProgress((string("Read and parse file ") + pElement->getFilename()), 20, NORMAL); } // Create a new annotation layer for a spatial data view or get the layout layer for a product view if (pProgress != NULL) { pProgress->updateProgress("Create a new layer", 30, NORMAL); } View* pView = mpDesktop->getCurrentWorkspaceWindowView(); if (pView == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Could not access the current view.", 0, ERRORS); } pStep->finalize(Message::Failure, "Could not access the current view."); return false; } UndoGroup undoGroup(pView, "Import CGM"); AnnotationLayer* pLayer = NULL; SpatialDataView* pSpatialDataView = dynamic_cast<SpatialDataView*>(pView); if (pSpatialDataView != NULL) { // Set the parent element of the annotation element to the primary raster element LayerList* pLayerList = pSpatialDataView->getLayerList(); if (pLayerList != NULL) { RasterElement* pNewParentElement = pLayerList->getPrimaryRasterElement(); if (pNewParentElement != NULL) { Service<ModelServices> pModel; pModel->setElementParent(pElement, pNewParentElement); } } pLayer = dynamic_cast<AnnotationLayer*>(pSpatialDataView->createLayer(ANNOTATION, pElement)); } else { ProductView* pProductView = dynamic_cast<ProductView*>(mpDesktop->getCurrentWorkspaceWindowView()); if (pProductView != NULL) { pLayer = pProductView->getLayoutLayer(); } } if (pLayer == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Unable to get the annotation layer", 0, ERRORS); } pStep->finalize(Message::Failure, "Unable to get the annotation layer"); return false; } // add the CGM object if (pProgress != NULL) { pProgress->updateProgress("Create the CGM object", 60, NORMAL); } CgmObject* pCgmObject = dynamic_cast<CgmObject*>(pLayer->addObject(CGM_OBJECT)); if (pCgmObject == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Unable to create the CGM object", 0, ERRORS); } pStep->finalize(Message::Failure, "Unable to create the CGM object"); return false; } // load the CGM file if (pProgress != NULL) { pProgress->updateProgress("Load the CGM file", 90, NORMAL); } string fname = pElement->getDataDescriptor()->getFileDescriptor()->getFilename().getFullPathAndName(); if (!pCgmObject->deserializeCgm(fname)) { if (pProgress != NULL) { pProgress->updateProgress("Error loading the CGM element", 0, ERRORS); } pStep->finalize(Message::Failure, "Unable to parse the CGM file."); return false; } if (pProgress != NULL) { pProgress->updateProgress("Successfully loaded the CGM file", 100, NORMAL); } pStep->finalize(Message::Success); return true; }
bool Segmentation::Ransac_for_buildings(float dem_spacing, double ransac_threshold, cv::Mat original_tiles_merged) { StepResource pStep("Computing RANSAC on all the identified buildings", "app", "a2beb9b8-218e-11e4-969b-b2227cce2b54"); ProgressResource pResource("ProgressBar"); Progress *pProgress = pResource.get(); pProgress-> setSettingAutoClose(true); pProgress->updateProgress("Computing RANSAC on all buildings", 0, NORMAL); Ransac_buildings = Ransac(Segmentation::path); cv::Mat roof_image = cv::Mat::zeros(original_tiles_merged.size(), CV_8UC3); buildingS.resize(blobs.size()); buildingS_inliers.resize(blobs.size()); buildingS_outliers.resize(blobs.size()); buildingS_plane_coefficients.resize(blobs.size()); buldingS_number_inliers.resize(blobs.size()); std::ofstream building_file; std::ofstream cont_file; cont_file.open (std::string(path) + "/Results/Number_of_RANSAC_applications.txt"); for(int i = 0; i < blobs.size(); i++) {// i index is the building (blob) index pProgress->updateProgress("Computing RANSAC on all buildings\nBuilding "+ StringUtilities::toDisplayString(i) + " on "+ StringUtilities::toDisplayString(blobs.size()), static_cast<double>(static_cast<double>(i)/blobs.size()*100), NORMAL); building_file.open (std::string(path) + "/Results/Building_" + StringUtilities::toDisplayString(i)+".txt"); building_file << 'i' << '\t' << 'j' << '\t' << 'X' << '\t' << 'Y' << '\t' << 'Z' << '\n'; buildingS[i].setConstant(blobs[i].size(), 3, 0.0); // the j loop retrieves the X, Y, Z coordinate for each pixel of all the buildings for(int j = 0; j < blobs[i].size(); j++) {// j index is the pixel index for the single building // loop on all the pixel of the SINGLE building int pixel_column = blobs[i][j].x; int pixel_row = blobs[i][j].y; double x_building = pixel_column * dem_spacing;// xMin + pixel_column * dem_spacing // object coordinate double y_building = pixel_row * dem_spacing;// yMin + pixel_row * dem_spacing // object coordinate double z_building = original_tiles_merged.at<float>(pixel_row, pixel_column);//object coordinate buildingS[i](j,0) = x_building; buildingS[i](j,1) = y_building; buildingS[i](j,2) = z_building; building_file << pixel_row+1 << '\t' << pixel_column+1 << '\t' << buildingS[i](j,0) << '\t' << buildingS[i](j,1) << '\t' << buildingS[i](j,2) << '\n'; //+1 on the imae coordinates to verify with opticks' rasters (origin is 1,1) } building_file.close(); std::ofstream inliers_file; std::ofstream parameters_file; inliers_file.open (std::string(path) + "/Results/Inliers_building_" + StringUtilities::toDisplayString(i)+".txt"); parameters_file.open (std::string(path) + "/Results/plane_parameters_building_" + StringUtilities::toDisplayString(i)+".txt"); //parameters_file << "a\tb\tc\td\tmean_dist\tstd_dist\n"; int cont = 0; Ransac_buildings.ransac_msg += "\n____________Building number " + StringUtilities::toDisplayString(i) +"____________\n"; Ransac_buildings.ransac_msg += "\nITERATION NUMBER " + StringUtilities::toDisplayString(cont) +"\n"; Ransac_buildings.ComputeModel(buildingS[i], ransac_threshold); buldingS_number_inliers[i]= Ransac_buildings.n_best_inliers_count; buildingS_inliers[i] = Ransac_buildings.final_inliers; buildingS_outliers[i] = Ransac_buildings.final_outliers; buildingS_plane_coefficients[i] = Ransac_buildings.final_model_coefficients; double inliers_percentage = static_cast<double>( (Ransac_buildings.n_best_inliers_count) ) / static_cast<double> (buildingS[i].rows()); int inliers_so_far = Ransac_buildings.n_best_inliers_count; std::vector<int> 
old_final_outliers = Ransac_buildings.final_outliers;
   // DRAWS THE ROOFS yellow
   for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++)
   {
      int pixel_row = static_cast<int>(buildingS[i](Ransac_buildings.final_inliers[k], 1) / dem_spacing);
      int pixel_column = static_cast<int>(buildingS[i](Ransac_buildings.final_inliers[k], 0) / dem_spacing);
      unsigned char r = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
      unsigned char g = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
      unsigned char b = 0;//unsigned char(255 * (rand()/(1.0 + RAND_MAX)));
      roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b;
      roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g;
      roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r;
   }
   while (inliers_percentage < 0.90)
   {
      cont++;
      Ransac_buildings.ransac_msg += "\nITERATION NUMBER " + StringUtilities::toDisplayString(cont) + "\n";
      Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor> building_outliers;
      building_outliers.setConstant(buildingS[i].rows() - inliers_so_far, 3, 0.0);
      //* maybe the method is fine as it is, because the outlier matrix is filled in an ordered way;
      //* however the inlier/outlier indexes no longer refer to the original building matrix but to the input matrix,
      //* so the index IDs must be mapped back to their original positions
      for (int w = 0; w < building_outliers.rows(); w++)
      {
         building_outliers(w, 0) = buildingS[i](old_final_outliers[w], 0);
         building_outliers(w, 1) = buildingS[i](old_final_outliers[w], 1);
         building_outliers(w, 2) = buildingS[i](old_final_outliers[w], 2);
         //Ransac_buildings.ransac_msg += "\n" + StringUtilities::toDisplayString(pixel_row+1) + "\t" + StringUtilities::toDisplayString(pixel_column+1) + "\t" + StringUtilities::toDisplayString(final_outliers[w]) + "\t" + StringUtilities::toDisplayString(building_outliers(w, 0))+ "\t"+ StringUtilities::toDisplayString(building_outliers(w, 1)) + "\t" + StringUtilities::toDisplayString(building_outliers(w, 2))+"\n"; // needed for testing (test passed at first iteration)
      }
      Ransac_buildings.ransac_msg += "\n";
      //Ransac_buildings.ransac_msg += "\ntest "+ StringUtilities::toDisplayString(inliers_percentage*100)+"\n";
      Ransac_buildings.ComputeModel(building_outliers, ransac_threshold);
      //inliers_percentage = inliers_percentage + static_cast<double>( (n_best_inliers_count) ) / static_cast<double> (building_outliers.rows());
      inliers_percentage = inliers_percentage + static_cast<double>( (Ransac_buildings.n_best_inliers_count) ) / static_cast<double> (buildingS[i].rows());
      Ransac_buildings.ransac_msg += "\nINLIERS IN RELATION TO GLOBAL INDEX (" + StringUtilities::toDisplayString(Ransac_buildings.n_best_inliers_count) + ")\n";
      for (size_t i = 0; i < Ransac_buildings.n_best_inliers_count; i++)
      {
         Ransac_buildings.ransac_msg += StringUtilities::toDisplayString(old_final_outliers[Ransac_buildings.final_inliers[i]]) + " ";
         inliers_file << old_final_outliers[Ransac_buildings.final_inliers[i]] << "\t";
      }
      Ransac_buildings.ransac_msg += "\n";
      inliers_file << "\n";
      //old_final_outliers.resize(building_outliers.rows() - Ransac_buildings.n_best_inliers_count);
      Ransac_buildings.ransac_msg += "\nOUTLIERS IN RELATION TO GLOBAL INDEX(" + StringUtilities::toDisplayString(building_outliers.rows() - Ransac_buildings.n_best_inliers_count) + ")\n";
      for (size_t i = 0; i < building_outliers.rows() - Ransac_buildings.n_best_inliers_count; i++)
      {
         Ransac_buildings.ransac_msg +=
StringUtilities::toDisplayString(old_final_outliers[Ransac_buildings.final_outliers[i]])+" "; old_final_outliers[i] = old_final_outliers[Ransac_buildings.final_outliers[i]];// in this way I refer the outliers indexes to the global indexes (those referred to the original eigen matrix) } //parameters_file << Ransac_buildings.final_model_coefficients[0] << "\t" << Ransac_buildings.final_model_coefficients[1] << "\t" << Ransac_buildings.final_model_coefficients[2] << "\t" << Ransac_buildings.final_model_coefficients[3] << "\t" << Ransac_buildings.mean_distances << "\t"<< Ransac_buildings.std_distances << "\n"; parameters_file << Ransac_buildings.final_model_coefficients[0] << "\t" << Ransac_buildings.final_model_coefficients[1] << "\t" << Ransac_buildings.final_model_coefficients[2] << "\t" << Ransac_buildings.final_model_coefficients[3] << "\n"; if (cont == 1) { // DRAWS THE ROOFS blue for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++) { int pixel_row = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 1) / dem_spacing); int pixel_column = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 0) / dem_spacing); unsigned char r = 0;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char g = 0;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char b = 255;//unsigned char(255 * (rand()/(1.0 + RAND_MAX))); roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r; } } if (cont ==2) { // DRAWS THE ROOFS green for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++) { int pixel_row = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 1) / dem_spacing); int pixel_column = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 0) / dem_spacing); unsigned char r = 0;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char g = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char b = 0;//unsigned char(255 * (rand()/(1.0 + RAND_MAX))); roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r; } } if (cont ==3) { // DRAWS THE ROOFS brown for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++) { int pixel_row = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 1) / dem_spacing); int pixel_column = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 0) / dem_spacing); unsigned char r = 128;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char g = 0;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); unsigned char b = 0;//unsigned char(255 * (rand()/(1.0 + RAND_MAX))); roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g; roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r; } } //if (cont == 4) //{ // // DRAWS THE ROOFS white // for (int k = 0; k < Ransac_buildings.n_best_inliers_count; k++) // { // int pixel_row = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 1) / dem_spacing); // int pixel_column = static_cast<int>(buildingS[i](old_final_outliers[Ransac_buildings.final_inliers[k]], 0) / dem_spacing); // unsigned char r = 255;// unsigned char(255 * (rand()/(1.0 + RAND_MAX))); // unsigned char g = 255;// 
unsigned char(255 * (rand()/(1.0 + RAND_MAX))); // unsigned char b = 255;//unsigned char(255 * (rand()/(1.0 + RAND_MAX))); // roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[0] = b; // roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[1] = g; // roof_image.at<cv::Vec3b>(pixel_row, pixel_column)[2] = r; // } //} Ransac_buildings.ransac_msg += "\n"; inliers_so_far += Ransac_buildings.n_best_inliers_count; }// fine while Ransac_buildings.ransac_msg += "__________________________________________________________________\n"; //boh_file.close(); cont_file << i << "\t" << cont << "\n"; } building_file.close(); cont_file.close(); cv::imshow("roofs", roof_image); cv::imwrite(path + "/Results/building_roofs.png", roof_image); cv::waitKey(0); pProgress->updateProgress("All buildings have been processed with RANSAC.", 100, NORMAL); pStep->finalize(); return true; }
void SliceMD::slice(typename MDEventWorkspace<MDE, nd>::sptr ws) { // Create the ouput workspace typename MDEventWorkspace<OMDE, ond>::sptr outWS( new MDEventWorkspace<OMDE, ond>()); for (size_t od = 0; od < m_binDimensions.size(); od++) { outWS->addDimension(m_binDimensions[od]); } outWS->setCoordinateSystem(ws->getSpecialCoordinateSystem()); outWS->initialize(); // Copy settings from the original box controller BoxController_sptr bc = ws->getBoxController(); // store wrute buffer size for the future // uint64_t writeBufSize = // bc->getFileIO()getDiskBuffer().getWriteBufferSize(); // and disable write buffer (if any) for input MD Events for this algorithm // purposes; // bc->setCacheParameters(1,0); BoxController_sptr obc = outWS->getBoxController(); // Use the "number of bins" as the "split into" parameter for (size_t od = 0; od < m_binDimensions.size(); od++) obc->setSplitInto(od, m_binDimensions[od]->getNBins()); obc->setSplitThreshold(bc->getSplitThreshold()); bool bTakeDepthFromInputWorkspace = getProperty("TakeMaxRecursionDepthFromInput"); int tempDepth = getProperty("MaxRecursionDepth"); size_t maxDepth = bTakeDepthFromInputWorkspace ? bc->getMaxDepth() : size_t(tempDepth); obc->setMaxDepth(maxDepth); // size_t outputSize = writeBufSize; // obc->setCacheParameters(sizeof(OMDE),outputSize); obc->resetNumBoxes(); // Perform the first box splitting outWS->splitBox(); size_t lastNumBoxes = obc->getTotalNumMDBoxes(); // --- File back end ? ---------------- std::string filename = getProperty("OutputFilename"); if (!filename.empty()) { // First save to the NXS file g_log.notice() << "Running SaveMD to create file back-end" << std::endl; IAlgorithm_sptr alg = createChildAlgorithm("SaveMD"); alg->setPropertyValue("Filename", filename); alg->setProperty("InputWorkspace", outWS); alg->setProperty("MakeFileBacked", true); alg->executeAsChildAlg(); if (!obc->isFileBacked()) throw std::runtime_error("SliceMD with file-backed output: Can not set " "up file-backed output workspace "); auto IOptr = obc->getFileIO(); size_t outBufSize = IOptr->getWriteBufferSize(); // the buffer size for resulting workspace; reasonable size is at least 10 // data chunk sizes (nice to verify) if (outBufSize < 10 * IOptr->getDataChunk()) { outBufSize = 10 * IOptr->getDataChunk(); IOptr->setWriteBufferSize(outBufSize); } } // Function defining which events (in the input dimensions) to place in the // output MDImplicitFunction *function = this->getImplicitFunctionForChunk(NULL, NULL); std::vector<API::IMDNode *> boxes; // Leaf-only; no depth limit; with the implicit function passed to it. ws->getBox()->getBoxes(boxes, 1000, true, function); // Sort boxes by file position IF file backed. This reduces seeking time, // hopefully. bool fileBackedWS = bc->isFileBacked(); if (fileBackedWS) API::IMDNode::sortObjByID(boxes); Progress *prog = new Progress(this, 0.0, 1.0, boxes.size()); // The root of the output workspace MDBoxBase<OMDE, ond> *outRootBox = outWS->getBox(); // if target workspace has events, we should count them as added uint64_t totalAdded = outWS->getNEvents(); uint64_t numSinceSplit = 0; // Go through every box for this chunk. // PARALLEL_FOR_IF( !bc->isFileBacked() ) for (int i = 0; i < int(boxes.size()); i++) { MDBox<MDE, nd> *box = dynamic_cast<MDBox<MDE, nd> *>(boxes[i]); // Perform the binning in this separate method. 
if (box) { // An array to hold the rotated/transformed coordinates coord_t outCenter[ond]; const std::vector<MDE> &events = box->getConstEvents(); typename std::vector<MDE>::const_iterator it = events.begin(); typename std::vector<MDE>::const_iterator it_end = events.end(); for (; it != it_end; it++) { // Cache the center of the event (again for speed) const coord_t *inCenter = it->getCenter(); if (function->isPointContained(inCenter)) { // Now transform to the output dimensions m_transformFromOriginal->apply(inCenter, outCenter); // Create the event OMDE newEvent(it->getSignal(), it->getErrorSquared(), outCenter); // Copy extra data, if any copyEvent(*it, newEvent); // Add it to the workspace outRootBox->addEvent(newEvent); numSinceSplit++; } } box->releaseEvents(); // Ask BC if one needs to split boxes if (obc->shouldSplitBoxes(totalAdded, numSinceSplit, lastNumBoxes)) // if (numSinceSplit > 20000000 || (i == int(boxes.size()-1))) { // This splits up all the boxes according to split thresholds and sizes. Kernel::ThreadScheduler *ts = new ThreadSchedulerFIFO(); ThreadPool tp(ts); outWS->splitAllIfNeeded(ts); tp.joinAll(); // Accumulate stats totalAdded += numSinceSplit; numSinceSplit = 0; lastNumBoxes = obc->getTotalNumMDBoxes(); // Progress reporting if (!fileBackedWS) prog->report(i); } if (fileBackedWS) { if (!(i % 10)) prog->report(i); } } // is box } // for each box in the vector prog->report(); outWS->splitAllIfNeeded(NULL); // Refresh all cache. outWS->refreshCache(); g_log.notice() << totalAdded << " " << OMDE::getTypeName() << "'s added to the output workspace." << std::endl; if (outWS->isFileBacked()) { // Update the file-back-end g_log.notice() << "Running SaveMD" << std::endl; IAlgorithm_sptr alg = createChildAlgorithm("SaveMD"); alg->setProperty("UpdateFileBackEnd", true); alg->setProperty("InputWorkspace", outWS); alg->executeAsChildAlg(); } // return the size of the input workspace write buffer to its initial value // bc->setCacheParameters(sizeof(MDE),writeBufSize); this->setProperty("OutputWorkspace", boost::dynamic_pointer_cast<IMDEventWorkspace>(outWS)); delete prog; }
void IsisMain() { // Get user interface UserInterface &ui = Application::GetUserInterface(); // Open the shift definitions file Pvl shiftdef; if (ui.WasEntered("SHIFTDEF")) { shiftdef.Read(ui.GetFilename("SHIFTDEF")); } else { shiftdef.AddObject(PvlObject("Hiccdstitch")); } PvlObject &stitch = shiftdef.FindObject("Hiccdstitch", Pvl::Traverse); // Open the first cube. It will be matched to the second input cube. HiJitCube trans; CubeAttributeInput &attTrans = ui.GetInputAttribute("FROM"); vector<string> bandTrans = attTrans.Bands(); trans.SetVirtualBands(bandTrans); trans.OpenCube(ui.GetFilename("FROM"), stitch); // Open the second cube, it is held in place. We will be matching the // first to this one by attempting to compute a sample/line translation HiJitCube match; CubeAttributeInput &attMatch = ui.GetInputAttribute("MATCH"); vector<string> bandMatch = attMatch.Bands(); match.SetVirtualBands(bandMatch); match.OpenCube(ui.GetFilename("MATCH"), stitch); // Ensure only one band if ((trans.Bands() != 1) || (match.Bands() != 1)) { string msg = "Input Cubes must have only one band!"; throw Isis::iException::Message(Isis::iException::User,msg,_FILEINFO_); } // Now test compatability (basically summing) trans.Compatable(match); // Determine intersection if (!trans.intersects(match)) { string msg = "Input Cubes do not overlap!"; throw Isis::iException::Message(Isis::iException::User,msg,_FILEINFO_); } // Get overlapping regions of each cube HiJitCube::Corners fcorns, mcorns; trans.overlap(match, fcorns); match.overlap(trans, mcorns); #if defined(ISIS_DEBUG) cout << "FROM Poly: " << trans.PolyToString() << std::endl; cout << "MATCH Poly: " << match.PolyToString() << std::endl; cout << "From Overlap: (" << fcorns.topLeft.sample << "," << fcorns.topLeft.line << "), (" << fcorns.lowerRight.sample << "," << fcorns.lowerRight.line << ")\n" ; cout << "Match Overlap: (" << mcorns.topLeft.sample << "," << mcorns.topLeft.line << "), (" << mcorns.lowerRight.sample << "," << mcorns.lowerRight.line << ")\n" ; #endif // We need to get a user definition of how to auto correlate around each // of the grid points. Pvl regdef; Filename regFile(ui.GetFilename("REGDEF")); regdef.Read(regFile.Expanded()); AutoReg *ar = AutoRegFactory::Create(regdef); double flines(fcorns.lowerRight.line - fcorns.topLeft.line + 1.0); double fsamps(fcorns.lowerRight.sample - fcorns.topLeft.sample + 1.0); // We want to create a grid of control points that is N rows by M columns. // Get row and column variables, if not entered, default to 1% of the input // image size int rows(1), cols(1); if (ui.WasEntered("ROWS")) { rows = ui.GetInteger("ROWS"); } else { rows = (int)(((flines - 1.0) / ar->SearchChip()->Lines()) + 1); } cols = ui.GetInteger("COLUMNS"); if (cols == 0) { cols = (int)(((fsamps - 1.0) / ar->SearchChip()->Samples()) + 1); } // Calculate spacing for the grid of points double lSpacing = floor(flines / rows); double sSpacing = floor(fsamps / cols); #if defined(ISIS_DEBUG) cout << "# Samples in Overlap: " << fsamps << endl; cout << "# Lines in Overlap : " << flines << endl; cout << "# Rows: " << rows << endl; cout << "# Columns: " << cols << endl; cout << "Line Spacing: " << lSpacing << endl; cout << "Sample Spacing: " << sSpacing << endl; #endif // Display the progress...10% 20% etc. 
Progress prog; prog.SetMaximumSteps(rows * cols); prog.CheckStatus(); // Initialize control point network ControlNet cn; cn.SetType(ControlNet::ImageToImage); cn.SetUserName(Application::UserName()); cn.SetCreatedDate(iTime::CurrentLocalTime()); // Get serial numbers for input cubes string transSN = SerialNumber::Compose(trans, true); string matchSN = SerialNumber::Compose(match, true); cn.SetTarget(transSN); cn.SetDescription("Records s/c jitter between two adjacent HiRISE images"); // Set up results parameter saves JitterParms jparms; jparms.fromCorns = fcorns; jparms.fromJit = trans.GetInfo(); jparms.matchCorns = mcorns; jparms.matchJit = match.GetInfo(); jparms.regFile = regFile.Expanded(); jparms.cols = cols; jparms.rows = rows; jparms.lSpacing = lSpacing; jparms.sSpacing = sSpacing; jparms.nSuspects = 0; // Loop through grid of points and get statistics to compute // translation values RegList reglist; double fline0(fcorns.topLeft.line-1.0), fsamp0(fcorns.topLeft.sample-1.0); double mline0(mcorns.topLeft.line-1.0), msamp0(mcorns.topLeft.sample-1.0); for (int r=0; r<rows; r++) { int line = (int)(lSpacing / 2.0 + lSpacing * r + 0.5); for (int c=0; c<cols; c++) { int samp = (int)(sSpacing / 2.0 + sSpacing * c + 0.5); ar->PatternChip()->TackCube(msamp0+samp, mline0+line); ar->PatternChip()->Load(match); ar->SearchChip()->TackCube(fsamp0+samp, fline0+line); ar->SearchChip()->Load(trans); // Set up ControlMeasure for cube to translate ControlMeasure cmTrans; cmTrans.SetCubeSerialNumber(transSN); cmTrans.SetCoordinate(msamp0+samp, mline0+line, ControlMeasure::Unmeasured); cmTrans.SetChooserName("hijitreg"); cmTrans.SetReference(false); // Set up ControlMeasure for the pattern/Match cube ControlMeasure cmMatch; cmMatch.SetCubeSerialNumber(matchSN); cmMatch.SetCoordinate(fsamp0+samp, fline0+line, ControlMeasure::Automatic); cmMatch.SetChooserName("hijitreg"); cmMatch.SetReference(true); // Match found if (ar->Register()==AutoReg::Success) { RegData reg; reg.fLine = fline0 + line; reg.fSamp = fsamp0 + samp; reg.fLTime = trans.getLineTime(reg.fLine); reg.mLine = mline0 + line; reg.mSamp = msamp0 + samp; reg.mLTime = match.getLineTime(reg.mLine); reg.regLine = ar->CubeLine(); reg.regSamp = ar->CubeSample(); reg.regCorr = ar->GoodnessOfFit(); if (fabs(reg.regCorr) > 1.0) jparms.nSuspects++; double sDiff = reg.fSamp - reg.regSamp; double lDiff = reg.fLine - reg.regLine; jparms.sStats.AddData(&sDiff,(unsigned int)1); jparms.lStats.AddData(&lDiff,(unsigned int)1); // Record the translation in the control point cmTrans.SetCoordinate(ar->CubeSample(), ar->CubeLine(), ControlMeasure::Automatic); cmTrans.SetError(sDiff, lDiff); cmTrans.SetGoodnessOfFit(ar->GoodnessOfFit()); // Reread the chip location centering the offset and compute // linear regression statistics try { Chip &pchip(*ar->PatternChip()); Chip fchip(pchip.Samples(), pchip.Lines()); fchip.TackCube(ar->CubeSample(), ar->CubeLine()); fchip.Load(trans); // Writes correlated chips to files for visual inspection #if defined(ISIS_DEBUG) ostringstream tstr; tstr << "R" << r << "C" << c << "_chip.cub"; string fcname("from" + tstr.str()); string mcname("match" + tstr.str()); pchip.Write(mcname); fchip.Write(fcname); #endif MultivariateStatistics mstats; for (int line = 1 ; line <= fchip.Lines() ; line++) { for(int sample = 1; sample < fchip.Samples(); sample++) { double fchipValue = fchip.GetValue(sample,line); double pchipValue = pchip.GetValue(sample,line); mstats.AddData(&fchipValue, &pchipValue, 1); } } // Get regression and correlation values 
mstats.LinearRegression(reg.B0, reg.B1); reg.Bcorr = mstats.Correlation(); if (IsSpecial(reg.B0)) throw 1; if (IsSpecial(reg.B1)) throw 2; if (IsSpecial(reg.Bcorr)) throw 3; } catch (...) { // If fails, flag this condition reg.B0 = 0.0; reg.B1= 0.0; reg.Bcorr = 0.0; } reglist.push_back(reg); } // Add the measures to a control point string str = "Row " + iString(r) + " Column " + iString(c); ControlPoint cp(str); cp.SetType(ControlPoint::Tie); cp.Add(cmTrans); cp.Add(cmMatch); if (!cmTrans.IsMeasured()) cp.SetIgnore(true); cn.Add(cp); prog.CheckStatus(); } } // If flatfile was entered, create the flatfile // The flatfile is comma seperated and can be imported into an excel // spreadsheet if (ui.WasEntered("FLATFILE")) { string fFile = ui.GetFilename("FLATFILE"); ofstream os; string fFileExpanded = Filename(fFile).Expanded(); os.open(fFileExpanded.c_str(),ios::out); dumpResults(os, reglist, jparms, *ar); } // If a cnet file was entered, write the ControlNet pvl to the file if (ui.WasEntered("CNETFILE")) { cn.Write(ui.GetFilename("CNETFILE")); } // Don't need the cubes opened anymore trans.Close(); match.Close(); // Write translation to log PvlGroup results("AverageTranslation"); if (jparms.sStats.ValidPixels() > 0) { double sTrans = (int)(jparms.sStats.Average() * 100.0) / 100.0; double lTrans = (int)(jparms.lStats.Average() * 100.0) / 100.0; results += PvlKeyword ("Sample",sTrans); results += PvlKeyword ("Line",lTrans); results += PvlKeyword ("NSuspects",jparms.nSuspects); } else { results += PvlKeyword ("Sample","NULL"); results += PvlKeyword ("Line","NULL"); } Application::Log(results); // add the auto registration information to print.prt PvlGroup autoRegTemplate = ar->RegTemplate(); Application::Log(autoRegTemplate); return; }
bool PlugInTester::execute(PlugInArgList* pInArgs, PlugInArgList* pOutArgs) { VERIFY(pInArgs != NULL); Progress* pProgress = NULL; PlugInArg* pArg = NULL; if (pInArgs != NULL && pInArgs->getArg(Executable::ProgressArg(), pArg) && pArg != NULL) { pProgress = reinterpret_cast<Progress*>(pArg->getActualValue()); } vector<PlugInDescriptor*> allPlugins = mpPlugMgr->getPlugInDescriptors(); vector<string> testablePlugins; for (vector<PlugInDescriptor*>::const_iterator it = allPlugins.begin(); it != allPlugins.end(); ++it) { PlugInDescriptor* pDescriptor = *it; if (pDescriptor == NULL) { continue; } if (pDescriptor->isTestable()) { testablePlugins.push_back(pDescriptor->getName()); } } string msg; bool bSuccess = false; PlugInSelectorDlg dlg(mpDesktop->getMainWidget(), testablePlugins); int stat = dlg.exec(); if (stat == QDialog::Accepted) { const vector<string>& pluginsToTest = dlg.getSelectedPlugins(); // TODO: Set up a ProgressTracker for each plug-in to test vector<string>::const_iterator it; for (it = pluginsToTest.begin(); it != pluginsToTest.end(); ++it) { PlugInResource pPlugIn(*it); Testable* pTestable = dynamic_cast<Testable*>(pPlugIn.get()); if (pTestable == NULL) { msg += "The Plug-In " + *it + " cannot be created!"; if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ERRORS); } return false; } msg += "Testing " + *it + "..."; if (pProgress != NULL) { pProgress->updateProgress(msg, 0, NORMAL); } stringstream ostr; bSuccess = pTestable->runAllTests(pProgress, ostr); msg += "Testing of Plug-In " + *it + " has been completed"; ReportingLevel lvl = NORMAL; if (!bSuccess) { lvl = ERRORS; msg += " with errors!"; } else { msg += "!"; } if (ostr.str().size() > 0) { msg += "\n" + ostr.str() + "\n"; } else { msg += "\n"; } if (pProgress != NULL) { pProgress->updateProgress(msg, 100, lvl); } } } return bSuccess; }
/**
 * Scans all areas. If an area is of one of the given merge types, the node ids of all its
 * outer rings are scanned; every id that is used by more than one such area is inserted
 * into the given nodeUseMap.
 */
bool MergeAreasGenerator::ScanAreaNodeIds(Progress& progress,
                                          const TypeConfig& typeConfig,
                                          FileScanner& scanner,
                                          const TypeInfoSet& mergeTypes,
                                          std::unordered_set<Id>& nodeUseMap)
{
  uint32_t areaCount=0;

  progress.SetAction("Scanning for nodes joining areas from '"+scanner.GetFilename()+"'");

  scanner.GotoBegin();
  scanner.Read(areaCount);

  uint8_t type;
  Id      id;
  Area    data;

  std::unordered_set<Id> usedOnceSet;

  for (uint32_t current=1; current<=areaCount; current++) {
    progress.SetProgress(current,areaCount);

    scanner.Read(type);
    scanner.Read(id);

    data.ReadImport(typeConfig, scanner);

    if (!mergeTypes.IsSet(data.GetType())) {
      continue;
    }

    // We insert every node id only once per area, because we want to
    // find nodes that are shared by *different* areas.
    std::unordered_set<Id> nodeIds;

    for (const auto& ring: data.rings) {
      if (!ring.IsOuterRing()) {
        continue;
      }

      for (const auto node : ring.nodes) {
        Id id=node.GetId();

        if (nodeIds.find(id)==nodeIds.end()) {
          auto entry=usedOnceSet.find(id);

          if (entry!=usedOnceSet.end()) {
            nodeUseMap.insert(id);
          }
          else {
            usedOnceSet.insert(id);
          }

          nodeIds.insert(id);
        }
      }
    }
  }

  return true;
}
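// NOTE: Illustrative sketch, not part of the original source. It isolates the two-set technique
// ScanAreaNodeIds() above relies on: ids seen for the first time go into usedOnceSet, and ids
// encountered again in a *different* area are promoted into the "shared" set (the role played by
// nodeUseMap). The Id alias and the sample data are assumptions for demonstration only.
#include <cstdint>
#include <cstdio>
#include <unordered_set>
#include <vector>

int main() {
    using Id = uint64_t;
    const std::vector<std::vector<Id>> areas = {{1, 2, 3}, {3, 4, 5}, {5, 6, 1}};

    std::unordered_set<Id> usedOnce;
    std::unordered_set<Id> shared;  // plays the role of nodeUseMap

    for (const auto& area : areas) {
        std::unordered_set<Id> seenInThisArea;  // count each id only once per area
        for (Id id : area) {
            if (!seenInThisArea.insert(id).second) {
                continue;  // already counted for this area
            }
            if (usedOnce.find(id) != usedOnce.end()) {
                shared.insert(id);  // second distinct area using this node
            } else {
                usedOnce.insert(id);
            }
        }
    }

    for (Id id : shared) {
        std::printf("shared node id: %llu\n", static_cast<unsigned long long>(id));
    }
    return 0;
}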
/* * Converts the specified registry file. The specified file is * removed if the conversion is successful. If conversion_count * is not NULL, the total number of Articles converted will be * passed back. */ int wsreg_convert_registry(const char *filename, int *conversion_count, Progress_function progress_callback) { File_util *futil = _wsreg_fileutil_initialize(); if (initialized == WSREG_NOT_INITIALIZED) { return (WSREG_NOT_INITIALIZED); } if (!futil->exists(filename)) { /* * Bad filename. */ return (WSREG_FILE_NOT_FOUND); } if (futil->can_read(filename) && futil->can_write(filename)) { /* * The registry file can be read and removed. */ if (wsreg_can_access_registry(O_RDWR)) { /* * The conversion permissions are appropriate. * Perform the conversion. */ int result; int article_count = 0; Progress *progress = _wsreg_progress_create( (Progress_callback)*progress_callback); int count = 0; Unz_article_input_stream *ain = NULL; Conversion *c = NULL; /* * The first progress section represents the * unzipping of the data file. */ progress->set_section_bounds(progress, 5, 1); ain = _wsreg_uzais_open(filename, &result); progress->finish_section(progress); if (result != WSREG_SUCCESS) { /* * The open failed. Clean up and * return the error code. */ if (ain != NULL) { ain->close(ain); } progress->free(progress); return (result); } c = _wsreg_conversion_create(progress); /* * The second progress section represents * the reading of articles. */ article_count = ain->get_article_count(ain); progress->set_section_bounds(progress, 8, article_count); while (ain->has_more_articles(ain)) { Article *a = ain->get_next_article(ain); if (a != NULL) { c->add_article(c, a); } progress->increment(progress); } progress->finish_section(progress); ain->close(ain); /* * The third progress section represents * the conversion and registration of the * resulting components. */ progress->set_section_bounds(progress, 100, article_count); count = c->register_components(c, NULL, FALSE); progress->finish_section(progress); /* * Pass the count back to the caller. */ if (conversion_count != NULL) { *conversion_count = count; } /* * Remove the old registry file. */ futil->remove(filename); /* * Cleanup objects. */ c->free(c); progress->free(progress); return (WSREG_SUCCESS); } else { /* * No permission to modify the registry. */ return (WSREG_NO_REG_ACCESS); } } else { /* * No permission to read and delete the specified file. */ return (WSREG_NO_FILE_ACCESS); } }
bool MergeAreasGenerator::Import(const TypeConfigRef& typeConfig, const ImportParameter& parameter, Progress& progress) { TypeInfoSet mergeTypes; FileScanner scanner; FileWriter writer; uint32_t areasWritten=0; for (const auto& type : typeConfig->GetTypes()) { if (type->CanBeArea() && type->GetMergeAreas()) { mergeTypes.Set(type); } } std::unordered_set<Id> nodeUseMap; try { scanner.Open(AppendFileToDir(parameter.GetDestinationDirectory(), MergeAreaDataGenerator::AREAS_TMP), FileScanner::Sequential, parameter.GetRawWayDataMemoryMaped()); if (!ScanAreaNodeIds(progress, *typeConfig, scanner, mergeTypes, nodeUseMap)) { return false; } uint32_t nodeCount=nodeUseMap.size(); progress.Info("Found "+NumberToString(nodeCount)+" nodes as possible connection points for areas"); /* ------ */ writer.Open(AppendFileToDir(parameter.GetDestinationDirectory(), AREAS2_TMP)); writer.Write(areasWritten); while (true) { TypeInfoSet loadedTypes; std::vector<AreaMergeData> mergeJob(typeConfig->GetTypeCount()); // // Load type data // progress.SetAction("Collecting area data by type"); if (!GetAreas(parameter, progress, *typeConfig, mergeTypes, loadedTypes, nodeUseMap, scanner, writer, mergeJob, areasWritten)) { return false; } // Merge progress.SetAction("Merging areas"); for (const auto& type : loadedTypes) { if (!mergeJob[type->GetIndex()].areas.empty()) { progress.Info("Merging areas of type "+type->GetName()); MergeAreas(progress, nodeUseMap, mergeJob[type->GetIndex()]); progress.Info("Reduced areas of '"+type->GetName()+"' from "+NumberToString(mergeJob[type->GetIndex()].areaCount)+" to "+NumberToString(mergeJob[type->GetIndex()].areaCount-mergeJob[type->GetIndex()].mergedAway.size())); mergeJob[type->GetIndex()].areas.clear(); } } // Store back merge result if (!loadedTypes.Empty()) { if (!WriteMergeResult(progress, *typeConfig, scanner, writer, loadedTypes, mergeJob, areasWritten)) { return false; } mergeTypes.Remove(loadedTypes); } if (mergeTypes.Empty()) { break; } } scanner.Close(); writer.GotoBegin(); writer.Write(areasWritten); writer.Close(); } catch (IOException& e) { progress.Error(e.GetDescription()); scanner.CloseFailsafe(); writer.CloseFailsafe(); return false; } return true; }
bool MergeAreasGenerator::WriteMergeResult(Progress& progress, const TypeConfig& typeConfig, FileScanner& scanner, FileWriter& writer, const TypeInfoSet& loadedTypes, std::vector<AreaMergeData>& mergeJob, uint32_t& areasWritten) { uint32_t areaCount=0; std::unordered_map<FileOffset,AreaRef> merges; std::unordered_set<FileOffset> ignores; for (const auto& type : loadedTypes) { for (const auto& area : mergeJob[type->GetIndex()].merges) { merges[area->GetFileOffset()]=area; } ignores.insert(mergeJob[type->GetIndex()].mergedAway.begin(), mergeJob[type->GetIndex()].mergedAway.end()); } scanner.GotoBegin(); scanner.Read(areaCount); for (uint32_t a=1; a<=areaCount; a++) { uint8_t type; Id id; AreaRef area=std::make_shared<Area>(); progress.SetProgress(a,areaCount); scanner.Read(type); scanner.Read(id); area->ReadImport(typeConfig, scanner); if (loadedTypes.IsSet(area->GetType())) { if (ignores.find(area->GetFileOffset())!=ignores.end()) { continue; } writer.Write(type); writer.Write(id); const auto& merge=merges.find(area->GetFileOffset()) ; if (merge!=merges.end()) { area=merge->second; } area->WriteImport(typeConfig, writer); areasWritten++; } } return true; }
/**
 * Load areas that have one of the types given by candidateTypes. If at least one node
 * in one of the outer rings of an area is marked in nodeUseMap as "used at least twice",
 * index the area into the areas map.
 *
 * If the number of indexed areas gets bigger than parameter.GetRawWayBlockSize(), types are
 * dropped from the loaded areas until the number is again below the limit.
 */
bool MergeAreasGenerator::GetAreas(const ImportParameter& parameter,
                                   Progress& progress,
                                   const TypeConfig& typeConfig,
                                   const TypeInfoSet& candidateTypes,
                                   TypeInfoSet& loadedTypes,
                                   const std::unordered_set<Id>& nodeUseMap,
                                   FileScanner& scanner,
                                   FileWriter& writer,
                                   std::vector<AreaMergeData>& mergeJob,
                                   uint32_t& areasWritten)
{
  bool     firstCall=areasWritten==0; // We are called for the first time
  uint32_t areaCount=0;
  size_t   collectedAreasCount=0;
  size_t   typesWithAreas=0;

  for (auto& data : mergeJob) {
    data.areaCount=0;
  }

  loadedTypes=candidateTypes;

  scanner.GotoBegin();
  scanner.Read(areaCount);

  for (uint32_t a=1; a<=areaCount; a++) {
    uint8_t type;
    Id      id;
    AreaRef area=std::make_shared<Area>();

    progress.SetProgress(a,areaCount);

    scanner.Read(type);
    scanner.Read(id);

    area->ReadImport(typeConfig, scanner);

    mergeJob[area->GetType()->GetIndex()].areaCount++;

    // This is an area of a type that does not get merged,
    // we directly store it in the target file.
    if (!loadedTypes.IsSet(area->GetType())) {
      if (firstCall) {
        writer.Write(type);
        writer.Write(id);

        area->WriteImport(typeConfig, writer);

        areasWritten++;
      }

      continue;
    }

    bool isMergeCandidate=false;

    for (const auto& ring: area->rings) {
      if (!ring.IsOuterRing()) {
        continue;
      }

      for (const auto node : ring.nodes) {
        if (nodeUseMap.find(node.GetId())!=nodeUseMap.end()) {
          isMergeCandidate=true;
          break;
        }
      }

      if (isMergeCandidate) {
        break;
      }
    }

    if (!isMergeCandidate) {
      continue;
    }

    if (mergeJob[area->GetType()->GetIndex()].areas.empty()) {
      typesWithAreas++;
    }

    mergeJob[area->GetType()->GetIndex()].areas.push_back(area);

    collectedAreasCount++;

    while (collectedAreasCount>parameter.GetRawWayBlockSize() && typesWithAreas>1) {
      TypeInfoRef victimType;

      // Find the type with the smallest amount of areas loaded
      for (auto &type : loadedTypes) {
        if (!mergeJob[type->GetIndex()].areas.empty() &&
            (!victimType ||
             mergeJob[type->GetIndex()].areas.size()<mergeJob[victimType->GetIndex()].areas.size())) {
          victimType=type;
        }
      }

      // Since there is more than one type with loaded areas, we must always find a "victim" type.
      assert(victimType);

      // Correct the statistics
      collectedAreasCount-=mergeJob[victimType->GetIndex()].areas.size();
      // Clear the already loaded data of the victim type
      mergeJob[victimType->GetIndex()].areas.clear();

      typesWithAreas--;
      loadedTypes.Remove(victimType);
    }
  }

  progress.SetAction("Collected "+NumberToString(collectedAreasCount)+" areas for "+NumberToString(loadedTypes.Size())+" types");

  return true;
}
/** * This function deals with the logic necessary for summing a Workspace2D. * @param localworkspace The input workspace for summing. * @param outSpec The spectrum for the summed output. * @param progress The progress indicator. * @param numSpectra The number of spectra contributed to the sum. * @param numMasked The spectra dropped from the summations because they are * masked. * @param numZeros The number of zero bins in histogram workspace or empty * spectra for event workspace. */ void SumSpectra::doWorkspace2D(MatrixWorkspace_const_sptr localworkspace, ISpectrum *outSpec, Progress &progress, size_t &numSpectra, size_t &numMasked, size_t &numZeros) { // Get references to the output workspaces's data vectors MantidVec &YSum = outSpec->dataY(); MantidVec &YError = outSpec->dataE(); MantidVec Weight; std::vector<size_t> nZeros; if (m_calculateWeightedSum) { Weight.assign(YSum.size(), 0); nZeros.assign(YSum.size(), 0); } numSpectra = 0; numMasked = 0; numZeros = 0; // Loop over spectra std::set<int>::iterator it; // for (int i = m_minSpec; i <= m_maxSpec; ++i) for (it = this->m_indices.begin(); it != this->m_indices.end(); ++it) { int i = *it; // Don't go outside the range. if ((i >= this->m_numberOfSpectra) || (i < 0)) { g_log.error() << "Invalid index " << i << " was specified. Sum was aborted.\n"; break; } try { // Get the detector object for this spectrum Geometry::IDetector_const_sptr det = localworkspace->getDetector(i); // Skip monitors, if the property is set to do so if (!m_keepMonitors && det->isMonitor()) continue; // Skip masked detectors if (det->isMasked()) { numMasked++; continue; } } catch (...) { // if the detector not found just carry on } numSpectra++; // Retrieve the spectrum into a vector const MantidVec &YValues = localworkspace->readY(i); const MantidVec &YErrors = localworkspace->readE(i); if (m_calculateWeightedSum) { for (int k = 0; k < this->m_yLength; ++k) { if (YErrors[k] != 0) { double errsq = YErrors[k] * YErrors[k]; YError[k] += errsq; Weight[k] += 1. / errsq; YSum[k] += YValues[k] / errsq; } else { nZeros[k]++; } } } else { for (int k = 0; k < this->m_yLength; ++k) { YSum[k] += YValues[k]; YError[k] += YErrors[k] * YErrors[k]; } } // Map all the detectors onto the spectrum of the output outSpec->addDetectorIDs(localworkspace->getSpectrum(i)->getDetectorIDs()); progress.report(); } if (m_calculateWeightedSum) { numZeros = 0; for (size_t i = 0; i < Weight.size(); i++) { if (nZeros[i] == 0) YSum[i] *= double(numSpectra) / Weight[i]; else numZeros += nZeros[i]; } } }
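// NOTE: Illustrative sketch, not part of the original source. It isolates the inverse-variance
// weighting used in the m_calculateWeightedSum branch of doWorkspace2D() above: each spectrum
// contributes Y/err^2 and 1/err^2 per bin, and the accumulated sum is rescaled by
// numSpectra/Weight at the end. The two sample "spectra" are assumptions for demonstration only.
#include <cstdio>
#include <vector>

int main() {
    const std::vector<std::vector<double>> Y = {{10.0, 4.0}, {12.0, 6.0}};  // signal per spectrum
    const std::vector<std::vector<double>> E = {{1.0, 2.0}, {2.0, 1.0}};    // errors per spectrum

    const size_t nBins = Y.front().size();
    std::vector<double> ySum(nBins, 0.0);
    std::vector<double> weight(nBins, 0.0);

    for (size_t s = 0; s < Y.size(); ++s) {
        for (size_t k = 0; k < nBins; ++k) {
            const double errsq = E[s][k] * E[s][k];
            ySum[k] += Y[s][k] / errsq;   // signal weighted by 1/err^2
            weight[k] += 1.0 / errsq;
        }
    }

    const double numSpectra = static_cast<double>(Y.size());
    for (size_t k = 0; k < nBins; ++k) {
        ySum[k] *= numSpectra / weight[k];  // rescale, as the original loop does after summing
        std::printf("bin %zu: weighted sum = %f\n", k, ySum[k]);
    }
    return 0;
}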
/**
 * Free function performing the CCL implementation over a range defined by the
 * iterator.
 *
 * @param iterator : Iterator giving access to the image
 * @param strategy : Strategy for identifying background
 * @param neighbourElements : Grid of DisjointElements the same size as the
 * image
 * @param progress : Progress object to update
 * @param maxNeighbours : Maximum number of neighbours each element may have.
 * Determined by dimensionality.
 * @param startLabelId : Start label index to increment
 * @param edgeIndexVec : Vector of edge index pairs. To identify elements across
 * iterator boundaries to resolve later.
 * @return The running label count after processing this range.
 */
size_t doConnectedComponentLabeling(IMDIterator *iterator,
                                    BackgroundStrategy *const strategy,
                                    VecElements &neighbourElements,
                                    Progress &progress, size_t maxNeighbours,
                                    size_t startLabelId,
                                    VecEdgeIndexPair &edgeIndexVec) {
  size_t currentLabelCount = startLabelId;
  strategy->configureIterator(
      iterator); // Set up such things as desired Normalization.
  do {
    if (!strategy->isBackground(iterator)) {
      size_t currentIndex = iterator->getLinearIndex();
      progress.report();
      // Linear indexes of neighbours
      VecIndexes neighbourIndexes = iterator->findNeighbourIndexes();
      VecIndexes nonEmptyNeighbourIndexes;
      nonEmptyNeighbourIndexes.reserve(maxNeighbours);
      SetIds neighbourIds;
      // Discover non-empty neighbours
      for (size_t i = 0; i < neighbourIndexes.size(); ++i) {
        size_t neighIndex = neighbourIndexes[i];
        if (!iterator->isWithinBounds(neighIndex)) {
          /* Record labels which appear to belong to the same cluster, but
           cannot be combined in this pass and will later need to be conjoined
           and resolved. As Labels cannot be guaranteed to be correctly
           provided for all neighbours until the end, we must store indexes
           instead.
           */
          edgeIndexVec.push_back(EdgeIndexPair(currentIndex, neighIndex));
          continue;
        }

        const DisjointElement &neighbourElement = neighbourElements[neighIndex];
        if (!neighbourElement.isEmpty()) {
          nonEmptyNeighbourIndexes.push_back(neighIndex);
          neighbourIds.insert(neighbourElement.getId());
        }
      }

      if (nonEmptyNeighbourIndexes.empty()) {
        DisjointElement &element = neighbourElements[currentIndex];
        element.setId(static_cast<int>(currentLabelCount));
        ++currentLabelCount;
      } else if (neighbourIds.size() ==
                 1) // Do we have a single unique id amongst all neighbours.
      {
        neighbourElements[currentIndex] =
            neighbourElements[nonEmptyNeighbourIndexes.front()]; // Copy non-empty neighbour
      } else {
        // Choose the lowest neighbour index as the parent.
        size_t candidateSourceParentIndex = nonEmptyNeighbourIndexes[0];
        for (size_t i = 1; i < nonEmptyNeighbourIndexes.size(); ++i) {
          size_t neighIndex = nonEmptyNeighbourIndexes[i];
          if (neighbourElements[neighIndex].getRoot() <
              neighbourElements[candidateSourceParentIndex].getRoot()) {
            candidateSourceParentIndex = neighIndex;
          }
        }
        // Get the chosen parent
        DisjointElement &parentElement =
            neighbourElements[candidateSourceParentIndex];
        // Union remainder parents with the chosen parent
        for (size_t i = 0; i < nonEmptyNeighbourIndexes.size(); ++i) {
          size_t neighIndex = nonEmptyNeighbourIndexes[i];
          if (neighIndex != candidateSourceParentIndex) {
            neighbourElements[neighIndex].unionWith(&parentElement);
          }
        }
        neighbourElements[currentIndex].unionWith(&parentElement);
      }
    }
  } while (iterator->next());
  return currentLabelCount;
}
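// NOTE: Illustrative sketch, not part of the original source. It shows the union-find bookkeeping
// behind the DisjointElement grid used by doConnectedComponentLabeling() above, applied to a 1-D
// signal with a simple "value == 0 is background" rule. The plain parent array, findRoot() helper
// and sample data are stand-ins for the MD-iterator and DisjointElement machinery in the real code.
#include <cstdio>
#include <numeric>
#include <vector>

static int findRoot(std::vector<int> &parent, int i) {
  // Follow parent links to the root, compressing the path as we go.
  while (parent[i] != i) {
    parent[i] = parent[parent[i]];
    i = parent[i];
  }
  return i;
}

int main() {
  const std::vector<int> signal = {0, 5, 7, 0, 0, 3, 4, 6, 0, 2};
  std::vector<int> parent(signal.size());
  std::iota(parent.begin(), parent.end(), 0); // every element starts as its own root

  // First pass: union each non-background element with its non-background left neighbour.
  for (size_t i = 1; i < signal.size(); ++i) {
    if (signal[i] != 0 && signal[i - 1] != 0) {
      parent[findRoot(parent, static_cast<int>(i))] =
          findRoot(parent, static_cast<int>(i - 1));
    }
  }

  // Second pass: the resolved root acts as the cluster label.
  for (size_t i = 0; i < signal.size(); ++i) {
    if (signal[i] == 0)
      continue;
    std::printf("index %zu -> cluster root %d\n", i,
                findRoot(parent, static_cast<int>(i)));
  }
  return 0;
}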
/** * This function handles the logic for summing RebinnedOutput workspaces. * @param outputWorkspace the workspace to hold the summed input * @param progress the progress indicator * @param numSpectra * @param numMasked * @param numZeros */ void SumSpectra::doRebinnedOutput(MatrixWorkspace_sptr outputWorkspace, Progress &progress, size_t &numSpectra, size_t &numMasked, size_t &numZeros) { // Get a copy of the input workspace MatrixWorkspace_sptr temp = getProperty("InputWorkspace"); // First, we need to clean the input workspace for nan's and inf's in order // to treat the data correctly later. This will create a new private // workspace that will be retrieved as mutable. IAlgorithm_sptr alg = this->createChildAlgorithm("ReplaceSpecialValues"); alg->setProperty<MatrixWorkspace_sptr>("InputWorkspace", temp); std::string outName = "_" + temp->getName() + "_clean"; alg->setProperty("OutputWorkspace", outName); alg->setProperty("NaNValue", 0.0); alg->setProperty("NaNError", 0.0); alg->setProperty("InfinityValue", 0.0); alg->setProperty("InfinityError", 0.0); alg->executeAsChildAlg(); MatrixWorkspace_sptr localworkspace = alg->getProperty("OutputWorkspace"); // Transform to real workspace types RebinnedOutput_sptr inWS = boost::dynamic_pointer_cast<RebinnedOutput>(localworkspace); RebinnedOutput_sptr outWS = boost::dynamic_pointer_cast<RebinnedOutput>(outputWorkspace); // Get references to the output workspaces's data vectors ISpectrum *outSpec = outputWorkspace->getSpectrum(0); MantidVec &YSum = outSpec->dataY(); MantidVec &YError = outSpec->dataE(); MantidVec &FracSum = outWS->dataF(0); MantidVec Weight; std::vector<size_t> nZeros; if (m_calculateWeightedSum) { Weight.assign(YSum.size(), 0); nZeros.assign(YSum.size(), 0); } numSpectra = 0; numMasked = 0; numZeros = 0; // Loop over spectra std::set<int>::iterator it; // for (int i = m_minSpec; i <= m_maxSpec; ++i) for (it = m_indices.begin(); it != m_indices.end(); ++it) { int i = *it; // Don't go outside the range. if ((i >= m_numberOfSpectra) || (i < 0)) { g_log.error() << "Invalid index " << i << " was specified. Sum was aborted.\n"; break; } try { // Get the detector object for this spectrum Geometry::IDetector_const_sptr det = localworkspace->getDetector(i); // Skip monitors, if the property is set to do so if (!m_keepMonitors && det->isMonitor()) continue; // Skip masked detectors if (det->isMasked()) { numMasked++; continue; } } catch (...) { // if the detector not found just carry on } numSpectra++; // Retrieve the spectrum into a vector const MantidVec &YValues = localworkspace->readY(i); const MantidVec &YErrors = localworkspace->readE(i); const MantidVec &FracArea = inWS->readF(i); if (m_calculateWeightedSum) { for (int k = 0; k < this->m_yLength; ++k) { if (YErrors[k] != 0) { double errsq = YErrors[k] * YErrors[k] * FracArea[k] * FracArea[k]; YError[k] += errsq; Weight[k] += 1. 
/ errsq; YSum[k] += YValues[k] * FracArea[k] / errsq; FracSum[k] += FracArea[k]; } else { nZeros[k]++; FracSum[k] += FracArea[k]; } } } else { for (int k = 0; k < this->m_yLength; ++k) { YSum[k] += YValues[k] * FracArea[k]; YError[k] += YErrors[k] * YErrors[k] * FracArea[k] * FracArea[k]; FracSum[k] += FracArea[k]; } } // Map all the detectors onto the spectrum of the output outSpec->addDetectorIDs(localworkspace->getSpectrum(i)->getDetectorIDs()); progress.report(); } if (m_calculateWeightedSum) { numZeros = 0; for (size_t i = 0; i < Weight.size(); i++) { if (nZeros[i] == 0) YSum[i] *= double(numSpectra) / Weight[i]; else numZeros += nZeros[i]; } } // Create the correct representation outWS->finalize(); }
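/*
 * doRebinnedOutput first runs the ReplaceSpecialValues child algorithm so
 * that NaNs and infinities cannot poison the weighted sums. A rough
 * in-memory equivalent for a single data/error vector pair might look like
 * this; cleanSpecialValues is a hypothetical helper, not a Mantid API.
 */
#include <cmath>
#include <cstddef>
#include <vector>

static void cleanSpecialValues(std::vector<double> &values,
                               std::vector<double> &errors,
                               double replacement = 0.0) {
  for (std::size_t k = 0; k < values.size(); ++k) {
    if (!std::isfinite(values[k])) {
      values[k] = replacement; // stands in for NaNValue / InfinityValue
      errors[k] = replacement; // stands in for NaNError / InfinityError
    }
  }
}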
void Scene::device_update(Device *device_, Progress& progress)
{
  if(!device)
    device = device_;

  /* The order of updates is important, because there are dependencies between
   * the different managers, using data computed by previous managers.
   *
   * - Background generates shader graph compiled by shader manager.
   * - Image manager uploads images used by shaders.
   * - Camera may be used for adaptive subdivision.
   * - Displacement shader must have all shader data available.
   * - Light manager needs final mesh data to compute emission CDF.
   */

  progress.set_status("Updating Background");
  background->device_update(device, &dscene, this);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Shaders");
  shader_manager->device_update(device, &dscene, this, progress);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Images");
  image_manager->device_update(device, &dscene, progress);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Camera");
  camera->device_update(device, &dscene);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Objects");
  object_manager->device_update(device, &dscene, this, progress);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Meshes");
  mesh_manager->device_update(device, &dscene, this, progress);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Lights");
  light_manager->device_update(device, &dscene, this, progress);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Filter");
  filter->device_update(device, &dscene);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Integrator");
  integrator->device_update(device, &dscene);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Film");
  film->device_update(device, &dscene);

  if(progress.get_cancel()) return;

  progress.set_status("Updating Device", "Writing constant memory");
  device->const_copy_to("__data", &dscene.data, sizeof(dscene.data));
}
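/*
 * Scene::device_update above is an ordered pipeline: each manager runs in
 * dependency order and the whole update bails out as soon as the Progress
 * object reports a cancel. A generic sketch of that shape, with a
 * hypothetical CancelFlag standing in for Cycles' Progress:
 */
#include <functional>
#include <string>
#include <utility>
#include <vector>

struct CancelFlag {
  bool cancelled = false;
  bool get_cancel() const { return cancelled; }
};

using Stage = std::pair<std::string, std::function<void()>>;

// Run stages in order; stop early if the flag is raised between stages.
static bool run_update_stages(const std::vector<Stage> &stages,
                              const CancelFlag &cancel,
                              std::function<void(const std::string &)> set_status) {
  for (const Stage &stage : stages) {
    set_status("Updating " + stage.first);
    stage.second();
    if (cancel.get_cancel())
      return false; // the caller may resume the remaining stages later
  }
  return true;
}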
bool LayerImporter::execute(PlugInArgList* pInArgList, PlugInArgList* pOutArgList) { Layer* pLayer = NULL; Progress* pProgress = NULL; DataElement* pElement = NULL; SpatialDataView* pView = NULL; StepResource pStep("Import layer", "app", "DF24688A-6B34-4244-98FF-5FFE2063AC05"); // get input arguments and log some useful info about them { // scope the MessageResource MessageResource pMsg("Input arguments", "app", "C0A532DE-0E19-44D3-837C-16ABD267B2C1"); pProgress = pInArgList->getPlugInArgValue<Progress>(Executable::ProgressArg()); pMsg->addBooleanProperty("Progress Present", (pProgress != NULL)); pElement = pInArgList->getPlugInArgValue<DataElement>(Importer::ImportElementArg()); if (pElement == NULL) { if (pProgress != NULL) { pProgress->updateProgress("No data element", 100, ERRORS); } pStep->finalize(Message::Failure, "No data element"); return false; } pMsg->addProperty("Element name", pElement->getName()); pView = pInArgList->getPlugInArgValue<SpatialDataView>(Executable::ViewArg()); if (pView != NULL) { pMsg->addProperty("View name", pView->getName()); } } if (pProgress != NULL) { pProgress->updateProgress((string("Read and parse file ") + pElement->getFilename()), 20, NORMAL); } // parse the xml XmlReader xml(Service<MessageLogMgr>()->getLog()); XERCES_CPP_NAMESPACE_QUALIFIER DOMDocument* pDomDocument = xml.parse(pElement->getFilename()); if (pDomDocument == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Unable to parse the file", 100, ERRORS); } pStep->finalize(Message::Failure, "Unable to parse the file"); return false; } DOMElement* pRootElement = pDomDocument->getDocumentElement(); VERIFY(pRootElement != NULL); if (pProgress != NULL) { pProgress->updateProgress("Create the layer", 40, NORMAL); } string name(A(pRootElement->getAttribute(X("name")))); string type(A(pRootElement->getAttribute(X("type")))); unsigned int formatVersion = atoi(A(pRootElement->getAttribute(X("version")))); { // scope the MessageResource MessageResource pMsg("Layer information", "app", "AA358F7A-107E-456E-8D11-36DDBE5B1645"); pMsg->addProperty("name", name); pMsg->addProperty("type", type); pMsg->addProperty("format version", formatVersion); } // If user requested pixel coordinates be used. bool usePixelCoords = false; DataDescriptor* pDesc = pElement->getDataDescriptor(); VERIFY( pDesc ); pDesc->getMetadata()->getAttributeByPath( "Layer/Import Options/Use Pixel Coordinates" ).getValue( usePixelCoords ); if (usePixelCoords) { // Remove geoVertices and geoBox elements. 
removeGeoNodes(pRootElement); } if (pView == NULL) { //no view provided, so find current view SpatialDataWindow* pWindow = dynamic_cast<SpatialDataWindow*>(mpDesktop->getCurrentWorkspaceWindow()); if (pWindow != NULL) { pView = pWindow->getSpatialDataView(); } } if (pView == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Could not access the view to create the layer.", 100, ERRORS); } pStep->finalize(Message::Failure, "Could not access the view to create the layer."); return false; } bool error = false; LayerType layerType = StringUtilities::fromXmlString<LayerType>(type, &error); if (error == true) { if (pProgress != NULL) { pProgress->updateProgress("The layer type is invalid.", 100, ERRORS); } pStep->finalize(Message::Failure, "The layer type is invalid."); return false; } LayerList* pLayerList = pView->getLayerList(); if (pLayerList != NULL) { RasterElement* pNewParentElement = pLayerList->getPrimaryRasterElement(); if (pNewParentElement != NULL) { Service<ModelServices> pModel; if (pModel->setElementParent(pElement, pNewParentElement) == false) { pProgress->updateProgress("The layer already exists.", 100, ERRORS); pStep->finalize(Message::Failure, "The layer already exists."); return false; } } } UndoGroup group(pView, "Import " + StringUtilities::toDisplayString(layerType) + " Layer"); pLayer = pView->createLayer(layerType, pElement); if (pLayer == NULL) { if (pProgress != NULL) { pProgress->updateProgress("Unable to create the layer", 100, ERRORS); } pStep->finalize(Message::Failure, "Unable to create the layer"); return false; } if (pProgress != NULL) { pProgress->updateProgress("Build the layer", 60, NORMAL); } // deserialize the layer try { if (pLayer->fromXml(pRootElement, formatVersion) == false) { pProgress->updateProgress("Problem with layer file.", 100, ERRORS); pStep->finalize(Message::Failure, "Problem with layer file."); return false; } } catch (XmlReader::DomParseException&) { return false; } pStep->finalize(Message::Success); if (pProgress != NULL) { pProgress->updateProgress("Finished loading the layer", 100, NORMAL); } // Add the layer to the view pView->addLayer(pLayer); pView->setActiveLayer(pLayer); pView->setMouseMode("LayerMode"); if (pOutArgList != NULL) { // set the output arguments pOutArgList->setPlugInArgValue("Layer", pLayer); } return true; }
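/*
 * LayerImporter::execute converts the layer "type" attribute with
 * StringUtilities::fromXmlString and refuses to continue when the error
 * flag is set. A minimal sketch of that parse-with-error-flag pattern,
 * using an invented SketchLayerType enum and type strings rather than the
 * real Opticks LayerType values:
 */
#include <string>

enum class SketchLayerType { Annotation, Aoi, Gcp, Unknown };

static SketchLayerType layerTypeFromString(const std::string &text, bool &error) {
  error = false;
  if (text == "AnnotationLayer")
    return SketchLayerType::Annotation;
  if (text == "AoiLayer")
    return SketchLayerType::Aoi;
  if (text == "GcpLayer")
    return SketchLayerType::Gcp;
  error = true; // unrecognised type string
  return SketchLayerType::Unknown;
}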
bool AreaAreaIndexGenerator::Import(const TypeConfigRef& typeConfig, const ImportParameter& parameter, Progress& progress) { FileScanner scanner; size_t areas=0; // Number of areas found size_t areasConsumed=0; // Number of areas consumed std::vector<double> cellWidth; std::vector<double> cellHeight; std::map<Pixel,AreaLeaf> leafs; std::map<Pixel,AreaLeaf> newAreaLeafs; cellWidth.resize(parameter.GetAreaAreaIndexMaxMag()+1); cellHeight.resize(parameter.GetAreaAreaIndexMaxMag()+1); for (size_t i=0; i<cellWidth.size(); i++) { cellWidth[i]=360.0/pow(2.0,(int)i); } for (size_t i=0; i<cellHeight.size(); i++) { cellHeight[i]=180.0/pow(2.0,(int)i); } // // Writing index file // progress.SetAction("Generating 'areaarea.idx'"); FileWriter writer; FileOffset topLevelOffset=0; FileOffset topLevelOffsetOffset; // Offset of the toplevel entry if (!writer.Open(AppendFileToDir(parameter.GetDestinationDirectory(), "areaarea.idx"))) { progress.Error("Cannot create 'areaarea.idx'"); return false; } if (!scanner.Open(AppendFileToDir(parameter.GetDestinationDirectory(), "areas.dat"), FileScanner::Sequential, parameter.GetWayDataMemoryMaped())) { progress.Error("Cannot open 'areas.dat'"); return false; } writer.WriteNumber((uint32_t)parameter.GetAreaAreaIndexMaxMag()); // MaxMag if (!writer.GetPos(topLevelOffsetOffset)) { progress.Error("Cannot read current file position"); return false; } if (!writer.WriteFileOffset(topLevelOffset)) { progress.Error("Cannot write top level entry offset"); return false; } int l=parameter.GetAreaAreaIndexMaxMag(); while (l>=0) { size_t areaLevelEntries=0; progress.Info(std::string("Storing level ")+NumberToString(l)+"..."); newAreaLeafs.clear(); SetOffsetOfChildren(leafs,newAreaLeafs); leafs=newAreaLeafs; // Areas if (areas==0 || (areas>0 && areas>areasConsumed)) { uint32_t areaCount=0; progress.Info(std::string("Scanning areas.dat for areas of index level ")+NumberToString(l)+"..."); if (!scanner.GotoBegin()) { progress.Error("Cannot go to begin of way file"); } if (!scanner.Read(areaCount)) { progress.Error("Error while reading number of data entries in file"); return false; } areas=0; for (uint32_t a=1; a<=areaCount; a++) { progress.SetProgress(a,areaCount); FileOffset offset; Area area; scanner.GetPos(offset); if (!area.Read(typeConfig, scanner)) { progress.Error(std::string("Error while reading data entry ")+ NumberToString(a)+" of "+ NumberToString(areaCount)+ " in file '"+ scanner.GetFilename()+"'"); return false; } areas++; double minLon; double maxLon; double minLat; double maxLat; area.GetBoundingBox(minLon,maxLon,minLat,maxLat); // // Calculate highest level where the bounding box completely // fits in the cell size and assign area to the tiles that // hold the geometric center of the tile. // int level=parameter.GetAreaAreaIndexMaxMag(); while (level>=0) { if (maxLon-minLon<=cellWidth[level] && maxLat-minLat<=cellHeight[level]) { break; } level--; } if (level==l) { // // Renormated coordinate space (everything is >=0) // minLon+=180; maxLon+=180; minLat+=90; maxLat+=90; // // Calculate minimum and maximum tile ids that are covered // by the area // uint32_t minyc=(uint32_t)floor(minLat/cellHeight[level]); uint32_t maxyc=(uint32_t)ceil(maxLat/cellHeight[level]); uint32_t minxc=(uint32_t)floor(minLon/cellWidth[level]); uint32_t maxxc=(uint32_t)ceil(maxLon/cellWidth[level]); Entry entry; entry.type=area.GetType()->GetId(); entry.offset=offset; // Add this area to the tile where the center of the area lies in. 
leafs[Pixel((minxc+maxxc)/2,(minyc+maxyc)/2)].areas.push_back(entry); areaLevelEntries++; areasConsumed++; } } } progress.Debug(std::string("Writing ")+NumberToString(leafs.size())+" leafs ("+ NumberToString(areaLevelEntries)+") "+ "to index of level "+NumberToString(l)+"..."); // Remember the offset of one cell in level '0' if (l==0) { if (!writer.GetPos(topLevelOffset)) { progress.Error("Cannot read top level entry offset"); return false; } } /* uint32_t minX=std::numeric_limits<uint32_t>::max(); uint32_t minY=std::numeric_limits<uint32_t>::max(); uint32_t maxX=std::numeric_limits<uint32_t>::min(); uint32_t maxY=std::numeric_limits<uint32_t>::min(); std::map<TypeId,size_t> useMap; for (std::map<Pixel,AreaLeaf>::const_iterator leaf=leafs.begin(); leaf!=leafs.end(); ++leaf) { minX=std::min(minX,leaf->first.x); maxX=std::max(maxX,leaf->first.x); minY=std::min(minY,leaf->first.y); maxY=std::max(maxY,leaf->first.y); for (std::list<Entry>::const_iterator entry=leaf->second.areas.begin(); entry!=leaf->second.areas.end(); entry++) { std::map<TypeId,size_t>::iterator u=useMap.find(entry->type); if (u==useMap.end()) { useMap[entry->type]=1; } else { u->second++; } } }*/ /* std::cout << "[" << minX << "-" << maxX << "]x[" << minY << "-" << maxY << "] => " << leafs.size() << "/" << (maxX-minX+1)*(maxY-minY+1) << " " << (int)BytesNeededToAddressFileData(leafs.size()) << " " << ByteSizeToString(BytesNeededToAddressFileData(leafs.size())*(maxX-minX+1)*(maxY-minY+1)) << std::endl; for (std::map<TypeId,size_t>::const_iterator u=useMap.begin(); u!=useMap.end(); ++u) { std::cout << "* " << u->first << " " << typeConfig.GetTypeInfo(u->first).GetName() << " " << u->second << std::endl; }*/ if (!WriteIndexLevel(parameter, writer, (int)l, leafs)) { return false; } l--; } writer.SetPos(topLevelOffsetOffset); writer.WriteFileOffset(topLevelOffset); return !writer.HasError() && writer.Close(); }
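/*
 * The index generator above assigns each area to the highest magnification
 * level whose cell still spans the area's bounding box, then drops it into
 * the cell holding the box centre. A self-contained sketch of that level
 * search (levelForBoundingBox is an illustrative name, not libosmscout API):
 */
#include <cmath>
#include <vector>

static int levelForBoundingBox(double minLon, double maxLon,
                               double minLat, double maxLat,
                               int maxMag) {
  // Cell sizes per level: the whole world at level 0, halved at each level.
  std::vector<double> cellWidth(maxMag + 1), cellHeight(maxMag + 1);
  for (int i = 0; i <= maxMag; i++) {
    cellWidth[i] = 360.0 / std::pow(2.0, i);
    cellHeight[i] = 180.0 / std::pow(2.0, i);
  }

  int level = maxMag;
  while (level >= 0) {
    if (maxLon - minLon <= cellWidth[level] &&
        maxLat - minLat <= cellHeight[level]) {
      break; // finest level whose cell still contains the box
    }
    level--;
  }
  return level; // level 0 always fits, so this never drops below 0 for valid input
}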
bool WayNodeReductionProcessorFilter::RemoveDuplicateNodes(Progress& progress, const FileOffset& offset, Way& way, bool& save) { unsigned char buffers[2][coordByteSize]; bool reduced=false; if (way.nodes.size()>=2) { size_t lastIndex=0; size_t currentIndex=1; nodeBuffer.clear(); idBuffer.clear(); // Prefill with the first coordinate way.nodes[0].EncodeToBuffer(buffers[0]); nodeBuffer.push_back(way.nodes[0]); if (!way.ids.empty()) { idBuffer.push_back(way.ids[0]); } for (size_t n=1; n<way.nodes.size(); n++) { way.nodes[n].EncodeToBuffer(buffers[currentIndex]); if (IsEqual(buffers[lastIndex], buffers[currentIndex])) { if (n>=way.ids.size() || way.ids[n]==0) { duplicateCount++; reduced=true; } else if ((n-1)>=way.ids.size() || way.ids[n-1]==0) { way.ids[n-1]=way.ids[n]; duplicateCount++; reduced=true; } else { nodeBuffer.push_back(way.nodes[n]); if (n<way.ids.size()) { idBuffer.push_back(way.ids[n]); } lastIndex=currentIndex; currentIndex=(lastIndex+1)%2; } } else { nodeBuffer.push_back(way.nodes[n]); if (n<way.ids.size()) { idBuffer.push_back(way.ids[n]); } lastIndex=currentIndex; currentIndex=(lastIndex+1)%2; } } } if (reduced) { if (nodeBuffer.size()<2) { progress.Debug("Way " + NumberToString(offset) + " empty/invalid after node reduction"); save=false; return true; } else { way.nodes=nodeBuffer; way.ids=idBuffer; } } return true; }
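/*
 * RemoveDuplicateNodes above keeps libosmscout's id bookkeeping, but its
 * core operation is dropping consecutive nodes that encode to the same
 * coordinate. For a plain polyline without ids, std::unique with an
 * equality predicate is enough; Point2D here is a stand-in type, not an
 * osmscout one.
 */
#include <algorithm>
#include <cmath>
#include <vector>

struct Point2D {
  double lon;
  double lat;
};

static void removeConsecutiveDuplicates(std::vector<Point2D> &nodes,
                                        double eps = 1e-7) {
  auto sameCoord = [eps](const Point2D &a, const Point2D &b) {
    return std::abs(a.lon - b.lon) <= eps && std::abs(a.lat - b.lat) <= eps;
  };
  nodes.erase(std::unique(nodes.begin(), nodes.end(), sameCoord), nodes.end());
}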
void bigmailer::onSend()
{
  const String enter = "\r\n";

  if (messagePage.editor.IsModified())
    messagePage.Save();

  // map of the fields
  VectorMap<String, int> fldMap;
  for (int i = 0; i < theDefList.fields.GetCount(); i++)
    fldMap.Add(theDefList.fields[i], i);

  String outHTML;
  Index<String> css;
  VectorMap<String, String> links;
  String path = "./";
  String html = EncodeHtml(messagePage.editor.Get(), css, links, path);

  outHTML << "<!DOCTYPE html PUBLIC \"-//W3C//DTD HTML 4.01 Transitional//EN\">\r\n"
             "<html>\r\n"
             "<head>\r\n"
             "<meta http-equiv=\"content-type\" content=\"text/html; charset=utf-8\">\r\n"
             "<style>\r\n"
          << AsCss(css) << "\r\n"
          << "</style>\r\n"
             "</head>\r\n"
             "<body>\r\n"
          << html << "\r\n"
          << "</body>\r\n"
             "</html>\r\n";

  Progress prog;
  prog.Set(0, theDefList.data.GetCount());
  prog.Show();

  theDefList.errors.Clear();

  String theHtml;
  for (int row = 0; row < theDefList.data.GetCount(); row++) {
    String addrTo = theDefList.data[row][1];
    if (addrTo.IsEmpty()) {
      theDefList.errors.Add(t_("dirección incorrecta")); // "invalid address"
      continue;
    }
    if (prog.Canceled())
      break;

    theHtml = outHTML;

    String s = AsString(row + 1) + " / " + AsString(theDefList.data.GetCount());
    prog.SetText(s);

    // Replace every [%field%] marker with this recipient's data for that field
    for (int i = 0; i < theDefList.fields.GetCount(); i++) {
      String toFind = "[%" + theDefList.fields[i] + "%]";
      int from = 0;
      while ((from = theHtml.Find(toFind, from)) != -1) {
        theHtml.Remove(from, toFind.GetCount());
        theHtml.Insert(from, theDefList.data[row][fldMap.Get(theDefList.fields[i])]);
      }
    }

    prog.SetPos(row + 1);

    SmtpMailEx mail;
    mail.Host(theCfg.smtpServer)
        .User(theCfg.user)
        .Password(theCfg.pass)
        .From(theMsg.from)
        .ReplyTo(theMsg.from)
        .To(addrTo)
        .Subject(theMsg.subject)
        .Text("")
        .Attach("MENSAJE", theHtml, "text/html; charset=utf-8");

    if (!mail.Send())
      theDefList.errors.Add(mail.GetError());
    else
      theDefList.errors.Add("OK");
  }

  theDefList.Save();
}
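/*
 * onSend above substitutes "[%field%]" markers into the HTML once per
 * recipient using U++ String::Find/Remove/Insert. The same loop written
 * against std::string, as a point of reference; replacePlaceholder is an
 * invented helper, not part of the original class.
 */
#include <string>

static void replacePlaceholder(std::string &html,
                               const std::string &field,
                               const std::string &value) {
  const std::string marker = "[%" + field + "%]";
  std::string::size_type from = 0;
  while ((from = html.find(marker, from)) != std::string::npos) {
    html.replace(from, marker.size(), value);
    from += value.size(); // continue after the inserted text
  }
}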
bool NodeDataGenerator::Import(const TypeConfigRef& typeConfig, const ImportParameter& parameter, Progress& progress)
{
  uint32_t rawNodeCount=0;
  uint32_t nodesReadCount=0;
  uint32_t nodesWrittenCount=0;

  //
  // Iterate over all raw nodes, check their type, and convert them from raw nodes
  // to nodes if the type is interesting (!=typeIgnore).
  //
  // Count the bounding box by the way...
  //

  progress.SetAction("Generating nodes.tmp");

  FileScanner scanner;
  FileWriter  writer;

  if (!scanner.Open(AppendFileToDir(parameter.GetDestinationDirectory(), "rawnodes.dat"),
                    FileScanner::Sequential,
                    parameter.GetRawNodeDataMemoryMaped())) {
    progress.Error("Cannot open 'rawnodes.dat'");
    return false;
  }

  if (!scanner.Read(rawNodeCount)) {
    progress.Error("Error while reading number of data entries in file");
    return false;
  }

  if (!writer.Open(AppendFileToDir(parameter.GetDestinationDirectory(), "nodes.tmp"))) {
    progress.Error("Cannot create 'nodes.tmp'");
    return false;
  }

  writer.Write(nodesWrittenCount);

  for (uint32_t n=1; n<=rawNodeCount; n++) {
    progress.SetProgress(n,rawNodeCount);

    RawNode rawNode;
    Node    node;

    if (!rawNode.Read(typeConfig, scanner)) {
      progress.Error(std::string("Error while reading data entry ")+
                     NumberToString(n)+" of "+
                     NumberToString(rawNodeCount)+
                     " in file '"+
                     scanner.GetFilename()+"'");
      return false;
    }

    nodesReadCount++;

    if (!rawNode.GetType()->GetIgnore()) {
      node.SetFeatures(rawNode.GetFeatureValueBuffer());
      node.SetCoords(rawNode.GetCoords());

      FileOffset fileOffset;

      if (!writer.GetPos(fileOffset)) {
        progress.Error(std::string("Error while reading current fileOffset in file '")+
                       writer.GetFilename()+"'");
        return false;
      }

      writer.Write(rawNode.GetId());
      node.Write(typeConfig, writer);

      nodesWrittenCount++;
    }
  }

  if (!scanner.Close()) {
    return false;
  }

  writer.SetPos(0);
  writer.Write(nodesWrittenCount);

  if (!writer.Close()) {
    return false;
  }

  progress.Info(std::string("Read "+NumberToString(nodesReadCount)+" nodes, wrote "+NumberToString(nodesWrittenCount)+" nodes"));

  return true;
}
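/*
 * NodeDataGenerator::Import writes a placeholder record count, streams the
 * records, then seeks back to offset 0 and patches the real count in. The
 * same pattern with std::fstream; the file layout and record type here are
 * made up for illustration.
 */
#include <cstdint>
#include <fstream>
#include <string>
#include <vector>

static bool writeCountedRecords(const std::string &path,
                                const std::vector<double> &records) {
  std::fstream out(path, std::ios::binary | std::ios::out | std::ios::trunc);
  if (!out)
    return false;

  std::uint32_t count = 0;
  out.write(reinterpret_cast<const char *>(&count), sizeof(count)); // placeholder

  for (double value : records) {
    out.write(reinterpret_cast<const char *>(&value), sizeof(value));
    ++count;
  }

  out.seekp(0);                                                     // back to the header
  out.write(reinterpret_cast<const char *>(&count), sizeof(count)); // patch the count
  return static_cast<bool>(out);
}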
bool Deconvolution::execute(PlugInArgList* pInArgList, PlugInArgList* pOutArgList) { StepResource pStep("Deconvolution Sharpening", "app", "619F3C8A-FB70-44E0-B211-B116E604EDDA"); if (pInArgList == NULL || pOutArgList == NULL) { return false; } Progress* pProgress = pInArgList->getPlugInArgValue<Progress>(Executable::ProgressArg()); RasterElement* pCube = pInArgList->getPlugInArgValue<RasterElement>(Executable::DataElementArg()); if (pCube == NULL) { std::string msg = "A raster cube must be specified."; pStep->finalize(Message::Failure, msg); if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ERRORS); } return false; } RasterDataDescriptor* pDesc = static_cast<RasterDataDescriptor*>(pCube->getDataDescriptor()); VERIFY(pDesc != NULL); EncodingType ResultType = pDesc->getDataType(); if (pDesc->getDataType() == INT4SCOMPLEX) { ResultType = INT4SBYTES; } else if (pDesc->getDataType() == FLT8COMPLEX) { ResultType = FLT8BYTES; } FactoryResource<DataRequest> pRequest; pRequest->setInterleaveFormat(BSQ); DataAccessor pSrcAcc = pCube->getDataAccessor(pRequest.release()); ModelResource<RasterElement> pResultCube(RasterUtilities::createRasterElement(pCube->getName() + "_Deconvolution_Sharpening_Result", pDesc->getRowCount(), pDesc->getColumnCount(), ResultType)); if (pResultCube.get() == NULL) { std::string msg = "A raster cube could not be created."; pStep->finalize(Message::Failure, msg); if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ERRORS); } return false; } FactoryResource<DataRequest> pResultRequest; pResultRequest->setWritable(true); DataAccessor pDestAcc = pResultCube->getDataAccessor(pResultRequest.release()); Service<DesktopServices> pDesktop; DeconvolutionDlg dlg(pDesktop->getMainWidget()); int stat = dlg.exec(); if (stat != QDialog::Accepted) { return true; } double minGrayValue; double maxGrayValue; double deltaValue = 0.0; int nFilterType = dlg.getCurrentFilterType(); int windowSize = dlg.getCurrentWindowSize(); double sigmaVal = dlg.getSigmaValue(); double gamaVal = dlg.getGamaValue(); windowSize = (windowSize-1)/2; if (NULL != pOriginalImage) { free(pOriginalImage); } pOriginalImage = (double *)malloc(sizeof(double)*pDesc->getRowCount()*pDesc->getColumnCount()); double *OrigData = (double *)malloc(sizeof(double)*pDesc->getRowCount()*pDesc->getColumnCount()); double *NewData = (double *)malloc(sizeof(double)*pDesc->getRowCount()*pDesc->getColumnCount()); double *ConvoData = (double *)malloc(sizeof(double)*pDesc->getRowCount()*pDesc->getColumnCount()); double *pTempData; InitializeData(pSrcAcc, pOriginalImage, OrigData, pDesc->getRowCount(), pDesc->getColumnCount(), pDesc->getDataType()); GetGrayScale(&minGrayValue, &maxGrayValue, pDesc->getDataType()); //Perform deconvolution iteratively for (int num = 0; num < MAX_ITERATION_NUMBER; num++) { if (pProgress != NULL) { pProgress->updateProgress("Deconvolution process", num*100/MAX_ITERATION_NUMBER, NORMAL); } if (isAborted()) { std::string msg = getName() + " has been aborted."; pStep->finalize(Message::Abort, msg); if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ABORT); } free(OrigData); free(NewData); free(ConvoData); return false; } deltaValue = DeconvolutionFunc(OrigData, pOriginalImage, NewData, ConvoData, sigmaVal, gamaVal, windowSize, pDesc->getRowCount(), pDesc->getColumnCount(), nFilterType, maxGrayValue, minGrayValue); pTempData = OrigData; OrigData = NewData; NewData = pTempData; double errorRate = deltaValue/(maxGrayValue-minGrayValue); if (errorRate < CONVERGENCE_THRESHOLD) { break; } } 
free(NewData); free(ConvoData); //Output result unsigned int nCount = 0; for (int i = 0; i < pDesc->getRowCount(); i++) { for (int j = 0; j < pDesc->getColumnCount(); j++) { if (!pDestAcc.isValid()) { std::string msg = "Unable to access the cube data."; pStep->finalize(Message::Failure, msg); if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ERRORS); } free(OrigData); return false; } pDestAcc->toPixel(i, j); switchOnEncoding(ResultType, restoreImageValue, pDestAcc->getColumn(), (OrigData+nCount)); nCount++; } } free(OrigData); if (!isBatch()) { Service<DesktopServices> pDesktop; SpatialDataWindow* pWindow = static_cast<SpatialDataWindow*>(pDesktop->createWindow(pResultCube->getName(), SPATIAL_DATA_WINDOW)); SpatialDataView* pView = (pWindow == NULL) ? NULL : pWindow->getSpatialDataView(); if (pView == NULL) { std::string msg = "Unable to create view."; pStep->finalize(Message::Failure, msg); if (pProgress != NULL) { pProgress->updateProgress(msg, 0, ERRORS); } return false; } pView->setPrimaryRasterElement(pResultCube.get()); pView->createLayer(RASTER, pResultCube.get()); } if (pProgress != NULL) { pProgress->updateProgress("Deconvolution enhancement is complete.", 100, NORMAL); } pOutArgList->setPlugInArgValue("Deconvolution enhancement Result", pResultCube.release()); pStep->finalize(); return true; }
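/*
 * The deconvolution loop above iterates up to MAX_ITERATION_NUMBER times,
 * swapping the input and output buffers each pass and stopping early once
 * the relative change falls below a threshold. A sketch of the same loop
 * shape using std::vector and std::swap instead of raw malloc/free; the
 * refineOnce callback stands in for DeconvolutionFunc.
 */
#include <functional>
#include <utility>
#include <vector>

static int iterateUntilConverged(
    std::vector<double> &current, std::vector<double> &next,
    const std::function<double(const std::vector<double> &, std::vector<double> &)> &refineOnce,
    double valueRange, double threshold, int maxIterations) {
  int iter = 0;
  for (; iter < maxIterations; ++iter) {
    const double delta = refineOnce(current, next); // writes the refined image into next
    std::swap(current, next);                       // refined image becomes the new input
    if (delta / valueRange < threshold)
      break; // converged
  }
  return iter; // number of refinement passes actually run
}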
AppBarExamples::AppBarExamples(QWidget *parent) : QWidget(parent) { Progress *p = new Progress; p->setProgressType(Material::IndeterminateProgress); p->setMinimum(0); p->setMaximum(99); p->setValue(22); QVBoxLayout *l = new QVBoxLayout; setLayout(l); l->addStretch(1); l->addWidget(p); CircularProgress *cp = new CircularProgress; cp->setProgressType(Material::IndeterminateProgress); cp->setMinimum(0); cp->setMaximum(99); cp->setValue(90); l->addWidget(cp); QProgressBar *pb = new QProgressBar; pb->setMinimum(0); pb->setMaximum(99); pb->setValue(50); QPushButton *b = new QPushButton; l->addWidget(b); l->addWidget(pb); /* QLayout *mainLayout = widget()->layout(); { QWidget *widget = new QWidget; QVBoxLayout *layout = new QVBoxLayout; widget->setLayout(layout); AppBar *appBar = new AppBar; layout->addWidget(appBar); layout->addStretch(); layout->setContentsMargins(0, 0, 0, 0); QScrollArea *area = new QScrollArea; area->setWidget(widget); area->setWidgetResizable(true); area->setBackgroundRole(QPalette::Base); Frame *frame = new Frame; frame->setCodeSnippet( "QVBoxLayout *layout = new QVBoxLayout;\n" "AppBar *appBar = new AppBar;\n" "layout->addWidget(appBar);\n" "layout->addStretch();\n" "layout->setContentsMargins(0, 0, 0, 0);" ); frame->setWidget(area); mainLayout->addWidget(frame); } { QWidget *widget = new QWidget; QVBoxLayout *layout = new QVBoxLayout; widget->setLayout(layout); Progress *p = new Progress; Frame *frame = new Frame; frame->setCodeSnippet( "" ); frame->setWidget(p); mainLayout->addWidget(frame); } */ }
int WINAPI WinMain( HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) { mainInstance = hInstance; mainIcon = LoadIcon(hInstance, MAKEINTRESOURCE(IDR_MAINFRAME)); int argc = __argc; char** argv = __argv; vector<string> jobs; string jobsFile; bool quiet = false; for (int i = 1; i < argc; i++) { string arg = argv[i]; if (arg == "-appPath" && argc > i+1) { i++; appPath = argv[i]; } else if (arg == "-exePath" && argc > i+1) { i++; exePath = argv[i]; } else if (arg == "-updateFile" && argc > i+1) { i++; updateFile = argv[i]; jobs.push_back("update"); } else if (arg == "-quiet") { quiet = true; } else { jobsFile = arg; } } if (appPath.empty() || exePath.empty()) { ShowError("The installer was not given enough information to continue."); return __LINE__; } if (updateFile.empty()) { app = Application::NewApplication(appPath); } else { app = Application::NewApplication(updateFile, appPath); } if (app.isNull()) { ShowError("The installer could not read the application manifest."); return __LINE__; } if (!updateFile.empty()) { appInstallPath = app->path; } else { appInstallPath = GetDefaultInstallationDirectory(); } componentInstallPath = FileUtils::GetSystemRuntimeHomeDirectory(); // Read all jobs from the jobs file if (!jobsFile.empty() && FileUtils::IsFile(jobsFile)) { std::ifstream file(jobsFile.c_str()); if (!file.bad() && !file.fail() && !file.eof()) { string line; while(!std::getline(file, line).eof()) { jobs.push_back(line); } } } // Major WTF here, Redmond. LoadLibrary(TEXT("Riched20.dll")); CoInitialize(NULL); if (!quiet) { HWND introDialog = CreateDialog( hInstance, MAKEINTRESOURCE(IDD_INTRODIALOG), 0, DialogProc); if (!introDialog) { int i = GetLastError(); ShowError("The installer could not create the introductory dialog."); return __LINE__; } MSG msg; int status; while ((status = GetMessage(&msg, 0, 0, 0)) != 0) { if (status == -1) { char buf[2000]; sprintf(buf, "Error: %i", GetLastError()); ShowError(buf); return -1; } if (!IsDialogMessage(introDialog, &msg)) { TranslateMessage(&msg); DispatchMessage(&msg); } } } else { doInstall = true; } if (doInstall) { Progress *p = new Progress; p->SetLineText(1, app->name, false); p->Show(); bool success = InstallApplication(p) && HandleAllJobs(jobs, p) && FinishInstallation(); CoUninitialize(); return success ? 0 : 1; } else { CoUninitialize(); return 1; } }
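/*
 * The jobs file above is read with "while (!std::getline(file, line).eof())",
 * a form that can drop a final line that has no trailing newline (getline
 * sets eofbit on that read, so the body is skipped) and does not handle read
 * errors. The conventional idiom tests the stream returned by getline:
 */
#include <fstream>
#include <string>
#include <vector>

static std::vector<std::string> readJobLines(const std::string &jobsFile) {
  std::vector<std::string> jobs;
  std::ifstream file(jobsFile.c_str());
  std::string line;
  while (std::getline(file, line)) { // stops on EOF or error, keeps the last line
    if (!line.empty())
      jobs.push_back(line);
  }
  return jobs;
}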
void IsisMain() { latLonGrid = NULL; // We will be processing by line ProcessByLine p; Cube *icube = p.SetInputCube("FROM"); UserInterface &ui = Application::GetUserInterface(); QString mode = ui.GetString("MODE"); outline = ui.GetBoolean("OUTLINE"); ticks = ui.GetBoolean("TICKS"); if (ticks) { tickSize = ui.GetInteger("TICKSIZE") / 2; diagonalTicks = ui.GetBoolean("DIAGONALTICKS"); } lineWidth = ui.GetInteger("LINEWIDTH") / 2; QString bval = ui.GetString("BKGNDVALUE").toUpper(); image = (bval == "IMAGE"); bkgndValue = Null; if (bval == "HRS") { bkgndValue = Hrs; } else if (bval == "LRS") { bkgndValue = Lrs; } else if (bval == "DN") { bkgndValue = ui.GetDouble("BKGNDDNVALUE"); } QString lval = ui.GetString("LINEVALUE").toUpper(); if (lval == "HRS") { lineValue = Hrs; } else if (lval == "LRS") { lineValue = Lrs; } else if (lval == "NULL") { lineValue = Null; } else if (lval == "DN") { if (ui.WasEntered("DNVALUE")) { lineValue = ui.GetDouble("DNVALUE"); } else { throw IException(IException::User, "Must enter value in DNVALUE", _FILEINFO_); } } else { IString msg = "Invalid LINEVALUE string [" + ui.GetString("LINEVALUE"); msg += "], must be one of HRS, LRS, NULL, or DN."; throw IException(IException::User, msg, _FILEINFO_); } inputSamples = icube->sampleCount(); inputLines = icube->lineCount(); // Line & sample based grid if (mode == "IMAGE") { p.SetOutputCube("TO"); baseLine = ui.GetInteger("BASELINE"); baseSample = ui.GetInteger("BASESAMPLE"); lineInc = ui.GetInteger("LINC"); sampleInc = ui.GetInteger("SINC"); p.StartProcess(imageGrid); p.EndProcess(); } // Lat/Lon based grid else { CubeAttributeOutput oatt("+32bit"); p.SetOutputCube(ui.GetFileName("TO"), oatt, icube->sampleCount(), icube->lineCount(), icube->bandCount()); UniversalGroundMap *gmap = new UniversalGroundMap(*icube, UniversalGroundMap::ProjectionFirst); latLonGrid = new GroundGrid(gmap, ticks, icube->sampleCount(), icube->lineCount()); baseLat = Latitude(ui.GetDouble("BASELAT"), *latLonGrid->GetMappingGroup(), Angle::Degrees); baseLon = Longitude(ui.GetDouble("BASELON"), *latLonGrid->GetMappingGroup(), Angle::Degrees); latInc = Angle(ui.GetDouble("LATINC"), Angle::Degrees); lonInc = Angle(ui.GetDouble("LONINC"), Angle::Degrees); Progress progress; progress.SetText("Calculating Grid"); Latitude minLat, maxLat; if (ui.WasEntered("MINLAT")) minLat = Latitude(ui.GetDouble("MINLAT"), *latLonGrid->GetMappingGroup(), Angle::Degrees); if (ui.WasEntered("MAXLAT")) maxLat = Latitude(ui.GetDouble("MAXLAT"), *latLonGrid->GetMappingGroup(), Angle::Degrees); Longitude minLon, maxLon; if (ui.WasEntered("MINLON")) minLon = Longitude(ui.GetDouble("MINLON"), *latLonGrid->GetMappingGroup(), Angle::Degrees); if (ui.WasEntered("MAXLON")) maxLon = Longitude(ui.GetDouble("MAXLON"), *latLonGrid->GetMappingGroup(), Angle::Degrees); latLonGrid->SetGroundLimits(minLat, minLon, maxLat, maxLon); latLonGrid->CreateGrid(baseLat, baseLon, latInc, lonInc, &progress); if (ui.GetBoolean("BOUNDARY")) latLonGrid->WalkBoundary(); p.StartProcess(groundGrid); p.EndProcess(); delete latLonGrid; latLonGrid = NULL; delete gmap; gmap = NULL; } }
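/*
 * The grid application above maps the LINEVALUE/BKGNDVALUE keywords onto
 * ISIS special pixel constants (Hrs, Lrs, Null) or a user-supplied DN. A
 * compact sketch of that dispatch; the SpecialPixel values below are
 * placeholders for illustration only, not the real ISIS sentinel constants.
 */
#include <stdexcept>
#include <string>

namespace SpecialPixel {
  const double Hrs  = -1.0; // placeholder sentinels, not ISIS values
  const double Lrs  = -2.0;
  const double Null = -3.0;
}

static double pixelValueForKeyword(const std::string &keyword, double dnValue) {
  if (keyword == "HRS")
    return SpecialPixel::Hrs;
  if (keyword == "LRS")
    return SpecialPixel::Lrs;
  if (keyword == "NULL")
    return SpecialPixel::Null;
  if (keyword == "DN")
    return dnValue;
  throw std::invalid_argument("keyword must be one of HRS, LRS, NULL, or DN");
}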
int FormatColumnMatrix::read(NameAssignment* nameMap){ try { string firstName, secondName; float distance; int nseqs = nameMap->size(); list = new ListVector(nameMap->getListVector()); Progress* reading = new Progress("Formatting matrix: ", nseqs * nseqs); int lt = 1; int refRow = 0; //we'll keep track of one cell - Cell(refRow,refCol) - and see if it's transpose int refCol = 0; //shows up later - Cell(refCol,refRow). If it does, then its a square matrix //need to see if this is a square or a triangular matrix... ofstream out; string tempOutFile = filename + ".temp"; m->openOutputFile(tempOutFile, out); while(fileHandle && lt == 1){ //let's assume it's a triangular matrix... if (m->control_pressed) { out.close(); m->mothurRemove(tempOutFile); fileHandle.close(); delete reading; return 0; } fileHandle >> firstName >> secondName >> distance; // get the row and column names and distance map<string,int>::iterator itA = nameMap->find(firstName); map<string,int>::iterator itB = nameMap->find(secondName); if(itA == nameMap->end()){ m->mothurOut("AAError: Sequence '" + firstName + "' was not found in the names file, please correct\n"); exit(1); } if(itB == nameMap->end()){ m->mothurOut("ABError: Sequence '" + secondName + "' was not found in the names file, please correct\n"); exit(1); } if (distance == -1) { distance = 1000000; } if((distance < cutoff) && (itA != itB)){ if(refRow == refCol){ // in other words, if we haven't loaded refRow and refCol... refRow = itA->second; refCol = itB->second; //making it square out << itA->second << '\t' << itB->second << '\t' << distance << endl; out << itB->second << '\t' << itA->second << '\t' << distance << endl; } else if(refRow == itA->second && refCol == itB->second){ lt = 0; } //you are square else if(refRow == itB->second && refCol == itA->second){ lt = 0; } //you are square else{ //making it square out << itA->second << '\t' << itB->second << '\t' << distance << endl; out << itB->second << '\t' << itA->second << '\t' << distance << endl; } reading->update(itA->second * nseqs / 2); } m->gobble(fileHandle); } out.close(); fileHandle.close(); string squareFile; if(lt == 0){ // oops, it was square squareFile = filename; }else{ squareFile = tempOutFile; } //sort file by first column so the distances for each row are together string outfile = m->getRootName(squareFile) + "sorted.dist.temp"; //use the unix sort #if defined (__APPLE__) || (__MACH__) || (linux) || (__linux) string command = "sort -n " + squareFile + " -o " + outfile; system(command.c_str()); #else //sort using windows sort string command = "sort " + squareFile + " /O " + outfile; system(command.c_str()); #endif if (m->control_pressed) { m->mothurRemove(tempOutFile); m->mothurRemove(outfile); delete reading; return 0; } //output to new file distance for each row and save positions in file where new row begins ifstream in; m->openInputFile(outfile, in); distFile = outfile + ".rowFormatted"; m->openOutputFile(distFile, out); rowPos.resize(nseqs, -1); int currentRow; int first, second; float dist; map<int, float> rowMap; map<int, float>::iterator itRow; //get first currentRow in >> first; currentRow = first; string firstString = toString(first); for(int k = 0; k < firstString.length(); k++) { in.putback(firstString[k]); } while(!in.eof()) { if (m->control_pressed) { in.close(); out.close(); m->mothurRemove(distFile); m->mothurRemove(tempOutFile); m->mothurRemove(outfile); delete reading; return 0; } in >> first >> second >> dist; m->gobble(in); if (first != currentRow) { //save position in file of 
each new row rowPos[currentRow] = out.tellp(); out << currentRow << '\t' << rowMap.size() << '\t'; for (itRow = rowMap.begin(); itRow != rowMap.end(); itRow++) { out << itRow->first << '\t' << itRow->second << '\t'; } out << endl; currentRow = first; rowMap.clear(); //save row you just read if (dist < cutoff) { rowMap[second] = dist; } }else{ if (dist < cutoff) { rowMap[second] = dist; } } } //print last Row //save position in file of each new row rowPos[currentRow] = out.tellp(); out << currentRow << '\t' << rowMap.size() << '\t'; for (itRow = rowMap.begin(); itRow != rowMap.end(); itRow++) { out << itRow->first << '\t' << itRow->second << '\t'; } out << endl; in.close(); out.close(); if (m->control_pressed) { m->mothurRemove(distFile); m->mothurRemove(tempOutFile); m->mothurRemove(outfile); delete reading; return 0; } m->mothurRemove(tempOutFile); m->mothurRemove(outfile); reading->finish(); delete reading; list->setLabel("0"); if (m->control_pressed) { m->mothurRemove(distFile); return 0; } return 1; } catch(exception& e) { m->errorOut(e, "FormatColumnMatrix", "read"); exit(1); } }
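/*
 * FormatColumnMatrix::read records out.tellp() for each row it writes so a
 * later pass can seek straight to any row's distances. A minimal sketch of
 * that offset bookkeeping with standard streams; the row layout
 * ("row<TAB>count<TAB>pairs...") mirrors the code above but the helper name
 * is invented.
 */
#include <cstddef>
#include <fstream>
#include <map>
#include <string>
#include <vector>

static std::vector<std::streampos>
writeRowsAndRecordOffsets(const std::string &path,
                          const std::vector<std::map<int, float>> &rows) {
  std::ofstream out(path.c_str());
  std::vector<std::streampos> rowPos(rows.size(), std::streampos(-1));

  for (std::size_t row = 0; row < rows.size(); ++row) {
    rowPos[row] = out.tellp(); // remember where this row starts
    out << row << '\t' << rows[row].size() << '\t';
    for (const auto &entry : rows[row])
      out << entry.first << '\t' << entry.second << '\t';
    out << '\n';
  }
  return rowPos; // later: in.seekg(rowPos[r]) jumps straight to row r
}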
void RemoteSelector::sendFileButton_clicked()
{
  Progress* p = new Progress(0, service_);
  p->show();
}
int main (int argc, char** argv) { try { // We don't want any signals causing the program to quit in mid output, as // this would lead to odd colors persisting in the terminal. signal (SIGHUP, SIG_IGN); signal (SIGINT, SIG_IGN); signal (SIGKILL, SIG_IGN); signal (SIGPIPE, SIG_IGN); signal (SIGTERM, SIG_IGN); signal (SIGUSR1, SIG_IGN); signal (SIGUSR2, SIG_IGN); long arg_current = 0; #ifdef WAITING_FOR_VITAPI std::string arg_done = ""; #endif bool arg_elapsed = false; bool arg_estimate = false; std::string arg_label; long arg_max = 0; long arg_min = 0; bool arg_percentage = false; #ifdef WAITING_FOR_VITAPI std::string arg_remaining = ""; #endif bool arg_remove = false; time_t arg_start = 0; int arg_width = 80; std::string arg_style = ""; // Dynamically determine terminal width. unsigned short buff[4]; if (ioctl (fileno(stdout), TIOCGWINSZ, &buff) != -1) arg_width = buff[1]; static struct option longopts[] = { { "current", required_argument, NULL, 'c' }, #ifdef WAITING_FOR_VITAPI { "done", required_argument, NULL, 'd' }, #endif { "elapsed", no_argument, NULL, 'e' }, { "estimate", no_argument, NULL, 't' }, { "label", required_argument, NULL, 'l' }, { "max", required_argument, NULL, 'x' }, { "min", required_argument, NULL, 'm' }, { "now", no_argument, NULL, 'n' }, { "percentage", no_argument, NULL, 'p' }, #ifdef WAITING_FOR_VITAPI { "remaining", required_argument, NULL, 'a' }, #endif { "remove", no_argument, NULL, 'r' }, { "start", required_argument, NULL, 's' }, { "version", no_argument, NULL, 'v' }, { "width", required_argument, NULL, 'w' }, { "style", required_argument, NULL, 'y' }, { "help", no_argument, NULL, 'h' }, { NULL, 0, NULL, 0 } }; int ch; #ifdef WAITING_FOR_VITAPI while ((ch = getopt_long (argc, argv, "c:d:etl:x:m:npa:rs:vw:h", longopts, NULL)) != -1) #else while ((ch = getopt_long (argc, argv, "c:etl:x:m:nprs:vw:h", longopts, NULL)) != -1) #endif { switch (ch) { case 'c': arg_current = atol (optarg); break; #ifdef WAITING_FOR_VITAPI case 'd': arg_done = optarg; break; #endif case 'e': arg_elapsed = true; break; case 't': arg_estimate = true; break; case 'l': arg_label = optarg; break; case 'x': arg_max = atol (optarg); break; case 'm': arg_min = atol (optarg); break; case 'n': std::cout << time (NULL) << std::endl; exit (0); case 'p': arg_percentage = true; break; #ifdef WAITING_FOR_VITAPI case 'a': arg_remaining = optarg; break; #endif case 'r': arg_remove = true; break; case 's': arg_start = atoi (optarg); break; case 'v': showVersion (); break; case 'w': arg_width = atoi (optarg); break; case 'y': arg_style = optarg; break; case 'h': showUsage (); break; default: std::cout << "<default>" << std::endl; break; } } argc -= optind; argv += optind; // Sanity check arguments. if (arg_min || arg_max) if (arg_min > arg_max) throw std::string ("The --max value must not be less than the --min value."); if (arg_min || arg_max || arg_current) if (arg_min > arg_current || arg_current > arg_max) throw std::string ("The --current value must not lie outside the --min/--max range."); if (arg_width && arg_label.length ()) if (arg_label.length () >= arg_width) throw std::string ("The --label string is longer than the allowed --width value."); if (! arg_remove && ! 
(arg_min || arg_current || arg_max)) showUsage (); if (arg_elapsed && arg_start == 0) throw std::string ("To use the --elapsed feature, --start must be provided."); if (arg_estimate && arg_start == 0) throw std::string ("To use the --estimate feature, --start must be provided."); // Disallow signals from stopping the program while it is displaying color codes // Set up and render Progress object. Progress p (arg_label, arg_width, arg_min, arg_max, arg_percentage, arg_remove); p.setStyle (arg_style); p.setStart (arg_start); p.showElapsed (arg_elapsed); p.showEstimate (arg_estimate); p.removeAfter (arg_remove); p.update (arg_current); if (arg_remove) p.done (); } catch (const std::string& e) { std::cerr << "Error: " << e << std::endl; } catch (...) { std::cerr << "Unknown error occurred - please report." << std::endl; } return 0; }
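/*
 * The command-line tool above hands --min/--max/--current over to a Progress
 * object for rendering. The core arithmetic such a bar needs is small; this
 * sketch (renderBar is an invented helper, not the Progress API used here)
 * maps the current value to filled columns plus an optional percentage.
 */
#include <algorithm>
#include <sstream>
#include <string>

static std::string renderBar(long minimum, long maximum, long current,
                             int width, bool showPercentage) {
  const long span = std::max(maximum - minimum, 1L);
  const long clamped = std::min(std::max(current, minimum), maximum);
  const double fraction = static_cast<double>(clamped - minimum) / span;
  const int filled = static_cast<int>(fraction * width + 0.5);

  std::ostringstream out;
  out << '[' << std::string(filled, '=') << std::string(width - filled, ' ') << ']';
  if (showPercentage)
    out << ' ' << static_cast<int>(fraction * 100.0 + 0.5) << '%';
  return out.str();
}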