//! \brief Creates Talkfiles. //! //! \param logger A pointer to a Loggerobject bool TalkFileCreator::createTalkFiles() { m_abort = false; QString errStr; emit logItem(tr("Starting Talk file generation for folder %1") .arg(m_dir.dirName()), LOGINFO); emit logProgress(0,0); QCoreApplication::processEvents(); // read in Maps of paths - file/dirnames emit logItem(tr("Reading Filelist..."),LOGINFO); if(createTalkList(m_dir) == false) { emit logItem(tr("Talk file creation aborted"),LOGERROR); doAbort(); return false; } QCoreApplication::processEvents(); // generate entries { TalkGenerator generator(this); // no string corrections yet: do not set language for TalkGenerator. connect(&generator,SIGNAL(done(bool)),this,SIGNAL(done(bool))); connect(&generator,SIGNAL(logItem(QString,int)),this,SIGNAL(logItem(QString,int))); connect(&generator,SIGNAL(logProgress(int,int)),this,SIGNAL(logProgress(int,int))); connect(this,SIGNAL(aborted()),&generator,SLOT(abort())); if(generator.process(&m_talkList) == TalkGenerator::eERROR) { doAbort(); return false; } } // Copying talk files emit logItem(tr("Copying Talkfiles..."),LOGINFO); if(copyTalkFiles(&errStr) == false) { emit logItem(errStr,LOGERROR); doAbort(); return false; } // Deleting left overs if( !cleanup()) return false; emit logItem(tr("Finished creating Talk files"),LOGOK); emit logProgress(1,1); emit done(false); return true; }
//! \brief Slot invoked when the download has finished.
//!
//! Validates the HTTP response and transfer status (a cached copy is
//! accepted even without a 200 response), then — when m_unzip is set —
//! extracts the downloaded archive to the mountpoint after checking for
//! sufficient free disk space. Emits done(true) on any failure.
//!
//! \param error true if the downloader reported a transfer error.
void ZipInstaller::downloadDone(bool error)
{
    qDebug() << "[ZipInstall] download done, error:" << error;
    QStringList zipContents; // needed later

    // update progress bar
    emit logProgress(1, 1);
    // A cached file is usable even if the server did not answer 200.
    if(getter->httpResponse() != 200 && !getter->isCached()) {
        emit logItem(tr("Download error: received HTTP error %1.")
                .arg(getter->httpResponse()),LOGERROR);
        emit done(true);
        return;
    }
    if(getter->isCached())
        emit logItem(tr("Cached file used."), LOGINFO);
    if(error) {
        emit logItem(tr("Download error: %1").arg(getter->errorString()), LOGERROR);
        emit done(true);
        return;
    }
    else
        emit logItem(tr("Download finished."),LOGOK);
    QCoreApplication::processEvents();

    if(m_unzip) {
        // unzip downloaded file
        qDebug() << "[ZipInstall] about to unzip " << m_file << "to" << m_mountpoint;

        emit logItem(tr("Extracting file."), LOGINFO);
        QCoreApplication::processEvents();

        ZipUtil zip(this);
        connect(&zip, SIGNAL(logProgress(int, int)), this, SIGNAL(logProgress(int, int)));
        connect(&zip, SIGNAL(logItem(QString, int)), this, SIGNAL(logItem(QString, int)));
        zip.open(m_file, QuaZip::mdUnzip);
        // check for free space. Make sure after installation will still be
        // some room for operating (also includes calculation mistakes due to
        // cluster sizes on the player).
        if((qint64)Utils::filesystemFree(m_mountpoint)
                < (zip.totalUncompressedSize(Utils::filesystemClusterSize(m_mountpoint))
                   + 1000000)) {
            emit logItem(tr("Not enough disk space! Aborting."), LOGERROR);
            emit logProgress(1, 1);
            emit done(true);
            return;
        }
        zipContents = zip.files();
        if(!zip.extractArchive(m_mountpoint)) {
            emit logItem(tr("Extraction failed!"), LOGERROR);
            emit logProgress(1, 1);
            emit done(true);
            return;
        }
        zip.close();
    }
    else {
        // NOTE(review): the non-unzip branch (and the rest of this function)
        // continues beyond this chunk and is not visible here.
void BootloaderInstallBase::downloadBlFinish(bool error) { qDebug() << "[BootloaderInstallBase] Downloading bootloader finished, error:" << error; // update progress bar emit logProgress(100, 100); if(m_http.httpResponse() != 200) { emit logItem(tr("Download error: received HTTP error %1.") .arg(m_http.errorString()), LOGERROR); emit done(true); return; } if(error) { emit logItem(tr("Download error: %1") .arg(m_http.error()), LOGERROR); emit done(true); return; } else if(m_http.isCached()) emit logItem(tr("Download finished (cache used)."), LOGOK); else emit logItem(tr("Download finished."), LOGOK); QCoreApplication::processEvents(); m_blversion = m_http.timestamp(); emit downloadDone(); }
// Reports progress for one stage: converts the in-stage counter to an
// overall percentage, sends it to the front end and writes it to the log.
//
// \param stage                 which pipeline stage is reporting
// \param progress_in_stage     work completed within the stage
// \param progress_in_stage_max total work in the stage (may be 0 for an
//                              empty stage)
void Progress::messageProgress(Progress::Stage stage, int progress_in_stage, int progress_in_stage_max)
{
    // Guard against division by zero: a stage with no work would otherwise
    // produce a NaN/inf fraction. Treat it as "stage start" (fraction 0).
    const float stage_fraction = (progress_in_stage_max > 0)
            ? float(progress_in_stage) / float(progress_in_stage_max)
            : 0.0f;
    float percentage = calcOverallProgress(stage, stage_fraction);
    Application::getInstance().communication->sendProgress(percentage);

    logProgress(names[(int)stage].c_str(), progress_in_stage, progress_in_stage_max, percentage);
}
//! @brief append a folder to current archive //! @param source source folder //! @param basedir base folder for archive. Will get stripped from zip paths. //! @return true on success, false otherwise bool ZipUtil::appendDirToArchive(QString& source, QString& basedir) { bool result = true; if(!m_zip || !m_zip->isOpen()) { qDebug() << "[ZipUtil] Zip file not open!"; return false; } // get a list of all files and folders. Needed for progress info and avoids // recursive calls. QDirIterator iterator(source, QDirIterator::Subdirectories); QStringList fileList; while(iterator.hasNext()) { iterator.next(); // skip folders, we can't add them. if(!QFileInfo(iterator.filePath()).isDir()) { fileList.append(iterator.filePath()); } } qDebug() << "[ZipUtil] Adding" << fileList.size() << "files to archive"; int max = fileList.size(); for(int i = 0; i < max; i++) { QString current = fileList.at(i); if(!appendFileToArchive(current, basedir)) { qDebug() << "[ZipUtil] Error appending file" << current << "to archive" << m_zip->getZipName(); result = false; break; } emit logProgress(i, max); } return result; }
//! \brief copys Talkfiles from the temp dir to the target. Progress and installlog is handled inside //! //! \param errString Pointer to a QString where the error cause is written. //! \returns true on success, false on error or user abort bool TalkFileCreator::copyTalkFiles(QString* errString) { int progressMax = m_talkList.size(); int m_progress = 0; emit logProgress(m_progress,progressMax); QSettings installlog(m_mountpoint + "/.rockbox/rbutil.log", QSettings::IniFormat, 0); installlog.beginGroup("talkfiles"); for(int i=0; i < m_talkList.size(); i++) { if(m_abort) { *errString = tr("File copy aborted"); return false; } // skip not encoded files if(m_talkList[i].encoded == false) { emit logProgress(++m_progress,progressMax); continue; // this file was skipped in one of the previous steps } // remove target if it exists, and if we should overwrite it if(QFile::exists(m_talkList[i].target)) QFile::remove(m_talkList[i].target); // copying qDebug() << "[TalkFileCreator] copying" << m_talkList[i].talkfilename << "to" << m_talkList[i].target; if(!QFile::copy(m_talkList[i].talkfilename,m_talkList[i].target)) { *errString = tr("Copying of %1 to %2 failed").arg(m_talkList[i].talkfilename).arg(m_talkList[i].target); return false; } // add to installlog QString now = QDate::currentDate().toString("yyyyMMdd"); installlog.setValue(m_talkList[i].target.remove(0,m_mountpoint.length()),now); emit logProgress(++m_progress,progressMax); QCoreApplication::processEvents(); } installlog.endGroup(); installlog.sync(); return true; }
//! \brief Encodes a List of strings //! TalkGenerator::Status TalkGenerator::encodeList(QList<TalkEntry>* list) { QStringList dublicates; int progressMax = list->size(); int m_progress = 0; emit logProgress(m_progress,progressMax); for(int i=0; i < list->size(); i++) { if(m_abort) { emit logItem(tr("Encoding aborted"), LOGERROR); return eERROR; } //skip non-voiced entrys if(list->at(i).voiced == false) { qDebug() << "non voiced entry" << list->at(i).toSpeak <<"detected"; emit logProgress(++m_progress,progressMax); continue; } //skip dublicates if(!dublicates.contains(list->at(i).talkfilename)) dublicates.append(list->at(i).talkfilename); else { qDebug() << "dublicate skipped"; (*list)[i].encoded = true; emit logProgress(++m_progress,progressMax); continue; } //encode entry qDebug() << "encoding " << list->at(i).wavfilename << "to" << list->at(i).talkfilename; if(!m_enc->encode(list->at(i).wavfilename,list->at(i).talkfilename)) { emit logItem(tr("Encoding of %1 failed").arg(list->at(i).wavfilename), LOGERROR); return eERROR; } (*list)[i].encoded = true; emit logProgress(++m_progress,progressMax); QCoreApplication::processEvents(); } return eOK; }
// Main window constructor: builds the UI, wires up all menu/button signals,
// prepares the log file, and loads (or creates) the default converter
// settings before syncing them to the UI.
HCWindow::HCWindow(QWidget *parent) : QMainWindow(parent) {
    setupUi(this);

    // Only the Generations engine is supported so far.
    cb_game_engine->addItem("Generations");
    //cb_game_engine->addItem("Unleashed"); // TO VERIFY
    //cb_game_engine->addItem("Lost World"); // TO VERIFY
    cb_game_engine->setCurrentIndex(0);

    // Menu actions.
    connect(action_open_settings, SIGNAL(triggered()), this, SLOT(openSettingsTriggered()));
    connect(action_save_settings, SIGNAL(triggered()), this, SLOT(saveSettingsTriggered()));
    connect(action_about, SIGNAL(triggered()), this, SLOT(aboutTriggered()));
    connect(action_about_qt, SIGNAL(triggered()), qApp, SLOT(aboutQt()));
    connect(action_close, SIGNAL(triggered()), this, SLOT(close()));
    connect(action_community_guide, SIGNAL(triggered()), this, SLOT(communityGuideTriggered()));

    // Option checkboxes.
    connect(chk_gen_materials, SIGNAL(stateChanged(int)), this, SLOT(generateMaterialsChanged(int)));
    connect(chk_copy_convert_textures, SIGNAL(stateChanged(int)), this, SLOT(copyConvertTexturesChanged(int)));

    // Source/output/transform buttons.
    connect(pb_add_source_models, SIGNAL(released()), this, SLOT(addSourceModelsTriggered()));
    connect(pb_remove_source_model, SIGNAL(released()), this, SLOT(removeSourceModelTriggered()));
    connect(pb_clear_source_models, SIGNAL(released()), this, SLOT(clearSourceModelsTriggered()));
    connect(pb_source_textures, SIGNAL(released()), this, SLOT(browseTexturesTriggered()));
    connect(pb_terrain_output, SIGNAL(released()), this, SLOT(browseOutputTriggered()));
    connect(pb_reset_transform, SIGNAL(released()), this, SLOT(resetTransformTriggered()));
    connect(pb_tag_sheet, SIGNAL(released()), this, SLOT(tagCheatSheetTriggered()));
    connect(pb_convert, SIGNAL(released()), this, SLOT(convertTriggered()));

    te_progress->setReadOnly(true);

    // Reset log file (truncate by opening write-only, then close).
    QFile log_file(LogPath);
    log_file.open(QIODevice::WriteOnly);
    log_file.close();
    logProgress(ProgressNormal, "Logging progress to " + LogPath);

    if (!QFileInfo(DefaultSettingsPath).exists()) {
        // Default settings: written once when no settings file exists yet.
        converter_settings.model_source_paths = QStringList();
        converter_settings.texture_source_path = "";
        converter_settings.terrain_output_path = "";
        converter_settings.game_engine = Generations;
        converter_settings.merge_existing = true;
        converter_settings.generate_materials = true;
        converter_settings.copy_and_convert_textures = true;
        converter_settings.force_tags_layers = true;
        converter_settings.remove_material_tags = true;
        converter_settings.remove_model_tags = true;
        converter_settings.use_model_groups = true;
        converter_settings.convert_lights = true;
        converter_settings.group_cell_size = 25.0;
        // Identity transform: no offset, 100% scale, no rotation.
        converter_settings.position_x = converter_settings.position_y = converter_settings.position_z = 0.0;
        converter_settings.scale_x = converter_settings.scale_y = converter_settings.scale_z = 100.0;
        converter_settings.rotation_x = converter_settings.rotation_y = converter_settings.rotation_z = 0.0;
        saveSettings(DefaultSettingsPath);
    }
    else {
        loadSettings(DefaultSettingsPath);
    }

    updateUiFromSettings();
}
// Thread entry point: connects to the server's service manager, starts the
// database restore and pumps service messages to the log until the restore
// finishes or the thread is asked to terminate. All failures are reported
// via logError(); the thread itself always returns 0.
void* RestoreThread::Entry()
{
    wxDateTime now;
    wxString msg;

    try
    {
        msg.Printf(_("Connecting to server %s..."), serverM.c_str());
        logImportant(msg);
        IBPP::Service svc = IBPP::ServiceFactory(wx2std(serverM), wx2std(usernameM), wx2std(passwordM));
        svc->Connect();

        now = wxDateTime::Now();
        msg.Printf(_("Database restore started %s"), now.FormatTime().c_str());
        logImportant(msg);
        svc->StartRestore(wx2std(bkfileM), wx2std(dbfileM), pagesizeM, brfM);

        while (true)
        {
            // Cooperative cancellation: the owner requested thread deletion.
            if (TestDestroy())
            {
                now = wxDateTime::Now();
                msg.Printf(_("Database restore canceled %s"), now.FormatTime().c_str());
                logImportant(msg);
                break;
            }
            // A null message from the service signals completion.
            const char* c = svc->WaitMsg();
            if (c == 0)
            {
                now = wxDateTime::Now();
                msg.Printf(_("Database restore finished %s"), now.FormatTime().c_str());
                logImportant(msg);
                break;
            }
            msg = c;
            logProgress(msg);
        }
        svc->Disconnect();
    }
    catch (IBPP::Exception& e)
    {
        // Database-level failure: append the IBPP error text to the message.
        now = wxDateTime::Now();
        msg.Printf(_("Database restore canceled %s due to IBPP exception:\n\n"), now.FormatTime().c_str());
        msg += e.what();
        logError(msg);
    }
    catch (...)
    {
        // Last-resort catch: the thread must never leak an exception.
        now = wxDateTime::Now();
        msg.Printf(_("Database restore canceled %s due to exception"), now.FormatTime().c_str());
        logError(msg);
    }
    return 0;
}
//! \brief Voices a List of string //! TalkGenerator::Status TalkGenerator::voiceList(QList<TalkEntry>* list,int wavtrimth) { emit logProgress(0, list->size()); QStringList duplicates; m_ttsWarnings = false; for(int i=0; i < list->size(); i++) { (*list)[i].refs.tts = m_tts; (*list)[i].refs.wavtrim = wavtrimth; (*list)[i].refs.generator = this; // skip duplicated wav entries if(!duplicates.contains(list->at(i).wavfilename)) duplicates.append(list->at(i).wavfilename); else { qDebug() << "[TalkGen] duplicate skipped"; (*list)[i].voiced = true; continue; } } /* If the engine can't be parallelized, we use only 1 thread */ // NOTE: setting the number of maximum threads to use to 1 doesn't seem to // work as expected -- it causes sporadically output files missing (see // FS#11994). As a stop-gap solution use a separate implementation in that // case for running the TTS. if((m_tts->capabilities() & TTSBase::RunInParallel) != 0) { int maxThreadCount = QThreadPool::globalInstance()->maxThreadCount(); qDebug() << "[TalkGenerator] Maximum number of threads used:" << QThreadPool::globalInstance()->maxThreadCount(); connect(&ttsFutureWatcher, SIGNAL(progressValueChanged(int)), this, SLOT(ttsProgress(int))); ttsFutureWatcher.setFuture(QtConcurrent::map(*list, &TalkGenerator::ttsEntryPoint)); /* We use this loop as an equivalent to ttsFutureWatcher.waitForFinished() * since the latter blocks all events */ while(ttsFutureWatcher.isRunning()) QCoreApplication::processEvents(); /* Restore global settings, if we changed them */ if ((m_tts->capabilities() & TTSBase::RunInParallel) == 0) QThreadPool::globalInstance()->setMaxThreadCount(maxThreadCount); if(ttsFutureWatcher.isCanceled()) return eERROR; else if(m_ttsWarnings) return eWARNING; else return eOK; }
// Main window constructor: builds the UI, wires up all menu/button/table
// signals, prepares the log file, and loads (or creates) the default
// converter settings before syncing them to the UI.
HKWindow::HKWindow(QWidget *parent) : QMainWindow(parent) {
    setupUi(this);

    // Only Collision mode is implemented so far.
    cb_mode->addItem("Collision");
    //cb_mode->addItem("Rigid Bodies"); // TO IMPLEMENT
    //cb_mode->addItem("Animation"); // TO IMPLEMENT
    cb_mode->setCurrentIndex(0);

    // Created lazily elsewhere; start with no Havok environment.
    havok_enviroment = NULL;

    // Menu actions.
    connect(action_open_settings, SIGNAL(triggered()), this, SLOT(openSettingsTriggered()));
    connect(action_save_settings, SIGNAL(triggered()), this, SLOT(saveSettingsTriggered()));
    connect(action_about, SIGNAL(triggered()), this, SLOT(aboutTriggered()));
    connect(action_about_qt, SIGNAL(triggered()), qApp, SLOT(aboutQt()));
    connect(action_close, SIGNAL(triggered()), this, SLOT(close()));
    connect(action_community_guide, SIGNAL(triggered()), this, SLOT(communityGuideTriggered()));

    // Source/output/transform buttons.
    connect(pb_add_source_models, SIGNAL(released()), this, SLOT(addSourceModelsTriggered()));
    connect(pb_remove_source_model, SIGNAL(released()), this, SLOT(removeSourceModelTriggered()));
    connect(pb_clear_source_models, SIGNAL(released()), this, SLOT(clearSourceModelsTriggered()));
    connect(pb_output_file, SIGNAL(released()), this, SLOT(browseOutputTriggered()));
    connect(pb_reset_transform, SIGNAL(released()), this, SLOT(resetTransformTriggered()));

    // Tag/property table editing.
    connect(pb_up, SIGNAL(released()), this, SLOT(tagMoveUpTriggered()));
    connect(pb_down, SIGNAL(released()), this, SLOT(tagMoveDownTriggered()));
    connect(pb_new, SIGNAL(released()), this, SLOT(newTagTriggered()));
    connect(pb_delete, SIGNAL(released()), this, SLOT(deleteTagTriggered()));
    connect(tb_properties, SIGNAL(cellChanged(int, int)), this, SLOT(tagChangedTriggered(int, int)));
    connect(tb_properties, SIGNAL(cellDoubleClicked(int, int)), this, SLOT(tagDoubleClickTriggered(int, int)));

    connect(pb_convert, SIGNAL(released()), this, SLOT(convertTriggered()));

    te_progress->setReadOnly(true);

    // Reset log file (truncate by opening write-only, then close).
    QFile log_file(LogPath);
    log_file.open(QIODevice::WriteOnly);
    log_file.close();
    logProgress(ProgressNormal, "Logging progress to " + LogPath);

    if (!QFileInfo(DefaultSettingsPath).exists()) {
        // Default settings: written once when no settings file exists yet.
        converter_settings.mode = Collision;
        converter_settings.model_source_paths = QStringList();
        converter_settings.output_file = "";
        // Identity transform: no offset, 100% scale, no rotation.
        converter_settings.position_x = converter_settings.position_y = converter_settings.position_z = 0.0;
        converter_settings.scale_x = converter_settings.scale_y = converter_settings.scale_z = 100.0;
        converter_settings.rotation_x = converter_settings.rotation_y = converter_settings.rotation_z = 0.0;
        saveSettings(DefaultSettingsPath);
    }
    else {
        loadSettings(DefaultSettingsPath);
    }

    updateUiFromSettings();
}
//! \brief Voices a List of string //! TalkGenerator::Status TalkGenerator::voiceList(QList<TalkEntry>* list,int wavtrimth) { emit logProgress(0, list->size()); QStringList duplicates; m_ttsWarnings = false; for(int i=0; i < list->size(); i++) { (*list)[i].refs.tts = m_tts; (*list)[i].refs.wavtrim = wavtrimth; (*list)[i].refs.generator = this; // skip duplicated wav entries if(!duplicates.contains(list->at(i).wavfilename)) duplicates.append(list->at(i).wavfilename); else { qDebug() << "[TalkGen] duplicate skipped"; (*list)[i].voiced = true; continue; } } /* If the engine can't be parallelized, we use only 1 thread */ int maxThreadCount = QThreadPool::globalInstance()->maxThreadCount(); if ((m_tts->capabilities() & TTSBase::RunInParallel) == 0) QThreadPool::globalInstance()->setMaxThreadCount(1); connect(&ttsFutureWatcher, SIGNAL(progressValueChanged(int)), this, SLOT(ttsProgress(int))); ttsFutureWatcher.setFuture(QtConcurrent::map(*list, &TalkGenerator::ttsEntryPoint)); /* We use this loop as an equivalent to ttsFutureWatcher.waitForFinished() * since the latter blocks all events */ while(ttsFutureWatcher.isRunning()) QCoreApplication::processEvents(); /* Restore global settings, if we changed them */ if ((m_tts->capabilities() & TTSBase::RunInParallel) == 0) QThreadPool::globalInstance()->setMaxThreadCount(maxThreadCount); if(ttsFutureWatcher.isCanceled()) return eERROR; else if(m_ttsWarnings) return eWARNING; else return eOK; }
void ProgressReporter::progress(const std::unique_ptr<TransferReport>& report) { const TransferStats& stats = report->getSummary(); int64_t totalDiscoveredSize = report->getTotalFileSize(); int progress = 0; if (totalDiscoveredSize > 0) { progress = stats.getEffectiveDataBytes() * 100 / totalDiscoveredSize; } if (isTty_) { displayProgress(progress, report->getThroughputMBps(), report->getCurrentThroughputMBps()); } else { logProgress(stats.getEffectiveDataBytes(), progress, report->getThroughputMBps(), report->getCurrentThroughputMBps()); } }
//! \brief Encodes a List of strings //! TalkGenerator::Status TalkGenerator::encodeList(QList<TalkEntry>* list) { QStringList duplicates; int itemsCount = list->size(); emit logProgress(0, itemsCount); /* Do some preprocessing and remove entries that have not been voiced. */ for (int idx=0; idx < itemsCount; idx++) { if(list->at(idx).voiced == false) { qDebug() << "[TalkGen] unvoiced entry" << list->at(idx).toSpeak <<"detected"; list->removeAt(idx); itemsCount--; idx--; continue; } if(duplicates.contains(list->at(idx).talkfilename)) { (*list)[idx].encoded = true; /* make sure we skip this entry */ continue; } duplicates.append(list->at(idx).talkfilename); (*list)[idx].refs.encoder = m_enc; (*list)[idx].refs.generator = this; /* not really needed, unless we end up voicing and encoding with two different TalkGenerators.*/ } connect(&encFutureWatcher, SIGNAL(progressValueChanged(int)), this, SLOT(encProgress(int))); encFutureWatcher.setFuture(QtConcurrent::map(*list, &TalkGenerator::encEntryPoint)); /* We use this loop as an equivalent to encFutureWatcher.waitForFinished() * since the latter blocks all events */ while (encFutureWatcher.isRunning()) QCoreApplication::processEvents(QEventLoop::AllEvents); if (encFutureWatcher.isCanceled()) return eERROR; else return eOK; }
bool BootloaderInstallMi4::uninstall(void) { LOG_INFO() << "Uninstalling bootloader"; // check if it's actually a Rockbox bootloader emit logItem(tr("Checking for Rockbox bootloader"), LOGINFO); if(installed() != BootloaderRockbox) { emit logItem(tr("No Rockbox bootloader found"), LOGERROR); emit done(true); return false; } // check if OF file present emit logItem(tr("Checking for original firmware file"), LOGINFO); QString original = QFileInfo(Utils::resolvePathCase(m_blfile)).absolutePath() + "/OF.mi4"; if(Utils::resolvePathCase(original).isEmpty()) { emit logItem(tr("Error finding original firmware file"), LOGERROR); emit done(true); return false; } // finally remove RB bootloader QString resolved = Utils::resolvePathCase(m_blfile); QFile blfile(resolved); blfile.remove(); QFile::rename(Utils::resolvePathCase(original), resolved); emit logItem(tr("Rockbox bootloader successful removed"), LOGINFO); logInstall(LogRemove); emit logProgress(1, 1); emit done(false); return true; }
/*
 * Algorithm:
 * From top layer to bottom layer:
 * - find overhang by looking at the difference between two consucutive layers
 * - join with support areas from layer above
 * - subtract current layer
 * - use the result for the next lower support layer (without doing XY-distance and Z bottom distance,
 *   so that a single support beam may move around the model a bit => more stability)
 * - perform inset using X/Y-distance and bottom Z distance
 *
 * for support buildplate only: purge all support not connected to buildplate
 */
//! Computes per-layer support polygons for the model and stores them in
//! storage.support.supportAreasPerLayer. Does nothing when support is
//! disabled or the support type is Support_None.
//! \param storage     slice data; receives the generated support areas
//! \param object      mesh whose settings drive support generation
//! \param layer_count number of layers in the sliced model
void generateSupportAreas(SliceDataStorage& storage, SliceMeshStorage* object, int layer_count)
{
    // given settings
    ESupportType support_type = object->settings->getSettingAsSupportType("support_type");

    storage.support.generated = false;
    if (!object->settings->getSettingBoolean("support_enable"))
        return;
    if (support_type == Support_None)
        return;

    double supportAngle = object->settings->getSettingInAngleRadians("support_angle");
    bool supportOnBuildplateOnly = support_type == Support_PlatformOnly;
    int supportXYDistance = object->settings->getSettingInMicrons("support_xy_distance");
    int supportZDistance = object->settings->getSettingInMicrons("support_z_distance");
    int supportZDistanceBottom = object->settings->getSettingInMicrons("support_bottom_distance");
    int supportZDistanceTop = object->settings->getSettingInMicrons("support_top_distance");
    int supportJoinDistance = object->settings->getSettingInMicrons("support_join_distance");
    int support_bottom_stair_step_height = object->settings->getSettingInMicrons("support_bottom_stair_step_height");
    int smoothing_distance = object->settings->getSettingInMicrons("support_area_smoothing");

    int supportTowerDiameter = object->settings->getSettingInMicrons("support_tower_diameter");
    int supportMinAreaSqrt = object->settings->getSettingInMicrons("support_minimal_diameter");
    double supportTowerRoofAngle = object->settings->getSettingInAngleRadians("support_tower_roof_angle");

    //std::cerr <<" towerDiameter=" << towerDiameter <<", supportMinAreaSqrt=" << supportMinAreaSqrt << std::endl;

    int min_smoothing_area = 100*100; // minimal area for which to perform smoothing
    int z_layer_distance_tower = 1; // start tower directly below overhang point

    int layerThickness = object->settings->getSettingInMicrons("layer_height");
    int extrusionWidth = object->settings->getSettingInMicrons("wall_line_width_x"); // TODO check for layer0extrusionWidth!

    // derived settings:

    // Negative bottom/top distances mean "use the generic Z distance".
    if (supportZDistanceBottom < 0) supportZDistanceBottom = supportZDistance;
    if (supportZDistanceTop < 0) supportZDistanceTop = supportZDistance;

    int supportLayerThickness = layerThickness;

    int layerZdistanceTop = supportZDistanceTop / supportLayerThickness + 1; // support must always be 1 layer below overhang
    int layerZdistanceBottom = supportZDistanceBottom / supportLayerThickness;

    double tanAngle = tan(supportAngle) - 0.01; // the XY-component of the supportAngle
    int maxDistFromLowerLayer = tanAngle * supportLayerThickness; // max dist which can be bridged

    int support_layer_count = layer_count;

    double tanTowerRoofAngle = tan(supportTowerRoofAngle);
    int towerRoofExpansionDistance = layerThickness / tanTowerRoofAngle;

    // computation

    std::vector<Polygons> joinedLayers; // join model layers of all meshes into polygons and store small areas which need tower support
    std::vector<std::pair<int, std::vector<Polygons>>> overhang_points; // stores overhang_points along with the layer index at which the overhang point occurs
    AreaSupport::joinMeshesAndDetectOverhangPoints(storage, joinedLayers, overhang_points, layer_count, supportMinAreaSqrt, extrusionWidth);

    // initialization of supportAreasPerLayer
    for (int layer_idx = 0; layer_idx < layer_count ; layer_idx++)
        storage.support.supportAreasPerLayer.emplace_back();

    // Walk layers top-down so each layer can inherit support from the layer
    // above it.
    int overhang_points_pos = overhang_points.size() - 1;
    Polygons supportLayer_last;
    std::vector<Polygons> towerRoofs;
    for (int layer_idx = support_layer_count - 1 - layerZdistanceTop; layer_idx >= 0 ; layer_idx--)
    {
        // compute basic overhang and put in right layer ([layerZdistanceTOp] layers below)
        Polygons supportLayer_supportee = joinedLayers[layer_idx+layerZdistanceTop];
        Polygons supportLayer_supported = joinedLayers[layer_idx-1+layerZdistanceTop].offset(maxDistFromLowerLayer);
        Polygons basic_overhang = supportLayer_supportee.difference(supportLayer_supported);

        Polygons support_extension = basic_overhang.offset(maxDistFromLowerLayer);
        support_extension = support_extension.intersection(supportLayer_supported);
        support_extension = support_extension.intersection(supportLayer_supportee);

        Polygons overhang = basic_overhang.unionPolygons(support_extension);

        /* supported
         * .................
         *         ______________|
         * _______|         ^^^^^ basic overhang
         *
         *   ^^^^^^^^^ overhang extensions
         *   ^^^^^^^^^^^^^^ overhang
         */

        Polygons& supportLayer_this = overhang;

        supportLayer_this = supportLayer_this.simplify(50); // TODO: hardcoded value!

        if (supportMinAreaSqrt > 0)
        {
            // handle straight walls
            AreaSupport::handleWallStruts(supportLayer_this, supportMinAreaSqrt, supportTowerDiameter);
            // handle towers
            AreaSupport::handleTowers(supportLayer_this, towerRoofs, overhang_points, overhang_points_pos, layer_idx, towerRoofExpansionDistance, supportTowerDiameter, supportMinAreaSqrt, layer_count, z_layer_distance_tower);
        }

        if (layer_idx+1 < support_layer_count)
        { // join with support from layer up
            Polygons& supportLayer_up = supportLayer_last;

            Polygons joined = supportLayer_this.unionPolygons(supportLayer_up);
            // join different parts: offset out then back in closes small gaps
            if (supportJoinDistance > 0)
            {
                joined = joined.offset(supportJoinDistance);
                joined = joined.offset(-supportJoinDistance);
            }
            if (smoothing_distance > 0)
                joined = joined.smooth(smoothing_distance, min_smoothing_area);

            // remove layer
            Polygons insetted = joined.difference(joinedLayers[layer_idx]);
            supportLayer_this = insetted;
        }

        // Keep the un-insetted result for the next lower layer (see the
        // algorithm comment above: stability over strict distances).
        supportLayer_last = supportLayer_this;

        // inset using X/Y distance
        if (supportLayer_this.size() > 0)
            supportLayer_this = supportLayer_this.difference(joinedLayers[layer_idx].offset(supportXYDistance));

        // move up from model: stair-step the bottom Z distance
        if (layerZdistanceBottom > 0 && layer_idx >= layerZdistanceBottom)
        {
            int stepHeight = support_bottom_stair_step_height / supportLayerThickness + 1;
            int bottomLayer = ((layer_idx - layerZdistanceBottom) / stepHeight) * stepHeight;
            supportLayer_this = supportLayer_this.difference(joinedLayers[bottomLayer]);
        }

        storage.support.supportAreasPerLayer[layer_idx] = supportLayer_this;

        logProgress("support", support_layer_count - layer_idx, support_layer_count);
    }

    // do stuff for when support on buildplate only
    if (supportOnBuildplateOnly)
    {
        Polygons touching_buildplate = storage.support.supportAreasPerLayer[0];
        for (unsigned int layer_idx = 1 ; layer_idx < storage.support.supportAreasPerLayer.size() ; layer_idx++)
        {
            Polygons& supportLayer = storage.support.supportAreasPerLayer[layer_idx];

            touching_buildplate = supportLayer.intersection(touching_buildplate); // from bottom to top, support areas can only decrease!

            storage.support.supportAreasPerLayer[layer_idx] = touching_buildplate;
        }
    }

    joinedLayers.clear();

    storage.support.generated = true;
}
//! \brief Slice one model file and write the resulting G-code through \a gcode.
//!
//! Pipeline: load model -> optimize -> slice into layers -> layer parts ->
//! insets -> up/down skins + sparse infill -> skirt/raft -> per-layer G-code
//! export (insets, infill, support, speed/fan corrections).
//!
//! \param input_filename path of the model file to load and slice.
//! \param config         slicing and printing settings.
//! \param gcode          exporter that receives all generated G-code.
//! \param firstFile      true: emit the start-code header; false: lift and move
//!                       the head clear of the previously printed object first.
void processFile(const char* input_filename, ConfigSettings& config, GCodeExport& gcode, bool firstFile)
{
    // Mirror the per-extruder offsets from the config into the exporter.
    for(unsigned int n=1; n<16; n++)
        gcode.setExtruderOffset(n, config.extruderOffset[n].p());
    gcode.setFlavor(config.gcodeFlavor);

    double t = getTime();
    log("Loading %s from disk...\n", input_filename);
    SimpleModel* m = loadModel(input_filename, config.matrix);
    if (!m)
    {
        log("Failed to load model: %s\n", input_filename);
        return;
    }
    log("Loaded from disk in %5.3fs\n", timeElapsed(t));
    log("Analyzing and optimizing model...\n");
    OptimizedModel* om = new OptimizedModel(m, Point3(config.objectPosition.X, config.objectPosition.Y, -config.objectSink));
    for(unsigned int v = 0; v < m->volumes.size(); v++)
    {
        log(" Face counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size(), (int)om->volumes[v].faces.size(), float(om->volumes[v].faces.size()) / float(m->volumes[v].faces.size()) * 100);
        log(" Vertex counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size() * 3, (int)om->volumes[v].points.size(), float(om->volumes[v].points.size()) / float(m->volumes[v].faces.size() * 3) * 100);
    }
    // The raw model is no longer needed once the optimized copy exists.
    delete m;
    log("Optimize model %5.3fs \n", timeElapsed(t));
    //om->saveDebugSTL("c:\\models\\output.stl");

    log("Slicing model...\n");
    vector<Slicer*> slicerList;
    for(unsigned int volumeIdx=0; volumeIdx < om->volumes.size(); volumeIdx++)
    {
        slicerList.push_back(new Slicer(&om->volumes[volumeIdx], config.initialLayerThickness / 2, config.layerThickness, config.fixHorrible & FIX_HORRIBLE_KEEP_NONE_CLOSED, config.fixHorrible & FIX_HORRIBLE_EXTENSIVE_STITCHING));
        //slicerList[volumeIdx]->dumpSegmentsToHTML("C:\\models\\output.html");
    }
    log("Sliced model in %5.3fs\n", timeElapsed(t));

    SliceDataStorage storage;
    fprintf(stdout,"Generating support map...\n");
    generateSupportGrid(storage.support, om, config.supportAngle, config.supportEverywhere > 0, config.supportXYDistance, config.supportZDistance);
    // Keep the model bounds; the optimized model itself can go now.
    storage.modelSize = om->modelSize;
    storage.modelMin = om->vMin;
    storage.modelMax = om->vMax;
    delete om;

    log("Generating layer parts...\n");
    for(unsigned int volumeIdx=0; volumeIdx < slicerList.size(); volumeIdx++)
    {
        storage.volumes.push_back(SliceVolumeStorage());
        createLayerParts(storage.volumes[volumeIdx], slicerList[volumeIdx], config.fixHorrible & (FIX_HORRIBLE_UNION_ALL_TYPE_A | FIX_HORRIBLE_UNION_ALL_TYPE_B | FIX_HORRIBLE_UNION_ALL_TYPE_C));
        delete slicerList[volumeIdx];
    }
    //carveMultipleVolumes(storage.volumes);
    generateMultipleVolumesOverlap(storage.volumes, config.multiVolumeOverlap);
    log("Generated layer parts in %5.3fs\n", timeElapsed(t));
    //dumpLayerparts(storage, "c:/models/output.html");

    // NOTE(review): assumes volume 0 exists and has the maximum layer count —
    // confirm this holds for all loadable models.
    const unsigned int totalLayers = storage.volumes[0].layers.size();
    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
        {
            int insetCount = config.insetCount;
            if (config.spiralizeMode && int(layerNr) < config.downSkinCount && layerNr % 2 == 1)//Add extra insets every 2 layers when spiralizing, this makes bottoms of cups watertight.
                insetCount += 5;
            generateInsets(&storage.volumes[volumeIdx].layers[layerNr], config.extrusionWidth, insetCount);
        }
        logProgress("inset",layerNr+1,totalLayers);
    }
    log("Generated inset in %5.3fs\n", timeElapsed(t));

    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        if (!config.spiralizeMode || int(layerNr) < config.downSkinCount) //Only generate up/downskin and infill for the first X layers when spiralize is choosen.
        {
            for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
            {
                generateSkins(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount, config.infillOverlap);
                generateSparse(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount);
            }
        }
        logProgress("skin",layerNr+1,totalLayers);
    }
    log("Generated up/down skin in %5.3fs\n", timeElapsed(t));
    generateSkirt(storage, config.skirtDistance, config.extrusionWidth, config.skirtLineCount, config.skirtMinLength);
    generateRaft(storage, config.raftMargin);

    // Determine a bridging angle for every part that sits on the layer below;
    // -1 marks "no bridge" (also used for the whole first layer).
    for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
    {
        for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
        {
            for(unsigned int partNr=0; partNr<storage.volumes[volumeIdx].layers[layerNr].parts.size(); partNr++)
            {
                if (layerNr > 0)
                    storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = bridgeAngle(&storage.volumes[volumeIdx].layers[layerNr].parts[partNr], &storage.volumes[volumeIdx].layers[layerNr-1]);
                else
                    storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = -1;
            }
        }
    }

    gcode.setRetractionSettings(config.retractionAmount, config.retractionSpeed, config.retractionAmountExtruderSwitch, config.minimalExtrusionBeforeRetraction);
    if (firstFile)
    {
        if (gcode.getFlavor() == GCODE_FLAVOR_ULTIGCODE)
        {
            gcode.addCode(";FLAVOR:UltiGCode");
            gcode.addCode(";TIME:<__TIME__>");
            gcode.addCode(";MATERIAL:<FILAMENT>");
        }
        gcode.addCode(config.startCode);
    } else {
        // Follow-up object: clear the previous print before moving over.
        gcode.addFanCommand(0);
        gcode.resetExtrusionValue();
        gcode.addRetraction();
        gcode.setZ(maxObjectHeight + 5000);
        gcode.addMove(Point(storage.modelMin.x, storage.modelMin.y), config.moveSpeed, 0);
    }
    gcode.addComment("total_layers=%d",totalLayers);

    GCodePathConfig skirtConfig(config.printSpeed, config.extrusionWidth, "SKIRT");
    GCodePathConfig inset0Config(config.printSpeed, config.extrusionWidth, "WALL-OUTER");
    GCodePathConfig inset1Config(config.printSpeed, config.extrusionWidth, "WALL-INNER");
    GCodePathConfig fillConfig(config.infillSpeed, config.extrusionWidth, "FILL");
    GCodePathConfig supportConfig(config.printSpeed, config.extrusionWidth, "SUPPORT");

    // Raft: two dedicated pseudo-layers (LAYER:-2 base, LAYER:-1 interface)
    // emitted before the real layers when both thicknesses are configured.
    if (config.raftBaseThickness > 0 && config.raftInterfaceThickness > 0)
    {
        GCodePathConfig raftBaseConfig(config.initialLayerSpeed, config.raftBaseLinewidth, "SUPPORT");
        GCodePathConfig raftInterfaceConfig(config.initialLayerSpeed, config.raftInterfaceLinewidth, "SUPPORT");
        {
            gcode.addComment("LAYER:-2");
            gcode.addComment("RAFT");
            GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
            gcode.setZ(config.raftBaseThickness);
            gcode.setExtrusion(config.raftBaseThickness, config.filamentDiameter, config.filamentFlow);
            gcodeLayer.addPolygonsByOptimizer(storage.raftOutline, &raftBaseConfig);

            Polygons raftLines;
            generateLineInfill(storage.raftOutline, raftLines, config.raftBaseLinewidth, config.raftLineSpacing, config.infillOverlap, 0);
            gcodeLayer.addPolygonsByOptimizer(raftLines, &raftBaseConfig);

            gcodeLayer.writeGCode(false, config.raftBaseThickness);
        }
        {
            gcode.addComment("LAYER:-1");
            gcode.addComment("RAFT");
            GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
            gcode.setZ(config.raftBaseThickness + config.raftInterfaceThickness);
            gcode.setExtrusion(config.raftInterfaceThickness, config.filamentDiameter, config.filamentFlow);

            Polygons raftLines;
            // Interface lines run at 90 degrees to the base lines.
            generateLineInfill(storage.raftOutline, raftLines, config.raftInterfaceLinewidth, config.raftLineSpacing, config.infillOverlap, 90);
            gcodeLayer.addPolygonsByOptimizer(raftLines, &raftInterfaceConfig);

            gcodeLayer.writeGCode(false, config.raftInterfaceThickness);
        }
    }

    // volumeIdx persists across layers so multi-volume printing keeps
    // rotating through the volumes instead of restarting at 0 each layer.
    int volumeIdx = 0;
    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        logProgress("export", layerNr+1, totalLayers);

        GCodePlanner gcodeLayer(gcode, config.moveSpeed, config.retractionMinimalDistance);
        gcode.addComment("LAYER:%d", layerNr);
        int32_t z = config.initialLayerThickness + layerNr * config.layerThickness;
        z += config.raftBaseThickness + config.raftInterfaceThickness;
        gcode.setZ(z);
        if (layerNr == 0)
            gcodeLayer.addPolygonsByOptimizer(storage.skirt, &skirtConfig);

        for(unsigned int volumeCnt = 0; volumeCnt < storage.volumes.size(); volumeCnt++)
        {
            if (volumeCnt > 0)
                volumeIdx = (volumeIdx + 1) % storage.volumes.size();
            SliceLayer* layer = &storage.volumes[volumeIdx].layers[layerNr];
            gcodeLayer.setExtruder(volumeIdx);

            // Order the parts by travel distance from the current head position.
            PathOrderOptimizer partOrderOptimizer(gcode.getPositionXY());
            for(unsigned int partNr=0; partNr<layer->parts.size(); partNr++)
            {
                partOrderOptimizer.addPolygon(layer->parts[partNr].insets[0][0]);
            }
            partOrderOptimizer.optimize();

            for(unsigned int partCounter=0; partCounter<partOrderOptimizer.polyOrder.size(); partCounter++)
            {
                SliceLayerPart* part = &layer->parts[partOrderOptimizer.polyOrder[partCounter]];

                if (config.enableCombing)
                    gcodeLayer.setCombBoundary(&part->combBoundery);
                else
                    gcodeLayer.setAlwaysRetract(true);
                gcodeLayer.forceRetract();
                if (config.insetCount > 0)
                {
                    if (config.spiralizeMode)
                    {
                        if (int(layerNr) >= config.downSkinCount)
                            inset0Config.spiralize = true;
                        if (int(layerNr) == config.downSkinCount && part->insets.size() > 0)
                            gcodeLayer.addPolygonsByOptimizer(part->insets[0], &inset1Config);
                    }
                    // Print insets from the innermost outwards; inset 0 is the
                    // outer wall and uses its own path config.
                    for(int insetNr=part->insets.size()-1; insetNr>-1; insetNr--)
                    {
                        if (insetNr == 0)
                            gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset0Config);
                        else
                            gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset1Config);
                    }
                }

                Polygons fillPolygons;
                // Alternate the 45-degree fill direction every other layer.
                int fillAngle = 45;
                if (layerNr & 1) fillAngle += 90;
                //int sparseSteps[1] = {config.extrusionWidth};
                //generateConcentricInfill(part->skinOutline, fillPolygons, sparseSteps, 1);
                generateLineInfill(part->skinOutline, fillPolygons, config.extrusionWidth, config.extrusionWidth, config.infillOverlap, (part->bridgeAngle > -1) ? part->bridgeAngle : fillAngle);
                //int sparseSteps[2] = {config.extrusionWidth*5, config.extrusionWidth * 0.8};
                //generateConcentricInfill(part->sparseOutline, fillPolygons, sparseSteps, 2);
                if (config.sparseInfillLineDistance > 0)
                {
                    // Wide spacing gets a crossed (grid) pattern, two passes at
                    // double spacing; otherwise a single pass at fillAngle.
                    if (config.sparseInfillLineDistance > config.extrusionWidth * 4)
                    {
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45);
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45 + 90);
                    } else {
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance, config.infillOverlap, fillAngle);
                    }
                }

                gcodeLayer.addPolygonsByOptimizer(fillPolygons, &fillConfig);

                //After a layer part, make sure the nozzle is inside the comb boundary, so we do not retract on the perimeter.
                if (!config.spiralizeMode || int(layerNr) < config.downSkinCount)
                    gcodeLayer.moveInsideCombBoundary(config.extrusionWidth * 2);
            }
            gcodeLayer.setCombBoundary(NULL);
        }
        if (storage.support.generated)
        {
            if (config.supportExtruder > -1)
                gcodeLayer.setExtruder(config.supportExtruder);
            SupportPolyGenerator supportGenerator(storage.support, z);
            for(unsigned int volumeCnt = 0; volumeCnt < storage.volumes.size(); volumeCnt++)
            {
                // NOTE(review): loop counter volumeCnt is never used in the body;
                // indexing with volumeIdx subtracts the same volume's outlines on
                // every iteration. Looks like this should be volumeCnt — confirm
                // against upstream before changing.
                SliceLayer* layer = &storage.volumes[volumeIdx].layers[layerNr];
                Polygons polys; // NOTE(review): unused local.
                for(unsigned int n=0; n<layer->parts.size(); n++)
                    supportGenerator.polygons = supportGenerator.polygons.difference(layer->parts[n].outline.offset(config.supportXYDistance));
            }
            //Contract and expand the support polygons so small sections are removed and the final polygon is smoothed a bit.
            supportGenerator.polygons = supportGenerator.polygons.offset(-1000);
            supportGenerator.polygons = supportGenerator.polygons.offset(1000);

            Polygons supportLines;
            if (config.supportLineDistance > 0)
            {
                if (config.supportLineDistance > config.extrusionWidth * 4)
                {
                    generateLineInfill(supportGenerator.polygons, supportLines, config.extrusionWidth, config.supportLineDistance*2, config.infillOverlap, 0);
                    generateLineInfill(supportGenerator.polygons, supportLines, config.extrusionWidth, config.supportLineDistance*2, config.infillOverlap, 90);
                } else {
                    generateLineInfill(supportGenerator.polygons, supportLines, config.extrusionWidth, config.supportLineDistance, config.infillOverlap, (layerNr & 1) ? 0 : 90);
                }
            }

            gcodeLayer.addPolygonsByOptimizer(supportGenerator.polygons, &supportConfig);
            gcodeLayer.addPolygonsByOptimizer(supportLines, &supportConfig);
        }

        //Finish the layer by applying speed corrections for minimal layer times and slowdown for the initial layer.
        if (int(layerNr) < config.initialSpeedupLayers)
        {
            // Linearly ramp the extrude speed factor from the layer-0 factor up
            // to 100% over the first initialSpeedupLayers layers.
            int n = config.initialSpeedupLayers;
            int layer0Factor = config.initialLayerSpeed * 100 / config.printSpeed;
            gcodeLayer.setExtrudeSpeedFactor((layer0Factor * (n - layerNr) + 100 * (layerNr)) / n);
            if (layerNr == 0)//On the first layer, also slow down the travel
                gcodeLayer.setTravelSpeedFactor(layer0Factor);
        }
        gcodeLayer.forceMinimalLayerTime(config.minimalLayerTime, config.minimalFeedrate);
        if (layerNr == 0)
            gcode.setExtrusion(config.initialLayerThickness, config.filamentDiameter, config.filamentFlow);
        else
            gcode.setExtrusion(config.layerThickness, config.filamentDiameter, config.filamentFlow);
        // Fan speed scales between min and max depending on how much the layer
        // was slowed down; at factor <= 50% the fan runs at maximum.
        int fanSpeed = config.fanSpeedMin;
        if (gcodeLayer.getExtrudeSpeedFactor() <= 50)
        {
            fanSpeed = config.fanSpeedMax;
        } else {
            int n = gcodeLayer.getExtrudeSpeedFactor() - 50;
            fanSpeed = config.fanSpeedMin * n / 50 + config.fanSpeedMax * (50 - n) / 50;
        }
        if (int(layerNr) < config.fanFullOnLayerNr)
        {
            //Slow down the fan on the layers below the [fanFullOnLayerNr], where layer 0 is speed 0.
            fanSpeed = fanSpeed * layerNr / config.fanFullOnLayerNr;
        }
        gcode.addFanCommand(fanSpeed);

        gcodeLayer.writeGCode(config.coolHeadLift > 0, int(layerNr) > 0 ? config.layerThickness : config.initialLayerThickness);
    }

    /* support debug
    for(int32_t y=0; y<storage.support.gridHeight; y++)
    {
        for(int32_t x=0; x<storage.support.gridWidth; x++)
        {
            unsigned int n = x+y*storage.support.gridWidth;
            if (storage.support.grid[n].size() < 1) continue;
            int32_t z = storage.support.grid[n][0].z;
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, z), z);
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
        }
    }
    //*/

    log("Wrote layers in %5.2fs.\n", timeElapsed(t));
    gcode.tellFileSize();
    gcode.addFanCommand(0);

    logProgress("process", 1, 1);
    log("Total time elapsed %5.2fs.\n", timeElapsed(t,true));

    //Store the object height for when we are printing multiple objects, as we need to clear every one of them when moving to the next position.
    maxObjectHeight = std::max(maxObjectHeight, storage.modelSize.z);
}
//! @brief extract currently opened archive //! @brief dest path to extract archive to, can be filename when extracting a //! single file. //! @brief file file to extract from archive, full archive if empty. //! @return true on success, false otherwise bool ZipUtil::extractArchive(QString& dest, QString file) { qDebug() << "[ZipUtil] extractArchive" << dest << file; bool result = true; if(!m_zip) { return false; } QuaZipFile *currentFile = new QuaZipFile(m_zip); int entries = m_zip->getEntriesCount(); int current = 0; // construct the filename when extracting a single file from an archive. // if the given destination is a full path use it as output name, // otherwise use it as path to place the file as named in the archive. QString singleoutfile; if(!file.isEmpty() && QFileInfo(dest).isDir()) { singleoutfile = dest + "/" + file; } else if(!file.isEmpty()){ singleoutfile = dest; } for(bool more = m_zip->goToFirstFile(); more; more = m_zip->goToNextFile()) { ++current; // if the entry is a path ignore it. Path existence is ensured separately. if(m_zip->getCurrentFileName().split("/").last() == "") continue; // some tools set the MS-DOS file attributes. 
Check those for D flag, // since in some cases a folder entry does not end with a / QuaZipFileInfo fi; currentFile->getFileInfo(&fi); if(fi.externalAttr & 0x10) // FAT entry bit 4 indicating directory continue; QString outfilename; if(!singleoutfile.isEmpty() && QFileInfo(m_zip->getCurrentFileName()).fileName() == file) { outfilename = singleoutfile; } else if(singleoutfile.isEmpty()) { outfilename = dest + "/" + m_zip->getCurrentFileName(); } if(outfilename.isEmpty()) continue; QFile outputFile(outfilename); // make sure the output path exists if(!QDir().mkpath(QFileInfo(outfilename).absolutePath())) { result = false; emit logItem(tr("Creating output path failed"), LOGERROR); qDebug() << "[ZipUtil] creating output path failed for:" << outfilename; break; } if(!outputFile.open(QFile::WriteOnly)) { result = false; emit logItem(tr("Creating output file failed"), LOGERROR); qDebug() << "[ZipUtil] creating output file failed:" << outfilename; break; } currentFile->open(QIODevice::ReadOnly); outputFile.write(currentFile->readAll()); if(currentFile->getZipError() != UNZ_OK) { result = false; emit logItem(tr("Error during Zip operation"), LOGERROR); qDebug() << "[ZipUtil] QuaZip error:" << currentFile->getZipError() << "on file" << currentFile->getFileName(); break; } currentFile->close(); outputFile.close(); emit logProgress(current, entries); } delete currentFile; emit logProgress(1, 1); return result; }
void TalkGenerator::encProgress(int value) { emit logProgress(value, encFutureWatcher.progressMaximum()); }
//! \brief Build the wireframe ("weave") print data for \p object.
//!
//! Stages: slice all meshes -> find the first non-empty layer (becomes the
//! bottom outline) -> chainify each higher layer into connectable polygon
//! chains -> create horizontal fills (roofs/floors) -> connect consecutive
//! layers -> fill the top roofs and the bottom infill.
//!
//! \param object        the object to weave; its meshes are sliced here.
//! \param commandSocket optional; when non-null, intermediate polygons are
//!                      sent out for visualization.
void Weaver::weave(PrintObject* object, CommandSocket* commandSocket)
{
    int maxz = object->max().z;

    // Number of slicing planes between the initial layer and the model top.
    int layer_count = (maxz - initial_layer_thickness) / connectionHeight + 1;

    DEBUG_SHOW(layer_count);

    std::vector<cura::Slicer*> slicerList;
    // NOTE(review): these Slicer objects are never deleted in this function —
    // apparent leak; confirm ownership isn't transferred elsewhere.
    for(Mesh& mesh : object->meshes)
    {
        cura::Slicer* slicer = new cura::Slicer(&mesh, initial_layer_thickness, connectionHeight, layer_count, mesh.getSettingBoolean("meshfix_keep_open_polygons"), mesh.getSettingBoolean("meshfix_extensive_stitching"));
        slicerList.push_back(slicer);
    }

    int starting_layer_idx;
    { // find first non-empty layer
        for (starting_layer_idx = 0; starting_layer_idx < layer_count; starting_layer_idx++)
        {
            Polygons parts;
            for (cura::Slicer* slicer : slicerList)
                parts.add(slicer->layers[starting_layer_idx].polygonList);
            if (parts.size() > 0)
                break;
        }
        if (starting_layer_idx > 0)
        {
            logError("First %i layers are empty!\n", starting_layer_idx);
        }
    }

    std::cerr<< "chainifying layers..." << std::endl;
    {
        // z of the first layer that actually produced chains; -1 = not set yet.
        int starting_z = -1;
        // The first non-empty layer becomes the bottom outline of the wireframe.
        for (cura::Slicer* slicer : slicerList)
            wireFrame.bottom_outline.add(slicer->layers[starting_layer_idx].polygonList);

        if (commandSocket)
            commandSocket->sendPolygons(Inset0Type, 0, wireFrame.bottom_outline);

        wireFrame.z_bottom = slicerList[0]->layers[starting_layer_idx].z;

        // Seed point for chainification: centre of the bottom outline, or the
        // centre of the object's bounding box when the outline is empty.
        Point starting_point_in_layer;
        if (wireFrame.bottom_outline.size() > 0)
            starting_point_in_layer = (wireFrame.bottom_outline.max() + wireFrame.bottom_outline.min()) / 2;
        else
            starting_point_in_layer = (Point(0,0) + object->max() + object->min()) / 2;

        for (int layer_idx = starting_layer_idx + 1; layer_idx < layer_count; layer_idx++)
        {
            logProgress("inset", layer_idx+1, layer_count); // abuse the progress system of the normal mode of CuraEngine

            // Collect this layer's polygons from all meshes.
            Polygons parts1;
            for (cura::Slicer* slicer : slicerList)
                parts1.add(slicer->layers[layer_idx].polygonList);

            Polygons chainified;

            chainify_polygons(parts1, starting_point_in_layer, chainified, false);

            if (commandSocket)
                commandSocket->sendPolygons(Inset0Type, layer_idx - starting_layer_idx, chainified);

            if (chainified.size() > 0)
            {
                if (starting_z == -1) starting_z = slicerList[0]->layers[layer_idx-1].z;
                wireFrame.layers.emplace_back();
                WeaveLayer& layer = wireFrame.layers.back();

                // Layer heights are stored relative to starting_z.
                layer.z0 = slicerList[0]->layers[layer_idx-1].z - starting_z;
                layer.z1 = slicerList[0]->layers[layer_idx].z - starting_z;

                layer.supported = chainified;

                // Continue chainification from where this layer's chain ended.
                starting_point_in_layer = layer.supported.back().back();
            }
        }
    }

    std::cerr<< "finding horizontal parts..." << std::endl;
    {
        // Walk upwards, filling the horizontal areas between each layer and
        // the one above it; lower_top_parts tracks the layer below.
        Polygons* lower_top_parts = &wireFrame.bottom_outline;
        for (unsigned int layer_idx = 0; layer_idx < wireFrame.layers.size(); layer_idx++)
        {
            logProgress("skin", layer_idx+1, wireFrame.layers.size()); // abuse the progress system of the normal mode of CuraEngine
            WeaveLayer& layer = wireFrame.layers[layer_idx];

            Polygons empty;
            Polygons& layer_above = (layer_idx+1 < wireFrame.layers.size())? wireFrame.layers[layer_idx+1].supported : empty;

            createHorizontalFill(*lower_top_parts, layer, layer_above, layer.z1);
            lower_top_parts = &layer.supported;
        }
    }
    // at this point layer.supported still only contains the polygons to be connected
    // when connecting layers, we further add the supporting polygons created by the roofs

    std::cerr<< "connecting layers..." << std::endl;
    {
        Polygons* lower_top_parts = &wireFrame.bottom_outline;
        int last_z = wireFrame.z_bottom;
        for (unsigned int layer_idx = 0; layer_idx < wireFrame.layers.size(); layer_idx++) // use top of every layer but the last
        {
            WeaveLayer& layer = wireFrame.layers[layer_idx];

            connect_polygons(*lower_top_parts, last_z, layer.supported, layer.z1, layer);
            // After connecting, the roof outlines become part of what supports
            // the next layer up.
            layer.supported.add(layer.roofs.roof_outlines);

            lower_top_parts = &layer.supported;

            last_z = layer.z1;
        }
    }

    // NOTE(review): .back()/.front() below assume wireFrame.layers is non-empty;
    // an object producing no chainified layers would crash here — confirm that
    // cannot happen for valid input.
    { // roofs:
        WeaveLayer& top_layer = wireFrame.layers.back();
        Polygons to_be_supported; // empty for the top layer
        fillRoofs(top_layer.supported, to_be_supported, -1, top_layer.z1, top_layer.roofs);
    }

    { // bottom:
        Polygons to_be_supported; // is empty for the bottom layer, cause the order of insets doesn't really matter (in a sense everything is to be supported)
        fillRoofs(wireFrame.bottom_outline, to_be_supported, -1, wireFrame.layers.front().z0, wireFrame.bottom_infill);
    }
}
//! \brief Does needed Tasks when we need to abort. Cleans up Files. Stops the Logger, Stops TTS and Encoder //! void TalkFileCreator::doAbort() { cleanup(); emit logProgress(0,1); emit done(true); }
//! \brief Creates Talkfiles. //! TalkGenerator::Status TalkGenerator::process(QList<TalkEntry>* list,int wavtrimth) { m_abort = false; QString errStr; bool warnings = false; //tts emit logItem(tr("Starting TTS Engine"),LOGINFO); m_tts = TTSBase::getTTS(this,RbSettings::value(RbSettings::Tts).toString()); if(!m_tts->start(&errStr)) { emit logItem(errStr.trimmed(),LOGERROR); emit logItem(tr("Init of TTS engine failed"),LOGERROR); emit done(true); return eERROR; } QCoreApplication::processEvents(); // Encoder emit logItem(tr("Starting Encoder Engine"),LOGINFO); m_enc = EncBase::getEncoder(this,RbSettings::value(RbSettings::CurEncoder).toString()); if(!m_enc->start()) { emit logItem(tr("Init of Encoder engine failed"),LOGERROR); emit done(true); m_tts->stop(); return eERROR; } QCoreApplication::processEvents(); emit logProgress(0,0); // Voice entries emit logItem(tr("Voicing entries..."),LOGINFO); Status voiceStatus= voiceList(list,wavtrimth); if(voiceStatus == eERROR) { m_tts->stop(); m_enc->stop(); emit done(true); return eERROR; } else if( voiceStatus == eWARNING) warnings = true; QCoreApplication::processEvents(); // Encoding Entries emit logItem(tr("Encoding files..."),LOGINFO); Status encoderStatus = encodeList(list); if( encoderStatus == eERROR) { m_tts->stop(); m_enc->stop(); emit done(true); return eERROR; } else if( voiceStatus == eWARNING) warnings = true; QCoreApplication::processEvents(); m_tts->stop(); m_enc->stop(); emit logProgress(1,1); if(warnings) return eWARNING; return eOK; }
//! \brief Voices a List of string //! TalkGenerator::Status TalkGenerator::voiceList(QList<TalkEntry>* list,int wavtrimth) { int progressMax = list->size(); int m_progress = 0; emit logProgress(m_progress,progressMax); QStringList errors; QStringList dublicates; bool warnings = false; for(int i=0; i < list->size(); i++) { if(m_abort) { emit logItem(tr("Voicing aborted"), LOGERROR); return eERROR; } // skip dublicated wav entrys if(!dublicates.contains(list->at(i).wavfilename)) dublicates.append(list->at(i).wavfilename); else { qDebug() << "dublicate skipped"; (*list)[i].voiced = true; emit logProgress(++m_progress,progressMax); continue; } // skip already voiced entrys if(list->at(i).voiced == true) { emit logProgress(++m_progress,progressMax); continue; } // skip entry whith empty text if(list->at(i).toSpeak == "") { emit logProgress(++m_progress,progressMax); continue; } // voice entry QString error; qDebug() << "voicing: " << list->at(i).toSpeak << "to" << list->at(i).wavfilename; TTSStatus status = m_tts->voice(list->at(i).toSpeak,list->at(i).wavfilename, &error); if(status == Warning) { warnings = true; emit logItem(tr("Voicing of %1 failed: %2").arg(list->at(i).toSpeak).arg(error), LOGWARNING); } else if (status == FatalError) { emit logItem(tr("Voicing of %1 failed: %2").arg(list->at(i).toSpeak).arg(error), LOGERROR); return eERROR; } else (*list)[i].voiced = true; //wavetrim if needed if(wavtrimth != -1) { char buffer[255]; wavtrim(list->at(i).wavfilename.toLocal8Bit().data(),wavtrimth,buffer,255); } emit logProgress(++m_progress,progressMax); QCoreApplication::processEvents(); } if(warnings) return eWARNING; else return eOK; }
// Slices one model file and appends the resulting G-code to `gcode`.
//
// Pipeline (each stage logs its elapsed time):
//   load model -> optimize (vertex meld) -> slice into layers -> layer parts
//   -> insets -> up/down skins + sparse infill -> skirt/raft -> bridge angles
//   -> per-layer G-code export (raft, walls/infill, support, fan/speed).
//
// Parameters:
//   input_filename - path of the model file to load.
//   config         - all slicing/printing settings (read only).
//   gcode          - G-code sink; also carries retraction/extrusion state.
//   firstFile      - true: emit config.startCode; false: retract and hop over
//                    the previously printed object before starting this one.
void processFile(const char* input_filename, Config& config, GCodeExport& gcode, bool firstFile)
{
    // Apply per-extruder XY offsets for extruders 1..15.
    // NOTE(review): assumes config.extruderOffset has at least 16 entries — confirm.
    for(unsigned int n=1; n<16;n++)
        gcode.setExtruderOffset(n, config.extruderOffset[n]);

    double t = getTime();
    log("Loading %s from disk...\n", input_filename);
    SimpleModel* m = loadModel(input_filename, config.matrix);
    if (!m)
    {
        log("Failed to load model: %s\n", input_filename);
        return;
    }
    log("Loaded from disk in %5.3fs\n", timeElapsed(t));
    log("Analyzing and optimizing model...\n");
    // Re-index the triangle soup; the model is translated to objectPosition
    // and sunk by objectSink in Z.
    OptimizedModel* om = new OptimizedModel(m, Point3(config.objectPosition.X, config.objectPosition.Y, -config.objectSink));
    for(unsigned int v = 0; v < m->volumes.size(); v++)
    {
        log(" Face counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size(), (int)om->volumes[v].faces.size(), float(om->volumes[v].faces.size()) / float(m->volumes[v].faces.size()) * 100);
        log(" Vertex counts: %i -> %i %0.1f%%\n", (int)m->volumes[v].faces.size() * 3, (int)om->volumes[v].points.size(), float(om->volumes[v].points.size()) / float(m->volumes[v].faces.size() * 3) * 100);
    }
    // The raw model is no longer needed once the optimized copy exists.
    delete m;
    log("Optimize model %5.3fs \n", timeElapsed(t));
    //om->saveDebugSTL("c:\\models\\output.stl");

    log("Slicing model...\n");
    // One Slicer per volume; the first layer is cut at half the initial
    // layer thickness.
    vector<Slicer*> slicerList;
    for(unsigned int volumeIdx=0; volumeIdx < om->volumes.size(); volumeIdx++)
    {
        slicerList.push_back(new Slicer(&om->volumes[volumeIdx], config.initialLayerThickness / 2, config.layerThickness, config.fixHorrible & FIX_HORRIBLE_KEEP_NONE_CLOSED, config.fixHorrible & FIX_HORRIBLE_EXTENSIVE_STITCHING));
        //slicerList[volumeIdx]->dumpSegments("C:\\models\\output.html");
    }
    log("Sliced model in %5.3fs\n", timeElapsed(t));

    SliceDataStorage storage;
    // Support is enabled by a non-negative support angle.
    if (config.supportAngle > -1)
    {
        fprintf(stdout,"Generating support map...\n");
        generateSupportGrid(storage.support, om, config.initialLayerThickness / 2, config.layerThickness);
    }
    storage.modelSize = om->modelSize;
    storage.modelMin = om->vMin;
    storage.modelMax = om->vMax;
    delete om;

    log("Generating layer parts...\n");
    // Convert slice segments into closed layer parts; slicers are consumed.
    for(unsigned int volumeIdx=0; volumeIdx < slicerList.size(); volumeIdx++)
    {
        storage.volumes.push_back(SliceVolumeStorage());
        createLayerParts(storage.volumes[volumeIdx], slicerList[volumeIdx], config.fixHorrible & (FIX_HORRIBLE_UNION_ALL_TYPE_A | FIX_HORRIBLE_UNION_ALL_TYPE_B));
        delete slicerList[volumeIdx];
    }
    //carveMultipleVolumes(storage.volumes);
    generateMultipleVolumesOverlap(storage.volumes, config.multiVolumeOverlap);
    log("Generated layer parts in %5.3fs\n", timeElapsed(t));
    //dumpLayerparts(storage, "c:/models/output.html");

    // NOTE(review): layer count is taken from volume 0 only — assumes all
    // volumes produce the same number of layers; confirm.
    const unsigned int totalLayers = storage.volumes[0].layers.size();
    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
        {
            generateInsets(&storage.volumes[volumeIdx].layers[layerNr], config.extrusionWidth, config.insetCount);
        }
        logProgress("inset",layerNr+1,totalLayers);
    }
    log("Generated inset in %5.3fs\n", timeElapsed(t));
    //dumpLayerparts(storage, "c:/models/output.html");

    // Solid top/bottom skins and sparse-infill regions per layer.
    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
        {
            generateSkins(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount, config.infillOverlap);
            generateSparse(layerNr, storage.volumes[volumeIdx], config.extrusionWidth, config.downSkinCount, config.upSkinCount);
        }
        logProgress("skin",layerNr+1,totalLayers);
    }
    log("Generated up/down skin in %5.3fs\n", timeElapsed(t));
    generateSkirt(storage, config.skirtDistance, config.extrusionWidth, config.skirtLineCount);
    generateRaft(storage, config.raftMargin);
    log("Generated skirt and raft in %5.3fs\n", timeElapsed(t));

    // Precompute the bridge angle for every part, based on the layer below;
    // -1 marks "no bridge" (also used for the whole first layer).
    for(unsigned int volumeIdx=0; volumeIdx<storage.volumes.size(); volumeIdx++)
    {
        for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
        {
            for(unsigned int partNr=0; partNr<storage.volumes[volumeIdx].layers[layerNr].parts.size(); partNr++)
            {
                if (layerNr > 0)
                    storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = bridgeAngle(&storage.volumes[volumeIdx].layers[layerNr].parts[partNr], &storage.volumes[volumeIdx].layers[layerNr-1]);
                else
                    storage.volumes[volumeIdx].layers[layerNr].parts[partNr].bridgeAngle = -1;
            }
        }
    }
    log("Stored volumes in %5.3fs\n", timeElapsed(t));

    gcode.setRetractionSettings(config.retractionAmount, config.retractionSpeed, config.retractionAmountExtruderSwitch);
    if (firstFile)
    {
        gcode.addCode(config.startCode);
    }else{
        // Follow-up object: retract, lift above the tallest object printed
        // so far (+5mm, see maxObjectHeight below), and move into position.
        gcode.resetExtrusionValue();
        gcode.addRetraction();
        gcode.setZ(maxObjectHeight + 5000);
        gcode.addMove(config.objectPosition, config.moveSpeed, 0);
    }
    gcode.addComment("total_layers=%d",totalLayers);
    log("Added general info to gcode in %5.3fs\n", timeElapsed(t));

    // Path configs (speed, line width, G-code comment tag) per feature type.
    GCodePathConfig skirtConfig(config.printSpeed, config.extrusionWidth, "SKIRT");
    GCodePathConfig inset0Config(config.printSpeed, config.extrusionWidth, "WALL-OUTER");
    GCodePathConfig inset1Config(config.printSpeed, config.extrusionWidth, "WALL-INNER");
    GCodePathConfig fillConfig(config.infillSpeed, config.extrusionWidth, "FILL");
    GCodePathConfig supportConfig(config.printSpeed, config.supportLineWidth, "SUPPORT");

    // Raft: two extra layers (-2 base, -1 interface) below the object.
    if (config.raftBaseThickness > 0 && config.raftInterfaceThickness > 0)
    {
        GCodePathConfig raftBaseConfig(config.initialLayerSpeed, config.raftBaseLinewidth, "SUPPORT");
        GCodePathConfig raftInterfaceConfig(config.initialLayerSpeed, config.raftInterfaceLinewidth, "SUPPORT");
        {
            gcode.addComment("LAYER:-2");
            gcode.addComment("RAFT");
            GCodePlanner gcodeLayer(gcode, config.moveSpeed);
            gcode.setZ(config.raftBaseThickness);
            gcode.setExtrusion(config.raftBaseThickness, config.filamentDiameter, config.filamentFlow);
            gcodeLayer.addPolygonsByOptimizer(storage.raftOutline, &raftBaseConfig);

            Polygons raftLines;
            generateLineInfill(storage.raftOutline, raftLines, config.raftBaseLinewidth, config.raftLineSpacing, config.infillOverlap, 0);
            gcodeLayer.addPolygonsByOptimizer(raftLines, &raftBaseConfig);

            gcodeLayer.writeGCode(false);
        }
        {
            gcode.addComment("LAYER:-1");
            gcode.addComment("RAFT");
            GCodePlanner gcodeLayer(gcode, config.moveSpeed);
            gcode.setZ(config.raftBaseThickness + config.raftInterfaceThickness);
            gcode.setExtrusion(config.raftInterfaceThickness, config.filamentDiameter, config.filamentFlow);

            // Interface lines run at 90 degrees to the base lines.
            Polygons raftLines;
            generateLineInfill(storage.raftOutline, raftLines, config.raftInterfaceLinewidth, config.raftLineSpacing, config.infillOverlap, 90);
            gcodeLayer.addPolygonsByOptimizer(raftLines, &raftInterfaceConfig);

            gcodeLayer.writeGCode(false);
        }
    }

    // Main export loop: one GCodePlanner per layer.
    int volumeIdx = 0;
    for(unsigned int layerNr=0; layerNr<totalLayers; layerNr++)
    {
        logProgress("export", layerNr+1, totalLayers);
        log("Handling layer %u out of %u \n", layerNr+1, totalLayers);
        GCodePlanner gcodeLayer(gcode, config.moveSpeed);
        gcode.addComment("LAYER:%d", layerNr);
        int32_t z = config.initialLayerThickness + layerNr * config.layerThickness;
        z += config.raftBaseThickness + config.raftInterfaceThickness;
        gcode.setZ(z);
        //if (layerNr == 0)
        //    gcodeLayer.addPolygonsByOptimizer(storage.skirt, &skirtConfig);
        //log("Mark1 in %5.3fs\n", timeElapsed(t));
        for(unsigned int volumeCnt = 0; volumeCnt < storage.volumes.size(); volumeCnt++)
        {
            log(" Going through volume %u out of %u \n", volumeCnt+1, storage.volumes.size());
            // volumeIdx persists across layers so the volume (extruder) order
            // rotates instead of resetting each layer.
            if (volumeCnt > 0)
                volumeIdx = (volumeIdx + 1) % storage.volumes.size();
            SliceLayer* layer = &storage.volumes[volumeIdx].layers[layerNr];
            gcodeLayer.setExtruder(volumeIdx);
            // Order parts by travel distance from the current head position,
            // using each part's outer inset as the representative polygon.
            PathOptimizer partOrderOptimizer(gcode.getPositionXY());
            for(unsigned int partNr=0; partNr<layer->parts.size(); partNr++)
            {
                partOrderOptimizer.addPolygon(layer->parts[partNr].insets[0][0]);
            }
            log("partOrderOptimizer polygons %u \n", partOrderOptimizer.polygons.size());
            log("partOrderOptimizer polyorder %u \n", partOrderOptimizer.polyOrder.size());
            partOrderOptimizer.optimize();
            log("POST OPTIM partOrderOptimizer polygons %u \n", partOrderOptimizer.polygons.size());
            log("POST OPTIM partOrderOptimizer polyorder %u \n", partOrderOptimizer.polyOrder.size());
            //log("POST OPTIM polyorder at 0 %u \n", partOrderOptimizer.polyOrder[0]);
            // NOTE(review): leftover debug trace.
            log("toto\n");
            for(unsigned int partCounter=0; partCounter<partOrderOptimizer.polyOrder.size(); partCounter++)
            {
                //log(" Going through part %u out of %u \n", partCounter+1, partOrderOptimizer.polyOrder.size());
                SliceLayerPart* part = &layer->parts[partOrderOptimizer.polyOrder[partCounter]];
                log("order index %u \n", partOrderOptimizer.polyOrder[partCounter]);
                // Debug probe of the first skin-outline point.
                // NOTE(review): "%f" is used with p0.X — confirm p0.X is a
                // floating-point type, otherwise this is undefined behavior.
                // The catch(int) can only catch a thrown int, not C++
                // standard-library exceptions.
                try
                {
                    if(part->skinOutline.size()>0)
                    {
                        if(part->skinOutline[0].size()>0)
                        {
                            Point p0 = (part->skinOutline)[0][0];
                            log("bla %f pof\n",p0.X);
                        }
                    }
                }
                catch (int e)
                {
                    log("An exception occurred. Exception Nr.%i \n",e);
                }
                // Wall (inset) printing is currently disabled:
                /*gcodeLayer.setCombBoundary(&part->combBoundery);
                gcodeLayer.forceRetract();
                if (config.insetCount > 0)
                {
                    for(int insetNr=part->insets.size()-1; insetNr>-1; insetNr--)
                    {
                        if (insetNr == 0)
                            gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset0Config);
                        else
                            gcodeLayer.addPolygonsByOptimizer(part->insets[insetNr], &inset1Config);
                    }
                }*/
                // Infill direction alternates 45/135 degrees per layer;
                // a detected bridge overrides it with the bridge angle.
                Polygons fillPolygons;
                int fillAngle = 45;
                if (layerNr & 1)
                    fillAngle += 90;
                //int sparseSteps[1] = {config.extrusionWidth};
                //generateConcentricInfill(part->skinOutline, fillPolygons, sparseSteps, 1);
                log("Passing skinOutline of size %u to generator\n", (part->skinOutline).size());
                // Solid skin: line spacing equals the extrusion width.
                generateLineInfill(part->skinOutline, fillPolygons, config.extrusionWidth, config.extrusionWidth, config.infillOverlap, (part->bridgeAngle > -1) ? part->bridgeAngle : fillAngle);
                //int sparseSteps[2] = {config.extrusionWidth*5, config.extrusionWidth * 0.8};
                //generateConcentricInfill(part->sparseOutline, fillPolygons, sparseSteps, 2);
                //log("Mark1-2: after infillLineGen\n");
                if (config.sparseInfillLineDistance > 0)
                {
                    // Low densities (spacing > 4x line width) get a crosshatch:
                    // two passes at double spacing, 90 degrees apart.
                    if (config.sparseInfillLineDistance > config.extrusionWidth * 4)
                    {
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45);
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance * 2, config.infillOverlap, 45 + 90);
                    }
                    else
                    {
                        generateLineInfill(part->sparseOutline, fillPolygons, config.extrusionWidth, config.sparseInfillLineDistance, config.infillOverlap, fillAngle);
                    }
                }
                //log("Mark1-3: after before adding polygons\n");
                gcodeLayer.addPolygonsByOptimizer(fillPolygons, &fillConfig);
            }
            gcodeLayer.setCombBoundary(NULL);
        }
        //log("Mark2: before supportAngle\n");
        if (config.supportAngle > -1)
        {
            SupportPolyGenerator supportGenerator(storage.support, z, config.supportAngle, config.supportEverywhere > 0, true);
            gcodeLayer.addPolygonsByOptimizer(supportGenerator.polygons, &supportConfig);
            // Extra support pass on the first layer only.
            if (layerNr == 0)
            {
                SupportPolyGenerator supportGenerator2(storage.support, z, config.supportAngle, config.supportEverywhere > 0, false);
                gcodeLayer.addPolygonsByOptimizer(supportGenerator2.polygons, &supportConfig);
            }
        }
        //log("Mark2: before speedup\n");
        //Finish the layer by applying speed corrections for minimal layer times and slowdown for the initial layer.
        // Speed factor ramps linearly from the layer-0 factor up to 100%
        // over the first initialSpeedupLayers layers.
        if (int(layerNr) < config.initialSpeedupLayers)
        {
            int n = config.initialSpeedupLayers;
            int layer0Factor = config.initialLayerSpeed * 100 / config.printSpeed;
            gcodeLayer.setSpeedFactor((layer0Factor * (n - layerNr) + 100 * (layerNr)) / n);
        }
        gcodeLayer.forceMinimalLayerTime(config.minimalLayerTime, config.minimalFeedrate);
        if (layerNr == 0)
            gcode.setExtrusion(config.initialLayerThickness, config.filamentDiameter, config.filamentFlow);
        else
            gcode.setExtrusion(config.layerThickness, config.filamentDiameter, config.filamentFlow);
        //log("Mark3: before fan on\n");
        if (int(layerNr) >= config.fanOnLayerNr)
        {
            // Fan speed scales between min and max with how much the layer
            // was slowed down (speed factor 50% or less => full fan).
            int speed = config.fanSpeedMin;
            if (gcodeLayer.getSpeedFactor() <= 50)
            {
                speed = config.fanSpeedMax;
            }else{
                int n = gcodeLayer.getSpeedFactor() - 50;
                speed = config.fanSpeedMin * n / 50 + config.fanSpeedMax * (50 - n) / 50;
            }
            gcode.addFanCommand(speed);
        }else{
            gcode.addFanCommand(0);
        }
        //log("Finished layer in %5.3fs\n", timeElapsed(t));
        gcodeLayer.writeGCode(config.coolHeadLift > 0);
        //log("Finished writing layer in %5.3fs\n", timeElapsed(t));
    }
    /* support debug
    for(int32_t y=0; y<storage.support.gridHeight; y++)
    {
        for(int32_t x=0; x<storage.support.gridWidth; x++)
        {
            unsigned int n = x+y*storage.support.gridWidth;
            if (storage.support.grid[n].size() < 1) continue;
            int32_t z = storage.support.grid[n][0].z;
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, z), z);
            gcode.addMove(Point3(x * storage.support.gridScale + storage.support.gridOffset.X, y * storage.support.gridScale + storage.support.gridOffset.Y, 0), 0);
        }
    }
    //*/
    log("Wrote layers in %5.2fs.\n", timeElapsed(t));
    gcode.tellFileSize();
    gcode.addFanCommand(0);

    logProgress("process", 1, 1);
    log("Total time elapsed %5.2fs.\n", timeElapsed(t,true));

    //Store the object height for when we are printing multiple objects, as we need to clear every one of them when moving to the next position.
    maxObjectHeight = std::max(maxObjectHeight, storage.modelSize.z);
}
// Builds an indexed copy of a raw triangle volume: vertices closer than
// MELD_DIST are merged into a single point, degenerate and duplicate faces
// are dropped, and for every face the neighbouring face across each edge
// ("touching") is resolved.
//
// Parameters:
//   volume - raw input volume whose faces are converted.
//   model  - owning OptimizedModel, stored as a back-reference.
OptimizedVolume::OptimizedVolume(SimpleVolume* volume, OptimizedModel* model)
: model(model)
{
    points.reserve(volume->faces.size() * 3);
    faces.reserve(volume->faces.size());

    // Spatial bucket key -> indices of points already stored in that bucket.
    std::map<uint32_t, std::vector<uint32_t> > indexMap;

    double t = getTime();
    for(uint32_t i=0; i<volume->faces.size(); i++)
    {
        OptimizedFace f;
        // Report progress at most every 1000 faces, and only once the pass
        // has been running for more than 2 seconds.
        if((i%1000==0) && (getTime()-t)>2.0)
            logProgress("optimized", i + 1, volume->faces.size());
        for(uint32_t j=0; j<3; j++)
        {
            Point3 p = volume->faces[i].v[j];
            // Quantize the coordinates to MELD_DIST-sized cells and fold the
            // three cell indices into one bucket key (10 bits apart).
            // NOTE(review): two points within MELD_DIST that fall into
            // adjacent cells land in different buckets and are not merged —
            // presumably an accepted approximation; confirm.
            int hash = ((p.x + MELD_DIST/2) / MELD_DIST) ^ (((p.y + MELD_DIST/2) / MELD_DIST) << 10) ^ (((p.z + MELD_DIST/2) / MELD_DIST) << 20);
            uint32_t idx;
            bool add = true;
            // Reuse an existing point from this bucket if one is within
            // MELD_DIST of p.
            for(unsigned int n = 0; n < indexMap[hash].size(); n++)
            {
                if ((points[indexMap[hash][n]].p - p).testLength(MELD_DIST))
                {
                    idx = indexMap[hash][n];
                    add = false;
                    break;
                }
            }
            if (add)
            {
                // New unique point: remember it under this bucket key.
                indexMap[hash].push_back(points.size());
                idx = points.size();
                points.push_back(p);
            }
            f.index[j] = idx;
        }
        // Keep the face only if its three corners are distinct points
        // (melding may have collapsed a sliver triangle).
        if (f.index[0] != f.index[1] && f.index[0] != f.index[2] && f.index[1] != f.index[2])
        {
            //Check if there is a face with the same points
            // (a face index appearing in all three corners' face lists).
            bool duplicate = false;
            for(unsigned int _idx0 = 0; _idx0 < points[f.index[0]].faceIndexList.size(); _idx0++)
            {
                for(unsigned int _idx1 = 0; _idx1 < points[f.index[1]].faceIndexList.size(); _idx1++)
                {
                    for(unsigned int _idx2 = 0; _idx2 < points[f.index[2]].faceIndexList.size(); _idx2++)
                    {
                        if (points[f.index[0]].faceIndexList[_idx0] == points[f.index[1]].faceIndexList[_idx1] && points[f.index[0]].faceIndexList[_idx0] == points[f.index[2]].faceIndexList[_idx2])
                            duplicate = true;
                    }
                }
            }
            if (!duplicate)
            {
                // Register the new face with each of its corner points.
                points[f.index[0]].faceIndexList.push_back(faces.size());
                points[f.index[1]].faceIndexList.push_back(faces.size());
                points[f.index[2]].faceIndexList.push_back(faces.size());
                faces.push_back(f);
            }
        }
    }
    //fprintf(stdout, "\rAll faces are optimized in %5.1fs.\n",timeElapsed(t));

    // Adjacency pass: for each face edge, find the face sharing that edge.
    // getFaceIdxWithPoints returns -1 for an open (unshared) edge.
    int openFacesCount = 0;
    for(unsigned int i=0;i<faces.size();i++)
    {
        OptimizedFace* f = &faces[i];
        f->touching[0] = getFaceIdxWithPoints(f->index[0], f->index[1], i);
        f->touching[1] = getFaceIdxWithPoints(f->index[1], f->index[2], i);
        f->touching[2] = getFaceIdxWithPoints(f->index[2], f->index[0], i);
        if (f->touching[0] == -1)
            openFacesCount++;
        if (f->touching[1] == -1)
            openFacesCount++;
        if (f->touching[2] == -1)
            openFacesCount++;
    }
    //fprintf(stdout, " Number of open faces: %i\n", openFacesCount);
}
//! \brief Thread body: replays a logged packet stream from m_filename.
//!
//! Reads the file one byte at a time and reassembles packets framed as
//! 0xAA (start) ... 0xCC (end), where 0xBB escapes the next byte
//! (0xBB 0x55 -> 0xAA, 0xBB 0x44 -> 0xBB, 0xBB 0x33 -> 0xCC).
//! Each complete packet (including its framing bytes) is handed to
//! parseBuffer(); progress is emitted via logProgress() and endOfLog()
//! is emitted once the whole file has been consumed.
void LogLoader::run()
{
    QFile logfile(m_filename);
    // NOTE(review): open() result is not checked; on failure atEnd() is true
    // and the loop is simply skipped.
    logfile.open(QIODevice::ReadOnly);
    //QByteArray arr = logfile.readAll();
    //logfile.close();
    int curr = 0;  // bytes processed; only used as a counter, never read
    //bool escape = false;
    bool inpacket = false;     // true once a 0xAA start byte has been seen
    QByteArray currPacket;     // packet currently being assembled
    //while (!escape || curr >= arr.size())
    QByteArray retval;         // last byte read from the file
    while (!logfile.atEnd())
    {
        //logfile.read(&retval,1);
        emit logProgress(logfile.pos(),logfile.size());
        retval = logfile.read(1);
        if (retval[0] == (char)0xAA)
        {
            // Start byte: a 0xAA while already inside a packet discards the
            // partial packet and starts over.
            if (inpacket)
            {
                //in the middle of a packet
                currPacket.clear();
            }
            currPacket.append(retval[0]); //Start byte
            //qDebug() << "Start byte";
            inpacket = true;
        }
        else if (retval[0] == (char)0xCC)
        {
            // End byte: close the packet and hand it off.
            //currPacket
            currPacket.append(retval[0]);
            // Build a hex dump of the packet (only consumed by the
            // commented-out qDebug below, so this is currently dead work).
            // NOTE(review): the pad test uses (num < 0xF); 0x0F itself is
            // printed as "f" instead of "0f" — confirm intended.
            QString output;
            for (int i=0;i<currPacket.size();i++)
            {
                int num = (unsigned char)currPacket[i];
                output.append(" ").append((num < 0xF) ? "0" : "").append(QString::number(num,16));
            }
            qDebug() << "Full packet:";
            //qDebug() << output;
            parseBuffer(currPacket);
            currPacket.clear();
            //qDebug() << "loop";
            // Throttle the replay so downstream consumers keep up.
            msleep(25);
        }
        else if (inpacket)
        {
            if (retval[0] == (char)0xBB)
            {
                //Need to escape the next byte
                // NOTE(review): this read is not guarded by atEnd(), and an
                // escape byte followed by anything other than 0x55/0x44/0x33
                // is silently dropped — confirm both are acceptable.
                retval = logfile.read(1);
                if (retval[0] == (char)0x55)
                {
                    currPacket.append((char)0xAA);
                }
                else if (retval[0] == (char)0x44)
                {
                    currPacket.append((char)0xBB);
                }
                else if (retval[0] == (char)0x33)
                {
                    currPacket.append((char)0xCC);
                }
            }
            else
            {
                // Ordinary payload byte.
                currPacket.append(retval[0]);
            }
        }
        curr++;
    }
    emit endOfLog();
    logfile.close();
    return;
    /*for (int i=0;i<arr.size();i++)
    {
        //i++;
        //qDebug() << QString::number(arr[i],16);
        curr = i;
        curr+=3;
        curr += 1;
        i += curr-1;
        //i++;
    }*/
}
void ZipInstaller::downloadDone(bool error) { qDebug() << "[ZipInstall] download done, error:" << error; QStringList zipContents; // needed later // update progress bar emit logProgress(1, 1); if(getter->httpResponse() != 200 && !getter->isCached()) { emit logItem(tr("Download error: received HTTP error %1.") .arg(getter->httpResponse()),LOGERROR); emit done(true); return; } if(getter->isCached()) emit logItem(tr("Cached file used."), LOGINFO); if(error) { emit logItem(tr("Download error: %1").arg(getter->errorString()), LOGERROR); emit done(true); return; } else emit logItem(tr("Download finished."),LOGOK); QCoreApplication::processEvents(); if(m_unzip) { // unzip downloaded file qDebug() << "[ZipInstall] about to unzip " << m_file << "to" << m_mountpoint; emit logItem(tr("Extracting file."), LOGINFO); QCoreApplication::processEvents(); UnZip::ErrorCode ec; RbUnZip uz; connect(&uz, SIGNAL(unzipProgress(int, int)), this, SIGNAL(logProgress(int, int))); connect(this, SIGNAL(internalAborted()), &uz, SLOT(abortUnzip())); ec = uz.openArchive(m_file); if(ec != UnZip::Ok) { emit logItem(tr("Opening archive failed: %1.") .arg(uz.formatError(ec)),LOGERROR); emit logProgress(1, 1); emit done(true); return; } // check for free space. Make sure after installation will still be // some room for operating (also includes calculation mistakes due to // cluster sizes on the player). if(Utils::filesystemFree(m_mountpoint) < (uz.totalSize() + 1000000)) { emit logItem(tr("Not enough disk space! Aborting."), LOGERROR); emit logProgress(1, 1); emit done(true); return; } ec = uz.extractArchive(m_mountpoint); // TODO: better handling of aborted unzip operation. if(ec != UnZip::Ok) { emit logItem(tr("Extracting failed: %1.") .arg(uz.formatError(ec)),LOGERROR); emit logProgress(1, 1); emit done(true); return; } // prepare file list for log zipContents = uz.fileList(); } else {