bool TConnectionProcessor::TOutboxQueue::transferMessage(const TRecipientPublicKey& senderId,
  const TPhysicalMailMessage& msg)
  {
  bool sendStatus = false;

  try
    {
    bts::extended_private_key senderPrivKey;
    if(findIdentityPrivateKey(senderId, &senderPrivKey))
      {
      TPhysicalMailMessage msgToSend(msg);
      TRecipientPublicKeys bccList(msg.bcc_list);
      /// \warning The message to be sent must have its bcc list cleared.
      msgToSend.bcc_list.clear();

      size_t totalRecipientCount = msgToSend.to_list.size() + msgToSend.cc_list.size() + bccList.size();

      for(const auto& public_key : msgToSend.to_list)
        {
        if(isCancelled())
          return false;
        sendMail(msgToSend, public_key, senderPrivKey);
        }

      for(const auto& public_key : msgToSend.cc_list)
        {
        if(isCancelled())
          return false;
        sendMail(msgToSend, public_key, senderPrivKey);
        }

      for(const auto& public_key : bccList)
        {
        if(isCancelled())
          return false;
        sendMail(msgToSend, public_key, senderPrivKey);
        }

      sendStatus = true;
      }
    else
      {
      Processor.Sink->OnMissingSenderIdentity(senderId, msg);
      sendStatus = false;
      }
    }
  catch(const fc::exception& e)
    {
    sendStatus = false;
    elog("${e}", ("e", e.to_detail_string()));
    /// Probably a connection-related error; try to start it again
    checkForAvailableConnection();
    }

  return sendStatus;
  }
Example #2
void FTTask::runTask()
{
    // Steps: Read audio, compute spectrogram, save it.
    incMaxProgress(2.0f);
    if (!isVolatile())
        incMaxProgress(1.0f);

    // Take into account additional transformations.
    incMaxProgress(0.5f * _transforms.size());

    // From now on, perform periodic checks to see whether the task has been
    // cancelled.
    do {
        // Get the audio source, set the sample rate
        readAudioFile();
        incTotalProgress(1.0f);

        // Mandatory check.
        if (isCancelled())
            break;

        // Compute the spectrogram.
        computeSpectrogram();
        incTotalProgress(1.0f);

        // Mandatory check.
        if (isCancelled())
            break;

        // Additional transformations, if desired.
        doAdditionalTransformations();

        // Mandatory check.
        if (isCancelled())
            break;

        // Store the matrices.
        if (!isVolatile()) {
            storeComponents();
            incTotalProgress(1.0f);
        }

        // Mandatory check.
        if (isCancelled())
            break;

        if (_exportSpectrogram)
            exportSpectrogram();

    } while (false);
}
Example #3
bool DefaultClientTask::onHTTPClientResponseEvent(HTTP::ClientResponseEventArgs& args)
{
    const std::size_t bufferSize = IO::ByteBufferUtils::DEFAULT_BUFFER_SIZE;

    std::istream& istr = args.getResponseStream();

    std::streamsize contentLength = args.getResponse().getContentLength();

    IO::ByteBuffer _byteBuffer;

    if (contentLength > 0)
    {
        _byteBuffer.reserve(contentLength);
    }

    Poco::Buffer<char> buffer(bufferSize);
    std::streamsize len = 0;
    istr.read(buffer.begin(), bufferSize);
    std::streamsize n = istr.gcount();

    while (n > 0)
    {
        len += n;
        _byteBuffer.writeBytes(reinterpret_cast<uint8_t*>(buffer.begin()), n);

        // Check for task cancellation.
        if (istr && !isCancelled())
        {
            istr.read(buffer.begin(), bufferSize);
            n = istr.gcount();
        }
        else
        {
            n = 0;
        }
    }

    // Don't return cancelled data.
    if (!isCancelled())
    {
        ClientResponseBufferEventArgs bufferEvent(_byteBuffer,
                                                  args.getRequest(),
                                                  args.getResponse(),
                                                  args.getContext());
        handleBufferEvent(bufferEvent);
    }

    return true;
}
Example #4
void AP::HTTPRequest::HTTPRequest::mainAsynchronous() {
    curl_multi_add_handle(_curl_multi, _curl);
    _content.clear();
    int count;
    do {
        curl_multi_perform(_curl_multi, &count);
    } while ( count > 0 && !isCancelled() );
    if ( !isCancelled() ) {
        long code = 0;
        curl_easy_getinfo(_curl, CURLINFO_RESPONSE_CODE, &code);
        _response.setCode(code);
    }
    _response.setResponseBody(_content);
    curl_multi_remove_handle(_curl_multi, _curl);
}
Example #5
void stk500SaveFiles::saveFolder(DirectoryEntryPtr dirStartPtr, QString sourceFilePath, QString destFilePath, double progStart, double progTotal) {
    // First of all - create the destination directory before processing
    QDir dir(destFilePath);
    if (!dir.exists()) {
        dir.mkpath(".");
    }

    // Perform a file listing of the current directory
    QList<DirectoryInfo> subFiles = sd_list(dirStartPtr);

    // Filter any VOLUME entries
    int tmpIdx = 0;
    while (tmpIdx < subFiles.length()) {
        DirectoryInfo info = subFiles.at(tmpIdx);
        if (info.isVolume()) {
            subFiles.removeAt(tmpIdx);
        } else {
            tmpIdx++;
        }
    }

    // If cancelled or nothing to go through, stop here
    if (isCancelled() || subFiles.isEmpty()) {
        return;
    }

    // Name to append before the filename to produce a source location
    QString srcPath = sourceFilePath;
    if (!srcPath.isEmpty()) {
        srcPath.append('/');
    }

    // Go by all the files processing them
    int totalFiles = subFiles.length();
    double subProgTotal = progTotal * (1.0 / (double) totalFiles);
    for (int fileIdx = 0; fileIdx < totalFiles && !isCancelled(); fileIdx++) {
        DirectoryInfo info = subFiles.at(fileIdx);
        QString subSrcPath = srcPath + info.name();
        QString subDestPath = destFilePath + '/' + info.name();
        double subProgStart = progStart + progTotal * ((double) fileIdx / (double) totalFiles);
        if (info.isDirectory()) {
            DirectoryEntryPtr dirStartPtr = protocol->sd().getDirPtrFromCluster(info.firstCluster());
            saveFolder(dirStartPtr, subSrcPath, subDestPath, subProgStart, subProgTotal);
        } else {
            saveFile(info.entry(), subSrcPath, subDestPath, subProgStart, subProgTotal);
        }
    }
}
Example #6
static DWORD vlc_WaitForMultipleObjects (DWORD count, const HANDLE *handles,
        DWORD delay)
{
    DWORD ret;
    if (count == 0)
    {
#if VLC_WINSTORE_APP
        do {
            DWORD new_delay = 50;
            if (new_delay > delay)
                new_delay = delay;
            ret = SleepEx (new_delay, TRUE);
            if (delay != INFINITE)
                delay -= new_delay;
            if (isCancelled())
                ret = WAIT_IO_COMPLETION;
        } while (delay && ret == 0);
#else
        ret = SleepEx (delay, TRUE);
#endif

        if (ret == 0)
            ret = WAIT_TIMEOUT;
    }
    else {
#if VLC_WINSTORE_APP
        do {
            DWORD new_delay = 50;
            if (new_delay > delay)
                new_delay = delay;
            ret = WaitForMultipleObjectsEx (count, handles, FALSE, new_delay, TRUE);
            if (delay != INFINITE)
                delay -= new_delay;
            if (isCancelled())
                ret = WAIT_IO_COMPLETION;
        } while (delay && ret == WAIT_TIMEOUT);
#else
        ret = WaitForMultipleObjectsEx (count, handles, FALSE, delay, TRUE);
#endif
    }

    /* We do not abandon objects... this would be a bug */
    assert (ret < WAIT_ABANDONED_0 || WAIT_ABANDONED_0 + count - 1 < ret);

    if (unlikely(ret == WAIT_FAILED))
        abort (); /* We are screwed! */
    return ret;
}
Example #7
void TSizeCalculator::run()
{
    clearStateFlags();
    emit begin();
    calculateAll();
    if (isCancelled())
        emit cancelled();
    emit end(m_TaskSize);
}
Example #8
void stk500SaveFiles::run() {
    if (this->sourceFile.endsWith('/')) {
        // Saving a full directory
        // First navigate to this directory
        QString dirPath = this->sourceFile;
        dirPath.remove(dirPath.length() - 1, 1);
        QString destDirPath = this->destFile;
        if (destDirPath.endsWith('/')) {
            destDirPath.remove(destDirPath.length() - 1, 1);
        }

        DirectoryEntryPtr dirStartPtr;
        if (dirPath.isEmpty()) {
            dirStartPtr = protocol->sd().getRootPtr();
        } else {
            DirectoryEntryPtr dirEntryPtr = sd_findEntry(dirPath, true, false);
            if (isCancelled()) {
                return;
            }
            if (!dirEntryPtr.isValid()) {
                // Should not happen, but just in case...
                throw ProtocolException("Folder not found");
            }
            DirectoryEntry folderEntry = protocol->sd().readDirectory(dirEntryPtr);
            if (folderEntry.firstCluster()) {
                dirStartPtr = protocol->sd().getDirPtrFromCluster(folderEntry.firstCluster());
            } else {
                dirStartPtr = DirectoryEntryPtr(0, 0);
            }
        }
        saveFolder(dirStartPtr, dirPath, destDirPath, 0.0, 1.0);
    } else {
        // Saving a single file
        DirectoryEntryPtr filePtr = sd_findEntry(this->sourceFile, false, false);
        if (isCancelled()) {
            return;
        }
        if (!filePtr.isValid()) {
            throw ProtocolException("File not found");
        }
        DirectoryEntry fileEntry = protocol->sd().readDirectory(filePtr);
        saveFile(fileEntry, this->sourceFile, this->destFile, 0.0, 1.0);
    }
}
Example #9
void TConnectionProcessor::TOutboxQueue::transmissionLoop()
  {
  bool notificationSent = false;

  TPhysicalMailMessage  mail_msg;
  TRequestMessage       auth_msg;
  TStoredMailMessage    storedMsg;
  bool                  auth_flag = false;

  while(!isCancelled() && fetchNextMessage(&storedMsg, &mail_msg, &auth_msg, &auth_flag))
    {
    if(!notificationSent)
      {
      Processor.Sink->OnMessageSendingStart();
      notificationSent = true;
      }

    if(auth_flag)
    {
      if(transferAuthMsg(storedMsg.from_key, auth_msg))
        Outbox->remove_message(storedMsg);
    }
    else
    {
      if(transferMessage(storedMsg.from_key, mail_msg))
        moveMsgToSentDB(storedMsg, mail_msg);
    }

    if(isCancelled())
      break;

    fc::usleep(fc::milliseconds(250));
    }

  if(notificationSent)
    Processor.Sink->OnMessageSendingEnd();
  }
Example #10
void FTTask::doAdditionalTransformations()
{
    // Backup FT magnitude matrix if transformations are applied afterwards.
    if (!_transforms.empty()) {
        _ftMagMatrix = new Matrix(*_amplitudeMatrix);
    }
    // Apply transformations in specified order.
    for (vector<MatrixTransform*>::const_iterator it = _transforms.begin();
         it != _transforms.end() && !isCancelled(); ++it)
    {
        Matrix *trResult = (*it)->transform(_amplitudeMatrix);
        if (trResult != _amplitudeMatrix) {
            replaceAmplitudeMatrix(trResult);
        }
        incTotalProgress(0.5f);
    }
}
Example #11
File: Song.cpp Project: LMMS/lmms
void Song::restoreControllerStates( const QDomElement & element )
{
	QDomNode node = element.firstChild();
	while( !node.isNull() && !isCancelled() )
	{
		Controller * c = Controller::create( node.toElement(), this );
		if (c) {addController(c);}
		else
		{
			// Fix indices to ensure correct connections
			m_controllers.append(Controller::create(
				Controller::DummyController, this));
		}

		node = node.nextSibling();
	}
}
Example #12
int main(int argc, char** argv) {
    
    partie Partie;
    string choice, answer;
    bool is_Legal = false;
    createPartie(Partie);
    displayPartie(Partie);
    for (int i=0; i<TOTALMOVE; i++){
        int numTurn=Partie.moveNum[i][0];
        
        Display:
        cout<<"Turn "<<i+1<<": "<<Partie.gamers[numTurn].name<<endl;
        cout<<"Enter your move:";
        cin>>choice;
        
        for (int j=0; j<100; j++){
            if(choice.compare(Partie.game.pos[j])==0)
                Partie.coup[8] = j;
        }
        Partie.coup[9] = numTurn;
        //check if move is legal
        is_Legal = isLegal(Partie.coup, Partie.game);
        if(!is_Legal){
            cout<<"Position illegal"<<endl;
            goto Display;
        }else{   
            validated(Partie.coup, Partie.game);
            displayPartie(Partie);
            cout<<"Do you validate your turn? (y/n)";
            cin>>answer;
            if(answer.compare("n")==0){
                isCancelled(Partie.coup, Partie.game);
                displayPartie(Partie);
                goto Display;
            }
            Partie.moveNum[i][1] = Partie.coup[8];
        }    

    }

    string winner;
    winner = (Partie.game.pawn[BLACK] < Partie.game.pawn[WHITE]) ? Partie.gamers[WHITE].name : Partie.gamers[BLACK].name;
    cout<<"Winner is: "<<winner<<" *** CONGRATULATION!!! ***" <<endl;
    return 0;
}
Example #13
Block MergeTreeBaseBlockInputStream::readImpl()
{
    Block res;

    while (!res && !isCancelled())
    {
        if (!task && !getNewTask())
            break;

        res = readFromPart();

        if (res)
            injectVirtualColumns(res);

        if (task->mark_ranges.empty())
            task.reset();
    }

    return res;
}
Example #14
Block MergeTreeSequentialBlockInputStream::readImpl()
try
{
    Block res;
    if (!isCancelled() && current_row < data_part->rows_count)
    {
        bool continue_reading = (current_mark != 0);
        size_t rows_readed = reader->readRows(current_mark, continue_reading, storage.index_granularity, res);

        if (res)
        {
            res.checkNumberOfRows();

            current_row += rows_readed;
            current_mark += (rows_readed / storage.index_granularity);

            bool should_reorder = false, should_evaluate_missing_defaults = false;
            reader->fillMissingColumns(res, should_reorder, should_evaluate_missing_defaults, res.rows());

            if (should_evaluate_missing_defaults)
                reader->evaluateMissingDefaults(res);

            if (should_reorder)
                reader->reorderColumns(res, header.getNames(), nullptr);
        }
    }
    else
    {
        finish();
    }

    return res;
}
catch (...)
{
    /// Suspicion of a broken part. The part is added to the queue for verification.
    if (getCurrentExceptionCode() != ErrorCodes::MEMORY_LIMIT_EXCEEDED)
        storage.reportBrokenPart(data_part->name);
    throw;
}
Example #15
void SenderTask::runTask()
{
	_logger.debug("Starting SenderTask...");

	while(!isCancelled())
	{
		Poco::Notification::Ptr pNf(queue.waitDequeueNotification());
		if (pNf)
		{
			RedirectNotification::Ptr pRedirectNf = pNf.cast<RedirectNotification>();
			if (pRedirectNf)
			{
				if(pRedirectNf->is_rst())
					sender->SendRST(pRedirectNf->user_port(), pRedirectNf->dst_port(),pRedirectNf->user_ip(),pRedirectNf->dst_ip(), pRedirectNf->acknum(), pRedirectNf->seqnum(), pRedirectNf->f_psh());
				else
					sender->Redirect(pRedirectNf->user_port(), pRedirectNf->dst_port(),pRedirectNf->user_ip(),pRedirectNf->dst_ip(), pRedirectNf->acknum(), pRedirectNf->seqnum(), pRedirectNf->f_psh(), pRedirectNf->additional_param());
			}
		}
	}

	_logger.debug("Stopping SenderTask...");
}
Example #16
void TSizeCalculator::calculateAll()
{
    Q_ASSERT(m_pDirEnum != NULL);

    m_TaskSize.clear();

    if (m_Task.isNull()) {
        qWarning("Attempt to calculate size of null task.");
        return;
    }

    const TTaskSettings* pTaskSettings = &m_Task->TaskSettings;

    // Initialize the enumerator parameters that do not change.
    // TODO: The same code exists in the TReader class.
    TDirEnumerator::TParams Params;
    Params.filter = TDirEnumerator::Files;
    if (pTaskSettings->CopyEmptyDirs)
        Params.filter |= TDirEnumerator::Dirs;
    if (pTaskSettings->CopyHidden)
        Params.filter |= TDirEnumerator::Hidden;
    if (pTaskSettings->CopySystem)
        Params.filter |= TDirEnumerator::System;
    if (pTaskSettings->FollowShortcuts)
        Params.filter |= TDirEnumerator::FollowShortcuts;
    Params.dirStatOptions = 0;
    Params.subdirsDepth = pTaskSettings->SubDirsDepth;

    const QStringList* pSrcList = &m_Task->SrcList;
    for (int i = 0; i < pSrcList->count(); ++i)
    {
        Params.startPath = pSrcList->at(i);
        calculateOne(Params);
        if (isCancelled())
            break;
    }
}
Example #17
void TSizeCalculator::calculateOne(const TDirEnumerator::TParams& Params)
{
    Q_ASSERT(m_pDirEnum != NULL);

    if (Params.startPath.isEmpty()) {
        qWarning("TSizeCalculator::calculateOne called with empty source.");
        return;
    }

    if (m_pDirEnum->start(Params))
    {
        do {
            pausePoint();
            if (isCancelled()) {
                break;
            }
            const TFileInfoEx* pInfo = m_pDirEnum->infoPtr();
            Q_ASSERT(pInfo != NULL);

            if (pInfo->isDir())
            {
                ++m_TaskSize.DirsCount;
            }
            else {
                ++m_TaskSize.FilesCount;
                m_TaskSize.TotalSize += pInfo->size();
            }
        } while (m_pDirEnum->next());

        m_pDirEnum->finish();
    }
    else {
        qWarning("TSizeCalculator::calculateOne. Bad source (\"%s\").",
                 qPrintable(Params.startPath));
    }
}
Example #18
void stk500SaveFiles::saveFile(DirectoryEntry fileEntry, QString sourceFilePath, QString destFilePath, double progStart, double progTotal) {
    /* Ensure that the parent directory exists */
    QString destFolderPath = destFilePath;
    int destFolderIdx = destFolderPath.lastIndexOf('/');
    if (destFolderIdx != -1) {
        destFolderPath.remove(destFolderIdx, destFolderPath.length() - destFolderIdx);
    }
    QDir dir = QDir::root();
    dir.mkpath(destFolderPath);

    /* Open the file for writing */
    QFile destFile(destFilePath);
    if (!destFile.open(QIODevice::WriteOnly)) {
        throw ProtocolException("Failed to open file for writing");
    }

    /* Proceed to read in data */
    quint32 cluster = fileEntry.firstCluster();
    if (cluster) {
        char buff[512];
        quint32 remaining = fileEntry.fileSize;
        quint32 done = 0;
        qint64 startTime = QDateTime::currentMSecsSinceEpoch();
        qint64 time = startTime;
        qint64 timeElapsed = 0;
        while (remaining > 0) {
            quint32 block = protocol->sd().getClusterBlock(cluster);
            for (int i = 0; i < protocol->sd().volume().blocksPerCluster; i++) {
                protocol->sd().read(block + i, 0, buff, 512);

                /* If cancelled, stop reading/writing by setting remaining to 0 */
                if (isCancelled()) {
                    remaining = 0;
                }

                time = QDateTime::currentMSecsSinceEpoch();
                timeElapsed = (time - startTime) / 1000;
                done = (fileEntry.fileSize - remaining);
                int speed_ps;
                if (timeElapsed == 0 || done == 0) {
                    speed_ps = 6000;
                } else {
                    speed_ps = done / timeElapsed;
                }

                /* Update progress */
                setProgress(progStart + progTotal * ((double) done / (double) fileEntry.fileSize));

                /* Update the status info */
                QString newStatus;
                newStatus.append("Reading ").append(sourceFilePath).append(": ");
                newStatus.append(stk500::getSizeText(remaining)).append(" remaining (");
                newStatus.append(stk500::getSizeText(speed_ps)).append("/s)\n");
                newStatus.append("Elapsed: ").append(stk500::getTimeText(timeElapsed));
                newStatus.append(", estimated ").append(stk500::getTimeText(remaining / speed_ps));
                newStatus.append(" remaining");
                setStatus(newStatus);

                /* Write the 512 or less bytes of buffered data to the file */
                if (remaining < 512) {
                    destFile.write(buff, remaining);
                    remaining = 0;
                    break;
                } else {
                    destFile.write(buff, 512);
                    remaining -= 512;
                }
            }

            // Next cluster; if this is the end of the chain, no more clusters follow
            cluster = protocol->sd().fatGet(cluster);
            if (protocol->sd().isEOC(cluster)) {
                break;
            }
        }
    }

    // If errors occur, the destructor closes it as well...
    destFile.close();

    // If cancelled, delete the file again (awh...)
    if (isCancelled()) {
        destFile.remove();
    }
}
Example #19
Block MergeTreeBaseBlockInputStream::readFromPart()
{
    Block res;

    if (task->size_predictor)
        task->size_predictor->startBlock();

    if (prewhere_actions)
    {
        do
        {
            /// Let's read the full block of columns needed to calculate the expression in PREWHERE.
            size_t space_left = std::max(1LU, max_block_size_marks);
            MarkRanges ranges_to_read;

            if (task->size_predictor)
            {
                /// FIXME: size prediction model is updated by filtered rows, but it predicts size of unfiltered rows also

                size_t recommended_marks = task->size_predictor->estimateNumMarks(preferred_block_size_bytes, storage.index_granularity);
                if (res && recommended_marks < 1)
                    break;

                space_left = std::min(space_left, std::max(1LU, recommended_marks));
            }

            while (!task->mark_ranges.empty() && space_left && !isCancelled())
            {
                auto & range = task->mark_ranges.back();
                size_t marks_to_read = std::min(range.end - range.begin, space_left);

                pre_reader->readRange(range.begin, range.begin + marks_to_read, res);

                ranges_to_read.emplace_back(range.begin, range.begin + marks_to_read);
                space_left -= marks_to_read;
                range.begin += marks_to_read;
                if (range.begin == range.end)
                    task->mark_ranges.pop_back();
            }

            /// In case of isCancelled.
            if (!res)
                return res;

            progressImpl({ res.rows(), res.bytes() });
            pre_reader->fillMissingColumns(res, task->ordered_names, task->should_reorder);

            /// Compute the expression in PREWHERE.
            prewhere_actions->execute(res);

            ColumnPtr column = res.getByName(prewhere_column).column;
            if (task->remove_prewhere_column)
                res.erase(prewhere_column);

            const auto pre_bytes = res.bytes();

            ColumnPtr observed_column;
            if (column->isNullable())
            {
                ColumnNullable & nullable_col = static_cast<ColumnNullable &>(*column);
                observed_column = nullable_col.getNestedColumn();
            }
            else
                observed_column = column;

            /** If the filter is a constant (for example, it says PREWHERE 1),
                * then either return an empty block, or return the block unchanged.
                */
            if (const auto column_const = typeid_cast<const ColumnConstUInt8 *>(observed_column.get()))
            {
                if (!column_const->getData())
                {
                    res.clear();
                    return res;
                }

                for (const auto & range : ranges_to_read)
                    reader->readRange(range.begin, range.end, res);

                progressImpl({ 0, res.bytes() - pre_bytes });
            }
            else if (const auto column_vec = typeid_cast<const ColumnUInt8 *>(observed_column.get()))
            {
                size_t index_granularity = storage.index_granularity;

                const auto & pre_filter = column_vec->getData();
                IColumn::Filter post_filter(pre_filter.size());

                /// Let's read the rest of the columns in the required segments and compose our own filter for them.
                size_t pre_filter_pos = 0;
                size_t post_filter_pos = 0;

                for (const auto & range : ranges_to_read)
                {
                    auto begin = range.begin;
                    auto pre_filter_begin_pos = pre_filter_pos;

                    for (auto mark = range.begin; mark <= range.end; ++mark)
                    {
                        UInt8 nonzero = 0;

                        if (mark != range.end)
                        {
                            const size_t limit = std::min(pre_filter.size(), pre_filter_pos + index_granularity);
                            for (size_t row = pre_filter_pos; row < limit; ++row)
                                nonzero |= pre_filter[row];
                        }

                        if (!nonzero)
                        {
                            if (mark > begin)
                            {
                                memcpy(
                                    &post_filter[post_filter_pos],
                                    &pre_filter[pre_filter_begin_pos],
                                    pre_filter_pos - pre_filter_begin_pos);
                                post_filter_pos += pre_filter_pos - pre_filter_begin_pos;
                                reader->readRange(begin, mark, res);
                            }
                            begin = mark + 1;
                            pre_filter_begin_pos = std::min(pre_filter_pos + index_granularity, pre_filter.size());
                        }

                        if (mark < range.end)
                            pre_filter_pos = std::min(pre_filter_pos + index_granularity, pre_filter.size());
                    }
                }

                if (!post_filter_pos)
                {
                    res.clear();
                    continue;
                }

                progressImpl({ 0, res.bytes() - pre_bytes });

                post_filter.resize(post_filter_pos);

                /// Filter the columns related to PREWHERE using pre_filter,
                ///  other columns - using post_filter.
                size_t rows = 0;
                for (const auto i : ext::range(0, res.columns()))
                {
                    auto & col = res.safeGetByPosition(i);
                    if (col.name == prewhere_column && res.columns() > 1)
                        continue;
                    col.column =
                        col.column->filter(task->column_name_set.count(col.name) ? post_filter : pre_filter, -1);
                    rows = col.column->size();
                }

                /// Replace column with condition value from PREWHERE to a constant.
                if (!task->remove_prewhere_column)
                    res.getByName(prewhere_column).column = std::make_shared<ColumnConstUInt8>(rows, 1);
            }
            else
                throw Exception{
                    "Illegal type " + column->getName() + " of column for filter. Must be ColumnUInt8 or ColumnConstUInt8.",
                    ErrorCodes::ILLEGAL_TYPE_OF_COLUMN_FOR_FILTER
                };

            if (res)
            {
                if (task->size_predictor)
                    task->size_predictor->update(res);

                reader->fillMissingColumnsAndReorder(res, task->ordered_names);
            }
        }
        while (!task->mark_ranges.empty() && !res && !isCancelled());
    }
    else
    {
        size_t space_left = std::max(1LU, max_block_size_marks);

        while (!task->mark_ranges.empty() && space_left && !isCancelled())
        {
            auto & range = task->mark_ranges.back();

            size_t marks_to_read = std::min(range.end - range.begin, space_left);
            if (task->size_predictor)
            {
                size_t recommended_marks = task->size_predictor->estimateNumMarks(preferred_block_size_bytes, storage.index_granularity);
                if (res && recommended_marks < 1)
                    break;

                marks_to_read = std::min(marks_to_read, std::max(1LU, recommended_marks));
            }

            reader->readRange(range.begin, range.begin + marks_to_read, res);

            if (task->size_predictor)
                task->size_predictor->update(res);

            space_left -= marks_to_read;
            range.begin += marks_to_read;
            if (range.begin == range.end)
                task->mark_ranges.pop_back();
        }

        /// In the case of isCancelled.
        if (!res)
            return res;

        progressImpl({ res.rows(), res.bytes() });
        reader->fillMissingColumns(res, task->ordered_names, task->should_reorder);
    }

    return res;
}
Example #20
Block MergeSortingBlockInputStream::readImpl()
{
    /** Algorithm:
      * - read blocks from the source stream into memory;
      * - if there are too many of them and external sorting is enabled,
      *   - merge all blocks into a sorted stream and write it to a temporary file;
      * - at the end, merge all sorted streams from the temporary files, as well as the remaining blocks in memory.
      */

    /// If we have not yet read the source blocks.
    if (!impl)
    {
        while (Block block = children.back()->read())
        {
            if (!sample_block)
            {
                sample_block = block.cloneEmpty();
                removeConstantsFromSortDescription(sample_block, description);
            }

            /// If there were only const columns in sort description, then there is no need to sort.
            /// Return the blocks as is.
            if (description.empty())
                return block;

            removeConstantsFromBlock(block);

            blocks.push_back(block);
            sum_bytes_in_blocks += block.bytes();

            /** If there are too many of them and external sorting is enabled,
              *  merge the blocks that we have in memory at this moment and write the merged stream to a temporary (compressed) file.
              * NOTE. It's possible to check free space in the filesystem.
              */
            if (max_bytes_before_external_sort && sum_bytes_in_blocks > max_bytes_before_external_sort)
            {
                temporary_files.emplace_back(new Poco::TemporaryFile(tmp_path));
                const std::string & path = temporary_files.back()->path();
                WriteBufferFromFile file_buf(path);
                CompressedWriteBuffer compressed_buf(file_buf);
                NativeBlockOutputStream block_out(compressed_buf);
                MergeSortingBlocksBlockInputStream block_in(blocks, description, max_merged_block_size, limit);

                LOG_INFO(log, "Sorting and writing part of data into temporary file " + path);
                ProfileEvents::increment(ProfileEvents::ExternalSortWritePart);
                copyData(block_in, block_out, &is_cancelled);    /// NOTE. Possibly limit disk usage.
                LOG_INFO(log, "Done writing part of data into temporary file " + path);

                blocks.clear();
                sum_bytes_in_blocks = 0;
            }
        }

        if ((blocks.empty() && temporary_files.empty()) || isCancelled())
            return Block();

        if (temporary_files.empty())
        {
            impl = std::make_unique<MergeSortingBlocksBlockInputStream>(blocks, description, max_merged_block_size, limit);
        }
        else
        {
            /// If there were temporary files.
            ProfileEvents::increment(ProfileEvents::ExternalSortMerge);

            LOG_INFO(log, "There are " << temporary_files.size() << " temporary sorted parts to merge.");

            /// Create sorted streams to merge.
            for (const auto & file : temporary_files)
            {
                temporary_inputs.emplace_back(std::make_unique<TemporaryFileStream>(file->path()));
                inputs_to_merge.emplace_back(temporary_inputs.back()->block_in);
            }

            /// Rest of blocks in memory.
            if (!blocks.empty())
                inputs_to_merge.emplace_back(std::make_shared<MergeSortingBlocksBlockInputStream>(blocks, description, max_merged_block_size, limit));

            /// Will merge that sorted streams.
            impl = std::make_unique<MergingSortedBlockInputStream>(inputs_to_merge, description, max_merged_block_size, limit);
        }
    }

    Block res = impl->read();
    if (res)
        enrichBlockWithConstants(res, sample_block);
    return res;
}
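
The block comment at the top of this readImpl describes the strategy: buffer incoming blocks in memory, spill a sorted run to a temporary file whenever the in-memory bytes exceed a limit, and finally merge all runs together with whatever remains in memory. The sketch below illustrates only that spill-and-merge pattern on plain integers; it is a self-contained toy (spillSortedRun, mergeRuns and the run-file naming are made up here, not part of the project above).

#include <algorithm>
#include <cstdio>
#include <fstream>
#include <queue>
#include <string>
#include <vector>

// Sort the in-memory buffer, write it out as one run file, and clear the buffer.
static std::string spillSortedRun(std::vector<int> & buffer, size_t run_index)
{
    std::sort(buffer.begin(), buffer.end());
    std::string path = "run_" + std::to_string(run_index) + ".tmp";
    std::ofstream out(path);
    for (int v : buffer)
        out << v << '\n';
    buffer.clear();
    return path;
}

// K-way merge of the sorted run files into a single sorted vector.
static std::vector<int> mergeRuns(const std::vector<std::string> & paths)
{
    struct Cursor { int value; size_t source; };
    auto cmp = [](const Cursor & a, const Cursor & b) { return a.value > b.value; };
    std::priority_queue<Cursor, std::vector<Cursor>, decltype(cmp)> heap(cmp);

    std::vector<std::ifstream> inputs;
    for (const auto & p : paths)
        inputs.emplace_back(p);

    // Seed the heap with the first element of every run.
    for (size_t i = 0; i < inputs.size(); ++i)
    {
        int v;
        if (inputs[i] >> v)
            heap.push({v, i});
    }

    std::vector<int> result;
    while (!heap.empty())
    {
        Cursor top = heap.top();
        heap.pop();
        result.push_back(top.value);
        int v;
        if (inputs[top.source] >> v)   // refill from the run the smallest element came from
            heap.push({v, top.source});
    }

    for (const auto & p : paths)
        std::remove(p.c_str());        // clean up temporary run files
    return result;
}

A caller would push values into a std::vector<int> buffer, call spillSortedRun each time the buffer crosses its byte budget (the analogue of max_bytes_before_external_sort above), and finish by calling mergeRuns over the collected run paths after spilling one last run from the leftover buffer.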
Example #21
File: Song.cpp Project: LMMS/lmms
// load given song
void Song::loadProject( const QString & fileName )
{
	QDomNode node;

	m_loadingProject = true;

	Engine::projectJournal()->setJournalling( false );

	m_oldFileName = m_fileName;
	setProjectFileName(fileName);

	DataFile dataFile( m_fileName );
	// if the file could not be opened, the head node is null and we
	// create a new project
	if( dataFile.head().isNull() )
	{
		if( m_loadOnLaunch )
		{
			createNewProject();
		}
		setProjectFileName(m_oldFileName);
		return;
	}

	m_oldFileName = m_fileName;

	clearProject();

	clearErrors();

	Engine::mixer()->requestChangeInModel();

	// get the header information from the DOM
	m_tempoModel.loadSettings( dataFile.head(), "bpm" );
	m_timeSigModel.loadSettings( dataFile.head(), "timesig" );
	m_masterVolumeModel.loadSettings( dataFile.head(), "mastervol" );
	m_masterPitchModel.loadSettings( dataFile.head(), "masterpitch" );

	if( m_playPos[Mode_PlaySong].m_timeLine )
	{
		// reset loop-point-state
		m_playPos[Mode_PlaySong].m_timeLine->toggleLoopPoints( 0 );
	}

	if( !dataFile.content().firstChildElement( "track" ).isNull() )
	{
		m_globalAutomationTrack->restoreState( dataFile.content().
						firstChildElement( "track" ) );
	}

	//Backward compatibility for LMMS <= 0.4.15
	PeakController::initGetControllerBySetting();

	// Load mixer first to be able to set the correct range for FX channels
	node = dataFile.content().firstChildElement( Engine::fxMixer()->nodeName() );
	if( !node.isNull() )
	{
		Engine::fxMixer()->restoreState( node.toElement() );
		if( gui )
		{
			// refresh FxMixerView
			gui->fxMixerView()->refreshDisplay();
		}
	}

	node = dataFile.content().firstChild();

	QDomNodeList tclist=dataFile.content().elementsByTagName("trackcontainer");
	m_nLoadingTrack=0;
	for( int i=0,n=tclist.count(); i<n; ++i )
	{
		QDomNode nd=tclist.at(i).firstChild();
		while(!nd.isNull())
		{
			if( nd.isElement() && nd.nodeName() == "track" )
			{
				++m_nLoadingTrack;
				if( nd.toElement().attribute("type").toInt() == Track::BBTrack )
				{
					n += nd.toElement().elementsByTagName("bbtrack").at(0)
						.toElement().firstChildElement().childNodes().count();
				}
				nd=nd.nextSibling();
			}
		}
	}

	while( !node.isNull() && !isCancelled() )
	{
		if( node.isElement() )
		{
			if( node.nodeName() == "trackcontainer" )
			{
				( (JournallingObject *)( this ) )->restoreState( node.toElement() );
			}
			else if( node.nodeName() == "controllers" )
			{
				restoreControllerStates( node.toElement() );
			}
			else if( gui )
			{
				if( node.nodeName() == gui->getControllerRackView()->nodeName() )
				{
					gui->getControllerRackView()->restoreState( node.toElement() );
				}
				else if( node.nodeName() == gui->pianoRoll()->nodeName() )
				{
					gui->pianoRoll()->restoreState( node.toElement() );
				}
				else if( node.nodeName() == gui->automationEditor()->m_editor->nodeName() )
				{
					gui->automationEditor()->m_editor->restoreState( node.toElement() );
				}
				else if( node.nodeName() == gui->getProjectNotes()->nodeName() )
				{
					 gui->getProjectNotes()->SerializingObject::restoreState( node.toElement() );
				}
				else if( node.nodeName() == m_playPos[Mode_PlaySong].m_timeLine->nodeName() )
				{
					m_playPos[Mode_PlaySong].m_timeLine->restoreState( node.toElement() );
				}
			}
		}
		node = node.nextSibling();
	}

	// quirk for fixing projects with broken positions of TCOs inside
	// BB-tracks
	Engine::getBBTrackContainer()->fixIncorrectPositions();

	// Connect controller links to their controllers
	// now that everything is loaded
	ControllerConnection::finalizeConnections();

	// Remove dummy controllers that were added for correct connections
	m_controllers.erase(std::remove_if(m_controllers.begin(), m_controllers.end(),
		[](Controller* c){return c->type() == Controller::DummyController;}),
		m_controllers.end());

	// resolve all IDs so that autoModels are automated
	AutomationPattern::resolveAllIDs();


	Engine::mixer()->doneChangeInModel();

	ConfigManager::inst()->addRecentlyOpenedProject( fileName );

	Engine::projectJournal()->setJournalling( true );

	emit projectLoaded();

	if( isCancelled() )
	{
		m_isCancelled = false;
		createNewProject();
		return;
	}

	if ( hasErrors())
	{
		if ( gui )
		{
			QMessageBox::warning( NULL, tr("LMMS Error report"), errorSummary(),
							QMessageBox::Ok );
		}
		else
		{
			QTextStream(stderr) << Engine::getSong()->errorSummary() << endl;
		}
	}

	m_loadingProject = false;
	setModified(false);
	m_loadOnLaunch = false;
}
Example #22
void ReloadTask::runTask()
{
	_logger.debug("Starting reload task...");
	while (!isCancelled())
	{
		if(_event.tryWait(300))
		{
			_logger.information("Reloading data from files...");

			AhoCorasickPlus *atm_new = new AhoCorasickPlus();
			DomainsMatchType *dm_new = new DomainsMatchType;
			AhoCorasickPlus *to_del_atm;
			DomainsMatchType *to_del_dm;
			try
			{
				_parent->loadDomains(_parent->getSSLFile(),atm_new,dm_new);
				atm_new->finalize();
				{
					Poco::Mutex::ScopedLock lock(nfqFilter::_sslMutex);
					to_del_atm = nfqFilter::atm_ssl;
					to_del_dm = nfqFilter::_SSLdomainsMatchType;
					nfqFilter::atm_ssl = atm_new;
					nfqFilter::_SSLdomainsMatchType = dm_new;
				}
				delete to_del_atm;
				delete to_del_dm;
				_logger.information("Reloaded data for ssl hosts list");
			} catch (Poco::Exception &excep)
			{
				_logger.error("Got exception while reload ssl data: %s", excep.displayText());
				delete atm_new;
				delete dm_new;
			}
			

			atm_new = new AhoCorasickPlus();
			dm_new = new DomainsMatchType;
			try
			{
				_parent->loadDomains(_parent->getDomainsFile(),atm_new,dm_new);
				atm_new->finalize();
				{
					Poco::Mutex::ScopedLock lock(nfqFilter::_domainMapMutex);
					to_del_atm = nfqFilter::atm_domains;
					to_del_dm = nfqFilter::_domainsMatchType;
					nfqFilter::atm_domains = atm_new;
					nfqFilter::_domainsMatchType = dm_new;
				}
				delete to_del_atm;
				delete to_del_dm;
				_logger.information("Reloaded data for domains list");
			} catch (Poco::Exception &excep)
			{
				_logger.error("Got exception while reload domains data: %s", excep.displayText());
				delete atm_new;
				delete dm_new;
			}

			atm_new = new AhoCorasickPlus();
			try
			{
				_parent->loadURLs(_parent->getURLsFile(),atm_new);
				atm_new->finalize();
				{
					Poco::Mutex::ScopedLock lock(nfqFilter::_urlMapMutex);
					to_del_atm = nfqFilter::atm;
					nfqFilter::atm = atm_new;
				}
				delete to_del_atm;
				_logger.information("Reloaded data for urls list");
			} catch (Poco::Exception &excep)
			{
				_logger.error("Got exception while reload urls data: %s", excep.displayText());
				delete atm_new;
			}

			IPPortMap *ip_port_map = new IPPortMap;
			try
			{
				IPPortMap *old;
				_parent->loadHosts(_parent->getHostsFile(),ip_port_map);
				{
					Poco::ScopedWriteRWLock lock(nfqFilter::_ipportMapMutex);
					old = nfqFilter::_ipportMap;
					nfqFilter::_ipportMap = ip_port_map;
				}
				delete old;
				_logger.information("Reloaded data for ip port list");
			} catch (Poco::Exception &excep)
			{
				_logger.error("Got exception while reload ip port data: %s", excep.displayText());
				delete ip_port_map;
			}

			Patricia *ssl_ips = new Patricia;
			try
			{
				_parent->loadSSLIP(_parent->getSSLIpsFile(),ssl_ips);
				Patricia *ssl_ips_old;
				{
					Poco::ScopedWriteRWLock lock(nfqFilter::_sslIpsSetMutex);
					ssl_ips_old = nfqFilter::_sslIps;
					nfqFilter::_sslIps = ssl_ips;
				}
				delete ssl_ips_old;
				_logger.information("Reloaded data for ssl ip list");
			} catch (Poco::Exception &excep)
			{
				_logger.error("Got exception while reload ip ssl data: %s", excep.displayText());
				delete ssl_ips;
			}
		}
	}
	_logger.debug("Stopping reload task...");
}
Example #23
void WebApplicationTask::runTask()
{
	while(!isCancelled()) 
		sleep(50);
}
Example #24
bool StreamManager::isDone() {
  return hasError() || isCancelled() ||
         (sink_.hasSentEnd() && source_.hasReceivedEnd());
}
Example #25
void CreatingSetsBlockInputStream::createOne(SubqueryForSet & subquery)
{
    LOG_TRACE(log, (subquery.set ? "Creating set. " : "")
        << (subquery.join ? "Creating join. " : "")
        << (subquery.table ? "Filling temporary table. " : ""));
    Stopwatch watch;

    BlockOutputStreamPtr table_out;
    if (subquery.table)
        table_out = subquery.table->write({}, context);

    bool done_with_set = !subquery.set;
    bool done_with_join = !subquery.join;
    bool done_with_table = !subquery.table;

    if (done_with_set && done_with_join && done_with_table)
        throw Exception("Logical error: nothing to do with subquery", ErrorCodes::LOGICAL_ERROR);

    if (table_out)
        table_out->writePrefix();

    while (Block block = subquery.source->read())
    {
        if (isCancelled())
        {
            LOG_DEBUG(log, "Query was cancelled during set / join or temporary table creation.");
            return;
        }

        if (!done_with_set)
        {
            if (!subquery.set->insertFromBlock(block))
                done_with_set = true;
        }

        if (!done_with_join)
        {
            subquery.renameColumns(block);

            if (subquery.joined_block_actions)
                subquery.joined_block_actions->execute(block);

            if (!subquery.join->insertFromBlock(block))
                done_with_join = true;
        }

        if (!done_with_table)
        {
            block = materializeBlock(block);
            table_out->write(block);

            rows_to_transfer += block.rows();
            bytes_to_transfer += block.bytes();

            if (!network_transfer_limits.check(rows_to_transfer, bytes_to_transfer, "IN/JOIN external table", ErrorCodes::SET_SIZE_LIMIT_EXCEEDED))
                done_with_table = true;
        }

        if (done_with_set && done_with_join && done_with_table)
        {
            subquery.source->cancel(false);
            break;
        }
    }

    if (table_out)
        table_out->writeSuffix();

    watch.stop();

    size_t head_rows = 0;
    const BlockStreamProfileInfo & profile_info = subquery.source->getProfileInfo();

    head_rows = profile_info.rows;

    if (subquery.join)
        subquery.join->setTotals(subquery.source->getTotals());

    if (head_rows != 0)
    {
        std::stringstream msg;
        msg << std::fixed << std::setprecision(3);
        msg << "Created. ";

        if (subquery.set)
            msg << "Set with " << subquery.set->getTotalRowCount() << " entries from " << head_rows << " rows. ";
        if (subquery.join)
            msg << "Join with " << subquery.join->getTotalRowCount() << " entries from " << head_rows << " rows. ";
        if (subquery.table)
            msg << "Table with " << head_rows << " rows. ";

        msg << "In " << watch.elapsedSeconds() << " sec.";
        LOG_DEBUG(log, msg.rdbuf());
    }
    else
    {
        LOG_DEBUG(log, "Subquery has empty result.");
    }
}
Example #26
File: pal.cpp Project: Ariki/QGIS
  /**
  * \brief Problem Factory
  * Select features from the user's chosen layers within
  * a specific bounding box
  * @param nbLayers # wanted layers
  * @param layersFactor layers importance
  * @param layersName layers in problem
  * @param lambda_min west bbox
  * @param phi_min south bbox
  * @param lambda_max east bbox
  * @param phi_max north bbox
  * @param scale the scale
  */
  Problem* Pal::extract( int nbLayers, char **layersName, double *layersFactor, double lambda_min, double phi_min, double lambda_max, double phi_max, double scale, std::ofstream *svgmap )
  {
    Q_UNUSED( svgmap );
    // to store obstacles
    RTree<PointSet*, double, 2, double> *obstacles = new RTree<PointSet*, double, 2, double>();

    Problem *prob = new Problem();

    int i, j;

    double bbx[4];
    double bby[4];

    double amin[2];
    double amax[2];

    int max_p = 0;

    LabelPosition* lp;

    bbx[0] = bbx[3] = amin[0] = prob->bbox[0] = lambda_min;
    bby[0] = bby[1] = amin[1] = prob->bbox[1] = phi_min;
    bbx[1] = bbx[2] = amax[0] = prob->bbox[2] = lambda_max;
    bby[2] = bby[3] = amax[1] = prob->bbox[3] = phi_max;


    prob->scale = scale;
    prob->pal = this;

    LinkedList<Feats*> *fFeats = new LinkedList<Feats*> ( ptrFeatsCompare );

    FeatCallBackCtx *context = new FeatCallBackCtx();
    context->fFeats = fFeats;
    context->scale = scale;
    context->obstacles = obstacles;
    context->candidates = prob->candidates;

    context->bbox_min[0] = amin[0];
    context->bbox_min[1] = amin[1];

    context->bbox_max[0] = amax[0];
    context->bbox_max[1] = amax[1];

#ifdef _EXPORT_MAP_
    context->svgmap = svgmap;
#endif

#ifdef _VERBOSE_
    std::cout <<  nbLayers << "/" << layers->size() << " layers to extract " << std::endl;
    std::cout << "scale is 1:" << scale << std::endl << std::endl;

#endif


    /* First step : extract feature from layers
     *
     * */
    int oldNbft = 0;
    Layer *layer;

    QList<char*> *labLayers = new QList<char*>();

    lyrsMutex->lock();
    for ( i = 0; i < nbLayers; i++ )
    {
      for ( QList<Layer*>::iterator it = layers->begin(); it != layers->end(); ++it ) // iterate on pal->layers
      {
        layer = *it;
        // Only select layers that are active and labellable (with the scale constraint), or that are active and must be treated as obstacles
        if ( layer->active
             && ( layer->obstacle || ( layer->toLabel && layer->isScaleValid( scale ) ) ) )
        {

          // check if this layer has been selected by the user
          if ( strcmp( layersName[i], layer->name ) == 0 )
          {
            // check for connected features with the same label text and join them
            if ( layer->getMergeConnectedLines() )
              layer->joinConnectedFeatures();

            layer->chopFeaturesAtRepeatDistance();


            context->layer = layer;
            context->priority = layersFactor[i];
            // look up features (and generate the candidates list)

#ifdef _EXPORT_MAP_
            *svgmap << "<g inkscape:label=\"" << layer->name << "\"" << std::endl
            <<  "    inkscape:groupmode=\"layer\"" << std::endl
            <<  "    id=\"" << layer->name << "\">" << std::endl << std::endl;
#endif

            context->layer->modMutex->lock();
            context->layer->rtree->Search( amin, amax, extractFeatCallback, ( void* ) context );
            context->layer->modMutex->unlock();

#ifdef _EXPORT_MAP_
            *svgmap  << "</g>" << std::endl << std::endl;
#endif

#ifdef _VERBOSE_
            std::cout << "Layer's name: " << layer->getName() << std::endl;
            std::cout << "     scale range: " << layer->getMinScale() << "->" << layer->getMaxScale() << std::endl;
            std::cout << "     active:" << layer->isToLabel() << std::endl;
            std::cout << "     obstacle:" << layer->isObstacle() << std::endl;
            std::cout << "     toLabel:" << layer->isToLabel() << std::endl;
            std::cout << "     # features: " << layer->getNbFeatures() << std::endl;
            std::cout << "     # extracted features: " << context->fFeats->size() - oldNbft << std::endl;
#endif
            if ( context->fFeats->size() - oldNbft > 0 )
            {
              char *name = new char[strlen( layer->getName() ) +1];
              strcpy( name, layer->getName() );
              labLayers->push_back( name );
            }
            oldNbft = context->fFeats->size();


            break;
          }
        }
      }
    }
    delete context;
    lyrsMutex->unlock();

    prob->nbLabelledLayers = labLayers->size();
    prob->labelledLayersName = new char*[prob->nbLabelledLayers];
    for ( i = 0; i < prob->nbLabelledLayers; i++ )
    {
      prob->labelledLayersName[i] = labLayers->front();
      labLayers->pop_front();
    }

    delete labLayers;

    if ( fFeats->size() == 0 )
    {
#ifdef _VERBOSE_
      std::cout << std::endl << "Empty problem" << std::endl;
#endif
      delete fFeats;
      delete prob;
      delete obstacles;
      return NULL;
    }

    prob->nbft = fFeats->size();
    prob->nblp = 0;
    prob->featNbLp = new int [prob->nbft];
    prob->featStartId = new int [prob->nbft];
    prob->inactiveCost = new double[prob->nbft];

    Feats *feat;

#ifdef _VERBOSE_
    std::cout << "FIRST NBFT : " << prob->nbft << std::endl;
#endif

    // Filtering label positions against obstacles
    amin[0] = amin[1] = -DBL_MAX;
    amax[0] = amax[1] = DBL_MAX;
    FilterContext filterCtx;
    filterCtx.cdtsIndex = prob->candidates;
    filterCtx.scale = prob->scale;
    filterCtx.pal = this;
    obstacles->Search( amin, amax, filteringCallback, ( void* ) &filterCtx );

    if ( isCancelled() )
    {
      delete fFeats;
      delete prob;
      delete obstacles;
      return 0;
    }

    int idlp = 0;
    for ( i = 0; i < prob->nbft; i++ ) /* foreach feature into prob */
    {
      feat = fFeats->pop_front();
#ifdef _DEBUG_FULL_
      std::cout << "Feature:" << feat->feature->getLayer()->getName() << "/" << feat->feature->getUID() << " candidates " << feat->nblp << std::endl;
#endif
      prob->featStartId[i] = idlp;
      prob->inactiveCost[i] = pow( 2, 10 - 10 * feat->priority );

      switch ( feat->feature->getGeosType() )
      {
        case GEOS_POINT:
          max_p = point_p;
          break;
        case GEOS_LINESTRING:
          max_p = line_p;
          break;
        case GEOS_POLYGON:
          max_p = poly_p;
          break;
      }

      // sort candidates by cost, skip less interesting ones, calculate polygon costs (if using polygons)
      max_p = CostCalculator::finalizeCandidatesCosts( feat, max_p, obstacles, bbx, bby );

#ifdef _DEBUG_FULL_
      std::cout << "All costs are set" << std::endl;
#endif
      // only keep the 'max_p' best candidates
      for ( j = max_p; j < feat->nblp; j++ )
      {
        // TODO remove from index
        feat->lPos[j]->removeFromIndex( prob->candidates );
        delete feat->lPos[j];
      }
      feat->nblp = max_p;

      // update problem's # candidate
      prob->featNbLp[i] = feat->nblp;
      prob->nblp += feat->nblp;

      // add all candidates into a rtree (to speed up conflicts searching)
      for ( j = 0; j < feat->nblp; j++, idlp++ )
      {
        lp = feat->lPos[j];
        //lp->insertIntoIndex(prob->candidates);
        lp->setProblemIds( i, idlp ); // bugfix #1 (maxence 10/23/2008)
      }
      fFeats->push_back( feat );
    }

#ifdef _DEBUG_FULL_
    std::cout << "Malloc problem...." << std::endl;
#endif


    idlp = 0;
    int nbOverlaps = 0;
    prob->labelpositions = new LabelPosition*[prob->nblp];
    //prob->feat = new int[prob->nblp];

#ifdef _DEBUG_FULL_
    std::cout << "problem malloc'd" << std::endl;
#endif


    j = 0;
    while ( fFeats->size() > 0 ) // foreach feature
    {
      if ( isCancelled() )
      {
        delete fFeats;
        delete prob;
        delete obstacles;
        return 0;
      }

      feat = fFeats->pop_front();
      for ( i = 0; i < feat->nblp; i++, idlp++ )  // foreach label candidate
      {
        lp = feat->lPos[i];
        lp->resetNumOverlaps();

        // make sure that candidate's cost is less than 1
        lp->validateCost();

        prob->labelpositions[idlp] = lp;
        //prob->feat[idlp] = j;

        lp->getBoundingBox( amin, amax );

        // lookup for overlapping candidate
        prob->candidates->Search( amin, amax, LabelPosition::countOverlapCallback, ( void* ) lp );

        nbOverlaps += lp->getNumOverlaps();
#ifdef _DEBUG_FULL_
        std::cout << "Nb overlap for " << idlp << "/" << prob->nblp - 1 << " : " << lp->getNumOverlaps() << std::endl;
#endif
      }
      j++;
      delete[] feat->lPos;
      delete feat;
    }
    delete fFeats;

    //delete candidates;
    delete obstacles;


    nbOverlaps /= 2;
    prob->all_nblp = prob->nblp;
    prob->nbOverlap = nbOverlaps;


#ifdef _VERBOSE_
    std::cout << "nbOverlap: " << prob->nbOverlap << std::endl;
    std::cerr << scale << "\t"
              << prob->nbft << "\t"
              << prob->nblp << "\t"
              << prob->nbOverlap << "\t";
#endif

    return prob;
  }