// Destructor: release every cached AvailableGroup and the shared icons.
AvailableGroups::~AvailableGroups()
{
    // Free all group objects held in the list, then drop the (dangling) pointers.
    qDeleteAll(groupsList);
    groupsList.clear();

    // One guard covers all three icons — they appear to be created together,
    // so a non-null subscribedIcon implies the others exist too.
    if (subscribedIcon)
    {
        Q_DELETE(subscribedIcon);
        Q_DELETE(notSubscribedIcon);
        Q_DELETE(crossIcon);
    }
}
// Remove every trace of the given server from the group db: each group record
// has the server's presence stripped; records left with no server at all are
// deleted. Finishes by reloading the model from the updated db.
//
// Fixes vs previous version:
//  - the AvailableGroup built for each record was leaked on every iteration;
//  - the loop tested only for DB_NOTFOUND, so any other cursor error caused
//    an infinite loop over stale data (now loops only while get() succeeds).
void AvailableGroups::slotDeleteServer(quint16 serverId)
{
    Dbt key, data;
    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));

    Dbc *cursor = 0;
    int ret;

    groupDb->cursor(0, &cursor, DB_WRITECURSOR);

    // Dbc::get returns 0 on success; stop on DB_NOTFOUND *or* any error.
    while ((ret = cursor->get(&key, &data, DB_NEXT)) == 0)
    {
        g = new AvailableGroup((char*)data.get_data());
        g->removeServerPresence(serverId);

        // resave the group...
        const char *p = g->data();
        memset(&data, 0, sizeof(data));
        data.set_data((void*)p);
        data.set_size(g->size());

        if (g->noServerPresence())
        {
            ret = cursor->del(0);
            if (ret != 0)
                quban->getLogAlertList()->logMessage(LogAlertList::Error,
                        tr("Cannot delete server ") + servers->value(serverId)->getHostName() + tr(" group record"));
        }
        else if ((ret = cursor->put(&key, &data, DB_CURRENT)) != 0)
        {
            quban->getLogAlertList()->logMessage(LogAlertList::Error,
                    tr("Cannot update group record. Error: ") + dbEnv->strerror(ret));
        }

        // NOTE(review): p comes from AvailableGroup::data(); if that is
        // new[]-allocated (as HeaderGroup::data() is elsewhere in this file)
        // this should really be Q_DELETE_ARRAY — confirm the allocator.
        Q_DELETE(p);
        Q_DELETE(g); // was leaked once per record in the previous version
    }

    cursor->close();
    reloadData();
}
void MultiPartHeader::getAllArticleNums(Db* pDB, PartNumMap* serverArticleNos, QMap<quint16, quint64>* partSize,QMap<quint16, QString>* partMsgId) { quint64 multiPartKey = getMultiPartKey(); Header* h = 0; // Walk the headers to get the PartNumMap Dbc *cursorp = 0; // Get a cursor pDB->cursor(NULL, &cursorp, 0); // Set up our DBTs Dbt key(&multiPartKey, sizeof(quint64)); Dbt data; int ret = cursorp->get(&key, &data, DB_SET); while (ret != DB_NOTFOUND) { h = new Header((char*)data.get_data(), (char*)key.get_data()); serverArticleNos->insert(h->getPartNo(), h->getServerArticlemap()); partSize->insert(h->getPartNo(), h->getSize()); partMsgId->insert(h->getPartNo(), h->getMessageId()); Q_DELETE(h); ret = cursorp->get(&key, &data, DB_NEXT_DUP); } if (cursorp != NULL) cursorp->close(); }
// Destructor: releases the owned list item only.
// NOTE(review): 'post' is deliberately NOT deleted — the commented-out delete
// below reportedly started core dumping, which suggests 'post' is owned (or
// double-deleted) elsewhere. Confirm ownership before re-enabling the delete,
// otherwise 'post' may leak here.
QPostItem::~QPostItem() { Q_DELETE(listItem); if (post) { ;//Q_DELETE(post); // Why has this started core dumping ???? } }
// Discard the cached groups and the current model, then rebuild both
// from the group db. Returns the freshly built model.
AvailGroupModel* AvailableGroups::reloadData()
{
    qDeleteAll(groupsList);
    groupsList.clear();
    Q_DELETE(model);        // loadData() only rebuilds when model is null
    return loadData();
}
// Destructor: frees the state icons, every per-server SpeedThread (and its
// view item), and the speed timer.
ServersList::~ServersList()
{
    qDebug() << "In ServersList::~ServersList";

    // Icons owned by the list.
    Q_DELETE(activeIcon);
    Q_DELETE(activePausedIcon);
    Q_DELETE(passiveIcon);
    Q_DELETE(passivePausedIcon);
    Q_DELETE(dormantIcon);

    // Walk server -> (thread id -> SpeedThread*) and free each entry.
    QMapIterator<int, ThreadView> serverIt(serverThreads);
    while (serverIt.hasNext())
    {
        serverIt.next();
        QMapIterator<int, SpeedThread*> threadIt(serverIt.value());
        while (threadIt.hasNext())
        {
            threadIt.next();
            SpeedThread* speedThread = threadIt.value();
            if (speedThread)
            {
                if (speedThread->item)
                    Q_DELETE(speedThread->item);
                Q_DELETE_NO_LVALUE(speedThread);
            }
        }
    }

    Q_DELETE(serverSpeedTimer);
    serverThreads.clear();
}
// Remove (and free) the schedule element with the given id, keeping the
// start-time index in step with the id index. Unknown ids are ignored.
void QueueSchedule::removeElement(quint16 id)
{
    if (!queuePeriodsById.contains(id))
        return;

    QueueScheduleElement* element = queuePeriodsById.take(id);

    if (queuePeriods.contains(element->getStartSecs()))
        queuePeriods.remove(element->getStartSecs());

    Q_DELETE(element);
}
// Destructor: stop and free each timer that was created, then release the
// NNTP helper and the byte counter.
Thread::~Thread()
{
    if (speedTimer)
    {
        speedTimer->stop();
        Q_DELETE(speedTimer);
    }

    if (idleTimer)
    {
        idleTimer->stop();
        Q_DELETE(idleTimer);
    }

    if (retryTimer)
    {
        retryTimer->stop();
        Q_DELETE(retryTimer);
    }

    Q_DELETE(nntpT);
    Q_DELETE(threadBytes);
}
// Build (lazily) the available-groups model by walking the group db.
// Returns the cached model when one already exists.
//
// Fixes vs previous version:
//  - on cursor-open failure it logged but then dereferenced the (unopened)
//    cursor anyway — now it bails out after logging;
//  - the two Dbt objects were needlessly heap allocated (and leaked on the
//    failure path) — now stack allocated;
//  - the loop tested only for DB_NOTFOUND, so any other cursor error caused
//    an infinite loop — now loops only while get() succeeds.
AvailGroupModel* AvailableGroups::loadData()
{
    if (!model)
    {
        model = new AvailGroupModel(servers, groupDb, this);

        Dbc *cursor = 0;
        if (groupDb->cursor(NULL, &cursor, 0))
        {
            qDebug() << "Error opening cursor";
            return model; // don't touch an unopened cursor
        }

        Dbt key, data;
        AvailableGroup *g = 0;
        int ret;
        quint32 count = 0;

        while ((ret = cursor->get(&key, &data, DB_NEXT)) == 0)
        {
            g = new AvailableGroup((char*)data.get_data());
            if (subscribedGroups->contains(g->getName()))
                g->setSubscribed(true);
            model->setupTopLevelItem(g); // add to model
            groupsList.insert(g->getName(), g); // ownership stays with groupsList
            ++count;
        }

        qDebug() << "Available Groups: " << count;
        cursor->close();
    }

    return model;
}
// Delete the current schedule: remove its db record, drop it from the
// schedule lists, re-evaluate periods if it was active, and free it.
void QueueScheduler::removeCurrentSchedule()
{
    if (!currentSchedule)
        return;

    dbDelete(currentSchedule->getName());
    queueSchedules.removeOne(currentSchedule);

    if (currentSchedule->getIsActive())
    {
        enabledSchedules.remove(currentSchedule->getPriority(), currentSchedule);
        managePeriods();
    }

    Q_DELETE(currentSchedule);
}
bool AvailGroupItem::removeChildren(int position, int count) { if (position < 0 || position + count > childItems.size()) return false; AvailGroupItem* item = 0; for (int row = 0; row < count; ++row) { item = childItems.takeAt(position); Q_DELETE(item); } return true; }
// Remove a server's presence from this multi-part header: clears the
// per-server summary maps, then walks the duplicate part records, stripping
// the server from each. Parts left with no server at all are deleted from
// the parts db (adjusting size and missingParts accordingly).
//
// Fix vs previous version: the loop tested only for DB_NOTFOUND, so any
// other get() error re-processed stale buffer contents forever; it now
// loops only while get() succeeds (returns 0).
void MultiPartHeader::removeServer(Db* pDB, quint16 serverId)
{
    serverPart.remove(serverId);
    serverLowest.remove(serverId);

    quint64 multiPartKey = getMultiPartKey();
    Header* h = 0;

    // Walk the headers to get the PartNumMap
    Dbc *cursorp = 0;
    pDB->cursor(NULL, &cursorp, DB_WRITECURSOR);

    // Set up our DBTs
    Dbt key(&multiPartKey, sizeof(quint64));
    Dbt data;

    int ret = cursorp->get(&key, &data, DB_SET);
    while (ret == 0)
    {
        h = new Header((char*)data.get_data(), (char*)key.get_data());
        if (h->isHeldByServer(serverId))
        {
            h->removeServerPartNum(serverId);
            if (h->isServerNumMapEmpty())
            {
                // No server holds this part any more: drop the record and
                // account for the lost bytes / newly missing part.
                cursorp->del(0);
                size -= h->getSize();
                missingParts++;
            }
        }
        Q_DELETE(h);
        ret = cursorp->get(&key, &data, DB_NEXT_DUP);
    }

    if (cursorp != NULL)
        cursorp->close();
}
// Replace the current schedule with a copy of newSchedule, keeping the
// schedule list, the enabled-schedules index and the active periods in step,
// then persist the new schedule.
void QueueScheduler::updateCurrentSchedule(QueueSchedule& newSchedule)
{
    bool previouslyActive = false;

    // Detach and free the old current schedule first.
    if (currentSchedule)
    {
        queueSchedules.removeOne(currentSchedule);
        if (currentSchedule->getIsActive())
        {
            enabledSchedules.remove(currentSchedule->getPriority(), currentSchedule);
            previouslyActive = true;
        }
        Q_DELETE(currentSchedule);
    }

    currentSchedule = new QueueSchedule(newSchedule);

    // TODO For the moment only allow one enabled schedule
    if (newSchedule.getIsActive())
    {
        QListIterator<QueueSchedule*> schedIt(queueSchedules);
        while (schedIt.hasNext())
            schedIt.next()->setIsActive(false);
        enabledSchedules.clear();
    }

    queueSchedules.append(currentSchedule);

    if (currentSchedule->getIsActive())
    {
        enabledSchedules.insert(currentSchedule->getPriority(), currentSchedule);
        managePeriods();
    }
    else if (previouslyActive) // was active, but not any more
    {
        managePeriods();
    }

    currentSchedule->dbSave();
}
// Destructor: the agent owns its behavior tree root node.
BehaviorTreeAgent::~BehaviorTreeAgent()
{
    Q_DELETE(root);
}
// Decode one queued post. If a uu/yy decoder is found the parts are decoded
// and progress/errors are reported via signals; otherwise the parts are
// treated as plain text and copied to numbered .txt files (unless
// config->deleteFailed says to fail instead).
// Returns false on cancellation or disk error, true otherwise.
//
// Fixes vs previous version (cancel path):
//  - 'decoder' was dereferenced by the qDebug trace *before* the null check,
//    crashing when no decoder existed;
//  - 'decoder' was leaked on the early return (Q_DELETE was only reached on
//    the normal exit).
bool DecodeManager::decode(QPostItem *postItem)
{
    emit decodeStarted(postItem);

    QFile thisPart;
    QTime start, end;
    start = QTime::currentTime();

    QStringList partList;
    bool errorFlag = false;
    bool diskError = false;
    bool asciiFile = false;
    QString destFile2;
    QString errorString = QString::null;
    QMap<int, Part *>::iterator pit;

    parts = 0;
    int progress = 0;

    // Build a QStringList of filenames (with path); 'parts' ends up as the
    // highest part number seen.
    for (pit = postItem->parts.begin(); pit != postItem->parts.end(); ++pit)
    {
        partList.append(postItem->getFName() + '.' + QString::number(pit.key()));
        if (pit.key() > parts)
            parts = pit.key();
    }

    destDir = postItem->getSavePath();
    decoder = getDecoderForPost(partList);

    int doneParts = 0;

    if (decoder != NULL)
    {
        // Decode part by part, accumulating error text per failed part.
        while (!m_cancel && !diskError && !decoder->decodingComplete())
        {
            doneParts++;
            switch (decoder->decodeNextPart())
            {
                case Decoder::Err_BadCRC: // bad crc in part
                    errorFlag = true;
                    errorString += "Bad part crc; ";
                    break;
                case Decoder::Err_MissingPart: // right now only activated if cannot read from input
                    errorFlag = true;
                    errorString += "Missing part; ";
                    break;
                case Decoder::Err_No: // No error
                    break;
                case Decoder::Err_SizeMismatch: // expected size vs written bytes
                    errorFlag = true;
                    errorString += "Part size mismatch; ";
                    break;
                case Decoder::Err_Write: // Disk error...disk full?
                    errorFlag = true;
                    diskError = true;
                    errorString = "I/O error!";
                    break;
            }
            progress = (int) (((float) doneParts / (float) parts) * 100);
            emit decodeProgress(postItem, progress);
        }

        if (!m_cancel && !decoder->isSizeCorrect())
        {
            // Error: size is wrong — can happen when parts are missing.
            errorFlag = true;
            errorString += "Wrong size";
            qDebug() << "Wrong total size";
        }
    }
    else
    {
        qDebug() << "Post is not uu/yy encoded";
        if (config->deleteFailed)
        {
            errorFlag = true;
            errorString = "Post is not uu/yy encoded";
            emit logMessage(LogAlertList::Warning, tr("Decode error: Post is not uu or yy encoded"));
        }
        else if (!m_cancel)
        {
            // Treat each non-empty part as plain text and copy it to a
            // sanitised <subject>.<part>.txt in the destination directory.
            QString tempFile;
            QString destFile1;
            QByteArray ba;
            char *c_str = 0;
            char *c_str2 = 0;
            asciiFile = true;

            for (pit = postItem->parts.begin(); pit != postItem->parts.end(); ++pit)
            {
                tempFile = postItem->getFName() + '.' + QString::number(pit.key());
                thisPart.setFileName(tempFile);
                if (thisPart.open(QIODevice::ReadOnly | QIODevice::Text))
                {
                    if (thisPart.size() == 0)
                    {
                        thisPart.remove(); // closes as well
                    }
                    else
                    {
                        thisPart.close();
                        ba = tempFile.toLocal8Bit();
                        c_str = ba.data();
                        std::ifstream f1(c_str, std::fstream::binary);

                        destFile1 = postItem->getHeaderBase()->getSubj().replace(' ', '_');
                        destFile2 = (destDir + "/" + destFile1.remove(QRegExp("\\W")) + '.'
                                + QString::number(pit.key()) + ".txt");
                        qDebug() << "Checking existance of " << destFile2;

                        if (!m_overWrite && QFile::exists(destFile2))
                        {
                            qDebug() << "File exists!";
                            // rename the file: append the first free numeric suffix
                            int i = 1;
                            destFile1 = destFile2.remove(".txt");
                            while (QFile::exists(destFile1 + '.' + QString::number(i) + ".txt"))
                            {
                                i++;
                            }
                            destFile2 = destFile1 + '.' + QString::number(i) + ".txt";
                        }

                        qDebug() << "Fname: " << destFile2;
                        ba = destFile2.toLocal8Bit();
                        c_str2 = ba.data();
                        std::ofstream f2(c_str2, std::fstream::trunc | std::fstream::binary);
                        f2 << f1.rdbuf();
                    }
                }
            }
        }
    }

    end = QTime::currentTime();
    qDebug() << "Seconds used for decoding with knzb decoder: " << start.secsTo(end);

    if (m_cancel)
    {
        qDebug() << "Cancelled decoding";
        if (decoder) // guard BEFORE dereferencing (previously crashed when null)
        {
            qDebug() << "Deleting partial file: " << destDir + '/' + decoder->encodedFilename();
            // Remove partial file
            QFile::remove(destDir + '/' + decoder->encodedFilename());
        }
        Q_DELETE(decoder); // was leaked on this early return
        return false;
    }
    else if (errorFlag && !diskError)
    {
        emit logMessage(LogAlertList::Warning, errorString);
        if (decoder)
            emit decodeFinished(postItem, false, decoder->encodedFilename(), errorString);
        else
            emit decodeFinished(postItem, false, QString::null, errorString);
    }
    else if (diskError)
    {
        emit logMessage(LogAlertList::Warning, errorString);
        emit decodeDiskError();
    }
    else
    {
        if (asciiFile == false)
            emit decodeFinished(postItem, true, decoder->encodedFilename(), QString::null);
        else
        {
            QFileInfo fi(destFile2);
            emit decodeFinished(postItem, true, fi.fileName(), QString::null);
        }
    }

    Q_DELETE(decoder);

    if (diskError)
        return false;
    else
        return true;
}
// Bulk-group every header of newsgroup job->ng.
//
// Walks the header db with a bulk cursor (DB_MULTIPLE_KEY buffers), strips
// each subject with up to three user-configured regexes, and starts a new
// HeaderGroup whenever the contributor changes or the Levenshtein distance
// between consecutive stripped subjects exceeds the group's match distance.
// With "advanced grouping" enabled it also tries to place a would-be new
// group into a previously saved group of similar subject. Group records are
// written to the grouping db keyed by "subject\nfrom".
// Emits progress every 250 headers; returns false on cancel/cursor failure.
//
// NOTE(review): the mid-loop 'return false' paths taken on m_cancel leak the
// two HEADER_BULK_BUFFER_LENGTH buffers (ckey.data / cdata.data) and the
// in-progress headerGroup — confirm and free before returning.
// NOTE(review): the final headerGroup is Q_DELETEd at the end but never put()
// to the grouping db (saving only happens when the NEXT group starts), so the
// last group of the walk appears to be lost — confirm.
// NOTE(review): 'grouped' and 'single' are printed below but never
// incremented anywhere in this function.
// NOTE(review): the logEvent near the end concatenates ng->getTotal() (a
// number) to a QString without QString::number() — verify the emitted text.
bool BulkHeaderGroup::BulkHeaderGroupBody()
{
    // belt and braces
    // (these use the member Dbt/buffers; locals of the same names are
    // declared further down and shadow them from that point on)
    key.set_data(keymem);
    data.set_data(datamem);

    if (m_cancel)
    {
        emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
        return false;
    }

    emit updateJob(JobList::BulkHeaderGroup, JobList::Running, job->seq);

    NewsGroup* ng = job->ng;
    Db* db = ng->getDb();

    MultiPartHeader mph;
    SinglePartHeader sph;
    HeaderBase* hb = 0;

    HeaderGroup* headerGroup = 0;
    HeaderGroup* advancedHeaderGroup = 0;

    // typedef QMap<QString, QString> HeaderGroupIndexes; // subj, headerGroup index
    // typedef QMap<QString, HeaderGroup*> HeaderGroups; // headerGroup index, headerGroup *
    HeaderGroupIndexes headerGroupIndexes;
    HeaderGroups headerGroups;

    DBC *dbcp = 0;
    DBT ckey, cdata;
    memset(&ckey, 0, sizeof(ckey));
    memset(&cdata, 0, sizeof(cdata));

    size_t retklen, retdlen;
    void *retkey = 0, *retdata = 0;
    int ret, t_ret;
    void *p = 0;
    quint64 count=0;

    // User-memory buffers the bulk cursor fills with many records at once.
    cdata.data = (void *) new char[HEADER_BULK_BUFFER_LENGTH];
    cdata.ulen = HEADER_BULK_BUFFER_LENGTH;
    cdata.flags = DB_DBT_USERMEM;

    ckey.data = (void *) new char[HEADER_BULK_BUFFER_LENGTH];
    ckey.ulen = HEADER_BULK_BUFFER_LENGTH;
    ckey.flags = DB_DBT_USERMEM;

    /* Acquire a cursor for the database. */
    if ((ret = db->get_DB()->cursor(db->get_DB(), NULL, &dbcp, DB_CURSOR_BULK)) != 0)
    {
        db->err(ret, "DB->cursor");
        char* ptr = 0;
        ptr = (char*)(ckey.data);
        Q_DELETE_ARRAY(ptr);
        ptr = (char*)(cdata.data);
        Q_DELETE_ARRAY(ptr);
        return false;
    }

    // To save the group records
    ng->articlesNeedDeleting(false);

    // Store the data in the database - flush first ...
    u_int32_t delCount;
    uchar keymem[KEYMEM_SIZE];
    uchar datamem[DATAMEM_SIZE];
    Dbt key, data; // shadows the members used by the "belt and braces" lines above
    char* p2 = 0;
    QByteArray ba;
    const char *k = 0;

    key.set_flags(DB_DBT_USERMEM);
    key.set_data(&keymem);
    key.set_ulen(KEYMEM_SIZE);

    data.set_flags(DB_DBT_USERMEM);
    data.set_ulen(DATAMEM_SIZE);
    data.set_data(&datamem);

    QString subj = "MDQuban", from = "MDQuban";

    //QString rs1 = "^(.*)(\".*\")";
    //QString rs2 = "^(.*)\\s-\\s(.*)$";
    //QString rs3 = "^(\\S+.*)\\[.*\\].*(\".*\")";
    //QString rs3 = "^(.*)\\s-\\s.*\\s-\\s(.*)$";

    QRegExp rx[3];
    bool rxPosBack[3];
    bool noRegexpGrouping;

    QString recKey, storeIndex;
    QString prevSubj = "MDQuban", prevFrom = "MDQuban";
    int pos;
    bool newGroup = false;
    bool mphFound = false;
    quint32 grouped = 0, single = 0, numGroups = 0;
    qint16 stringDiff = -1;
    bool prevGroup = false;
    bool advancedPlacement = false;
    bool skipAdvanced = false;

    noRegexpGrouping = ng->isThereNoRegexOnGrouping();

    if (noRegexpGrouping == false) // need regex for grouping
    {
        rx[0].setPattern(ng->getGroupRE1());
        rx[1].setPattern(ng->getGroupRE2());
        rx[2].setPattern(ng->getGroupRE3());
        rxPosBack[0] = ng->getGroupRE1Back();
        rxPosBack[1] = ng->getGroupRE2Back();
        rxPosBack[2] = ng->getGroupRE3Back();
    }

    // Regrouping starts from an empty grouping db.
    ng->getGroupingDb()->truncate(0, &delCount, 0);
    qDebug() << "Deleted " << delCount << " records from group db";

    QMapIterator<QString, QString> it(headerGroupIndexes);
    QString advancedIndex;

    for (;;)
    {
        /*
         * Acquire the next set of key/data pairs. This code
         * does not handle single key/data pairs that won't fit
         * in a BUFFER_LENGTH size buffer, instead returning
         * DB_BUFFER_SMALL to our caller.
         */
        if ((ret = dbcp->get(dbcp, &ckey, &cdata, DB_MULTIPLE_KEY | DB_NEXT)) != 0)
        {
            if (ret != DB_NOTFOUND)
                db->err(ret, "DBcursor->get");
            break;
        }

        // Iterate the key/data pairs packed into the bulk buffer.
        for (DB_MULTIPLE_INIT(p, &cdata);;)
        {
            DB_MULTIPLE_KEY_NEXT(p, &cdata, retkey, retklen, retdata, retdlen);
            if (p == NULL)
                break;
            if (retdlen){;} // MD TODO compiler .... unused variable

            recKey = QString::fromLocal8Bit((char*)retkey, retklen);

            // First byte of the record identifies the header type.
            if (*((char *)retdata) == 'm')
            {
                MultiPartHeader::getMultiPartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &mph);
                hb = (HeaderBase*)&mph;
                mphFound = true;
            }
            else if (*((char *)retdata) == 's')
            {
                SinglePartHeader::getSinglePartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &sph);
                hb = (HeaderBase*)&sph;
                mphFound = false;
            }
            else
            {
                // What have we found ?????
                qDebug() << "Found unexpected identifier for header : " << (char)*((char *)retdata);
                continue;
            }

            ++count;

            prevSubj = subj;
            prevFrom = from;
            subj = hb->getSubj();
            from = hb->getFrom();

            if (noRegexpGrouping == false) // need regex for grouping
            {
                // Strip the subject with each configured regex, either from
                // the back (truncate at last match) or the front (keep cap 0).
                for (int i=0; i<3; ++i)
                {
                    if (rx[i].isEmpty() == false)
                    {
                        if (rxPosBack[i] == true) // from the back
                        {
                            pos = subj.lastIndexOf(rx[i]);
                            if (pos != -1)
                                subj.truncate(pos);
                        }
                        else // from the front
                        {
                            pos = rx[i].indexIn(subj);
                            if (pos > -1)
                                subj = rx[i].cap(0);
                        }
                    }
                }
            }

            //qDebug() << "Stripped down to: " << subj;

            stringDiff = -1;

            if (prevFrom != from) // change of contributor
            {
                newGroup = true;
            }
            else // same contributor
            {
                if ((stringDiff = levenshteinDistance(prevSubj, subj)) > ng->getMatchDistance()) // no match ...
                    newGroup = true;
                else
                    newGroup = false;
                //qDebug() << "Diff between " << prevSubj << " and " << subj << " is " << stringDiff;
            }

            if (newGroup)
            {
                if (ng->isThereAdvancedGrouping())
                {
                    it.toFront();

                    // decide if we can match to a previous group
                    while (it.hasNext())
                    {
                        it.next();
                        if ((stringDiff = levenshteinDistance(it.key(), subj)) <= ng->getMatchDistance()) // match ...
                        {
                            // The index for this group is in it.value()
                            // See if we have the HeaderGroup in our cache headerGroups)
                            if (headerGroups.contains(it.value()))
                            {
                                advancedHeaderGroup = headerGroups.value(it.value());
                            }
                            else // not in cache
                            {
                                advancedIndex = it.value();
                                advancedHeaderGroup = getGroup(ng, advancedIndex);
                                if (advancedHeaderGroup)
                                {
                                    headerGroups.insert(advancedIndex, advancedHeaderGroup);
                                }
                                else // db read failed ..
                                {
                                    skipAdvanced = true;
                                }
                            }

                            if (skipAdvanced == false)
                            {
                                if (mphFound)
                                    advancedHeaderGroup->addMphKey(recKey);
                                else
                                    advancedHeaderGroup->addSphKey(recKey);

                                advancedPlacement = true;
                                subj = prevSubj; // ignore this header as it's been placed out of sequence
                                from = prevFrom;
                                newGroup = false; // as we managed to relocate to an existing group
                                break; // stop looking at previous groups
                            }
                            else
                                skipAdvanced = false;
                        }
                    }
                }
            }

            if (newGroup)
            {
                if (prevGroup) // save before moving on
                {
                    ba = storeIndex.toLocal8Bit();
                    k= ba.constData();
                    memcpy(keymem, k, storeIndex.length());
                    key.set_size(storeIndex.length());

                    p2=headerGroup->data();
                    data.set_data(p2);
                    data.set_size(headerGroup->getRecordSize());

                    ret=ng->getGroupingDb()->put(NULL, &key, &data, 0);
                    if (ret!=0)
                        qDebug("Error updating record: %d", ret);

                    if (ng->isThereAdvancedGrouping())
                        headerGroupIndexes.insert(storeIndex.section('\n', 0, 0), storeIndex);

                    Q_DELETE_ARRAY(p2);
                    Q_DELETE(headerGroup);
                    numGroups++;
                }

                prevGroup = true;

                storeIndex = subj % "\n" % from;

                headerGroup = new HeaderGroup();
                headerGroup->setDisplayName(subj);
                headerGroup->setPostingDate(hb->getPostingDate());
                headerGroup->setDownloadDate(hb->getDownloadDate());
                headerGroup->setStatus(hb->getStatus());
                headerGroup->setNextDistance(stringDiff);
            }

            // if we've found somewhere else to place this header then don't add again
            if (!advancedPlacement)
            {
                if (mphFound)
                    headerGroup->addMphKey(recKey);
                else
                    headerGroup->addSphKey(recKey);
            }
            else
                advancedPlacement = false;

            if (count % 250 == 0)
            {
                QCoreApplication::processEvents();
                emit updateJob(JobList::BulkHeaderGroup, tr("Header bulk grouping for newsgroup ") + job->ng->getAlias() + ": " + QString::number(count) + " out of " + QString::number(ng->getTotal()) + tr(" grouped"), job->seq);
            }

            // NOTE(review): leaks ckey.data/cdata.data buffers and headerGroup.
            if (m_cancel)
            {
                emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
                return false;
            }
        }

        // NOTE(review): leaks ckey.data/cdata.data buffers and headerGroup.
        if (m_cancel)
        {
            emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
            return false;
        }
    }

    if ((t_ret = dbcp->close(dbcp)) != 0)
    {
        db->err(ret, "DBcursor->close");
        if (ret == 0)
            ret = t_ret;
    }

    char* ptr = ((char*)ckey.data);
    Q_DELETE_ARRAY(ptr);
    ptr = ((char*)cdata.data);
    Q_DELETE_ARRAY(ptr);

    if (headerGroups.count())
    {
        qDeleteAll(headerGroups);
        headerGroups.clear();
    }

    // NOTE(review): grouped/single are never incremented in this function.
    qDebug() << "Multi = " << grouped << ", single = " << single;

    ng->setHeadersNeedGrouping(false);

    // Finally update the newsgroup
    emit saveGroup(ng);

    emit updateJob(JobList::BulkHeaderGroup, tr("Header bulk grouping for newsgroup ") + job->ng->getAlias() + ": " + QString::number(count) + " out of " + QString::number(ng->getTotal()) + tr(" grouped"), job->seq);

    if (m_cancel)
    {
        emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
        return false;
    }

    // NOTE(review): numeric total concatenated to QString without QString::number().
    emit logEvent(tr("Bulk grouping of ") + ng->getTotal() + tr(" articles completed successfully."));

    emit updateJob(JobList::BulkHeaderGroup, JobList::Finished_Ok, job->seq);

    ng->setTotalGroups(numGroups);

    // NOTE(review): the final group is freed here without ever being saved.
    Q_DELETE(headerGroup);

    return true;
}
// Destructor: release the owned list widget item.
QListItem::~QListItem()
{
    Q_DELETE(listItem);
}
// Destructor: clear the item's selection in the view, then free it.
QGroupLimitsItem::~QGroupLimitsItem()
{
    listItem->setSelected(false);
    Q_DELETE(listItem);
}
// Destructor: clear the item's selection in the view, then free it.
QUpdItem::~QUpdItem()
{
    listItem->setSelected(false);
    Q_DELETE(listItem);
}
// Destructor: clear the item's selection in the view, then free it.
QExtensionsItem::~QExtensionsItem()
{
    listItem->setSelected(false);
    Q_DELETE(listItem);
}
// Destructor: the model owns the root of the item tree.
AvailGroupModel::~AvailGroupModel()
{
    Q_DELETE(rootItem);
}
// Flush up to 'size' queued RawHeaders (size == 0 means "all") from
// headerCache into the in-memory 'cache' of Single/MultiPartHeader objects,
// merging with any matching records already in the header db, then write the
// whole cache out to the header db under the headerDbLock mutex.
// Returns false if a db write fails, true otherwise.
//
// NOTE(review): 'partsAdded' is maintained throughout but never read —
// servLocalParts is bumped by 'size' instead; confirm which was intended.
// NOTE(review): the serialized record from mph->data()/sph->data() is
// released with Q_FREE — confirm those buffers are malloc'd, not new[]'d
// (elsewhere in this file data() results are freed with Q_DELETE_ARRAY).
// NOTE(review): the mid-loop 'return false' on a put() failure leaves the
// remaining cache entries (and their serialized buffers) unfreed; the mutex
// itself is safely released by the QMutexLocker destructor.
bool HeaderReadWorker::cacheFlush(uint size)
{
    Dbt key, data;
    int ret;
    RawHeader* h = 0;
    QString cIndex;
    NewsGroup* ng = job->ng;
    Db* pdb = ng->getPartsDb(); // get the parts Db
    MultiPartHeader* mph = 0;
    SinglePartHeader* sph = 0;
    HeaderBase* hb = 0;
    quint16 capPart = 0;
    quint16 capTotal = 0;

    if (size == 0)
        size = headerCache.count();

    qDebug() << "Flushing cache";
    qDebug() << "cacheIndex is " << headerCache.count() << " .Flushing " << size << " records";

    //Flush "size" elements, with a FIFO policy
    if ((int) size > headerCache.count())
    {
        qDebug() << "wrong flush size!";
        size = headerCache.count();
    }

    uint partsAdded = size;

    QMutexLocker locker(headerDbLock);

    quint64 mPHKey = ng->getMultiPartSequence();
    //qDebug() << "Multi Part Sequence in cache is " << mPHKey << " for host " << hostId;

    for (uint i = 0; i < size; i++)
    {
        h = headerCache.dequeue();
        if (h == 0) // didn't exist
        {
            //qDebug() << "Failed to find header in cache flush !! i = " << i << ", size = " << size;
            continue;
        }

        // Scan the subject with the member regex 'rx' for part-number
        // markers; after the loop, 'index' is the start of the LAST match
        // (or -1), capPart/capTotal hold its two captured numbers.
        // (pos and index are members of this worker.)
        pos = 0;
        capPart = 0;
        capTotal = 0;
        index = -1;

        //qDebug() << "Subject in flush is " << h->m_subj;
        while ((pos = rx.indexIn(h->m_subj, pos)) != -1)
        {
            index = pos;
            pos += rx.matchedLength();
            capPart = rx.cap(1).toInt();
            capTotal = rx.cap(2).toInt();
        }

        if (index == -1) // It looks like a single part header
        {
            // left(-1) yields the whole subject here (Qt semantics).
            cIndex = h->m_subj.left(index).simplified() % '\n' % h->m_mid; // This is the common part of the subject + separator + msgId

            sph = (SinglePartHeader*)(cache.value(cIndex));
            if (sph) // This is a duplicate (We've already seen this subj and msgId for this server ...
            {
                partsAdded--;
                //qDebug() << "Duplicate article in cache : " << sph->getSubj() << "for server " << hostId;
            }
            else
            {
                //Try to get the header from the db...
                sph = (SinglePartHeader*)dbBinHeaderGet(cIndex);
                if (sph)
                {
                    if (!sph->isServerPresent(hostId)) // Article not currently registered to this server
                    {
                        sph->setServerPartNum(hostId, h->m_num);
                        cache.insert(cIndex, sph);
                        //qDebug() << "Server " << hostId << ", db find";
                    }
                    else // This is a duplicate (We've already seen this subj and msgId for this server ...
                    {
                        partsAdded--;
                        //qDebug() << "Duplicate article in db : " << sph->getSubj() << "for server " << hostId;
                    }
                }
                else
                {
                    //qDebug() << "Server " << hostId << ", sph create";
                    //Create new header and put it in the cache...
                    sph = new SinglePartHeader;
                    groupArticles++;
                    //The new article is, of course, unread...
                    unreadArticles++;
                    sph->setSubj(h->m_subj.left(index).simplified());
                    sph->setFrom(h->m_from);
                    sph->setStatus(MultiPartHeader::bh_new);
                    sph->setLines(h->m_lines.toLong());
                    sph->setSize(h->m_bytes.toLongLong());
                    if (h->m_date.isNull())
                        qDebug() << "Date is null!!!";
                    else
                        sph->setPostingDate(createDateTime(h->m_date.split(" ")));
                    sph->setDownloadDate(QDateTime::currentDateTime().toTime_t());
                    sph->setMessageId(h->m_mid);
                    sph->setServerPartNum(hostId, h->m_num);
                    cache.insert(cIndex, sph);
                    ng->setHeadersNeedGrouping(true);
                }
            }
        }
        else
        {
            // Multi-part header: keyed by subject prefix + sender.
            cIndex = h->m_subj.left(index).simplified() % '\n' % h->m_from; // This is the common part of the subject + separator + sender

            mph = (MultiPartHeader*)(cache.value(cIndex));
            if (mph == 0)
            {
                //Try to get the header from the db...
                //mph = dynamic_cast<MultiPartHeader*>(dbBinHeaderGet(db, cIndex));
                mph = (MultiPartHeader*)(dbBinHeaderGet(cIndex));
                if (mph)
                {
                    //qDebug() << "Server " << hostId << ", db find";
                    //qDebug() << "Using mph with rec type " << mph->getHeaderType();
                    //qDebug() << "Using mph with key " << mph->multiPartKey;
                    //update the header in the cache...
                    switch (mph->addPart(pdb, capPart, h, hostId))
                    {
                        case MultiPartHeader::Duplicate_Part:
                        case MultiPartHeader::Error_Part:
                            partsAdded--;
                            break;
                        case MultiPartHeader::Unread_Status:
                            //Added a part that changed the status of the post...
                            unreadArticles++;
                            break;
                        case MultiPartHeader::New_Part:
                        case MultiPartHeader::Updated_Part:
                            break;
                    }
                    cache.insert(cIndex, mph);
                }
            }
            else
            {
                //qDebug() << "Server " << hostId << ", cache find";
                //qDebug() << "Using mph with key " << mph->multiPartKey;
                //update the header in the cache...
                switch (mph->addPart(pdb, capPart, h, hostId))
                {
                    case MultiPartHeader::Duplicate_Part:
                    case MultiPartHeader::Error_Part:
                        partsAdded--;
                        break;
                    case MultiPartHeader::Unread_Status:
                        //Added a part that changed the status of the post...
                        unreadArticles++;
                        break;
                    case MultiPartHeader::New_Part:
                    case MultiPartHeader::Updated_Part:
                        break;
                }
            }

            if (mph == 0) // neither in cache nor in db: brand new post
            {
                //qDebug() << "Server " << hostId << ", mph create";
                //Create new header and put it in the cache...
                mph = new MultiPartHeader;
                groupArticles++;
                //The new article is, of course, unread...
                unreadArticles++;
                mph->setSubj(h->m_subj.left(index).simplified());
                mph->setFrom(h->m_from);
                mph->setStatus(MultiPartHeader::bh_new);
                mph->setKey(++mPHKey);
                //qDebug() << "Created mph with key " << mPHKey;
                if (h->m_date.isNull())
                    qDebug() << "Date is null!!!";
                else
                    mph->setPostingDate(createDateTime(h->m_date.split(" ")));
                mph->setDownloadDate(QDateTime::currentDateTime().toTime_t());
                mph->setNumParts(capTotal); // Also sets missing parts to capTotal
                mph->addPart(pdb, capPart, h, hostId);
                cache.insert(cIndex, mph);
                ng->setHeadersNeedGrouping(true);
            }
        }

        Q_DELETE(h);
    }

    // Multiple threads are still controlled by mutex at this point
    ng->servLocalParts[hostId] += size;

    qDebug() << "Server " << hostId << ", total articles = " << ng->totalArticles << ", adding " << groupArticles;
    ng->totalArticles += groupArticles;
    ng->unreadArticles += unreadArticles;
    qDebug() << "Server " << hostId << ", total articles = " << ng->totalArticles;

    ng->setMultiPartSequence(mPHKey);

    groupArticles = 0;
    unreadArticles = 0;

    // sync the parts db
    pdb->sync(0);

    //Flush the cache that we've just built
    QHash<QString, HeaderBase*>::iterator it = cache.begin();
    QByteArray ba;
    const char *cIndexCharArr = 0;

    while (it != cache.end())
    {
        cIndex = it.key();
        hb = (HeaderBase*)(it.value());

        // Serialize the record; first byte of the type tag selects the cast.
        if (hb->getHeaderType() == 'm')
        {
            mph = (MultiPartHeader*)(it.value());
            data.set_data(mph->data());
            data.set_size(mph->getRecordSize());
            //qDebug() << "Just saved mph with key " << mph->multiPartKey;
        }
        else
        {
            sph = (SinglePartHeader*)(it.value());
            data.set_data(sph->data());
            data.set_size(sph->getRecordSize());
        }

        ba = cIndex.toLocal8Bit();
        cIndexCharArr = ba.constData();
        key.set_data((void*) cIndexCharArr);
        key.set_size(cIndex.length());

        ret = db->put(NULL, &key, &data, 0);
        if (ret != 0)
        {
            qDebug() << "Error flushing cache: " << ret;
            // errorString=g_dbenv->strerror(ret);
            errorString = "Failure whilst writing header record to database";
            return false;
        }

        void *ptr = data.get_data();
        Q_FREE(ptr);

        if (hb->getHeaderType() == 'm')
        {
            Q_DELETE(mph);
        }
        else
        {
            Q_DELETE(sph);
        }

        ++it;
    }

    locker.unlock(); // *************** this is a massive hold ***********************

    cache.clear();
    qDebug() << "Ok, cache flushed";
    cacheFlushed = true;
    return true;
}
// Destructor: a decorator owns at most one child node.
BehaviorTreeDecoratorNode::~BehaviorTreeDecoratorNode()
{
    if (child)
        Q_DELETE(child);
}
// Destructor: free the last observation if one is held.
MapperRLEnvironment::~MapperRLEnvironment()
{
    if (currentObservation)
        Q_DELETE(currentObservation);
}
// Destructor: delete each child node, then release the child array itself
// (which was obtained through the custom Alloc allocator).
BehaviorTreeInternalNode::~BehaviorTreeInternalNode()
{
    for (uint8_t childIdx = 0; childIdx < nChildren; childIdx++)
        Q_DELETE(children[childIdx]);
    Alloc::free(children);
}
// Modify or create a part based on the following information MultiPartHeader::Add_Code MultiPartHeader::modifyPart(Db* pdb, quint16 partNo, RawHeader *rh, quint16 hostId) { //qDebug() << "Entering modifyPart"; Add_Code retCode = New_Part; int ret = 0; bool createPart = false; Header* h = 0; char *phead = 0; Dbc *cursorp = 0; Dbt key; Dbt data; //qDebug() << "Creating part for key " << multiPartKey; key.set_data(&multiPartKey); key.set_size(sizeof(quint64)); data.set_data(&partNo); data.set_size(sizeof(quint16)); // Get a cursor pdb->cursor(NULL, &cursorp, DB_WRITECURSOR); //qDebug("1"); // Position the cursor to the first record in the database whose // key matches the search key and whose data begins with the search // data. ret = cursorp->get(&key, &data, DB_GET_BOTH); if (ret == 0) { //qDebug("2"); // update the data h = new Header((char*)data.get_data(), (char*)key.get_data()); //qDebug("3"); if (h->isHeldByServer(hostId)) { retCode = Duplicate_Part; } else retCode = Updated_Part; // qDebug() << "Found an update for part " << partNo << ", existing part " << h->getPartNo() << ", existing msgid " << h->getMessageId() << ", new msgid " << rh->m_mid; // Always update the article number h->setServerPartNum(hostId, rh->m_num); //qDebug("4"); phead=h->data(); data.set_data(phead); data.set_size(h->getRecordSize()); //qDebug("5"); cursorp->put(&key, &data, DB_CURRENT); //qDebug("6"); Q_DELETE_ARRAY(phead); } else if (ret == DB_NOTFOUND) // create the part { //qDebug("7"); createPart = true; } else // encountered an error { qDebug() << "Unable to find part " << partNo << " for key " << multiPartKey << ", result = " << ret; retCode = Error_Part; } // Close the cursor if (cursorp != NULL) cursorp->close(); if (createPart == true) { //qDebug("8"); quint32 partSize = rh->m_bytes.toULong(); h = new Header(multiPartKey, partNo, partSize); //qDebug("9"); h->setMessageId(rh->m_mid); h->setServerPartNum(hostId, rh->m_num); //save in the parts db phead=h->data(); 
data.set_data(phead); data.set_size(h->getRecordSize()); //qDebug("10"); ret = pdb->put(0, &key, &data, 0); if (ret) { qDebug() << "Unable to create part " << partNo << " for key " << multiPartKey << ", result = " << ret; retCode = Error_Part; } else retCode = New_Part; //qDebug("11"); Q_DELETE_ARRAY(phead); } Q_DELETE(h); //qDebug("12\n"); //qDebug() << "Exiting modifyPart"; return retCode; }