Example #1
/*
 * Thin function wrapper around the DB_MULTIPLE_KEY_NEXT macro, for callers
 * that cannot use the macro directly (e.g. a foreign-function interface).
 * The iteration pointer is taken by address so the position in the bulk
 * buffer is carried across calls; it is set to NULL when the buffer ends.
 */
void db_multiple_key_next(void **pointer, DBT *data,
			  unsigned char **key, u_int32_t *ret_key_size,
			  unsigned char **result, u_int32_t *result_size) {
  DB_MULTIPLE_KEY_NEXT(*pointer, data,
		       *key, *ret_key_size,
		       *result, *result_size);
}
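Example #1 is just the macro wrapped as a function; it does not show the surrounding bulk-read loop. A minimal caller sketch (not from any of the projects below; the function name and buffer size are assumptions) could drive it like this:

#include <string.h>
#include <db.h>

/* Hypothetical driver: issue bulk reads with DB_MULTIPLE_KEY and walk each
 * returned buffer with db_multiple_key_next() until the iteration pointer
 * comes back NULL. */
static void walk_bulk_buffer(DBC *dbcp)
{
	static unsigned char buf[1024 * 1024];	/* bulk buffer; size is arbitrary */
	DBT key, data;
	unsigned char *k, *d;
	u_int32_t klen, dlen;
	void *pointer;

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	data.data = buf;
	data.ulen = sizeof(buf);
	data.flags = DB_DBT_USERMEM;

	while (dbcp->get(dbcp, &key, &data, DB_MULTIPLE_KEY | DB_NEXT) == 0) {
		DB_MULTIPLE_INIT(pointer, &data);
		for (;;) {
			db_multiple_key_next(&pointer, &data,
					     &k, &klen, &d, &dlen);
			if (pointer == NULL)
				break;
			/* k/klen and d/dlen now describe one record in buf */
		}
	}
}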
Example #2
File: b_inmem.c  Project: AdeebNqo/poedit
static void
b_inmem_op_ds_bulk(u_int ops, u_int *totalp)
{
	DB_ENV *dbenv;
	DB *dbp;
	DBC *dbc;
	DBT key, data;
	u_int32_t len, klen;
	u_int i, total;
	char *keybuf, *databuf;
	void *pointer, *dp, *kp;
	DB_MPOOL_STAT  *gsp;

	DB_BENCH_ASSERT((keybuf = malloc(keysize)) != NULL);
	DB_BENCH_ASSERT((databuf = malloc(bulkbufsize)) != NULL);

	memset(&key, 0, sizeof(key));
	memset(&data, 0, sizeof(data));
	key.data = keybuf;
	key.size = keysize;

	data.data = databuf;
	data.size = datasize;
	memset(databuf, 'b', datasize);

	DB_BENCH_ASSERT(db_create(&dbp, NULL, 0) == 0);
	dbenv = dbp->dbenv;
	dbp->set_errfile(dbp, stderr);

	DB_BENCH_ASSERT(dbp->set_pagesize(dbp, pagesize) == 0);
	DB_BENCH_ASSERT(dbp->set_cachesize(dbp, 0, cachesize, 1) == 0);
	DB_BENCH_ASSERT(
	    dbp->open(dbp, NULL, NULL, NULL, DB_BTREE, DB_CREATE, 0666) == 0);

	for (i = 1; i <= numitems; ++i) {
		(void)snprintf(keybuf, keysize, "%7d", i);
		DB_BENCH_ASSERT(dbp->put(dbp, NULL, &key, &data, 0) == 0);
	}

#if 0
	fp = fopen("before", "w");
	dbp->set_msgfile(dbp, fp);
	DB_BENCH_ASSERT (dbp->stat_print(dbp, DB_STAT_ALL) == 0);
#endif

	DB_BENCH_ASSERT(dbp->cursor(dbp, NULL, &dbc, 0) == 0);

	data.ulen = bulkbufsize;
	data.flags = DB_DBT_USERMEM;

	(void)dbenv->memp_stat(dbenv, &gsp, NULL, DB_STAT_CLEAR);

	TIMER_START;
	for (total = 0; ops > 0; --ops) {
		DB_BENCH_ASSERT(dbc->c_get(
		    dbc, &key, &data, DB_FIRST | DB_MULTIPLE_KEY) == 0);
		DB_MULTIPLE_INIT(pointer, &data);
		while (pointer != NULL) {
			DB_MULTIPLE_KEY_NEXT(pointer, &data, kp, klen, dp, len);
			if (kp != NULL)
				++total;
		}
	}
	TIMER_STOP;
	*totalp = total;

	if (dbenv->memp_stat(dbenv, &gsp, NULL, 0) == 0)
	    DB_BENCH_ASSERT(gsp->st_cache_miss == 0);

#if 0
	fp = fopen("after", "w");
	dbp->set_msgfile(dbp, fp);
	DB_BENCH_ASSERT (dbp->stat_print(dbp, DB_STAT_ALL) == 0);
#endif

	DB_BENCH_ASSERT(dbp->close(dbp, 0) == 0);

	COMPQUIET(dp, NULL);
	COMPQUIET(klen, 0);
	COMPQUIET(len, 0);
}
Example #3
bool BulkHeaderGroup::BulkHeaderGroupBody()
{
    // belt and braces
    key.set_data(keymem);
    data.set_data(datamem);

    if (m_cancel)
    {
        emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
        return false;
    }

    emit updateJob(JobList::BulkHeaderGroup, JobList::Running, job->seq);

    NewsGroup* ng = job->ng;
    Db* db = ng->getDb();

    MultiPartHeader mph;
    SinglePartHeader sph;
    HeaderBase* hb = 0;
    HeaderGroup* headerGroup = 0;
    HeaderGroup* advancedHeaderGroup = 0;

    // typedef QMap<QString, QString> HeaderGroupIndexes; // subj, headerGroup index
    // typedef QMap<QString, HeaderGroup*> HeaderGroups; //  headerGroup index, headerGroup *

    HeaderGroupIndexes headerGroupIndexes;
    HeaderGroups       headerGroups;

    DBC *dbcp = 0;
    DBT ckey, cdata;

    memset(&ckey, 0, sizeof(ckey));
    memset(&cdata, 0, sizeof(cdata));

    size_t retklen, retdlen;
    void *retkey = 0, *retdata = 0;
    int ret, t_ret;
    void *p = 0;

    quint64 count=0;

    cdata.data = (void *) new char[HEADER_BULK_BUFFER_LENGTH];
    cdata.ulen = HEADER_BULK_BUFFER_LENGTH;
    cdata.flags = DB_DBT_USERMEM;

    ckey.data = (void *) new char[HEADER_BULK_BUFFER_LENGTH];
    ckey.ulen = HEADER_BULK_BUFFER_LENGTH;
    ckey.flags = DB_DBT_USERMEM;

    /* Acquire a cursor for the database. */
    if ((ret = db->get_DB()->cursor(db->get_DB(), NULL, &dbcp, DB_CURSOR_BULK)) != 0)
    {
        db->err(ret, "DB->cursor");
        char* ptr = 0;

        ptr = (char*)(ckey.data);
        Q_DELETE_ARRAY(ptr);
        ptr = (char*)(cdata.data);
        Q_DELETE_ARRAY(ptr);
        return false;
    }

    // To save the group records
    ng->articlesNeedDeleting(false);

    // Store the data in the database - flush first ...

    u_int32_t delCount;

    uchar keymem[KEYMEM_SIZE];
    uchar datamem[DATAMEM_SIZE];
    Dbt key, data;
    char* p2 = 0;
    QByteArray ba;
    const char *k = 0;

    key.set_flags(DB_DBT_USERMEM);
    key.set_data(&keymem);
    key.set_ulen(KEYMEM_SIZE);

    data.set_flags(DB_DBT_USERMEM);
    data.set_ulen(DATAMEM_SIZE);
    data.set_data(&datamem);

    QString subj =  "MDQuban", from = "MDQuban";

    //QString rs1 = "^(.*)(\".*\")";
    //QString rs2 = "^(.*)\\s-\\s(.*)$";
    //QString rs3 = "^(\\S+.*)\\[.*\\].*(\".*\")";

    //QString rs3 = "^(.*)\\s-\\s.*\\s-\\s(.*)$";
    QRegExp rx[3];
    bool    rxPosBack[3];
    bool    noRegexpGrouping;

    QString recKey, storeIndex;
    QString prevSubj = "MDQuban", prevFrom = "MDQuban";

    int pos;
    bool newGroup = false;

    bool mphFound = false;

    quint32 grouped = 0,
            single = 0,
            numGroups = 0;

    qint16 stringDiff = -1;

    bool prevGroup = false;
    bool advancedPlacement = false;
    bool skipAdvanced = false;

    noRegexpGrouping = ng->isThereNoRegexOnGrouping();

    if (noRegexpGrouping == false) // need regex for grouping
    {
        rx[0].setPattern(ng->getGroupRE1());
        rx[1].setPattern(ng->getGroupRE2());
        rx[2].setPattern(ng->getGroupRE3());

        rxPosBack[0] = ng->getGroupRE1Back();
        rxPosBack[1] = ng->getGroupRE2Back();
        rxPosBack[2] = ng->getGroupRE3Back();
    }

    ng->getGroupingDb()->truncate(0, &delCount, 0);
    qDebug() << "Deleted " << delCount << " records from group db";

    QMapIterator<QString, QString> it(headerGroupIndexes);
    QString advancedIndex;

    for (;;)
    {
        /*
         * Acquire the next set of key/data pairs.  This code
         * does not handle single key/data pairs that won't fit
         * in a BUFFER_LENGTH size buffer, instead returning
         * DB_BUFFER_SMALL to our caller.
         */
        if ((ret = dbcp->get(dbcp, &ckey, &cdata, DB_MULTIPLE_KEY | DB_NEXT)) != 0)
        {
            if (ret != DB_NOTFOUND)
                db->err(ret, "DBcursor->get");
            break;
        }

        for (DB_MULTIPLE_INIT(p, &cdata);;)
        {
            DB_MULTIPLE_KEY_NEXT(p, &cdata, retkey, retklen, retdata, retdlen);
            if (p == NULL)
                break;

            if (retdlen){;} // MD TODO compiler .... unused variable

            recKey = QString::fromLocal8Bit((char*)retkey, retklen);

            if (*((char *)retdata) == 'm')
            {
                MultiPartHeader::getMultiPartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &mph);
                hb = (HeaderBase*)&mph;
                mphFound = true;
            }
            else if (*((char *)retdata) == 's')
            {
                SinglePartHeader::getSinglePartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &sph);
                hb = (HeaderBase*)&sph;
                mphFound = false;
            }
            else
            {
                // What have we found ?????
                qDebug() << "Found unexpected identifier for header : " << (char)*((char *)retdata);
                continue;
            }

            ++count;

            prevSubj = subj;

            prevFrom = from;

            subj = hb->getSubj();
            from = hb->getFrom();

            if (noRegexpGrouping == false) // need regex for grouping
            {
                for (int i=0; i<3; ++i)
                {
                    if (rx[i].isEmpty() == false)
                    {
                        if (rxPosBack[i] == true) // from the back
                        {
                            pos = subj.lastIndexOf(rx[i]);
                            if (pos != -1)
                                subj.truncate(pos);
                        }
                        else // from the front
                        {
                            pos = rx[i].indexIn(subj);
                            if (pos > -1)
                                subj = rx[i].cap(0);
                        }
                    }
                }
            }

            //qDebug() << "Stripped down to: " << subj;

            stringDiff = -1;

            if (prevFrom != from) // change of contributor
            {
                newGroup = true;
            }
            else // same contributor
            {
               if ((stringDiff = levenshteinDistance(prevSubj, subj)) > ng->getMatchDistance()) // no match ...
                   newGroup = true;
               else
                   newGroup = false;

               //qDebug() << "Diff between " << prevSubj << " and " << subj << " is " << stringDiff;
            }

            if (newGroup)
            {
                if (ng->isThereAdvancedGrouping())
                {
                    it.toFront();

                    // decide if we can match to a previous group
                    while (it.hasNext())
                    {
                        it.next();
                        if ((stringDiff = levenshteinDistance(it.key(), subj)) <= ng->getMatchDistance()) // match ...
                        {
                            // The index for this group is in it.value()
                            // See if we have the HeaderGroup in our cache headerGroups)

                            if (headerGroups.contains(it.value()))
                            {
                                advancedHeaderGroup = headerGroups.value(it.value());
                            }
                            else // not in cache
                            {
                                advancedIndex = it.value();
                                advancedHeaderGroup = getGroup(ng, advancedIndex);
                                if (advancedHeaderGroup)
                                {
                                    headerGroups.insert(advancedIndex, advancedHeaderGroup);
                                }
                                else // db read failed ..
                                {
                                    skipAdvanced = true;
                                }
                            }

                            if (skipAdvanced == false)
                            {
                                if (mphFound)
                                    advancedHeaderGroup->addMphKey(recKey);
                                else
                                    advancedHeaderGroup->addSphKey(recKey);

                                advancedPlacement = true;
                                subj = prevSubj; // ignore this header as it's been placed out of sequence
                                from = prevFrom;
                                newGroup = false; // as we managed to relocate to an existing group

                                break; // stop looking at previous groups
                            }
                            else
                                skipAdvanced = false;
                        }
                    }
                }
            }

            if (newGroup)
            {
                if (prevGroup) // save before moving on
                {
                    ba = storeIndex.toLocal8Bit();
                    k= ba.constData();
                    memcpy(keymem, k, storeIndex.length());
                    key.set_size(storeIndex.length());

                    p2=headerGroup->data();
                    data.set_data(p2);
                    data.set_size(headerGroup->getRecordSize());
                    ret=ng->getGroupingDb()->put(NULL, &key, &data, 0);
                    if (ret!=0)
                        qDebug("Error updating record: %d", ret);

                    if (ng->isThereAdvancedGrouping())
                        headerGroupIndexes.insert(storeIndex.section('\n', 0, 0), storeIndex);

                    Q_DELETE_ARRAY(p2);
                    Q_DELETE(headerGroup);
                    numGroups++;
                }

                prevGroup = true;

                storeIndex = subj % "\n" % from;

                headerGroup = new HeaderGroup();

                headerGroup->setDisplayName(subj);
                headerGroup->setPostingDate(hb->getPostingDate());
                headerGroup->setDownloadDate(hb->getDownloadDate());
                headerGroup->setStatus(hb->getStatus());
                headerGroup->setNextDistance(stringDiff);
            }

            // if we've found somewhere else to place this header then don't add again
            if (!advancedPlacement)
            {
                if (mphFound)
                    headerGroup->addMphKey(recKey);
                else
                    headerGroup->addSphKey(recKey);
            }
            else
                advancedPlacement = false;

            if (count % 250 == 0)
            {
                QCoreApplication::processEvents();

                emit updateJob(JobList::BulkHeaderGroup, tr("Header bulk grouping for newsgroup ") + job->ng->getAlias() + ": " +
                    QString::number(count) + " out of " + QString::number(ng->getTotal()) + tr(" grouped"), job->seq);
            }

            if (m_cancel)
            {
                emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
                return false;
            }
        }

        if (m_cancel)
        {
            emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
            return false;
        }
    }

    if ((t_ret = dbcp->close(dbcp)) != 0)
    {
        db->err(t_ret, "DBcursor->close");
        if (ret == 0)
            ret = t_ret;
    }

    char* ptr = ((char*)ckey.data);
    Q_DELETE_ARRAY(ptr);
    ptr = ((char*)cdata.data);
    Q_DELETE_ARRAY(ptr);
    if (headerGroups.count())
    {
        qDeleteAll(headerGroups);
        headerGroups.clear();
    }

    qDebug() << "Multi = " << grouped << ", single = " << single;

    ng->setHeadersNeedGrouping(false);
    // Finally update the newsgroup
    emit saveGroup(ng);

    emit updateJob(JobList::BulkHeaderGroup, tr("Header bulk grouping for newsgroup ") + job->ng->getAlias() + ": " +
                  QString::number(count) + " out of " + QString::number(ng->getTotal()) + tr(" grouped"), job->seq);

    if (m_cancel)
    {
        emit updateJob(JobList::BulkHeaderGroup, JobList::Cancelled, job->seq);
        return false;
    }

    emit logEvent(tr("Bulk grouping of ") + QString::number(ng->getTotal()) + tr(" articles completed successfully."));

    emit updateJob(JobList::BulkHeaderGroup, JobList::Finished_Ok, job->seq);

    ng->setTotalGroups(numGroups);

    Q_DELETE(headerGroup);

    return true;
}
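The comment at the top of the bulk-read loop above notes that a single key/data pair larger than HEADER_BULK_BUFFER_LENGTH is not handled and would surface DB_BUFFER_SMALL. A minimal, hypothetical sketch of one way a caller could react, growing a DB_DBT_USERMEM buffer (the helper name is an assumption, and it presumes the buffer came from malloc rather than new[]):

#include <errno.h>
#include <stdlib.h>
#include <db.h>

/* Hypothetical helper: retry a bulk get, enlarging the DB_DBT_USERMEM
 * buffer whenever the cursor reports DB_BUFFER_SMALL.  On that error
 * Berkeley DB sets data->size to the number of bytes needed for the
 * record that did not fit. */
static int bulk_get_grow(DBC *dbcp, DBT *key, DBT *data)
{
	void *bigger;
	int ret;

	while ((ret = dbcp->get(dbcp, key, data,
	    DB_MULTIPLE_KEY | DB_NEXT)) == DB_BUFFER_SMALL) {
		if ((bigger = realloc(data->data, data->size)) == NULL)
			return ENOMEM;
		data->data = bigger;
		data->ulen = data->size;
	}
	return ret;
}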
Example #4
File: backup.c  Project: galaxyeye/bdb
/*
 * Use Bulk Get/Put to copy the given number of pages worth of
 * records from the source database to the destination database,
 * this function should be called until all tables are copied, at
 * which point it will return SQLITE_DONE.  Both Btrees need to
 * have transactions before calling this function.
 * p->pSrc - Source Btree
 * p->tables - Contains a list of iTables to copy, gotten using
 *          btreeGetTables().
 * p->currentTable - Index in tables of the current table being copied.
 * p->srcCur -  Cursor on the current source table being copied.
 * p->pDest - Destination Btree.
 * p->destCur - BtCursor on the destination table being copied into.
 * pages - Number of pages worth of data to copy.
 */
static int btreeCopyPages(sqlite3_backup *p, int *pages)
{
	DB *dbp;
	DBT dataOut, dataIn;
	char bufOut[MULTI_BUFSIZE], bufIn[MULTI_BUFSIZE];
	int ret, rc, copied, srcIsDupIndex;
	void *in, *out, *app;

	ret = 0;
	rc = SQLITE_OK;
	dbp = NULL;
	copied = 0;
	memset(&dataOut, 0, sizeof(dataOut));
	memset(&dataIn, 0, sizeof(dataIn));
	dataOut.flags = DB_DBT_USERMEM;
	dataIn.flags = DB_DBT_USERMEM;
	dataOut.data = bufOut;
	dataOut.ulen = sizeof(bufOut);
	dataIn.data = bufIn;
	dataIn.ulen = sizeof(bufIn);

	while (*pages < 0 || *pages > copied) {
		/* No tables left to copy */
		if (p->tables[p->currentTable] == -1) {
			u32 val;
			/*
			 * Update the schema file format and largest rootpage
			 * in the meta data.  Other meta data values should
			 * not be changed.
			 */
			sqlite3BtreeGetMeta(p->pSrc, 1, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 1, val);
			if (rc != SQLITE_OK)
				goto err;
			sqlite3BtreeGetMeta(p->pSrc, 3, &val);
			if (p->pSrc->db->errCode == SQLITE_BUSY) {
				rc = SQLITE_BUSY;
				goto err;
			}
			rc = sqlite3BtreeUpdateMeta(p->pDest, 3, val);
			if (rc != SQLITE_OK)
				goto err;
			ret = SQLITE_DONE;
			goto err;
		}
		/* If not currently copying a table, get the next table. */
		if (!p->srcCur) {
			rc = btreeGetUserTable(p->pSrc, p->srcTxn, &dbp,
			    p->tables[p->currentTable]);
			if (rc != SQLITE_OK)
				goto err;
			assert(dbp);
			memset(&p->destCur, 0, sizeof(p->destCur));
			/*
			 * Open a cursor on the destination table, this will
			 * create the table and allow the Btree to manage the
			 * DB object.
			 */
			sqlite3BtreeCursor(p->pDest, p->tables[p->currentTable],
			    1, dbp->app_private, &p->destCur);
			if ((rc = p->destCur.error) != SQLITE_OK) {
				app = dbp->app_private;
				dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
				goto err;
			}
			/* Open a cursor on the source table. */
			if ((ret = dbp->cursor(dbp,
			    p->srcTxn, &p->srcCur, 0)) != 0)
				goto err;
			dbp = 0;
		}
		srcIsDupIndex = isDupIndex((p->tables[p->currentTable] & 1) ?
		    BTREE_INTKEY : 0, p->pSrc->pBt->dbStorage,
		    p->srcCur->dbp->app_private, p->srcCur->dbp);
		/*
		 * Copy the current table until the given number of
		 * pages is copied, or the entire table has been copied.
		 */
		while (*pages < 0 || *pages > copied) {
			DBT key, data;
			memset(&key, 0, sizeof(key));
			memset(&data, 0, sizeof(data));
			/* Do a Bulk Get from the source table. */
			ret = p->srcCur->get(p->srcCur, &key, &dataOut,
			    DB_NEXT | DB_MULTIPLE_KEY);
			if (ret == DB_NOTFOUND)
				break;
			if (ret != 0)
				goto err;
			/* Copy the records into the Bulk buffer. */
			DB_MULTIPLE_INIT(out, &dataOut);
			DB_MULTIPLE_WRITE_INIT(in, &dataIn);
			DB_MULTIPLE_KEY_NEXT(out, &dataOut, key.data,
			    key.size, data.data, data.size);
			while (out) {
				/*
				 * Have to translate the index formats if they
				 * are not the same.
				 */
				if (p->destCur.isDupIndex != srcIsDupIndex) {
					if (srcIsDupIndex) {
						p->destCur.key = key;
						p->destCur.data = data;
						if (!btreeCreateIndexKey(
						    &p->destCur)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						DB_MULTIPLE_KEY_WRITE_NEXT(in,
						    &dataIn,
						    p->destCur.index.data,
						    p->destCur.index.size,
						    p->destCur.data.data, 0);
					} else {
						/* Copy the key into the cursor
						 * index since splitting the key
						 * requires changing the
						 * internal memory.
						 */
						if (!allocateCursorIndex(
						    &p->destCur, key.size)) {
							rc = SQLITE_NOMEM;
							goto err;
						}
						memcpy(p->destCur.index.data,
						    key.data, key.size);
						p->destCur.index.size =
						    key.size;
						p->destCur.key.data =
						    p->destCur.index.data;
						p->destCur.key.size =
						    p->destCur.index.size;
						splitIndexKey(&p->destCur);
						DB_MULTIPLE_KEY_WRITE_NEXT(
						    in, &dataIn,
						    p->destCur.key.data,
						    p->destCur.key.size,
						    p->destCur.data.data,
						    p->destCur.data.size);
					}
				} else
					DB_MULTIPLE_KEY_WRITE_NEXT(in, &dataIn,
					    key.data, key.size,
					    data.data, data.size);
				DB_MULTIPLE_KEY_NEXT(out, &dataOut,
				    key.data, key.size,
				    data.data, data.size);
			}
			/* Insert into the destination table. */
			dbp = p->destCur.cached_db->dbp;
			if ((ret = dbp->put(dbp, p->pDest->savepoint_txn,
			    &dataIn, 0, DB_MULTIPLE_KEY)) != 0)
				goto err;
			dbp = NULL;
			copied += MULTI_BUFSIZE/SQLITE_DEFAULT_PAGE_SIZE;
		}
		/*
		 * Done copying the current table, time to look for a new
		 * table to copy.
		 */
		if (ret == DB_NOTFOUND) {
			ret = 0;
			rc = sqlite3BtreeCloseCursor(&p->destCur);
			if (p->srcCur) {
				app = p->srcCur->dbp->app_private;
				dbp = p->srcCur->dbp;
				p->srcCur->close(p->srcCur);
				ret = dbp->close(dbp, DB_NOSYNC);
				if (app)
					sqlite3DbFree(p->pSrcDb, app);
			}
			p->srcCur = NULL;
			if (ret != 0 || rc != SQLITE_OK)
				goto err;
			p->currentTable += 1;
		}
	}
	goto done;
err:	if (ret == SQLITE_DONE)
		return ret;
done:	return MAP_ERR(rc, ret);
}
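The header comment states that btreeCopyPages() is meant to be called repeatedly, copying a bounded number of pages per call, until it reports SQLITE_DONE. A hypothetical driver loop in the same file context (the wrapper name and page budget are assumptions) might look like:

/* Hypothetical driver: call btreeCopyPages() with a fixed page budget
 * until it reports SQLITE_DONE.  Transactions on both Btrees are assumed
 * to have been opened by the caller, as the header comment requires. */
static int copyAllTables(sqlite3_backup *p)
{
	int rc;

	do {
		int pages = 64;		/* arbitrary per-call page budget */
		rc = btreeCopyPages(p, &pages);
	} while (rc == SQLITE_OK);

	return rc == SQLITE_DONE ? SQLITE_OK : rc;
}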
Example #5
int* ROSWOSSegment::lookupByIndex( char* key, int size, Dbc *iter,
				   u_int32_t flags, void **_ptr, void **buf )
{
   	size_t retklen, retdlen;
	Dbt skey;
	void *retkey, *retdata;	
	bool second_pass = false;
	int answer;

	memset(&skey, 0, sizeof(skey)); 
	memset(&_bulk_data, 0, sizeof(_bulk_data)); 
	//memset( (*buf), 0, sizeof( (*buf) ) );

	//_bulk_data.set_data( _bulk_buffer );
	_bulk_data.set_data( (*buf) );
	_bulk_data.set_ulen( BUFFER_LENGTH );
	_bulk_data.set_flags( DB_DBT_USERMEM );

	skey.set_data( key );
	skey.set_size( size );

	//if  ( _debug ) return NULL;

	do
	  {

	// if bulk buffer is done, get a new one.
	    //if ( _p_bulk == NULL )
	if ( ((*_ptr)) == NULL )
	  {
	    //cout << " Request from iter " << iter << "  key " << *(int *)key << " FLAGS? " << flags <<  endl;
	    memset( (*buf), 0, sizeof( (*buf) ) );

	    answer = iter->/*_sort_key_cursor->*/get( &skey, &_bulk_data, 
				   flags | DB_MULTIPLE_KEY );

	    if ( answer == DB_NOTFOUND )
	      return NULL;
	    //DB_MULTIPLE_INIT(_p_bulk, _bulk_data.get_DBT());
	    DB_MULTIPLE_INIT((*_ptr), _bulk_data.get_DBT());
	    //cout << " DB_NOTFOUND " << DB_NOTFOUND << " ans " <<  answer << " -- Getting answers, page size " << BUFFER_LENGTH << " ITER " << iter << " PTR " << (*_ptr) <<endl;
	  }
	//else
	//cout << " ROSWOSSegment, *_ptr is not NULL, go go go" << endl;
	/*if ( answer == DB_NOTFOUND )
	  {
	    //_debug = true;
	    // THAT really does NOT close the iterator. WHY, I have not
	    // a faintest idea!
	    //cout << " ITERATOR " << iter << " IS CLOSED " << endl;
	    //iter->close();
	    return NULL;
	    }*/

	//if ( !_p_bulk )
	if ( !((*_ptr)) )
	  return NULL;

	// I don't understand that.
	//if ( !_p_bulk && second_pass )
	//cout << " PTR " << (*_ptr) << " SECOND PASS " << second_pass << endl;
	if ( !((*_ptr)) && second_pass )
	  return NULL;
	
	DB_MULTIPLE_KEY_NEXT((*_ptr), _bulk_data.get_DBT(), retkey, retklen, retdata, retdlen);
	//DB_MULTIPLE_KEY_NEXT(_p_bulk, _bulk_data.get_DBT(), retkey, retklen, retdata, retdlen);
	// cout << " ANd now, ptr is " << *_ptr << " int ptr " << ((int*)retkey) << endl;

	second_pass = true;

	  }
	//while( !_p_bulk );  // if bulk is null, get the buffer.
	while( !(*_ptr) );  // if bulk is null, get the buffer.
	
	/*if ( (*((int*)retkey)) > 10500 )
	{
	cout << DB_NOTFOUND << " <=> " << answer << " Ret answer " << *((int *)retkey) << " retDATA " << *((int *)retdata) << endl;
	cout << "     Received buffer " << buf << " points to buf " << (*buf) << " and ptr " << *_ptr << endl;
	*/
	/*if ( *((int *)retdata) == 596992  &&
	     10511 == *((int *)retkey) )
	  *(int *)retdata = 597221;

	if ( *((int *)retdata) == 598272  &&
	     10524 == *((int *)retkey) )
	     *(int *)retdata = 598362;*/

	//cout << " Finally returning " << ( ((int *)retdata) ) << endl << endl;
	return( ((int *)retdata) );
  /*
  //  THIS IS AN ITERATION SEQUENCE. DO NOT ERASE.
	while ( _sort_key_cursor->get( &skey, &data, DB_NEXT | DB_MULTIPLE_KEY ) == 0 )
	  {
	    
	    for (DB_MULTIPLE_INIT(p, data.get_DBT());;) 
	      {
		
		DB_MULTIPLE_KEY_NEXT(p, data.get_DBT(), retkey, retklen, retdata, retdlen);
		if (p == NULL)
		  break;
		//cout << " Ret klen " << *((int *)retkey) << " retdlen " << *((int *)retdata) << endl;
		//printf("key: %.*s, data: %.*s\n",
		//(int)retklen, (char*)retkey, (int)retdlen, (char*)retdata);
		ctr2++;
	      }

	    cout << " Got SORT key " << *((int *)skey.get_data()) << endl;
	    cout << " Curr count " << ctr2 << endl;
	  }
	*/

  /*
	Dbt dkey, data; // Dbt is a key / data pair
	int ret; //,a, b;
	char *result = ( char* )malloc( sizeof(int) );

	
	memset(&dkey, 0, sizeof(dkey)); 
	memset(&data, 0, sizeof(data)); 

	data.set_data( &result );
	data.set_size( sizeof(int) );
	// BDB documentation neglects to mention that once you use your own
	// buffer you MUST set this parameter or things will break horribly.
	// thanks to a helpful post on a google newsgroup...
	data.set_ulen( sizeof(int) );

	dkey.set_data( key );
	dkey.set_size( size );

	try
	{
	  int before = StopWatch::ticks();
	  ret = iter->get( &dkey, &data, flags );
		int after = StopWatch::ticks();
		db_access+=(after-before)*StopWatch::secondsPerTick();
		//cout << _segNameDebug <<  " Got " << (after-before)*StopWatch::secondsPerTick() << " for " << db_access << endl;
	}
	catch(DbException& e) 
	{
		if (DEBUG) cout << "Exception " << e.what() << endl;
		// Throw exception here
		exit( 1 );
	}

	if ( ret < 0 )
	{
	  //cout << ret << "Nothing found for key ... " << key << endl;
	  return NULL;
	}
	

	//int a,b;
	//memcpy( &a, key, 4 );
	//memcpy( &b, data.get_data(), 4 );
	
	
	//cout << "Receiving  key " <<  b << " for given key " << a << endl;
	
	//if ( fake_key > 599874 )
	//return NULL;

	  //memcpy( result, &fake_key, sizeof(int) );
	//fake_key++;
	
	memcpy( result, data.get_data(), sizeof(int) );

	return ( int* )result;
  */
}
Example #6
File: bulkLoad.cpp  Project: quban2/quban
bool BulkLoad::bulkLoadBody()
{
	if (m_cancel)
	{
		emit updateJob(JobList::BulkLoad, JobList::Cancelled, job->seq);
		return false;
	}

	emit updateJob(JobList::BulkLoad, JobList::Running, job->seq);

	NewsGroup*  ng = job->ng;
	HeaderList* hl = job->headerList;

	mphDeletionsList = 0;
	sphDeletionsList = 0;

    DBC *dbcp = 0;

    MultiPartHeader mph;
    SinglePartHeader sph;
    HeaderBase* hb = 0;

	memset(&ckey, 0, sizeof(ckey));
	memset(&cdata, 0, sizeof(cdata));

    cdata.data = (void *) dataBuffer;
    cdata.ulen = HEADER_BULK_BUFFER_LENGTH;
    cdata.flags = DB_DBT_USERMEM;

    ckey.data = (void *) keyBuffer;
    ckey.ulen = HEADER_BULK_BUFFER_LENGTH;
    ckey.flags = DB_DBT_USERMEM;

    size_t retklen, retdlen;
    void *retkey = 0, *retdata = 0;
    int ret, t_ret;
    void *p = 0;

    quint64 count=0;

	QTime start = QTime::currentTime();
    qDebug() << "Loading started: " << start.toString();

    /* Acquire a cursor for the database. */
    if ((ret = ng->getDb()->get_DB()->cursor(ng->getDb()->get_DB(), NULL, &dbcp, DB_CURSOR_BULK)) != 0)
    {
    	ng->getDb()->err(ret, "DB->cursor");
        return false;
    }

    quint32 numIgnored = 0;
    bool    mphFound = false;

	for (;;)
	{
		/*
		 * Acquire the next set of key/data pairs.  This code
		 * does not handle single key/data pairs that won't fit
		 * in a BUFFER_LENGTH size buffer, instead returning
		 * DB_BUFFER_SMALL to our caller.
		 */
		if ((ret = dbcp->get(dbcp, &ckey, &cdata, DB_MULTIPLE_KEY | DB_NEXT)) != 0)
		{
			if (ret != DB_NOTFOUND)
				ng->getDb()->err(ret, "DBcursor->get");
			break;
		}

		for (DB_MULTIPLE_INIT(p, &cdata);;)
		{
			DB_MULTIPLE_KEY_NEXT(p, &cdata, retkey, retklen, retdata, retdlen);
			if (p == NULL)
				break;

            if (retdlen){;} // MD TODO compiler .... unused variable

			if (*((char *)retdata) == 'm')
			{                
                MultiPartHeader::getMultiPartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &mph);
                hb = (HeaderBase*)&mph;

                mphFound = true;
			}
            else if (*((char *)retdata) == 's')
            {
                SinglePartHeader::getSinglePartHeader((unsigned int)retklen, (char *)retkey, (char *)retdata, &sph);
                hb = (HeaderBase*)&sph;

                mphFound = false;
                // qDebug() << "Single index = " << sph.getIndex();
            }
			else
			{
				// What have we found ?????
				qDebug() << "Found unexpected identifier for header : " << (char)*((char *)retdata);
				continue;
			}

            if (hb->getStatus() & HeaderBase::MarkedForDeletion)
			{
				// ignore this header, garbage collection will remove it ...
				++numIgnored;
				++count; // ... but still count it as bulk delete will reduce the count

				if (numIgnored == 1)
				{
					// These two will be freed by bulk delete
					mphDeletionsList = new QList<QString>;
					sphDeletionsList = new QList<QString>;
				}

				if (mphFound)
					mphDeletionsList->append(hb->getIndex());
				else
					sphDeletionsList->append(hb->getIndex());

				continue;
			}

            if (m_cancel)
                break;

            hl->headerTreeModel->setupTopLevelItem(hb);

			++count;

            if (count % 50 == 0)
            {
                QString labelText = tr("Loaded ") + QString("%L1").arg(count) +
                        " of " + QString("%L1").arg(ng->getTotal()) +
                        " articles.";
                emit progress(job->seq, count, labelText);
                QCoreApplication::processEvents();
            }
		}

		if (m_cancel)
			break;
	}

	if ((t_ret = dbcp->close(dbcp)) != 0)
	{
		ng->getDb()->err(t_ret, "DBcursor->close");
		if (ret == 0)
			ret = t_ret;
	}

    emit loadReady(job->seq);

    QString labelText = tr("Loaded ") + QString("%L1").arg(count) +
            " of " + QString("%L1").arg(ng->getTotal()) +
            " articles.";
    emit progress(job->seq, count, labelText);
    QCoreApplication::processEvents();

	if (!m_cancel)
	{
		quint64 totalArticles = ng->getTotal();
        if (totalArticles != count || ng->unread() > count)
		{
			qint64 difference = count - totalArticles; // may be negative
			quint64 unread = (quint64)qMax((qint64)(ng->unread() + difference), (qint64)0);

            if (unread > count)
                unread = count;

			ng->setTotal(count);
			ng->setUnread(unread);

			emit updateArticleCounts(ng);
		}
	}

	qDebug() << "Loading finished. Seconds: " << start.secsTo(QTime::currentTime());
	qDebug() << "Ignored " << numIgnored << " articles";
    qDebug() << "Loaded " << count << " articles";

	if (mphDeletionsList) // need bulk deletion
        emit addBulkJob(JobList::BulkDelete, ng, hl, mphDeletionsList, sphDeletionsList);

	if (m_cancel)
	{
        emit updateJob(JobList::BulkLoad, JobList::Cancelled, job->seq);
		return false;
	}

	emit updateJob(JobList::BulkLoad, JobList::Finished_Ok, job->seq);
	return true;
}
Example #7
static void bulk_get_hash (u_int32_t offset, u_int32_t count, bdb_drv_t *pdrv) {

    int ret;
    size_t retklen, retdlen;
    void *retkey, *retdata;
    void* p;

    DB*  pdb;
    DBC* pdbc = NULL;
    DBT  key, data;
   
    db_recno_t curr         = 1;
    db_recno_t limit        = 1;
    db_recno_t actual_count = 0;

    ErlDrvTermData *spec;
 
    u_int32_t spec_items, idx;

    curr  = offset;
    limit = offset + count;

    pdb = pdrv->pcfg->pdb;

    memset(&key, 0, sizeof(key));
    memset(&data, 0, sizeof(data));

    data.data  = pdrv->pcfg->buffer;
    data.ulen  = pdrv->pcfg->bulk_get_buffer_size_bytes;
    data.flags = DB_DBT_USERMEM;

    if ((ret = pdb->cursor(pdb, NULL, &pdbc, 0)) != 0) {
        return_error_tuple(pdrv, db_strerror(ret));
        return;
    }

    //Fast forward to the correct index
    idx = 1;

    while ((ret = pdbc->c_get(pdbc, &key, &data, DB_NEXT)) == 0) {

        idx++;

        if (idx >= offset) { break; }
    }

    if ((ret != DB_NOTFOUND) && (ret != 0)) {
        return_error_tuple(pdrv, db_strerror(ret));

    } else if (ret == DB_NOTFOUND) {
        return_ok_empty_list(pdrv);

    } else {

            if ((ret = pdbc->c_get(pdbc, &key, &data, DB_MULTIPLE_KEY | DB_CURRENT)) != 0) {

                if (ret == DB_NOTFOUND) {
                return_ok_empty_list(pdrv);
                } else {
                    return_error_tuple(pdrv, db_strerror(ret));
                }

            } else {

                //First count the number of recs...
                for (DB_MULTIPLE_INIT(p, &data); curr < limit; curr++) {

                    DB_MULTIPLE_KEY_NEXT(p, &data, retkey, retklen, retdata, retdlen);

                    if (p == NULL) {
                        break;
                    } else {
                        actual_count++;
                    }
        
                }

                spec_items = (8 * actual_count) + 7;

                spec = malloc(sizeof(ErlDrvTermData) * spec_items);

                if (spec == NULL) {
                    return_error_tuple(pdrv, "Could not allocate memory for operation!");
                } else {

                    spec[0] = ERL_DRV_ATOM;
                    spec[1] = driver_mk_atom("ok");

                    //Now pipe the data...
                    p    = NULL;
                    curr = 0;

                    for (DB_MULTIPLE_INIT(p, &data); curr < actual_count; curr++) {

                        DB_MULTIPLE_KEY_NEXT(p, &data, retkey, retklen, retdata, retdlen);

                        if (p == NULL) {
                            break;
                        } else {

                            idx = 2 + (curr * 8);

                            spec[idx + 0] = ERL_DRV_STRING;
                            spec[idx + 1] = (ErlDrvTermData)retkey;
                            spec[idx + 2] = retklen;

                            spec[idx + 3] = ERL_DRV_STRING;
                            spec[idx + 4] = (ErlDrvTermData)retdata;
                            spec[idx + 5] = retdlen;

                            spec[idx + 6] = ERL_DRV_TUPLE;
                            spec[idx + 7] = 2;
     
                       }

                    }

                    spec[spec_items - 5] = ERL_DRV_NIL;

                    spec[spec_items - 4] = ERL_DRV_LIST;
                    spec[spec_items - 3] = actual_count + 1;

                    spec[spec_items - 2] = ERL_DRV_TUPLE;
                    spec[spec_items - 1] = 2;

#if ((ERL_DRV_EXTENDED_MAJOR_VERSION == 1) || ((ERL_DRV_EXTENDED_MAJOR_VERSION == 2) && (ERL_DRV_EXTENDED_MINOR_VERSION == 0)))
                    driver_output_term(pdrv->port, spec, spec_items);
#else
                    ErlDrvTermData mkport = driver_mk_port(pdrv->port);
                    erl_drv_output_term(mkport, spec, spec_items);
#endif

                    free(spec);

                }


        }

    }


    pdbc->c_close(pdbc);

    return;

 }
Example #8
File: db.c  Project: puavo-org/puavo-os
int puavo_conf_db_get_all(struct puavo_conf *const conf,
                          struct puavo_conf_list *const list,
                          struct puavo_conf_err *const errp)
{
        DBC *db_cursor = NULL;
        DBT db_null;
        DBT db_batch;
        size_t length = 0;
        char **keys = NULL;
        char **values = NULL;
        int ret = -1;
        int db_error;

        memset(&db_null, 0, sizeof(DBT));
        memset(&db_batch, 0, sizeof(DBT));

        db_batch.flags = DB_DBT_USERMEM;
        db_batch.ulen  = PUAVO_CONF_DEFAULT_DB_BATCH_SIZE;
        db_batch.data  = malloc(db_batch.ulen);
        if (!db_batch.data) {
                puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_SYS, 0,
                                   "Failed to get all parameters");
                goto out;
        }

        db_error = conf->db->cursor(conf->db, NULL, &db_cursor, 0);
        if (db_error) {
                db_cursor = NULL;
                puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_DB, db_error,
                                   "Failed to get all parameters");
                goto out;
        }

        /* Iterate key/value pairs in batches until all are found. */
        while (1) {
                void *batch_iterator;

                /* Get the next batch of key-value pairs. */
                db_error = db_cursor->get(db_cursor, &db_null, &db_batch,
                                          DB_MULTIPLE_KEY | DB_NEXT);
                switch (db_error) {
                case 0:
                        break;
                case DB_NOTFOUND:
                        ret = 0;
                        goto out;
                default:
                        puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_DB, db_error,
                                           "Failed to get all parameters");
                        goto out;
                }

                /* Iterate the batch. */
                DB_MULTIPLE_INIT(batch_iterator, &db_batch);
                while (1) {
                        char *key;
                        char *val;
                        size_t key_size;
                        size_t val_size;
                        char **new_keys;
                        char **new_values;

                        DB_MULTIPLE_KEY_NEXT(batch_iterator, &db_batch,
                                             key, key_size, val, val_size);
                        if (!batch_iterator)
                                break; /* The batch is empty. */

                        new_keys = realloc(keys,
                                           sizeof(char *) * (length + 1));
                        if (!new_keys) {
                                puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_SYS,
                                                   0, "Failed to get all "
                                                   "parameters");
                                goto out;
                        }
                        keys = new_keys;

                        new_values = realloc(values,
                                             sizeof(char *) * (length + 1));
                        if (!new_values) {
                                puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_SYS,
                                                   0, "Failed to get all "
                                                   "parameters");
                                goto out;
                        }
                        values = new_values;

                        ++length;

                        keys[length - 1] = strndup(key, key_size);
                        values[length - 1] = strndup(val, val_size);
                }
        }
        ret = 0;
out:
        if (db_cursor) {
                db_error = db_cursor->close(db_cursor);
                /* Obey exit-on-first-error policy: Do not shadow any
                 * existing error, record close error only if we are
                 * cleaning up without any earlier errors. */
                if (!ret && db_error) {
                        ret = -1;
                        puavo_conf_err_set(errp, PUAVO_CONF_ERRNUM_DB, db_error,
                                           "Failed to get all parameters");
                }
        }

        if (ret) {
                size_t i;

                for (i = 0; i < length; ++i) {
                        free(keys[i]);
                        free(values[i]);
                }
                free(keys);
                free(values);
        } else {
                list->keys = keys;
                list->values = values;
                list->length = length;
        }

        free(db_batch.data);

        return ret;
}