Example #1
/**
 * gtags_next: return next record.
 *
 *	@param[in]	gtop	#GTOP structure
 *	@return		record
 *			@VAR{NULL} end of tag
 */
GTP *
gtags_next(GTOP *gtop)
{
	if (gtop->flags & GTOP_PATH) {
		if (gtop->path_index >= gtop->path_count)
			return NULL;
		gtop->gtp.path = gtop->path_array[gtop->path_index++];
		return &gtop->gtp;
	} else if (gtop->flags & GTOP_KEY) {
		for (gtop->gtp.tag = dbop_next(gtop->dbop);
		     gtop->gtp.tag != NULL;
		     gtop->gtp.tag = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			break;
		}
		return gtop->gtp.tag ? &gtop->gtp : NULL;
	} else {
		/*
		 * End of segment.
		 * Reset resources and read new segment again.
		 */
		if (gtop->gtp_index >= gtop->gtp_count) {
			varray_reset(gtop->vb);
			pool_reset(gtop->segment_pool);
			/* strhash_reset(gtop->path_hash); */
			segment_read(gtop);
		}
		if (gtop->gtp_index >= gtop->gtp_count)
			return NULL;
		return &gtop->gtp_array[gtop->gtp_index++];
	}
}
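For context, a minimal caller sketch (not part of GLOBAL; process_record() is a hypothetical consumer) showing the iteration contract documented above: call gtags_first() once, then gtags_next() until it returns NULL.
static void
walk_all_records(GTOP *gtop)
{
	GTP *gtp;

	/* A NULL pattern selects the sequential read method (see gtags_first()). */
	for (gtp = gtags_first(gtop, NULL, 0); gtp != NULL; gtp = gtags_next(gtop))
		process_record(gtp);	/* hypothetical consumer */
}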
/* Empty the cell list.  This is called at the start of every pixel
 * row. */
inline static void
cell_list_reset (struct cell_list *cells)
{
    cell_list_rewind (cells);
    cells->head.next = &cells->tail;
    pool_reset (cells->cell_pool.base);
}
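A hedged sketch of the per-row usage described by the comment above; the surrounding row loop is an assumption, not cairo's actual rasterizer.
static void
rasterize_all_rows (struct cell_list *cells, int height)
{
    int y;

    for (y = 0; y < height; y++) {
        cell_list_reset (cells);    /* start every pixel row with an empty list */
        /* ... accumulate coverage cells for row y, then emit spans ... */
    }
}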
Example #3
// Free List Reconstruction Task routine.
//
void dispatch_FreeListReconstruction(void *pUnused, int iUnused)
{
    UNUSED_PARAMETER(pUnused);
    UNUSED_PARAMETER(iUnused);

    if (mudconf.control_flags & CF_DBCHECK)
    {
        const char *cmdsave = mudstate.debug_cmd;
        mudstate.debug_cmd = (char *)"< dbck >";
        do_dbck(NOTHING, NOTHING, NOTHING, 0);
        Guest.CleanUp();
        pcache_trim();
        pool_reset();
        mudstate.debug_cmd = cmdsave;
    }

    // Schedule ourselves again.
    //
    CLinearTimeAbsolute ltaNow;
    ltaNow.GetUTC();
    CLinearTimeDelta ltd;
    ltd.SetSeconds(mudconf.check_interval);
    mudstate.check_counter = ltaNow + ltd;
    scheduler.DeferTask(mudstate.check_counter, PRIORITY_SYSTEM,
        dispatch_FreeListReconstruction, 0, 0);
}
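A hedged sketch of how the first pass might be queued at startup, reusing only the calls visible above; the real TinyMUX boot code may differ and the function name is hypothetical.
void schedule_InitialFreeListReconstruction(void)
{
    // Queue the first free-list reconstruction pass one interval from now;
    // dispatch_FreeListReconstruction() then reschedules itself.
    CLinearTimeAbsolute ltaNow;
    ltaNow.GetUTC();
    CLinearTimeDelta ltd;
    ltd.SetSeconds(mudconf.check_interval);
    mudstate.check_counter = ltaNow + ltd;
    scheduler.DeferTask(mudstate.check_counter, PRIORITY_SYSTEM,
        dispatch_FreeListReconstruction, 0, 0);
}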
Example #4
/**
 * gtags_next: return next record.
 *
 *	@param[in]	gtop	#GTOP structure
 *	@return		record
 *			@VAR{NULL} end of tag
 */
GTP *
gtags_next(GTOP *gtop)
{
	gtop->readcount++;
	if (gtop->flags & GTOP_PATH) {
		if (gtop->path_index >= gtop->path_count)
			return NULL;
		gtop->gtp.path = gtop->path_array[gtop->path_index++];
		return &gtop->gtp;
	} else if (gtop->flags & GTOP_KEY) {
		gtop->gtp.tag = dbop_next(gtop->dbop);
again3:
		for (; gtop->gtp.tag != NULL; gtop->gtp.tag = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			break;
		}
		if (gtop->gtp.tag == NULL) {
			if (gtop->prefix && gtags_restart(gtop)) {
				gtop->gtp.tag = dbop_first(gtop->dbop, gtop->key, gtop->preg, gtop->dbflags);
				goto again3;
			}
		}
		return gtop->gtp.tag ? &gtop->gtp : NULL;
	} else {
		/*
		 * End of segment.
		 * Reset resources and read new segment again.
		 */
		if (gtop->gtp_index >= gtop->gtp_count) {
			varray_reset(gtop->vb);
			pool_reset(gtop->segment_pool);
			/* strhash_reset(gtop->path_hash); */
			segment_read(gtop);
		}
		if (gtop->gtp_index >= gtop->gtp_count) {
			if (gtop->prefix && gtags_restart(gtop)) {
				gtop->gtp.tag = dbop_first(gtop->dbop, gtop->key, gtop->preg, gtop->dbflags);
				if (gtop->gtp.tag == NULL)
					return NULL;
				dbop_unread(gtop->dbop);
				segment_read(gtop);
			} else
				return NULL;
		}
		return &gtop->gtp_array[gtop->gtp_index++];
	}
}
Example #5
/*
 * strhash_reset: reset string hash.
 *
 *	i)	sh	STRHASH structure
 */
void
strhash_reset(STRHASH *sh)
{
	int i;

	/*
	 * Reinitialize the entry list of each bucket. The entries themselves
	 * are freed below by resetting the pool.
	 */
	for (i = 0; i < sh->buckets; i++) {
		SLIST_INIT(&sh->htab[i]);
	}
	/*
	 * Free all memory in sh->pool but leave it valid for further allocation.
	 */
	pool_reset(sh->pool);
	sh->entries = 0;
}
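A hedged sketch of the reuse pattern strhash_reset() enables; the caller and its key source are assumptions, not part of GLOBAL.
static void
count_keys_per_pass(char *const *keys, int nkeys, int npasses)
{
	STRHASH *sh = strhash_open(HASHBUCKETS);
	int pass, i;

	for (pass = 0; pass < npasses; pass++) {
		for (i = 0; i < nkeys; i++)
			(void)strhash_assign(sh, keys[i], 1);	/* enter the key */
		/* ... inspect sh->entries for this pass ... */
		strhash_reset(sh);	/* drop all entries, keep buckets and pool */
	}
	strhash_close(sh);
}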
static void TestPoolKeepTillDestroy()
{
    const int ITERS = 50*1024;
    void *ptrs[2*ITERS+1];
    rml::MemPoolPolicy pol(getMemPolicy, putMemPolicy);
    rml::MemoryPool *pool;

    // First create a default pool that returns memory back to the callback,
    // then repeat with the keepAllMemory policy enabled.
    for (int keep=0; keep<2; keep++) {
        getMemCalls = putMemCalls = 0;
        if (keep)
            pol.keepAllMemory = 1;
        pool_create_v1(0, &pol, &pool);
        for (int i=0; i<2*ITERS; i+=2) {
            ptrs[i] = pool_malloc(pool, 7*1024);
            ptrs[i+1] = pool_malloc(pool, 10*1024);
        }
        ptrs[2*ITERS] = pool_malloc(pool, 8*1024*1024);
        ASSERT(!putMemCalls, NULL);
        for (int i=0; i<2*ITERS; i++)
            pool_free(pool, ptrs[i]);
        pool_free(pool, ptrs[2*ITERS]);
        size_t totalPutMemCalls = putMemCalls;
        if (keep)
            ASSERT(!putMemCalls, NULL);
        else {
            ASSERT(putMemCalls, NULL);
            putMemCalls = 0;
        }
        size_t currGetCalls = getMemCalls;
        pool_malloc(pool, 8*1024*1024);
        if (keep)
            ASSERT(currGetCalls == getMemCalls, "Must not lead to new getMem call");
        size_t currPuts = putMemCalls;
        pool_reset(pool);
        ASSERT(currPuts == putMemCalls, "Pool must not release memory during reset.");
        pool_destroy(pool);
        ASSERT(putMemCalls, NULL);
        totalPutMemCalls += putMemCalls;
        ASSERT(getMemCalls == totalPutMemCalls, "Memory leak detected.");
    }

}
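getMemPolicy()/putMemPolicy() and the getMemCalls/putMemCalls counters used above are defined elsewhere in the test file; below is a hedged sketch of what such counting callbacks could look like, assuming rml::MemPoolPolicy's rawAllocType/rawFreeType signatures.
#include <cstdlib>   // malloc/free for this sketch
#include <cstdint>   // intptr_t

static size_t getMemCalls, putMemCalls;   // counters the test inspects

// Hand a raw region to the pool and count the request.
static void *getMemPolicy(intptr_t /*pool_id*/, size_t &bytes)
{
    getMemCalls++;
    return malloc(bytes);
}

// Accept a raw region back from the pool and count the release.
static int putMemPolicy(intptr_t /*pool_id*/, void *rawPtr, size_t /*rawBytes*/)
{
    putMemCalls++;
    free(rawPtr);
    return 0;   // 0 == success
}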
Example #7
	fn void
	reset(Block* block, b32 zero_out)
	{
		switch(block->type)
		{
			case BlockType::HEAP:
			{
				heap_reset(block, zero_out);
				return;
			}
			case BlockType::POOL:
			{
				pool_reset(block, zero_out);
				return;
			}
			default:
			{
				FAIL("Invalid block type");
			}	
		}
	}
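A minimal call-site sketch, assuming b32 is an integer boolean; scratch_block is a hypothetical pool-backed Block obtained elsewhere.
	// Hypothetical call site: clear a pool-backed scratch block and zero
	// its memory before it is reused.
	reset(scratch_block, /* zero_out */ 1);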
void TestPoolReset()
{
    rml::MemPoolPolicy pol(getMallocMem, putMallocMem);
    rml::MemoryPool *pool;

    pool_create_v1(0, &pol, &pool);
    for (int i=0; i<100; i++) {
        ASSERT(pool_malloc(pool, 8), NULL);
        ASSERT(pool_malloc(pool, 50*1024), NULL);
    }
    int regionsBeforeReset = liveRegions;
    pool_reset(pool);
    for (int i=0; i<100; i++) {
        ASSERT(pool_malloc(pool, 8), NULL);
        ASSERT(pool_malloc(pool, 50*1024), NULL);
    }
    ASSERT(regionsBeforeReset == liveRegions,
           "Expected no new region allocations.");
    pool_destroy(pool);
    ASSERT(!liveRegions, "Expected all regions to be released.");
}
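liveRegions and the getMallocMem()/putMallocMem() callbacks relied on by the assertions above are defined elsewhere in the test; below is a hedged sketch of the region bookkeeping (the real callbacks also record each region's size so releases can be verified).
#include <cstdlib>   // malloc/free for this sketch
#include <cstdint>   // intptr_t

static int liveRegions;   // raw regions currently held by pools

// Count each raw region handed to a pool.
static void *getMallocMem(intptr_t /*pool_id*/, size_t &bytes)
{
    liveRegions++;
    return malloc(bytes);
}

// Count each raw region a pool gives back.
static int putMallocMem(intptr_t /*pool_id*/, void *rawPtr, size_t /*rawBytes*/)
{
    liveRegions--;
    free(rawPtr);
    return 0;
}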
int main(int argc, char *argv[])
{
	const uint32_t unequal = pool_intern("does not equal");
	const uint32_t equal = pool_intern("equals");
	uint32_t buf[3];
	uint32_t n;

	if (argc != 2)
		usage("test-string-pool <string>,<string>");

	n = pool_tok_seq(3, buf, ",-", argv[1]);
	if (n >= 3)
		die("too many strings");
	if (n <= 1)
		die("too few strings");

	buf[2] = buf[1];
	buf[1] = (buf[0] == buf[2]) ? equal : unequal;
	pool_print_seq(3, buf, ' ', stdout);
	fputc('\n', stdout);

	pool_reset();
	return 0;
}
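For example, invoking the helper as test-string-pool foo,foo should print "foo equals foo", while test-string-pool foo,bar should print "foo does not equal bar", since equal strings intern to the same pool id.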
Example #10
/**
 * gtags_first: return first record
 *
 *	@param[in]	gtop	#GTOP structure
 *	@param[in]	pattern	tag name <br>
 *		- may be regular expression
 *		- may be @VAR{NULL}
 *	@param[in]	flags	#GTOP_PREFIX:	prefix read <br>
 *			#GTOP_KEY:	read key only <br>
 *			#GTOP_PATH:	read path only <br>
 *			#GTOP_NOREGEX:	don't use regular expression. <br>
 *			#GTOP_IGNORECASE:	ignore case distinction. <br>
 *			#GTOP_BASICREGEX:	use basic regular expression. <br>
 *			#GTOP_NOSORT:	don't sort
 *	@return		record
 */
GTP *
gtags_first(GTOP *gtop, const char *pattern, int flags)
{
	int dbflags = 0;
	int regflags = 0;
	char prefix[IDENTLEN];
	static regex_t reg;
	regex_t *preg = &reg;
	const char *key = NULL;
	const char *tagline;

	/* Clean up any state left over from the previous search. */
	if (gtop->path_hash) {
		strhash_close(gtop->path_hash);
		gtop->path_hash = NULL;
	}
	if (gtop->path_array) {
		free(gtop->path_array);
		gtop->path_array = NULL;
	}

	gtop->flags = flags;
	if (flags & GTOP_PREFIX && pattern != NULL)
		dbflags |= DBOP_PREFIX;
	if (flags & GTOP_KEY)
		dbflags |= DBOP_KEY;

	if (!(flags & GTOP_BASICREGEX))
		regflags |= REG_EXTENDED;
	if (flags & GTOP_IGNORECASE)
		regflags |= REG_ICASE;
	/*
	 * Get key and compiled regular expression for dbop_xxxx().
	 */
	if (flags & GTOP_NOREGEX) {
		key = pattern;
		preg = NULL;
	} else if (pattern == NULL || !strcmp(pattern, ".*")) {
		/*
		 * Since the regular expression '.*' matches any record,
		 * we take the sequential read method.
		 */
		key = NULL;
		preg = NULL;
	} else if (isregex(pattern) && regcomp(preg, pattern, regflags) == 0) {
		const char *p;
		/*
		 * If the pattern includes '^' followed by some non-regular-expression
		 * characters, like '^aaa[0-9]', we take the prefix read method
		 * with the non-regular-expression part as the prefix.
		 */
		if (!(flags & GTOP_IGNORECASE) && *pattern == '^' && *(p = pattern + 1) && !isregexchar(*p)) {
			int i = 0;

			while (*p && !isregexchar(*p) && i < IDENTLEN - 1)
				prefix[i++] = *p++;
			prefix[i] = '\0';
			key = prefix;
			dbflags |= DBOP_PREFIX;
		} else {
			key = NULL;
		}
	} else {
		key = pattern;
		preg = NULL;
	}
	/*
	 * If GTOP_PATH is set, we first collect all path names in a pool and
	 * sort them. gtags_first() and gtags_next() then return entries from
	 * that pool one at a time.
	 */
	if (gtop->flags & GTOP_PATH) {
		struct sh_entry *entry;
		char *p;
		const char *cp;
		unsigned long i;

		gtop->path_hash = strhash_open(HASHBUCKETS);
		/*
		 * Pool path names.
		 *
		 * fid		path name
		 * +--------------------------
		 * |100		./aaa/a.c
		 * |105		./aaa/b.c
		 *  ...
		 */
		for (tagline = dbop_first(gtop->dbop, key, preg, dbflags);
		     tagline != NULL;
		     tagline = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			/* extract file id */
			p = locatestring(tagline, " ", MATCH_FIRST);
			if (p == NULL)
				die("Illegal tag record. '%s'\n", tagline);
			*p = '\0';
			entry = strhash_assign(gtop->path_hash, tagline, 1);
			/* new entry: get path name and set. */
			if (entry->value == NULL) {
				cp = gpath_fid2path(tagline, NULL);
				if (cp == NULL)
					die("GPATH is corrupted.(file id '%s' not found)", tagline);
				entry->value = strhash_strdup(gtop->path_hash, cp, 0);
			}
		}
		/*
		 * Sort path names.
		 *
		 * fid		path name	path_array (sort)
		 * +--------------------------	+---+
		 * |100		./aaa/a.c <-------* |
		 * |105		./aaa/b.c <-------* |
		 *  ...				...
		 */
		gtop->path_array = (char **)check_malloc(gtop->path_hash->entries * sizeof(char *));
		i = 0;
		for (entry = strhash_first(gtop->path_hash); entry != NULL; entry = strhash_next(gtop->path_hash))
			gtop->path_array[i++] = entry->value;
		if (i != gtop->path_hash->entries)
			die("Something is wrong. 'i = %lu, entries = %lu'" , i, gtop->path_hash->entries);
		if (!(gtop->flags & GTOP_NOSORT))
			qsort(gtop->path_array, gtop->path_hash->entries, sizeof(char *), compare_path);
		gtop->path_count = gtop->path_hash->entries;
		gtop->path_index = 0;

		if (gtop->path_index >= gtop->path_count)
			return NULL;
		gtop->gtp.path = gtop->path_array[gtop->path_index++];
		return &gtop->gtp;
	} else if (gtop->flags & GTOP_KEY) {
		for (gtop->gtp.tag = dbop_first(gtop->dbop, key, preg, dbflags);
		     gtop->gtp.tag != NULL;
		     gtop->gtp.tag = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			break;
		}
		return gtop->gtp.tag ? &gtop->gtp : NULL;
	} else {
		if (gtop->vb == NULL)
			gtop->vb = varray_open(sizeof(GTP), 200);
		else
			varray_reset(gtop->vb);
		if (gtop->segment_pool == NULL)
			gtop->segment_pool = pool_open();
		else
			pool_reset(gtop->segment_pool);
		if (gtop->path_hash == NULL)
			gtop->path_hash = strhash_open(HASHBUCKETS);
		else
			strhash_reset(gtop->path_hash);
		tagline = dbop_first(gtop->dbop, key, preg, dbflags);
		if (tagline == NULL)
			return NULL;
		/*
		 * The next dbop_next() call will read the same record again.
		 */
		dbop_unread(gtop->dbop);
		/*
		 * Read a tag segment with sorting.
		 */
		segment_read(gtop);
		return &gtop->gtp_array[gtop->gtp_index++];
	}
}
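A hedged caller sketch (not part of GLOBAL; assumes <stdio.h>) of the GTOP_PATH branch above: the sorted path pool is built once in gtags_first() and then handed out one entry per call.
static void
print_all_paths(GTOP *gtop)
{
	GTP *gtp;

	for (gtp = gtags_first(gtop, NULL, GTOP_PATH); gtp != NULL; gtp = gtags_next(gtop))
		puts(gtp->path);
}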
void TestPools() {
    rml::MemPoolPolicy pol(getMem, putMem);
    size_t beforeNumBackRef, afterNumBackRef;

    rml::MemoryPool *pool1;
    rml::MemoryPool *pool2;
    pool_create_v1(0, &pol, &pool1);
    pool_create_v1(0, &pol, &pool2);
    pool_destroy(pool1);
    pool_destroy(pool2);

    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL);
    beforeNumBackRef = allocatedBackRefCount();
    rml::MemoryPool *fixedPool;

    pool_create_v1(0, &pol, &fixedPool);
    pol.pAlloc = getMallocMem;
    pol.pFree = putMallocMem;
    pol.granularity = 8;
    rml::MemoryPool *mallocPool;

    pool_create_v1(0, &pol, &mallocPool);
/* Check that the large object cache (LOC) returns the correct size for cached
   objects. passBackendSz-byte objects are cached in the LOC but bypass the
   backend, so their memory is requested directly from the allocation callback.
   anotherLOCBinSz-byte objects must fit into a different LOC bin, so that
   allocating and releasing them triggers cache cleanup. All of this is expected
   to release the passBackendSz-byte object from the LOC during cleanup, and
   putMallocMem checks that the size returned for it is correct.
*/
    const size_t passBackendSz = Backend::maxBinned_HugePage+1,
        anotherLOCBinSz = minLargeObjectSize+1;
    for (int i=0; i<10; i++) { // run long enough to be cached
        void *p = pool_malloc(mallocPool, passBackendSz);
        ASSERT(p, "Memory was not allocated");
        pool_free(mallocPool, p);
    }
    // run long enough that the passBackendSz allocation is cleaned from the cache
    // and returned to putMallocMem for size checking
    for (int i=0; i<1000; i++) {
        void *p = pool_malloc(mallocPool, anotherLOCBinSz);
        ASSERT(p, "Memory was not allocated");
        pool_free(mallocPool, p);
    }

    void *smallObj =  pool_malloc(fixedPool, 10);
    ASSERT(smallObj, "Memory was not allocated");
    memset(smallObj, 1, 10);
    void *ptr = pool_malloc(fixedPool, 1024);
    ASSERT(ptr, "Memory was not allocated");
    memset(ptr, 1, 1024);
    void *largeObj = pool_malloc(fixedPool, minLargeObjectSize);
    ASSERT(largeObj, "Memory was not allocated");
    memset(largeObj, 1, minLargeObjectSize);
    ptr = pool_malloc(fixedPool, minLargeObjectSize);
    ASSERT(ptr, "Memory was not allocated");
    memset(ptr, minLargeObjectSize, minLargeObjectSize);
    pool_malloc(fixedPool, 10*minLargeObjectSize); // no leak for unsuccessful allocations
    pool_free(fixedPool, smallObj);
    pool_free(fixedPool, largeObj);

    // provoke large object cache cleanup and check that no leaks occur
    for( int p=MaxThread; p>=MinThread; --p )
        NativeParallelFor( p, StressLOCacheWork(mallocPool) );
    pool_destroy(mallocPool);
    pool_destroy(fixedPool);

    scalable_allocation_command(TBBMALLOC_CLEAN_ALL_BUFFERS, NULL);
    afterNumBackRef = allocatedBackRefCount();
    ASSERT(beforeNumBackRef==afterNumBackRef, "backreference leak detected");

    {
        // test usedSize/cachedSize and LOC bitmask correctness
        void *p[5];
        pool_create_v1(0, &pol, &mallocPool);
        const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc;
        p[3] = pool_malloc(mallocPool, minLargeObjectSize+2*LargeObjectCache::largeBlockCacheStep);
        for (int i=0; i<10; i++) {
            p[0] = pool_malloc(mallocPool, minLargeObjectSize);
            p[1] = pool_malloc(mallocPool, minLargeObjectSize+LargeObjectCache::largeBlockCacheStep);
            pool_free(mallocPool, p[0]);
            pool_free(mallocPool, p[1]);
        }
        ASSERT(loc->getUsedSize(), NULL);
        pool_free(mallocPool, p[3]);
        ASSERT(loc->getLOCSize() < 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
        const size_t maxLocalLOCSize = LocalLOCImpl<3,30>::getMaxSize();
        ASSERT(loc->getUsedSize() <= maxLocalLOCSize, NULL);
        for (int i=0; i<3; i++)
            p[i] = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
        size_t currUser = loc->getUsedSize();
        ASSERT(!loc->getLOCSize() && currUser >= 3*(minLargeObjectSize+LargeObjectCache::largeBlockCacheStep), NULL);
        p[4] = pool_malloc(mallocPool, minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep);
        ASSERT(loc->getUsedSize() - currUser >= minLargeObjectSize+3*LargeObjectCache::largeBlockCacheStep, NULL);
        pool_free(mallocPool, p[4]);
        ASSERT(loc->getUsedSize() <= currUser+maxLocalLOCSize, NULL);
        pool_reset(mallocPool);
        ASSERT(!loc->getLOCSize() && !loc->getUsedSize(), NULL);
        pool_destroy(mallocPool);
    }
    // To test the LOC we need longer lists than the production LocalLOC
    // releases, so create a special LocalLOC.
    {
        LocalLOCImpl<2, 20> lLOC;
        pool_create_v1(0, &pol, &mallocPool);
        rml::internal::ExtMemoryPool *mPool = &((rml::internal::MemoryPool*)mallocPool)->extMemPool;
        const LargeObjectCache *loc = &((rml::internal::MemoryPool*)mallocPool)->extMemPool.loc;
        for (int i=0; i<22; i++) {
            void *o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
            bool ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool);
            ASSERT(ret, NULL);

            o = pool_malloc(mallocPool, minLargeObjectSize+i*LargeObjectCache::largeBlockCacheStep);
            ret = lLOC.put(((LargeObjectHdr*)o - 1)->memoryBlock, mPool);
            ASSERT(ret, NULL);
        }
        lLOC.externalCleanup(mPool);
        ASSERT(!loc->getUsedSize(), NULL);

        pool_destroy(mallocPool);
    }
}
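StressLOCacheWork, used with NativeParallelFor above, is defined elsewhere in the test; below is a hedged sketch of what such a body could look like (assumes the test's ASSERT macro and <cstring>): each thread churns the large object cache by allocating and freeing objects across many LOC bins.
class StressLOCacheWork {
    rml::MemoryPool *my_pool;
public:
    StressLOCacheWork(rml::MemoryPool *pool) : my_pool(pool) {}
    void operator()(int) const {
        // Walk a range of large-object sizes so several LOC bins are exercised.
        for (size_t sz = minLargeObjectSize; sz < 1024*1024;
             sz += LargeObjectCache::largeBlockCacheStep) {
            void *p = pool_malloc(my_pool, sz);
            ASSERT(p, "Memory was not allocated");
            memset(p, 0, sz);
            pool_free(my_pool, p);
        }
    }
};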
Example #12
/**
 * gtags_first: return first record
 *
 *	@param[in]	gtop	#GTOP structure
 *	@param[in]	pattern	tag name <br>
 *		- may be regular expression
 *		- may be @VAR{NULL}
 *	@param[in]	flags	#GTOP_PREFIX:	prefix read <br>
 *			#GTOP_KEY:	read key only <br>
 *			#GTOP_PATH:	read path only <br>
 *			#GTOP_NOREGEX:	don't use regular expression. <br>
 *			#GTOP_IGNORECASE:	ignore case distinction. <br>
 *			#GTOP_BASICREGEX:	use basic regular expression. <br>
 *			#GTOP_NOSORT:	don't sort
 *	@return		record
 */
GTP *
gtags_first(GTOP *gtop, const char *pattern, int flags)
{
	int regflags = 0;
	static regex_t reg;
	const char *tagline;
	STATIC_STRBUF(regex);

	strbuf_clear(regex);
	gtop->preg = &reg;
	gtop->key = NULL;
	gtop->prefix = NULL;
	gtop->flags = flags;
	gtop->dbflags = 0;
	gtop->readcount = 1;

	/* Clean up any state left over from the previous search. */
	if (gtop->path_hash) {
		strhash_close(gtop->path_hash);
		gtop->path_hash = NULL;
	}
	if (gtop->path_array) {
		free(gtop->path_array);
		gtop->path_array = NULL;
	}

	if (flags & GTOP_KEY)
		gtop->dbflags |= DBOP_KEY;
	if (!(flags & GTOP_BASICREGEX))
		regflags |= REG_EXTENDED;

	/*
	 * decide a read method
	 */
	if (pattern == NULL)
		gtop->preg = NULL;
	else if (pattern[0] == 0)
		return NULL;
	else if (!strcmp(pattern, ".*") || !strcmp(pattern, "^.*$") ||
		!strcmp(pattern, "^") || !strcmp(pattern, "$") ||
		!strcmp(pattern, "^.*") || !strcmp(pattern, ".*$")) {
		/*
		 * Since these regular expressions match any record,
		 * we take the sequential read method.
		 */
		gtop->preg = NULL;
	} else if (flags & GTOP_IGNORECASE) {
		regflags |= REG_ICASE;
		if (!isregex(pattern) || flags & GTOP_NOREGEX) {
			gtop->prefix = get_prefix(pattern, flags);
			if (gtop->openflags & GTAGS_DEBUG)
				if (gtop->prefix != NULL)
					fprintf(stderr, "Using prefix: %s\n", gtop->prefix);
			if (gtop->prefix == NULL)
				die("gtags_first: impossible (1).");
			strbuf_putc(regex, '^');
			strbuf_puts(regex, pattern);
			if (!(flags & GTOP_PREFIX))
				strbuf_putc(regex, '$');
		} else if (*pattern == '^' && (gtop->prefix = get_prefix(pattern, flags)) != NULL) {
			if (gtop->openflags & GTAGS_DEBUG)
				fprintf(stderr, "Using prefix: %s\n", gtop->prefix);
			strbuf_puts(regex, pattern);
		} else {
			strbuf_puts(regex, pattern);
		}
	} else {
		if (!isregex(pattern) || flags & GTOP_NOREGEX) {
			if (flags & GTOP_PREFIX)
				gtop->dbflags |= DBOP_PREFIX;
			gtop->key = pattern;
			gtop->preg = NULL;
		} else if (*pattern == '^' && (gtop->key = get_prefix(pattern, flags)) != NULL) {
			if (gtop->openflags & GTAGS_DEBUG)
				fprintf(stderr, "Using prefix: %s\n", gtop->key);
			gtop->dbflags |= DBOP_PREFIX;
			gtop->preg = NULL;
		} else {
			strbuf_puts(regex, pattern);
		}
	}
	if (gtop->prefix) {
		if (gtop->key)
			die("gtags_first: impossible (2).");
		gtop->key = gtop->prefix;
		gtop->dbflags |= DBOP_PREFIX;
	}
	if (strbuf_getlen(regex) > 0) {
		if (gtop->preg == NULL)
			die("gtags_first: impossible (3).");
		if (regcomp(gtop->preg, strbuf_value(regex), regflags) != 0)
			die("invalid regular expression.");
	}
	/*
	 * If GTOP_PATH is set, we first collect all path names in a pool and
	 * sort them. gtags_first() and gtags_next() then return entries from
	 * that pool one at a time.
	 */
	if (gtop->flags & GTOP_PATH) {
		struct sh_entry *entry;
		char *p;
		const char *cp;
		unsigned long i;

		gtop->path_hash = strhash_open(HASHBUCKETS);
		/*
		 * Pool path names.
		 *
		 * fid		path name
		 * +--------------------------
		 * |100		./aaa/a.c
		 * |105		./aaa/b.c
		 *  ...
		 */
again0:
		for (tagline = dbop_first(gtop->dbop, gtop->key, gtop->preg, gtop->dbflags);
		     tagline != NULL;
		     tagline = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			/* extract file id */
			p = locatestring(tagline, " ", MATCH_FIRST);
			if (p == NULL)
				die("Invalid tag record. '%s'\n", tagline);
			*p = '\0';
			entry = strhash_assign(gtop->path_hash, tagline, 1);
			/* new entry: get path name and set. */
			if (entry->value == NULL) {
				cp = gpath_fid2path(tagline, NULL);
				if (cp == NULL)
					die("GPATH is corrupted.(file id '%s' not found)", tagline);
				entry->value = strhash_strdup(gtop->path_hash, cp, 0);
			}
		}
		if (gtop->prefix && gtags_restart(gtop))
			goto again0;
		/*
		 * Sort path names.
		 *
		 * fid		path name	path_array (sort)
		 * +--------------------------	+---+
		 * |100		./aaa/a.c <-------* |
		 * |105		./aaa/b.c <-------* |
		 *  ...				...
		 */
		gtop->path_array = (char **)check_malloc(gtop->path_hash->entries * sizeof(char *));
		i = 0;
		for (entry = strhash_first(gtop->path_hash); entry != NULL; entry = strhash_next(gtop->path_hash))
			gtop->path_array[i++] = entry->value;
		if (i != gtop->path_hash->entries)
			die("Something is wrong. 'i = %lu, entries = %lu'" , i, gtop->path_hash->entries);
		if (!(gtop->flags & GTOP_NOSORT))
			qsort(gtop->path_array, gtop->path_hash->entries, sizeof(char *), compare_path);
		gtop->path_count = gtop->path_hash->entries;
		gtop->path_index = 0;

		if (gtop->path_index >= gtop->path_count)
			return NULL;
		gtop->gtp.path = gtop->path_array[gtop->path_index++];
		return &gtop->gtp;
	} else if (gtop->flags & GTOP_KEY) {
again1:
		for (gtop->gtp.tag = dbop_first(gtop->dbop, gtop->key, gtop->preg, gtop->dbflags);
		     gtop->gtp.tag != NULL;
		     gtop->gtp.tag = dbop_next(gtop->dbop))
		{
			VIRTUAL_GRTAGS_GSYMS_PROCESSING(gtop);
			break;
		}
		if (gtop->gtp.tag == NULL) {
			if (gtop->prefix && gtags_restart(gtop))
				goto again1;
		}
		return gtop->gtp.tag ? &gtop->gtp : NULL;
	} else {
		if (gtop->vb == NULL)
			gtop->vb = varray_open(sizeof(GTP), 200);
		else
			varray_reset(gtop->vb);
		if (gtop->segment_pool == NULL)
			gtop->segment_pool = pool_open();
		else
			pool_reset(gtop->segment_pool);
		if (gtop->path_hash == NULL)
			gtop->path_hash = strhash_open(HASHBUCKETS);
		else
			strhash_reset(gtop->path_hash);
again2:
		tagline = dbop_first(gtop->dbop, gtop->key, gtop->preg, gtop->dbflags);
		if (tagline == NULL) {
			if (gtop->prefix && gtags_restart(gtop))
				goto again2;
			return NULL;
		}
		/*
		 * The next dbop_next() call will read the same record again.
		 */
		dbop_unread(gtop->dbop);
		/*
		 * Read a tag segment with sorting.
		 */
		segment_read(gtop);
		return &gtop->gtp_array[gtop->gtp_index++];
	}
}
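As a worked example of the read-method decision above: a NULL pattern or ".*" selects the sequential read method (no key, no regular expression); a plain literal such as "main" with the default flags becomes a key read (gtop->key = "main", no regular expression compiled); and the same literal with GTOP_PREFIX additionally sets DBOP_PREFIX, so the underlying dbop search becomes a prefix read.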