Example #1
/*
 * Sweeps through the cache and marks all entries as deleted
 *
 * Returns the number of elements it found and marked deleted.
 */
int32
Cache_Clear(Cache *cache)
{
	Assert(NULL != cache);

	/* Start the sweep at a random entry; the loop below wraps around until it returns here */
	int32 startIdx = cdb_randint(cache->cacheHdr->nEntries - 1, 0);
	int32 entryIdx = startIdx;
	int32 numClearedEntries = 0;

	/*
	 * Visit every entry exactly once, wrapping around from startIdx. The
	 * do-while ensures the entry at startIdx itself is also examined; a
	 * while-loop that breaks as soon as entryIdx wraps back to startIdx
	 * would skip it.
	 */
	do
	{
		entryIdx = (entryIdx + 1) % cache->cacheHdr->nEntries;

		CacheEntry *crtEntry = Cache_GetEntryByIndex(cache->cacheHdr, entryIdx);

		/* Lock entry so that nobody else changes its state until we're done with it */
		Cache_LockEntry(cache, crtEntry);

		if (crtEntry->state != CACHE_ENTRY_CACHED)
		{
			/* Not interested in free/acquired/deleted entries. Go back and look at next entry */
			Cache_UnlockEntry(cache, crtEntry);
			continue;
		}

		/* Found cached entry */
		Cache_EntryAddRef(cache, crtEntry);

		if (crtEntry->state == CACHE_ENTRY_FREE || crtEntry->state == CACHE_ENTRY_ACQUIRED)
		{
			/*
			 * Defensive check: we hold the entry lock, so the state cannot
			 * have changed since we tested it above. If it somehow did,
			 * release our reference and skip the entry.
			 */
			Assert(false);
			Cache_EntryDecRef(cache, crtEntry);
			Cache_UnlockEntry(cache, crtEntry);
			continue;
		}

		Cache_RegisterCleanup(cache, crtEntry, true /* isCachedEntry */);

		Cache_Remove(cache, crtEntry);

		/* Done with changing the state. Unlock the entry */
		Cache_UnlockEntry(cache, crtEntry);

		Cache_Release(cache, crtEntry);

		numClearedEntries++;
	} while (entryIdx != startIdx);	/* done after one full loop over all entries */

	return numClearedEntries;
}
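
Every example in this file calls cdb_randint(), whose definition is not shown here. Note that the examples disagree on argument order: Example #1 passes the upper bound first, while Examples #2-#4 pass the lower bound first. The following is a minimal, order-agnostic sketch, not the actual implementation; the name and body are hypothetical, assuming only that both bounds are inclusive (as Example #3's comment states).

#include <stdlib.h>		/* random() */

/*
 * Hypothetical sketch: return a random integer between the two bounds,
 * inclusive. Normalizing the bounds first makes the sketch valid for
 * either argument order seen in these examples.
 */
static int
cdb_randint_sketch(int a, int b)
{
	int		lo = (a < b) ? a : b;
	int		hi = (a < b) ? b : a;

	return lo + (int) (random() % ((long) hi - (long) lo + 1L));
}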
Example #2
/*
 * Create a CdbHash for this session.
 *
 * Here we set the fields that do not change for the lifetime of the newly
 * created CdbHash:
 *
 * 1 - number of segments in Greenplum Database.
 * 2 - hashing algorithm used.
 * 3 - reduction method.
 *
 * The hash value itself is re-initialized for every tuple in cdbhashinit().
 */
CdbHash *
makeCdbHash(int numsegs, CdbHashAlg algorithm)
{
	CdbHash    *h;

	assert(numsegs > 0);		/* verify number of segments is legal. */
	assert(algorithm == HASH_FNV_1); /* make sure everybody uses same algorithm */

	/* Allocate a CdbHash to hold the hash properties */
	h = palloc(sizeof(CdbHash));

	/*
	 * Set this hash session's characteristics.
	 */
	h->hash = 0;
	h->numsegs = numsegs;
	h->hashalg = algorithm;

	if (h->hashalg == HASH_FNV_1)
		h->hashfn = &fnv1_32_buf;
	else if (h->hashalg == HASH_FNV_1A)
		h->hashfn = &fnv1a_32_buf;
	else
		elog(ERROR, "unrecognized hash algorithm: %d", h->hashalg);	/* avoid leaving hashfn uninitialized */

	/*
	 * set the reduction algorithm: If num_segs is power of 2 use bit mask,
	 * else use lazy mod (h mod n)
	 */
	if (ispowof2(numsegs))
	{
		h->reducealg = REDUCE_BITMASK;
	}
	else
	{
		h->reducealg = REDUCE_LAZYMOD;
	}

	/*
	 * If we distribute into a relation with an empty partitioning policy,
	 * we round-robin the tuples starting from this index. Note that the
	 * random number is created once per makeCdbHash() call. This means that
	 * commands that create one cdbhash object for all tuples (like COPY and
	 * INSERT-INTO-SELECT) behave more like a round-robin distribution, while
	 * commands that create a cdbhash per row (like INSERT) behave more like
	 * a random distribution.
	 */
	h->rrindex = cdb_randint(0, UPPER_VAL);
		
	ereport(DEBUG4,
	  (errmsg("CDBHASH started using algorithm %d into %d segment databases",
			  h->hashalg,
			  h->numsegs)));

	return h;
}
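
Neither ispowof2() nor the reduction step itself appears in this example. Below is a hedged sketch of what the comment above describes (hypothetical names, not the actual Greenplum definitions): the standard bit-twiddling power-of-two test, and the identity REDUCE_BITMASK relies on, namely h & (n - 1) == h % n when n is a power of two.

#include <stdint.h>

/* Hypothetical sketch: true iff n is a positive power of two. */
static int
ispowof2_sketch(int n)
{
	return n > 0 && (n & (n - 1)) == 0;
}

/*
 * Hypothetical sketch of the reduction step: map a 32-bit hash onto a
 * segment number. The bitmask form is only correct when numsegs is a
 * power of two, which is exactly what the ispowof2() test guards.
 */
static uint32_t
reduce_sketch(uint32_t h, uint32_t numsegs)
{
	if (ispowof2_sketch((int) numsegs))
		return h & (numsegs - 1);	/* REDUCE_BITMASK */
	return h % numsegs;				/* REDUCE_LAZYMOD */
}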
Example #3
/*
 * Given the total number of primary segment databases and a number of
 * segments to "skip", this routine creates a boolean map (array) the
 * size of the total number of segments and randomly marks total_to_skip
 * of its entries as "skipped". This is used for external tables with
 * the 'gpfdist' protocol, where we want a number of *random* segdbs to
 * connect to a gpfdist client.
 *
 * The caller should pfree skip_map when done with it.
 */
bool *
makeRandomSegMap(int total_primaries, int total_to_skip)
{
	int			randint;     /* some random int representing a seg    */
	int			skipped = 0; /* num segs already marked to be skipped */
	bool		*skip_map;
	
	Assert(total_to_skip <= total_primaries);	/* otherwise the loop below never terminates */

	skip_map = (bool *) palloc(total_primaries * sizeof(bool));
	MemSet(skip_map, false, total_primaries * sizeof(bool));
	
	while (total_to_skip != skipped)
	{
		/*
		 * Create a random int between 0 and (total_primaries - 1).
		 *
		 * NOTE that the lower and upper limits in cdb_randint() are
		 * inclusive, so we take them into account. In practice the
		 * chance of the limits being selected by the random generator
		 * is extremely small, so we may want to find a better random
		 * generator at some point (not critical, though).
		 */
		randint = cdb_randint(0, total_primaries - 1);
		
		/*
		 * Mark this random index 'true' in the skip map (to be
		 * skipped), unless it was already marked.
		 */
		if (skip_map[randint] == false)
		{
			skip_map[randint] = true;
			skipped++;
		}
	}	
	
	return skip_map;
}
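
A hypothetical usage sketch for makeRandomSegMap(), illustrating the ownership rule from the header comment; the segment count and skip count below are made-up values.

/*
 * Hypothetical caller: skip 2 of 8 primaries at random, act on the
 * rest, then free the map as the header comment requires.
 */
static void
use_skip_map_sketch(void)
{
	int			i;
	int			total_primaries = 8;	/* assumed value */
	bool	   *skip_map = makeRandomSegMap(total_primaries, 2);

	for (i = 0; i < total_primaries; i++)
	{
		if (!skip_map[i])
		{
			/* this segdb was not skipped: connect it to the gpfdist client */
		}
	}

	pfree(skip_map);			/* caller owns the map */
}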
Example #4
void
add_filesystem_credential(const char * uri)
{
	char	   *protocol;
	char	   *host;

	bool		found = false;
	struct FileSystemCredentialKey key;

	HTAB	   *currentFilesystemCredentials;
	MemoryContext currentFilesystemCredentialsMemoryContext;

	get_current_credential_cache_and_memcxt(&currentFilesystemCredentials,
			&currentFilesystemCredentialsMemoryContext);

	Insist(NULL != currentFilesystemCredentials);
	Insist(NULL != currentFilesystemCredentialsMemoryContext);

	MemoryContext old = MemoryContextSwitchTo(currentFilesystemCredentialsMemoryContext);

	memset(&key, 0, sizeof(key));

	if (HdfsParsePath(uri, &protocol, &host, &key.port, NULL)
			|| NULL == protocol || NULL == host)
		elog(ERROR, "failed to parse URI: %s", uri);

	StrNCpy(key.protocol, protocol, sizeof(key.protocol));
	StrNCpy(key.host, host, sizeof(key.host));

	hash_search(currentFilesystemCredentials, &key, HASH_FIND, &found);

	if (!found)
	{
		int			retry = 5;
		volatile bool success = false;	/* read in PG_CATCH after longjmp; must be volatile */

		/*
		 * Canceling the query at this point will raise an exception. We
		 * have to make sure we remove the new entry from the hash table,
		 * because all credentials are removed from HDFS when we end the
		 * transaction, and this entry does not have a valid credential yet.
		 */
		PG_TRY();
		{
			struct FileSystemCredential *entry =
					(struct FileSystemCredential *) hash_search(
							currentFilesystemCredentials, &key, HASH_ENTER, NULL);

			Assert(NULL != entry);

			while (true)
			{
				success = get_filesystem_credential_internal(entry);

				if (success)
				{
					break;
				}

				if (--retry <= 0)
				{
					hash_search(currentFilesystemCredentials, &key, HASH_REMOVE, NULL);
					break;
				}

				elog(DEBUG5, "failed to get credentials for %s://%s:%d, retrying...",
					 key.protocol, key.host, key.port);

				pg_usleep(cdb_randint(0, 5) * 1000000L);
			}
		}
		PG_CATCH();
		{
			if (!success)
			{
				hash_search(currentFilesystemCredentials, &key, HASH_REMOVE, NULL);
			}
			PG_RE_THROW();
		}
		PG_END_TRY();

		if (retry <= 0)
		{
			elog(ERROR, "failed to get filesystem credential for URI %s", uri);
		}
	}

	MemoryContextSwitchTo(old);
}
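
The loop above combines a bounded retry count with a random zero-to-five second sleep, so concurrent backends that all fail at once do not retry in lockstep. The same pattern in isolation, as a hypothetical helper (try_once stands in for get_filesystem_credential_internal):

/*
 * Hypothetical sketch of the retry-with-jitter pattern: attempt an
 * operation up to max_retries times, sleeping a random 0-5 seconds
 * between attempts to spread out concurrent retries.
 */
static bool
retry_with_jitter_sketch(bool (*try_once)(void *), void *arg, int max_retries)
{
	int			retry;

	for (retry = max_retries; retry > 0; retry--)
	{
		if (try_once(arg))
			return true;

		if (retry > 1)
			pg_usleep(cdb_randint(0, 5) * 1000000L);	/* 0-5 s of jitter */
	}

	return false;
}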