void ctp2_TextBuffer::Scroll(sint32 numLines)
{

	sint32		errcode;
	char		*buffer;
	sint32		deltaY = numLines * m_charHeight;	// pixel delta: positive scrolls content up, negative down

	errcode = m_surface->Lock(NULL, (LPVOID *)&buffer, 0);
	Assert(errcode == AUI_ERRCODE_OK);
	if (errcode != AUI_ERRCODE_OK) return;

	sint32		x = m_rect.left;
	sint32		y = m_rect.top;

	sint32		h = m_rect.bottom - m_rect.top,
				w = m_rect.right - m_rect.left,
				copyHeight = h - abs(deltaY);

	sint32		pitch = m_surface->Pitch();

	uint32		*srcPtr, *destPtr;
	sint32		dy = abs(deltaY);
	sint32		i,j;

	sint32		slop;
	char		*scrollBuffer = buffer + (pitch * y + x * 2);	// top-left of the text rect (16bpp surface, copied 2 pixels per uint32)

	if (deltaY) {
		if (deltaY < 0) {	// scrolling down: copy rows bottom-up so source and dest don't overlap, then blank the top
			srcPtr =	(uint32 *)(scrollBuffer + (pitch * (h - dy - 1)));
			destPtr =	(uint32 *)(scrollBuffer + (pitch * (h - 1)));
			slop = (pitch / 4) + (w / 2);
			for (i=0; i<copyHeight; i++) {
				for (j=0; j<w>>1; j++) {
					*destPtr++ = *srcPtr++;
				}
				srcPtr -= slop;
				destPtr -= slop;
			}
			for (i=0; i<dy; i++) {
				for (j=0; j<w>>1; j++) {
					*destPtr++ = 0x00000000;
				}
				destPtr -= slop;
			}
		} else {			// scrolling up: copy rows top-down, then blank the bottom
			srcPtr =	(uint32 *)(scrollBuffer + (pitch * dy));
			destPtr =	(uint32 *)(scrollBuffer);
			slop = (pitch / 4) - (w / 2);
			for (i=0; i<copyHeight; i++) {
				for (j=0; j<w>>1; j++) {
					*destPtr++ = *srcPtr++;
				}
				srcPtr += slop;
				destPtr += slop;
			}
			for (i=0; i<dy; i++) {
				for (j=0; j<w>>1; j++) {
					*destPtr++ = 0x00000000;
				}
				destPtr += slop;
			}
		}
	}

	errcode = m_surface->Unlock(buffer);
	Assert(errcode == AUI_ERRCODE_OK);
	if (errcode != AUI_ERRCODE_OK) return;
}
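/*
 * Minimal usage sketch (hypothetical caller; DrawBottomLine is illustrative,
 * not part of the source): append a line by scrolling the existing contents
 * up one row of text, then drawing into the freshly blanked bottom line.
 */
extern void DrawBottomLine(ctp2_TextBuffer *textBuffer, const char *text);	// hypothetical renderer

void ctp2_TextBuffer_AppendLineExample(ctp2_TextBuffer *textBuffer, const char *text)
{
	textBuffer->Scroll(1);				// positive count scrolls content up, blanking the bottom line
	DrawBottomLine(textBuffer, text);
}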
Example #2
//-----------------------------------------------------------------------------
// A Scene image file contains all the compiled .XCD
//-----------------------------------------------------------------------------
bool CSceneImage::CreateSceneImageFile( CUtlBuffer &targetBuffer, char const *pchModPath, bool bLittleEndian, bool bQuiet, ISceneCompileStatus *pStatus )
{
	CUtlVector<fileList_t>	vcdFileList;
	CUtlSymbolTable			vcdSymbolTable( 0, 32, true );

	Msg( "\n" );

	// get all the VCD files according to the search paths
	char searchPaths[512];
	g_pFullFileSystem->GetSearchPath( "GAME", false, searchPaths, sizeof( searchPaths ) );
	char *pPath = strtok( searchPaths, ";" );
	while ( pPath )
	{
		int currentCount = vcdFileList.Count();

		char szPath[MAX_PATH];
		V_ComposeFileName( pPath, "scenes/*.vcd", szPath, sizeof( szPath ) );

		scriptlib->FindFiles( szPath, true, vcdFileList );

		Msg( "Scenes: Searching '%s' - Found %d scenes.\n", szPath, vcdFileList.Count() - currentCount );

		pPath = strtok( NULL, ";" );
	}

	if ( !vcdFileList.Count() )
	{
		Msg( "Scenes: No Scene Files found!\n" );
		return false;
	}

	// iterate and convert all the VCD files
	bool bGameIsTF = V_stristr( pchModPath, "\\tf" ) != NULL;
	for ( int i=0; i<vcdFileList.Count(); i++ )
	{
		const char *pFilename = vcdFileList[i].fileName.String();
		const char *pSceneName = V_stristr( pFilename, "scenes\\" );
		if ( !pSceneName )
		{
			continue;
		}

		if ( !bLittleEndian && bGameIsTF && V_stristr( pSceneName, "high\\" ) )
		{
			continue;
		}

		// process files in order they would be found in search paths
		// i.e. skipping later processed files that match an earlier conversion
		UtlSymId_t symbol = vcdSymbolTable.Find( pSceneName );
		if ( symbol == UTL_INVAL_SYMBOL )
		{
			vcdSymbolTable.AddString( pSceneName );

			pStatus->UpdateStatus( pFilename, bQuiet, i, vcdFileList.Count() );

			if ( !CreateTargetFile_VCD( pFilename, "", false, bLittleEndian ) )
			{
				Error( "CreateSceneImageFile: Failed on '%s' conversion!\n", pFilename );
			}
		}
	}

	if ( !g_SceneFiles.Count() )
	{
		// nothing to do
		return true;
	}

	Msg( "Scenes: Finalizing %d unique scenes.\n", g_SceneFiles.Count() );


	// get the string pool
	CUtlVector< unsigned int > stringOffsets;
	CUtlBuffer stringPool;
	g_ChoreoStringPool.GetTableAndPool( stringOffsets, stringPool );

	if ( !bQuiet )
	{
		Msg( "Scenes: String Table: %d bytes\n", stringOffsets.Count() * sizeof( int ) );
		Msg( "Scenes: String Pool: %d bytes\n", stringPool.TellMaxPut() );
	}

	// first header, then lookup table, then string pool blob
	int stringPoolStart = sizeof( SceneImageHeader_t ) + stringOffsets.Count() * sizeof( int );
	// then directory
	int sceneEntryStart = stringPoolStart + stringPool.TellMaxPut();
	// then variable sized summaries
	int sceneSummaryStart = sceneEntryStart + g_SceneFiles.Count() * sizeof( SceneImageEntry_t );
	// then variable sized compiled binary scene data
	int sceneDataStart = 0;

	// construct header
	SceneImageHeader_t imageHeader = { 0 };
	imageHeader.nId = SCENE_IMAGE_ID;
	imageHeader.nVersion = SCENE_IMAGE_VERSION;
	imageHeader.nNumScenes = g_SceneFiles.Count();
	imageHeader.nNumStrings = stringOffsets.Count();
	imageHeader.nSceneEntryOffset = sceneEntryStart;
	if ( !bLittleEndian )
	{
		imageHeader.nId = BigLong( imageHeader.nId );
		imageHeader.nVersion = BigLong( imageHeader.nVersion );
		imageHeader.nNumScenes = BigLong( imageHeader.nNumScenes );
		imageHeader.nNumStrings = BigLong( imageHeader.nNumStrings );
		imageHeader.nSceneEntryOffset = BigLong( imageHeader.nSceneEntryOffset );
	}
	targetBuffer.Put( &imageHeader, sizeof( imageHeader ) );

	// header is immediately followed by string table and pool
	for ( int i = 0; i < stringOffsets.Count(); i++ )
	{
		unsigned int offset = stringPoolStart + stringOffsets[i];
		if ( !bLittleEndian )
		{
			offset = BigLong( offset );
		}
		targetBuffer.PutInt( offset );
	}
	Assert( stringPoolStart == targetBuffer.TellMaxPut() );
	targetBuffer.Put( stringPool.Base(), stringPool.TellMaxPut() );

	// construct directory
	CUtlSortVector< SceneImageEntry_t, CSceneImageEntryLessFunc > imageDirectory;
	imageDirectory.EnsureCapacity( g_SceneFiles.Count() );

	// build directory
	// directory is linear sorted by filename checksum for later binary search
	for ( int i = 0; i < g_SceneFiles.Count(); i++ )
	{
		SceneImageEntry_t imageEntry = { 0 };

		// name needs to be normalized for deterministic CRC name calc later
		// calc crc based on scenes\anydir\anyscene.vcd
		char szCleanName[MAX_PATH];
		V_strncpy( szCleanName, g_SceneFiles[i].fileName.String(), sizeof( szCleanName ) );
		V_strlower( szCleanName );
		V_FixSlashes( szCleanName );
		char *pName = V_stristr( szCleanName, "scenes\\" );
		if ( !pName )
		{
			// must have scenes\ in filename
			Error( "CreateSceneImageFile: Unexpected lack of scenes prefix on %s\n", g_SceneFiles[i].fileName.String() );
		}

		CRC32_t crcFilename = CRC32_ProcessSingleBuffer( pName, strlen( pName ) );
		imageEntry.crcFilename = crcFilename;

		// temp store an index to its file, fixup later, necessary to access post sort
		imageEntry.nDataOffset = i;
		if ( imageDirectory.Find( imageEntry ) != imageDirectory.InvalidIndex() )
		{
			// filename checksums must be unique or runtime binary search would be bogus
			Error( "CreateSceneImageFile: Unexpected filename checksum collision!\n" );
		}		

		imageDirectory.Insert( imageEntry );
	}

	// determine sort order and start of data after dynamic summaries
	CUtlVector< int > writeOrder;
	writeOrder.EnsureCapacity( g_SceneFiles.Count() );
	sceneDataStart = sceneSummaryStart;
	for ( int i = 0; i < imageDirectory.Count(); i++ )
	{
		// reclaim offset, indicates write order of scene file
		int iScene = imageDirectory[i].nDataOffset;
		writeOrder.AddToTail( iScene );

		// march past each variable sized summary to determine start of scene data
		int numSounds = g_SceneFiles[iScene].soundList.Count();
		sceneDataStart += sizeof( SceneImageSummary_t ) + ( numSounds - 1 ) * sizeof( int );
	}

	// finalize and write directory
	Assert( sceneEntryStart == targetBuffer.TellMaxPut() );
	int nSummaryOffset = sceneSummaryStart;
	int nDataOffset = sceneDataStart;
	for ( int i = 0; i < imageDirectory.Count(); i++ )
	{
		int iScene = writeOrder[i];

		imageDirectory[i].nDataOffset = nDataOffset;
		imageDirectory[i].nDataLength = g_SceneFiles[iScene].compiledBuffer.TellMaxPut();
		imageDirectory[i].nSceneSummaryOffset = nSummaryOffset;
		if ( !bLittleEndian )
		{
			imageDirectory[i].crcFilename = BigLong( imageDirectory[i].crcFilename );
			imageDirectory[i].nDataOffset = BigLong( imageDirectory[i].nDataOffset );
			imageDirectory[i].nDataLength = BigLong( imageDirectory[i].nDataLength );
			imageDirectory[i].nSceneSummaryOffset = BigLong( imageDirectory[i].nSceneSummaryOffset );
		}
		targetBuffer.Put( &imageDirectory[i], sizeof( SceneImageEntry_t ) );

		int numSounds = g_SceneFiles[iScene].soundList.Count();
		nSummaryOffset += sizeof( SceneImageSummary_t ) + (numSounds - 1) * sizeof( int );

		nDataOffset += g_SceneFiles[iScene].compiledBuffer.TellMaxPut();
	}

	// finalize and write summaries
	Assert( sceneSummaryStart == targetBuffer.TellMaxPut() );
	for ( int i = 0; i < imageDirectory.Count(); i++ )
	{
		int iScene = writeOrder[i];
		int msecs = g_SceneFiles[iScene].msecs;
		int soundCount = g_SceneFiles[iScene].soundList.Count();
		if ( !bLittleEndian )
		{
			msecs = BigLong( msecs );
			soundCount = BigLong( soundCount );
		}
		targetBuffer.PutInt( msecs );
		targetBuffer.PutInt( soundCount );
		for ( int j = 0; j < g_SceneFiles[iScene].soundList.Count(); j++ )
		{
			int soundId = g_SceneFiles[iScene].soundList[j];
			if ( !bLittleEndian )
			{
				soundId = BigLong( soundId );
			}
			targetBuffer.PutInt( soundId );
		}
	}

	// finalize and write data
	Assert( sceneDataStart == targetBuffer.TellMaxPut() );
	for ( int i = 0; i < imageDirectory.Count(); i++ )
	{	
		int iScene = writeOrder[i];
		targetBuffer.Put( g_SceneFiles[iScene].compiledBuffer.Base(), g_SceneFiles[iScene].compiledBuffer.TellMaxPut() );
	}

	if ( !bQuiet )
	{
		Msg( "Scenes: Final size: %.2f MB\n", targetBuffer.TellMaxPut() / (1024.0f * 1024.0f ) );
	}

	// cleanup
	g_SceneFiles.Purge();

	return true;
}
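/*
 * Hedged reader-side sketch (not from the source): given the layout written
 * above, a runtime lookup can binary-search the directory by filename CRC.
 * FindSceneInImage is illustrative; the struct names and the
 * normalize-then-CRC convention mirror the writer code.
 */
const SceneImageEntry_t *FindSceneInImage( const void *pImageBase, const char *pSceneName )
{
	const SceneImageHeader_t *pHeader = (const SceneImageHeader_t *)pImageBase;
	const SceneImageEntry_t *pEntries = (const SceneImageEntry_t *)( (const char *)pImageBase + pHeader->nSceneEntryOffset );

	// names were lowercased and slash-fixed before the CRC at build time
	char szCleanName[MAX_PATH];
	V_strncpy( szCleanName, pSceneName, sizeof( szCleanName ) );
	V_strlower( szCleanName );
	V_FixSlashes( szCleanName );
	CRC32_t crcFilename = CRC32_ProcessSingleBuffer( szCleanName, strlen( szCleanName ) );

	// directory is sorted ascending by crcFilename
	int nLow = 0;
	int nHigh = pHeader->nNumScenes - 1;
	while ( nLow <= nHigh )
	{
		int nMid = ( nLow + nHigh ) / 2;
		if ( pEntries[nMid].crcFilename == crcFilename )
			return &pEntries[nMid];
		if ( pEntries[nMid].crcFilename < crcFilename )
			nLow = nMid + 1;
		else
			nHigh = nMid - 1;
	}
	return NULL;
}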
Example #3
/**
**  Create a missile.
**
**  @param l  Lua state.
*/
static int CclMissile(lua_State *l)
{
	MissileType *type = NULL;
	PixelPos position(-1, -1);
	PixelPos destination(-1, -1);
	PixelPos source(-1, -1);
	Missile *missile = NULL;

	DebugPrint("FIXME: not finished\n");

	const int args = lua_gettop(l);
	for (int j = 0; j < args; ++j) {
		const char *value = LuaToString(l, j + 1);
		++j;

		if (!strcmp(value, "type")) {
			type = MissileTypeByIdent(LuaToString(l, j + 1));
		} else if (!strcmp(value, "pos")) {
			CclGetPos(l, &position.x, &position.y, j + 1);
		} else if (!strcmp(value, "origin-pos")) {
			CclGetPos(l, &source.x, &source.y, j + 1);
		} else if (!strcmp(value, "goal")) {
			CclGetPos(l, &destination.x, &destination.y, j + 1);
		} else if (!strcmp(value, "local")) {
			Assert(type);
			missile = MakeLocalMissile(*type, position, destination);
			missile->Local = 1;
			--j;
		} else if (!strcmp(value, "global")) {
			Assert(type);
			missile = MakeMissile(*type, position, destination);
			missile->position = position;
			missile->source = source;
			missile->destination = destination;
			missile->Local = 0;
			--j;
		} else if (!strcmp(value, "frame")) {
			Assert(missile);
			missile->SpriteFrame = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "state")) {
			Assert(missile);
			missile->State = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "anim-wait")) {
			Assert(missile);
			missile->AnimWait = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "wait")) {
			Assert(missile);
			missile->Wait = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "delay")) {
			Assert(missile);
			missile->Delay = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "source")) {
			Assert(missile);
			lua_pushvalue(l, j + 1);
			missile->SourceUnit = CclGetUnitFromRef(l);
			lua_pop(l, 1);
		} else if (!strcmp(value, "target")) {
			Assert(missile);
			lua_pushvalue(l, j + 1);
			missile->TargetUnit = CclGetUnitFromRef(l);
			lua_pop(l, 1);
		} else if (!strcmp(value, "damage")) {
			Assert(missile);
			missile->Damage = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "ttl")) {
			Assert(missile);
			missile->TTL = LuaToNumber(l, j + 1);
		} else if (!strcmp(value, "hidden")) {
			Assert(missile);
			missile->Hidden = 1;
			--j;
		} else if (!strcmp(value, "step")) {
			Assert(missile);
			if (!lua_istable(l, j + 1) || lua_rawlen(l, j + 1) != 2) {
				LuaError(l, "incorrect argument");
			}
			missile->CurrentStep = LuaToNumber(l, j + 1, 1);
			missile->TotalStep = LuaToNumber(l, j + 1, 2);
		} else {
			LuaError(l, "Unsupported tag: %s" _C_ value);
		}
	}

	// we need to reinitialize position parameters - that's because of
	// the way InitMissile() (called from MakeLocalMissile()) computes
	// them - it works for creating a missile during a game but breaks
	// loading the missile from a file.
	missile->position = position;
	missile->source = source;
	missile->destination = destination;
	return 0;
}
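/*
 * Hedged sketch (registration pattern assumed from the other Ccl* bindings;
 * the Lua text is illustrative): the binding is registered once, after which
 * a saved game can recreate a missile using the tag/value format parsed above.
 */
static void ExampleRegisterMissile(lua_State *l)
{
	lua_register(l, "Missile", CclMissile);
	// A save file would then contain a call such as:
	//   Missile("type", "missile-arrow",
	//           "pos", {1024, 512}, "goal", {1200, 600},
	//           "global", "frame", 2, "ttl", 50)
}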
Example #4
/*
 * Associate a BackgroundWorkerHandle with a shm_mq_handle just as if it had
 * been passed to shm_mq_attach.
 */
void
shm_mq_set_handle(shm_mq_handle *mqh, BackgroundWorkerHandle *handle)
{
	Assert(mqh->mqh_handle == NULL);
	mqh->mqh_handle = handle;
}
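/*
 * Hedged usage sketch (example_attach_with_handle is illustrative): when the
 * worker handle is not yet known at attach time, attach with a NULL handle
 * and associate it afterwards, so blocking operations can detect worker
 * death instead of hanging.
 */
static shm_mq_handle *
example_attach_with_handle(shm_mq *mq, dsm_segment *seg,
						   BackgroundWorkerHandle *handle)
{
	shm_mq_handle *mqh = shm_mq_attach(mq, seg, NULL);

	shm_mq_set_handle(mqh, handle);
	return mqh;
}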
Example #5
/*
 * Receive a message from a shared message queue.
 *
 * We set *nbytes to the message length and *data to point to the message
 * payload.  If the entire message exists in the queue as a single,
 * contiguous chunk, *data will point directly into shared memory; otherwise,
 * it will point to a temporary buffer.  This mostly avoids data copying in
 * the hoped-for case where messages are short compared to the buffer size,
 * while still allowing longer messages.  In either case, the return value
 * remains valid until the next receive operation is performed on the queue.
 *
 * When nowait = false, we'll wait on our process latch when the ring buffer
 * is empty and we have not yet received a full message.  The sender will
 * set our process latch after more data has been written, and we'll resume
 * processing.  Each call will therefore return a complete message
 * (unless the sender detaches the queue).
 *
 * When nowait = true, we do not manipulate the state of the process latch;
 * instead, whenever the buffer is empty and we need to read from it, we
 * return SHM_MQ_WOULD_BLOCK.  In this case, the caller should call this
 * function again after the process latch has been set.
 */
shm_mq_result
shm_mq_receive(shm_mq_handle *mqh, Size *nbytesp, void **datap, bool nowait)
{
	shm_mq	   *mq = mqh->mqh_queue;
	shm_mq_result res;
	Size		rb = 0;
	Size		nbytes;
	void	   *rawdata;

	Assert(mq->mq_receiver == MyProc);

	/* We can't receive data until the sender has attached. */
	if (!mqh->mqh_counterparty_attached)
	{
		if (nowait)
		{
			if (shm_mq_get_sender(mq) == NULL)
				return SHM_MQ_WOULD_BLOCK;
		}
		else if (!shm_mq_wait_internal(mq, &mq->mq_sender, mqh->mqh_handle)
				 && shm_mq_get_sender(mq) == NULL)
		{
			mq->mq_detached = true;
			return SHM_MQ_DETACHED;
		}
		mqh->mqh_counterparty_attached = true;
	}

	/* Consume any zero-copy data from previous receive operation. */
	if (mqh->mqh_consume_pending > 0)
	{
		shm_mq_inc_bytes_read(mq, mqh->mqh_consume_pending);
		mqh->mqh_consume_pending = 0;
	}

	/* Try to read, or finish reading, the length word from the buffer. */
	while (!mqh->mqh_length_word_complete)
	{
		/* Try to receive the message length word. */
		Assert(mqh->mqh_partial_bytes < sizeof(Size));
		res = shm_mq_receive_bytes(mq, sizeof(Size) - mqh->mqh_partial_bytes,
								   nowait, &rb, &rawdata);
		if (res != SHM_MQ_SUCCESS)
			return res;

		/*
		 * Hopefully, we'll receive the entire message length word at once.
		 * But if sizeof(Size) > MAXIMUM_ALIGNOF, then it might be split over
		 * multiple reads.
		 */
		if (mqh->mqh_partial_bytes == 0 && rb >= sizeof(Size))
		{
			Size		needed;

			nbytes = *(Size *) rawdata;

			/* If we've already got the whole message, we're done. */
			needed = MAXALIGN(sizeof(Size)) + MAXALIGN(nbytes);
			if (rb >= needed)
			{
				/*
				 * Technically, we could consume the message length
				 * information at this point, but the extra write to shared
				 * memory wouldn't be free and in most cases we would reap no
				 * benefit.
				 */
				mqh->mqh_consume_pending = needed;
				*nbytesp = nbytes;
				*datap = ((char *) rawdata) + MAXALIGN(sizeof(Size));
				return SHM_MQ_SUCCESS;
			}

			/*
			 * We don't have the whole message, but we at least have the whole
			 * length word.
			 */
			mqh->mqh_expected_bytes = nbytes;
			mqh->mqh_length_word_complete = true;
			shm_mq_inc_bytes_read(mq, MAXALIGN(sizeof(Size)));
			rb -= MAXALIGN(sizeof(Size));
		}
		else
		{
			Size		lengthbytes;

			/* Can't be split unless bigger than required alignment. */
			Assert(sizeof(Size) > MAXIMUM_ALIGNOF);

			/* Message word is split; need buffer to reassemble. */
			if (mqh->mqh_buffer == NULL)
			{
				mqh->mqh_buffer = MemoryContextAlloc(mqh->mqh_context,
													 MQH_INITIAL_BUFSIZE);
				mqh->mqh_buflen = MQH_INITIAL_BUFSIZE;
			}
			Assert(mqh->mqh_buflen >= sizeof(Size));

			/* Copy and consume partial length word. */
			if (mqh->mqh_partial_bytes + rb > sizeof(Size))
				lengthbytes = sizeof(Size) - mqh->mqh_partial_bytes;
			else
				lengthbytes = rb;
			memcpy(&mqh->mqh_buffer[mqh->mqh_partial_bytes], rawdata,
				   lengthbytes);
			mqh->mqh_partial_bytes += lengthbytes;
			shm_mq_inc_bytes_read(mq, MAXALIGN(lengthbytes));
			rb -= lengthbytes;

			/* If we now have the whole word, we're ready to read payload. */
			if (mqh->mqh_partial_bytes >= sizeof(Size))
			{
				Assert(mqh->mqh_partial_bytes == sizeof(Size));
				mqh->mqh_expected_bytes = *(Size *) mqh->mqh_buffer;
				mqh->mqh_length_word_complete = true;
				mqh->mqh_partial_bytes = 0;
			}
		}
	}
	nbytes = mqh->mqh_expected_bytes;

	if (mqh->mqh_partial_bytes == 0)
	{
		/*
		 * Try to obtain the whole message in a single chunk.  If this works,
		 * we need not copy the data and can return a pointer directly into
		 * shared memory.
		 */
		res = shm_mq_receive_bytes(mq, nbytes, nowait, &rb, &rawdata);
		if (res != SHM_MQ_SUCCESS)
			return res;
		if (rb >= nbytes)
		{
			mqh->mqh_length_word_complete = false;
			mqh->mqh_consume_pending = MAXALIGN(nbytes);
			*nbytesp = nbytes;
			*datap = rawdata;
			return SHM_MQ_SUCCESS;
		}

		/*
		 * The message has wrapped the buffer.  We'll need to copy it in order
		 * to return it to the client in one chunk.  First, make sure we have
		 * a large enough buffer available.
		 */
		if (mqh->mqh_buflen < nbytes)
		{
			Size		newbuflen = Max(mqh->mqh_buflen, MQH_INITIAL_BUFSIZE);

			while (newbuflen < nbytes)
				newbuflen *= 2;

			if (mqh->mqh_buffer != NULL)
			{
				pfree(mqh->mqh_buffer);
				mqh->mqh_buffer = NULL;
				mqh->mqh_buflen = 0;
			}
			mqh->mqh_buffer = MemoryContextAlloc(mqh->mqh_context, newbuflen);
			mqh->mqh_buflen = newbuflen;
		}
	}

	/* Loop until we've copied the entire message. */
	for (;;)
	{
		Size		still_needed;

		/* Copy as much as we can. */
		Assert(mqh->mqh_partial_bytes + rb <= nbytes);
		memcpy(&mqh->mqh_buffer[mqh->mqh_partial_bytes], rawdata, rb);
		mqh->mqh_partial_bytes += rb;

		/*
		 * Update count of bytes read, with alignment padding.  Note that this
		 * will never actually insert any padding except at the end of a
		 * message, because the buffer size is a multiple of MAXIMUM_ALIGNOF,
		 * and each read and write is as well.
		 */
		Assert(mqh->mqh_partial_bytes == nbytes || rb == MAXALIGN(rb));
		shm_mq_inc_bytes_read(mq, MAXALIGN(rb));

		/* If we got all the data, exit the loop. */
		if (mqh->mqh_partial_bytes >= nbytes)
			break;

		/* Wait for some more data. */
		still_needed = nbytes - mqh->mqh_partial_bytes;
		res = shm_mq_receive_bytes(mq, still_needed, nowait, &rb, &rawdata);
		if (res != SHM_MQ_SUCCESS)
			return res;
		if (rb > still_needed)
			rb = still_needed;
	}

	/* Return the complete message, and reset for next message. */
	*nbytesp = nbytes;
	*datap = mqh->mqh_buffer;
	mqh->mqh_length_word_complete = false;
	mqh->mqh_partial_bytes = 0;
	return SHM_MQ_SUCCESS;
}
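/*
 * Hedged consumer-loop sketch (process_message is illustrative): blocking
 * receive until the sender detaches.  Note that the returned pointer is
 * only valid until the next receive on the queue.
 */
extern void process_message(void *data, Size nbytes);	/* hypothetical handler */

static void
example_consume(shm_mq_handle *mqh)
{
	for (;;)
	{
		Size		nbytes;
		void	   *data;
		shm_mq_result res;

		res = shm_mq_receive(mqh, &nbytes, &data, false);
		if (res == SHM_MQ_DETACHED)
			break;				/* sender is gone; no more messages */
		Assert(res == SHM_MQ_SUCCESS);
		process_message(data, nbytes);
	}
}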
Example #6
/**
 *  Interprets a string and assigns it to a USBFilter field.
 *
 *  (This function is also used by HostUSBDeviceFilter.)
 *
 *  @param  aFilter     The filter.
 *  @param  aIdx        The field index.
 *  @param  aValue      The input string.
 *  @param  aErrStr     Where to return the error string on failure.
 *
 *  @return COM status code.
 *  @remark The idea was to have this as a static function, but tr() doesn't wanna work without a class :-/
 */
/*static*/ HRESULT USBDeviceFilter::usbFilterFieldFromString(PUSBFILTER aFilter,
                                                             USBFILTERIDX aIdx,
                                                             const Utf8Str &aValue,
                                                             Utf8Str &aErrStr)
{
    int vrc;
//     Utf8Str str (aStr);
    if (aValue.isEmpty())
        vrc = USBFilterSetIgnore(aFilter, aIdx);
    else
    {
        const char *pcszValue = aValue.c_str();
        if (USBFilterIsNumericField(aIdx))
        {
            /* Is it a lonely number? */
            char *pszNext;
            uint64_t u64;
            vrc = RTStrToUInt64Ex(pcszValue, &pszNext, 16, &u64);
            if (RT_SUCCESS(vrc))
                pszNext = RTStrStripL (pszNext);
            if (    vrc == VINF_SUCCESS
                &&  !*pszNext)
            {
                if (u64 > 0xffff)
                {
                    // there was a bug writing out "-1" values in earlier versions, which got
                    // written as "FFFFFFFF"; make sure we don't fail on those
                    if (u64 == 0xffffffff)
                        u64 = 0xffff;
                    else
                    {
                        aErrStr = Utf8StrFmt(tr("The %s value '%s' is too big (max 0xFFFF)"), describeUSBFilterIdx(aIdx), pcszValue);
                        return E_INVALIDARG;
                    }
                }

                vrc = USBFilterSetNumExact(aFilter, aIdx, (uint16_t)u64, true /* fMustBePresent */);
            }
            else
                vrc = USBFilterSetNumExpression(aFilter, aIdx, pcszValue, true /* fMustBePresent */);
        }
        else
        {
            /* Any wildcard in the string? */
            Assert(USBFilterIsStringField(aIdx));
            if (    strchr(pcszValue, '*')
                ||  strchr(pcszValue, '?')
                /* || strchr (psz, '[') - later */
                )
                vrc = USBFilterSetStringPattern(aFilter, aIdx, pcszValue, true /* fMustBePresent */);
            else
                vrc = USBFilterSetStringExact(aFilter, aIdx, pcszValue, true /* fMustBePresent */);
        }
    }

    if (RT_FAILURE(vrc))
    {
        if (vrc == VERR_INVALID_PARAMETER)
        {
            aErrStr = Utf8StrFmt(tr("The %s filter expression '%s' is not valid"), describeUSBFilterIdx(aIdx), aValue.c_str());
            return E_INVALIDARG;
        }
        if (vrc == VERR_BUFFER_OVERFLOW)
        {
            aErrStr = Utf8StrFmt(tr("Insufficient expression space for the '%s' filter expression '%s'"), describeUSBFilterIdx(aIdx), aValue.c_str());
            return E_FAIL;
        }
        AssertRC(vrc);
        aErrStr = Utf8StrFmt(tr("Encountered unexpected status %Rrc when setting '%s' to '%s'"), vrc, describeUSBFilterIdx(aIdx), aValue.c_str());
        return E_FAIL;
    }

    return S_OK;
}
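/*
 * Hedged usage sketch: parsing a user-supplied vendor ID into a freshly
 * initialized filter.  USBFilterInit and USBFILTERIDX_VENDOR_ID are the
 * usual VBox USB-filter APIs; the value and logging are illustrative.
 */
static HRESULT exampleParseVendorId(const Utf8Str &strValue)
{
    USBFILTER filter;
    USBFilterInit(&filter, USBFILTERTYPE_CAPTURE);

    Utf8Str errStr;
    HRESULT hrc = USBDeviceFilter::usbFilterFieldFromString(&filter,
                                                            USBFILTERIDX_VENDOR_ID,
                                                            strValue,
                                                            errStr);
    if (FAILED(hrc))
        LogRel(("USB filter: %s\n", errStr.c_str()));
    return hrc;
}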
Example #7
/*
 * Translate an object name and arguments (as passed by the parser) to an
 * ObjectAddress.
 *
 * The returned object will be locked using the specified lockmode.  If a
 * sub-object is looked up, the parent object will be locked instead.
 *
 * If the object is a relation or a child object of a relation (e.g. an
 * attribute or constraint), the relation is also opened and *relp receives
 * the open relcache entry pointer; otherwise, *relp is set to NULL.  This
 * is a bit grotty but it makes life simpler, since the caller will
 * typically need the relcache entry too.  Caller must close the relcache
 * entry when done with it.  The relation is locked with the specified lockmode
 * if the target object is the relation itself or an attribute, but for other
 * child objects, only AccessShareLock is acquired on the relation.
 *
 * We don't currently provide a function to release the locks acquired here;
 * typically, the lock must be held until commit to guard against a concurrent
 * drop operation.
 */
ObjectAddress
get_object_address(ObjectType objtype, List *objname, List *objargs,
				   Relation *relp, LOCKMODE lockmode)
{
	ObjectAddress address;
	Relation	relation = NULL;

	/* Some kind of lock must be taken. */
	Assert(lockmode != NoLock);

	switch (objtype)
	{
		case OBJECT_INDEX:
		case OBJECT_SEQUENCE:
		case OBJECT_TABLE:
		case OBJECT_VIEW:
			relation =
				get_relation_by_qualified_name(objtype, objname, lockmode);
			address.classId = RelationRelationId;
			address.objectId = RelationGetRelid(relation);
			address.objectSubId = 0;
			break;
		case OBJECT_COLUMN:
			address =
				get_object_address_attribute(objtype, objname, &relation,
											 lockmode);
			break;
		case OBJECT_RULE:
		case OBJECT_TRIGGER:
		case OBJECT_CONSTRAINT:
			address = get_object_address_relobject(objtype, objname, &relation);
			break;
		case OBJECT_DATABASE:
		case OBJECT_EXTENSION:
		case OBJECT_TABLESPACE:
		case OBJECT_ROLE:
		case OBJECT_SCHEMA:
		case OBJECT_LANGUAGE:
			address = get_object_address_unqualified(objtype, objname);
			break;
		case OBJECT_TYPE:
		case OBJECT_DOMAIN:
			address.classId = TypeRelationId;
			address.objectId =
				typenameTypeId(NULL, makeTypeNameFromNameList(objname), NULL);
			address.objectSubId = 0;
			break;
		case OBJECT_AGGREGATE:
			address.classId = ProcedureRelationId;
			address.objectId = LookupAggNameTypeNames(objname, objargs, false);
			address.objectSubId = 0;
			break;
		case OBJECT_FUNCTION:
			address.classId = ProcedureRelationId;
			address.objectId = LookupFuncNameTypeNames(objname, objargs, false);
			address.objectSubId = 0;
			break;
		case OBJECT_OPERATOR:
			Assert(list_length(objargs) == 2);
			address.classId = OperatorRelationId;
			address.objectId =
				LookupOperNameTypeNames(NULL, objname,
										(TypeName *) linitial(objargs),
										(TypeName *) lsecond(objargs),
										false, -1);
			address.objectSubId = 0;
			break;
		case OBJECT_OPCLASS:
		case OBJECT_OPFAMILY:
			address = get_object_address_opcf(objtype, objname, objargs);
			break;
		case OBJECT_CAST:
			{
				TypeName   *sourcetype = (TypeName *) linitial(objname);
				TypeName   *targettype = (TypeName *) linitial(objargs);
				Oid			sourcetypeid = typenameTypeId(NULL, sourcetype, NULL);
				Oid			targettypeid = typenameTypeId(NULL, targettype, NULL);

				address.classId = CastRelationId;
				address.objectId =
					get_cast_oid(sourcetypeid, targettypeid, false);
				address.objectSubId = 0;
			}
			break;
		default:
			elog(ERROR, "unrecognized objtype: %d", (int) objtype);
			/* placate compiler, in case it thinks elog might return */
			address.classId = InvalidOid;
			address.objectId = InvalidOid;
			address.objectSubId = 0;
	}

	/*
	 * If we're dealing with a relation or attribute, then the relation is
	 * already locked.  If we're dealing with any other type of object, we
	 * need to lock it and then verify that it still exists.
	 */
	if (address.classId != RelationRelationId)
	{
		if (IsSharedRelation(address.classId))
			LockSharedObject(address.classId, address.objectId, 0, lockmode);
		else
			LockDatabaseObject(address.classId, address.objectId, 0, lockmode);
		/* Did it go away while we were waiting for the lock? */
		if (!object_exists(address))
			elog(ERROR, "cache lookup failed for class %u object %u subobj %d",
				 address.classId, address.objectId, address.objectSubId);
	}

	/* Return the object address and the relation. */
	*relp = relation;
	return address;
}
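/*
 * Hedged usage sketch: resolving a qualified table name to an ObjectAddress
 * while holding a lock (against concurrent DROP) until commit.  The table
 * name is illustrative.
 */
static void
example_lock_table(void)
{
	List	   *name = list_make2(makeString("public"), makeString("my_table"));
	Relation	rel;
	ObjectAddress addr;

	addr = get_object_address(OBJECT_TABLE, name, NIL, &rel, AccessShareLock);
	/* ... use addr.classId / addr.objectId ... */
	if (rel)
		relation_close(rel, NoLock);	/* keep the lock until commit */
}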
Example #8
int rtR0MemObjNativeMapUser(PPRTR0MEMOBJINTERNAL ppMem, RTR0MEMOBJ pMemToMap, RTR3PTR R3PtrFixed, size_t uAlignment,
                            unsigned fProt, RTR0PROCESS R0Process)
{
#if 0
    /*
     * Check for unsupported stuff.
     */
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);
    AssertMsgReturn(R3PtrFixed == (RTR3PTR)-1, ("%p\n", R3PtrFixed), VERR_NOT_SUPPORTED);
    if (uAlignment > PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    int                rc;
    PRTR0MEMOBJHAIKU pMemToMapHaiku = (PRTR0MEMOBJHAIKU)pMemToMap;
    struct proc       *pProc            = (struct proc *)R0Process;
    struct vm_map     *pProcMap         = &pProc->p_vmspace->vm_map;

    /* calc protection */
    vm_prot_t       ProtectionFlags = 0;
    if ((fProt & RTMEM_PROT_NONE) == RTMEM_PROT_NONE)
        ProtectionFlags = VM_PROT_NONE;
    if ((fProt & RTMEM_PROT_READ) == RTMEM_PROT_READ)
        ProtectionFlags |= VM_PROT_READ;
    if ((fProt & RTMEM_PROT_WRITE) == RTMEM_PROT_WRITE)
        ProtectionFlags |= VM_PROT_WRITE;
    if ((fProt & RTMEM_PROT_EXEC) == RTMEM_PROT_EXEC)
        ProtectionFlags |= VM_PROT_EXECUTE;

    /* calc mapping address */
    PROC_LOCK(pProc);
    vm_offset_t AddrR3 = round_page((vm_offset_t)pProc->p_vmspace->vm_daddr + lim_max(pProc, RLIMIT_DATA));
    PROC_UNLOCK(pProc);

    /* Insert the object in the map. */
    rc = vm_map_find(pProcMap,              /* Map to insert the object in */
                     NULL,                  /* Object to map */
                     0,                     /* Start offset in the object */
                     &AddrR3,               /* Start address IN/OUT */
                     pMemToMap->cb,         /* Size of the mapping */
                     TRUE,                  /* Whether a suitable address should be searched for first */
                     ProtectionFlags,       /* protection flags */
                     VM_PROT_ALL,           /* Maximum protection flags */
                     0);                    /* Copy on write */

    /* Map the memory page by page into the destination map. */
    if (rc == KERN_SUCCESS)
    {
        size_t         cPages       = pMemToMap->cb >> PAGE_SHIFT;
        pmap_t         pPhysicalMap = pProcMap->pmap;
        vm_offset_t    AddrR3Dst    = AddrR3;

        if (   pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS
                || pMemToMap->enmType == RTR0MEMOBJTYPE_PHYS_NC
                || pMemToMap->enmType == RTR0MEMOBJTYPE_PAGE)
        {
            /* Mapping physical allocations */
            Assert(cPages == pMemToMapHaiku->u.Phys.cPages);

            /* Insert the memory page by page into the mapping. */
            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = pMemToMapHaiku->u.Phys.apPages[iPage];

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
            }
        }
        else
        {
            /* Mapping cont or low memory types */
            vm_offset_t AddrToMap = (vm_offset_t)pMemToMap->pv;

            for (uint32_t iPage = 0; iPage < cPages; iPage++)
            {
                vm_page_t pPage = PHYS_TO_VM_PAGE(vtophys(AddrToMap));

                MY_PMAP_ENTER(pPhysicalMap, AddrR3Dst, pPage, ProtectionFlags, TRUE);
                AddrR3Dst += PAGE_SIZE;
                AddrToMap += PAGE_SIZE;
            }
        }
    }
#endif

    /* (snippet truncated; with the mapping code above disabled via "#if 0",
     * the function presumably just reports user mapping as unsupported) */
    return VERR_NOT_SUPPORTED;
}
Example #9
//////////////////////////////////////////////////////////////////////////////
// Slayer object
//////////////////////////////////////////////////////////////////////////////
void DoubleShot::execute (Slayer* pSlayer, ObjectID_t TargetObjectID,  SkillSlot* pSkillSlot, CEffectID_t CEffectID)
	throw(Error)
{
	__BEGIN_TRY __BEGIN_DEBUG

	//cout << "TID[" << Thread::self() << "]" << getSkillHandlerName() << " Begin" << endl;

	Assert(pSlayer != NULL);
	Assert(pSkillSlot != NULL);

	try 
	{
		Player* pPlayer = pSlayer->getPlayer();
		Zone* pZone = pSlayer->getZone();
		Assert(pPlayer != NULL);
		Assert(pZone != NULL);

		Creature* pTargetCreature = pZone->getCreature(TargetObjectID);
		//Assert(pTargetCreature != NULL);

		// NoSuch check removed. by sigi. 2002.5.2
		if (pTargetCreature==NULL
			|| !canAttack(pSlayer, pTargetCreature )
			|| pTargetCreature->isNPC()) 
		{
			executeSkillFailException(pSlayer, getSkillType());
			//cout << "TID[" << Thread::self() << "]" << getSkillHandlerName() << " End" << endl;
			return;
		}

		bool bIncreaseDomainExp = pSlayer->isRealWearingEx(Slayer::WEAR_RIGHTHAND);

		GCAttackArmsOK1 _GCAttackArmsOK1;
		GCAttackArmsOK2 _GCAttackArmsOK2;
		GCAttackArmsOK3 _GCAttackArmsOK3;
		GCAttackArmsOK4 _GCAttackArmsOK4;
		GCAttackArmsOK5 _GCAttackArmsOK5;

		// The skill cannot be used without a weapon in hand or with a non-gun weapon.
		// Among the gun-class weapons, SG and SR could not use DoubleShot.
		// SR and SG can now be used as well.
		// 2003.1.14 by bezz
		Item* pWeapon = pSlayer->getWearItem(Slayer::WEAR_RIGHTHAND);
		if (pWeapon == NULL || isArmsWeapon(pWeapon) == false )
//			pWeapon->getItemClass() == Item::ITEM_CLASS_SG || 
//			pWeapon->getItemClass() == Item::ITEM_CLASS_SR)
		{
			executeSkillFailException(pSlayer, getSkillType());
			//cout << "TID[" << Thread::self() << "]" << getSkillHandlerName() << " End" << endl;
			return;
		}

		SkillType_t       SkillType  = pSkillSlot->getSkillType();
		SkillInfo*        pSkillInfo = g_pSkillInfoManager->getSkillInfo(SkillType);
		SkillDomainType_t DomainType = pSkillInfo->getDomainType();
		SkillLevel_t      SkillLevel = pSkillSlot->getExpLevel();

		SkillInput input(pSlayer, pSkillSlot);
		SkillOutput output;
		computeOutput(input, output);

		// Compute the penalty value.
		int ToHitPenalty = getPercentValue(pSlayer->getToHit(), output.ToHit); 

		int  RequiredMP  = (int)pSkillInfo->getConsumeMP();
		bool bManaCheck   = hasEnoughMana(pSlayer, RequiredMP);
		bool bTimeCheck   = verifyRunTime(pSkillSlot);
		bool bRangeCheck  = verifyDistance(pSlayer, pTargetCreature, pWeapon->getRange());
		bool bBulletCheck = (getRemainBullet(pWeapon) > 0) ? true : false;
		bool bHitRoll     = HitRoll::isSuccess(pSlayer, pTargetCreature, ToHitPenalty);
		bool bPK          = verifyPK(pSlayer, pTargetCreature);

		// The bullet count is always decremented.
		Bullet_t RemainBullet = 0;
		if (bBulletCheck)
		{
			// Decrement the bullet count, save it, and fetch the remaining count.
			decreaseBullet(pWeapon);
			// No need to save on every shot. by sigi. 2002.5.9
			//pWeapon->save(pSlayer->getName(), STORAGE_GEAR, 0, Slayer::WEAR_RIGHTHAND, 0);
			RemainBullet = getRemainBullet(pWeapon);
		}

		if (bManaCheck && bTimeCheck && bRangeCheck && bBulletCheck && bHitRoll && bPK)
		{
			decreaseMana(pSlayer, RequiredMP, _GCAttackArmsOK1);

			_GCAttackArmsOK5.setSkillSuccess(true);
			_GCAttackArmsOK1.setSkillSuccess(true);

			bool bCriticalHit = false;

			// Compute the damage and apply the quickfire penalty.
			// Since output.Damage is negative, adding its percentage value amounts to a subtraction.
			int Damage = computeDamage(pSlayer, pTargetCreature, SkillLevel/5, bCriticalHit);
			Damage += getPercentValue(Damage, output.Damage);
			Damage = max(0, Damage);

			//cout << "DoubleShotDamage:" << Damage << endl;

			// Apply the damage.
			setDamage(pTargetCreature, Damage, pSlayer, SkillType, &_GCAttackArmsOK2, &_GCAttackArmsOK1);
			computeAlignmentChange(pTargetCreature, Damage, pSlayer, &_GCAttackArmsOK2, &_GCAttackArmsOK1);

			// Reduce item durability for the attacker and the target.
			// Moved here from below. by sigi. 2002.5.13
			decreaseDurability(pSlayer, pTargetCreature, NULL, &_GCAttackArmsOK1, &_GCAttackArmsOK2);


			// On a critical hit, push the target back.
			if (bCriticalHit)
			{
				knockbackCreature(pZone, pTargetCreature, pSlayer->getX(), pSlayer->getY());
			}

			/*
			// Stats increase only with an 80% chance.
			// Experience increases only when the target is not a slayer.
			if (Random(1, 100) < 80 && !pTargetCreature->isSlayer())
			{
			*/
			if(!pTargetCreature->isSlayer() ) {
				if (bIncreaseDomainExp )
				{
					shareAttrExp(pSlayer, Damage , 1, 8, 1, _GCAttackArmsOK1);
					increaseDomainExp(pSlayer, DomainType, pSkillInfo->getPoint(), _GCAttackArmsOK1, pTargetCreature->getLevel());
					increaseSkillExp(pSlayer, DomainType,  pSkillSlot, pSkillInfo, _GCAttackArmsOK1);
				}

				increaseAlignment(pSlayer, pTargetCreature, _GCAttackArmsOK1);
			}
			//}

			if (pTargetCreature->isPC()) 
			{
				Player* pTargetPlayer = pTargetCreature->getPlayer();
				if (pTargetPlayer != NULL) 
				{ 
					_GCAttackArmsOK2.setObjectID(pSlayer->getObjectID());
					pTargetPlayer->sendPacket(&_GCAttackArmsOK2);
				}
			} 
			else if (pTargetCreature->isMonster())
			{
				Monster* pMonster = dynamic_cast<Monster*>(pTargetCreature);
				pMonster->addEnemy(pSlayer);
			}

			ZoneCoord_t targetX = pTargetCreature->getX();
			ZoneCoord_t targetY = pTargetCreature->getY();
			ZoneCoord_t myX     = pSlayer->getX();
			ZoneCoord_t myY     = pSlayer->getY();
			
			_GCAttackArmsOK1.setObjectID(TargetObjectID);
			_GCAttackArmsOK1.setBulletNum(RemainBullet);

			_GCAttackArmsOK3.setObjectID(pSlayer->getObjectID());
			_GCAttackArmsOK3.setTargetXY (targetX, targetY);
			
			_GCAttackArmsOK4.setTargetObjectID (TargetObjectID);
			
			_GCAttackArmsOK5.setObjectID(pSlayer->getObjectID());
			_GCAttackArmsOK5.setTargetObjectID (TargetObjectID);

			pPlayer->sendPacket(&_GCAttackArmsOK1);

			list<Creature *> cList;
			cList.push_back(pTargetCreature);
			cList.push_back(pSlayer);
			cList = pZone->broadcastSkillPacket(myX, myY, targetX, targetY, &_GCAttackArmsOK5, cList);
			pZone->broadcastPacket(myX, myY, &_GCAttackArmsOK3, cList);
			pZone->broadcastPacket(targetX, targetY, &_GCAttackArmsOK4, cList);

			pSkillSlot->setRunTime(output.Delay);
		} 
		else 
		{
			executeSkillFailNormalWithGun(pSlayer, getSkillType(), pTargetCreature, RemainBullet);
		}
	} 
	catch (Throwable &t) 
	{
		executeSkillFailException(pSlayer, getSkillType());
	}

	//cout << "TID[" << Thread::self() << "]" << getSkillHandlerName() << " End" << endl;

	__END_DEBUG __END_CATCH
}
Example #10
/*
 * get_relation_info -
 *	  Retrieves catalog information for a given relation.
 *
 * Given the Oid of the relation, return the following info into fields
 * of the RelOptInfo struct:
 *
 *	min_attr	lowest valid AttrNumber
 *	max_attr	highest valid AttrNumber
 *	indexlist	list of IndexOptInfos for relation's indexes
 *	pages		number of pages
 *	tuples		number of tuples
 *
 * Also, initialize the attr_needed[] and attr_widths[] arrays.  In most
 * cases these are left as zeroes, but sometimes we need to compute attr
 * widths here, and we may as well cache the results for costsize.c.
 *
 * If inhparent is true, all we need to do is set up the attr arrays:
 * the RelOptInfo actually represents the appendrel formed by an inheritance
 * tree, and so the parent rel's physical size and index information isn't
 * important for it.
 */
void
get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
                  RelOptInfo *rel)
{
    Index		varno = rel->relid;
    Relation	relation;
    bool		hasindex;
    List	   *indexinfos = NIL;
    bool		needs_longlock;

    /*
     * We need not lock the relation since it was already locked, either by
     * the rewriter or when expand_inherited_rtentry() added it to the query's
     * rangetable.
     */
    relation = heap_open(relationObjectId, NoLock);
    needs_longlock = rel_needs_long_lock(relationObjectId);

    rel->min_attr = FirstLowInvalidHeapAttributeNumber + 1;
    rel->max_attr = RelationGetNumberOfAttributes(relation);

    Assert(rel->max_attr >= rel->min_attr);
    rel->attr_needed = (Relids *)
                       palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(Relids));
    rel->attr_widths = (int32 *)
                       palloc0((rel->max_attr - rel->min_attr + 1) * sizeof(int32));

    /*
     * CDB: Get partitioning key info for distributed relation.
     */
    rel->cdbpolicy = RelationGetPartitioningKey(relation);

    /*
     * Estimate relation size --- unless it's an inheritance parent, in which
     * case the size will be computed later in set_append_rel_pathlist, and we
     * must leave it zero for now to avoid bollixing the total_table_pages
     * calculation.
     */
    if (!inhparent)
    {
        cdb_estimate_rel_size
        (
            rel,
            relation,
            relation,
            rel->attr_widths - rel->min_attr,
            &rel->pages,
            &rel->tuples,
            &rel->cdb_default_stats_used
        );
    }

    /*
     * Make list of indexes.  Ignore indexes on system catalogs if told to.
     * Don't bother with indexes for an inheritance parent, either.
     */
    if (inhparent ||
            (IgnoreSystemIndexes && IsSystemClass(relation->rd_rel)))
        hasindex = false;
    else
        hasindex = relation->rd_rel->relhasindex;

    if (hasindex)
    {
        List	   *indexoidlist;
        ListCell   *l;
        LOCKMODE	lmode;

        /* Warn if indexed table needs ANALYZE. */
        if (rel->cdb_default_stats_used)
            cdb_default_stats_warning_for_table(relation->rd_id);

        indexoidlist = RelationGetIndexList(relation);

        /*
         * For each index, we get the same type of lock that the executor will
         * need, and do not release it.  This saves a couple of trips to the
         * shared lock manager while not creating any real loss of
         * concurrency, because no schema changes could be happening on the
         * index while we hold lock on the parent rel, and neither lock type
         * blocks any other kind of index operation.
         */
        if (rel->relid == root->parse->resultRelation)
            lmode = RowExclusiveLock;
        else
            lmode = AccessShareLock;

        foreach(l, indexoidlist)
        {
            Oid			indexoid = lfirst_oid(l);
            Relation	indexRelation;
            Form_pg_index index;
            IndexOptInfo *info;
            int			ncolumns;
            int			i;
            int16		amorderstrategy;

            /*
             * Extract info from the relation descriptor for the index.
             */
            indexRelation = index_open(indexoid, lmode);
            index = indexRelation->rd_index;

            /*
             * Ignore invalid indexes, since they can't safely be used for
             * queries.  Note that this is OK because the data structure we
             * are constructing is only used by the planner --- the executor
             * still needs to insert into "invalid" indexes!
             */
            if (!index->indisvalid)
            {
                index_close(indexRelation, NoLock);
                continue;
            }

            info = makeNode(IndexOptInfo);

            info->indexoid = index->indexrelid;
            info->rel = rel;
            info->ncolumns = ncolumns = index->indnatts;

            /*
             * Need to make opfamily and ordering arrays large enough to put
             * a terminating 0 at the end of each one.
             */
            info->indexkeys = (int *) palloc(sizeof(int) * ncolumns);
            info->opfamily = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));
            info->ordering = (Oid *) palloc0(sizeof(Oid) * (ncolumns + 1));

            for (i = 0; i < ncolumns; i++)
            {
                info->opfamily[i] = indexRelation->rd_opfamily[i];
                info->indexkeys[i] = index->indkey.values[i];
            }

            info->relam = indexRelation->rd_rel->relam;
            info->amcostestimate = indexRelation->rd_am->amcostestimate;
            info->amoptionalkey = indexRelation->rd_am->amoptionalkey;

            /*
             * Fetch the ordering operators associated with the index, if any.
             */
            amorderstrategy = indexRelation->rd_am->amorderstrategy;
            if (amorderstrategy > 0)
            {
                int			oprindex = amorderstrategy - 1;

                /*
                 * Index AM must have a fixed set of strategies for it to
                 * make sense to specify amorderstrategy, so we need not
                 * allow the case amstrategies == 0.
                 */
                Assert(oprindex < indexRelation->rd_am->amstrategies);

                for (i = 0; i < ncolumns; i++)
                {
                    info->ordering[i] = indexRelation->rd_operator[oprindex];
                    oprindex += indexRelation->rd_am->amstrategies;
                }
            }

            /*
             * Fetch the index expressions and predicate, if any.  We must
             * modify the copies we obtain from the relcache to have the
             * correct varno for the parent relation, so that they match up
             * correctly against qual clauses.
             */
            info->indexprs = RelationGetIndexExpressions(indexRelation);
            info->indpred = RelationGetIndexPredicate(indexRelation);
            if (info->indexprs && varno != 1)
                ChangeVarNodes((Node *) info->indexprs, 1, varno, 0);
            if (info->indpred && varno != 1)
                ChangeVarNodes((Node *) info->indpred, 1, varno, 0);
            info->predOK = false;		/* set later in indxpath.c */
            info->unique = index->indisunique;

            /*
             * Estimate the index size.  If it's not a partial index, we lock
             * the number-of-tuples estimate to equal the parent table; if it
             * is partial then we have to use the same methods as we would for
             * a table, except we can be sure that the index is not larger
             * than the table.
             */
            cdb_estimate_rel_size(rel,
                                  relation,
                                  indexRelation,
                                  NULL,
                                  &info->pages,
                                  &info->tuples,
                                  &info->cdb_default_stats_used);

            if (!info->indpred ||
                    info->tuples > rel->tuples)
                info->tuples = rel->tuples;

            if (info->cdb_default_stats_used &&
                    !rel->cdb_default_stats_used)
                cdb_default_stats_warning_for_index(relation->rd_id, indexoid);

            index_close(indexRelation, needs_longlock ? NoLock : lmode);

            indexinfos = lcons(info, indexinfos);
        }

        list_free(indexoidlist);
    }

    rel->indexlist = indexinfos;

    /* (snippet truncated; the original function continues, ultimately closing
     * the relation, e.g. heap_close(relation, NoLock), before returning) */
}
Example #11
int 
JoystickState::AxisPosition( int axis ) const
{
    Assert( (axis >= 0) && (axis < static_cast<int>(m_axes.size())) );
    return  m_axes[ axis ];
}
Example #12
//make a series of photographs
void do_photos()
{
	FILE *vfile,*upvfile;
	int photo_num=0;
	char savename[13];
	grs_canvas *photo_canvas;
	vms_vector viewer_pos;
	vms_matrix viewer_orient;

	vfile=fopen("vectors.lst","rt");
	upvfile=fopen("upvecs.c","wt");

	Assert(vfile!=NULL && upvfile!=NULL);

	fprintf(upvfile,"\n\n#include \"vecmat.h\"\n\nvms_vector up_vecs[] = {\n");

	photo_canvas = gr_create_canvas(64,64);

	gr_set_current_canvas(photo_canvas);

	while (!feof(vfile)) {
		vms_vector v;
		vms_matrix m;
		float x,y,z;
		int nf;

		nf = fscanf(vfile,"%f %f %f",&x,&y,&z);

		if (nf!=3) break;

		vm_vec_make(&v,fl2f(x),fl2f(y),fl2f(z));

		vm_vector_2_matrix(&m,&v,NULL,NULL);

		fprintf(upvfile,"\t\t\t{%#x,%#x,%#x},\n",m.uvec.x,m.uvec.y,m.uvec.z);

		vm_vec_scale(&v,PHOTO_DIST);

		vm_vec_add(&viewer_pos,&cube_position,&v);

		viewer_orient = m;
		vm_vec_negate(&viewer_orient.fvec);
		vm_vec_negate(&viewer_orient.rvec);

		gr_clear_canvas(129);
		g3_start_frame();

		g3_set_view_matrix(&viewer_pos,&viewer_orient,0x9000);

		draw_cube();

		g3_end_frame();

		gr_set_current_canvas(Canv_game);
		gr_ubitmap(0,0,&photo_canvas->cv_bitmap);
		gr_set_current_canvas(photo_canvas);

		sprintf(savename,"cube_%02d.bbm",photo_num);

		iff_write_bitmap(savename,&photo_canvas->cv_bitmap,gr_palette);

		photo_num++;

	}

	gr_free_canvas(photo_canvas);

	fprintf(upvfile,"\t\t};\n");

	fclose(vfile);
	fclose(upvfile);

}
Example #13
//-----------------------------------------------------------------------------
// Purpose: 
// Input  : bool - 
//-----------------------------------------------------------------------------
void C_TEBaseBeam::PostDataUpdate( DataUpdateType_t updateType )
{
	Assert( 0 );
}
Example #14
/* prepare(query="select * from foo")
 * prepare(query="select * from foo where bar = $1", params=["text"])
 * prepare(query="select * from foo where bar = $1", params=["text"], limit=5)
 */
PyObject *
PLy_spi_prepare(PyObject *self, PyObject *args)
{
	PLyPlanObject *plan;
	PyObject   *list = NULL;
	PyObject   *volatile optr = NULL;
	char	   *query;
	volatile MemoryContext oldcontext;
	volatile ResourceOwner oldowner;
	volatile int nargs;

	if (!PyArg_ParseTuple(args, "s|O", &query, &list))
		return NULL;

	if (list && (!PySequence_Check(list)))
	{
		PLy_exception_set(PyExc_TypeError,
					   "second argument of plpy.prepare must be a sequence");
		return NULL;
	}

	if ((plan = (PLyPlanObject *) PLy_plan_new()) == NULL)
		return NULL;

	plan->mcxt = AllocSetContextCreate(TopMemoryContext,
									   "PL/Python plan context",
									   ALLOCSET_DEFAULT_SIZES);
	oldcontext = MemoryContextSwitchTo(plan->mcxt);

	nargs = list ? PySequence_Length(list) : 0;

	plan->nargs = nargs;
	plan->types = nargs ? palloc(sizeof(Oid) * nargs) : NULL;
	plan->values = nargs ? palloc(sizeof(Datum) * nargs) : NULL;
	plan->args = nargs ? palloc(sizeof(PLyTypeInfo) * nargs) : NULL;

	MemoryContextSwitchTo(oldcontext);

	oldcontext = CurrentMemoryContext;
	oldowner = CurrentResourceOwner;

	PLy_spi_subtransaction_begin(oldcontext, oldowner);

	PG_TRY();
	{
		int			i;
		PLyExecutionContext *exec_ctx = PLy_current_execution_context();

		/*
		 * the other loop might throw an exception, if PLyTypeInfo member
		 * isn't properly initialized the Py_DECREF(plan) will go boom
		 */
		for (i = 0; i < nargs; i++)
		{
			PLy_typeinfo_init(&plan->args[i], plan->mcxt);
			plan->values[i] = PointerGetDatum(NULL);
		}

		for (i = 0; i < nargs; i++)
		{
			char	   *sptr;
			HeapTuple	typeTup;
			Oid			typeId;
			int32		typmod;

			optr = PySequence_GetItem(list, i);
			if (PyString_Check(optr))
				sptr = PyString_AsString(optr);
			else if (PyUnicode_Check(optr))
				sptr = PLyUnicode_AsString(optr);
			else
			{
				ereport(ERROR,
						(errmsg("plpy.prepare: type name at ordinal position %d is not a string", i)));
				sptr = NULL;	/* keep compiler quiet */
			}

			/********************************************************
			 * Resolve argument type names and then look them up by
			 * oid in the system cache, and remember the required
			 * information for input conversion.
			 ********************************************************/

			parseTypeString(sptr, &typeId, &typmod, false);

			typeTup = SearchSysCache1(TYPEOID,
									  ObjectIdGetDatum(typeId));
			if (!HeapTupleIsValid(typeTup))
				elog(ERROR, "cache lookup failed for type %u", typeId);

			Py_DECREF(optr);

			/*
			 * set optr to NULL, so we won't try to unref it again in case of
			 * an error
			 */
			optr = NULL;

			plan->types[i] = typeId;
			PLy_output_datum_func(&plan->args[i], typeTup, exec_ctx->curr_proc->langid, exec_ctx->curr_proc->trftypes);
			ReleaseSysCache(typeTup);
		}

		pg_verifymbstr(query, strlen(query), false);
		plan->plan = SPI_prepare(query, plan->nargs, plan->types);
		if (plan->plan == NULL)
			elog(ERROR, "SPI_prepare failed: %s",
				 SPI_result_code_string(SPI_result));

		/* transfer plan from procCxt to topCxt */
		if (SPI_keepplan(plan->plan))
			elog(ERROR, "SPI_keepplan failed");

		PLy_spi_subtransaction_commit(oldcontext, oldowner);
	}
	PG_CATCH();
	{
		Py_DECREF(plan);
		Py_XDECREF(optr);

		PLy_spi_subtransaction_abort(oldcontext, oldowner);
		return NULL;
	}
	PG_END_TRY();

	Assert(plan->plan != NULL);
	return (PyObject *) plan;
}
Example #15
/**
 * Destroys a PDM thread.
 *
 * This will wakeup the thread, tell it to terminate, and wait for it terminate.
 *
 * @returns VBox status code.
 *          This reflects the success off destroying the thread and not the exit code
 *          of the thread as this is stored in *pRcThread.
 * @param   pThread         The thread to destroy.
 * @param   pRcThread       Where to store the thread exit code. Optional.
 * @thread  The emulation thread (EMT).
 */
VMMR3DECL(int) PDMR3ThreadDestroy(PPDMTHREAD pThread, int *pRcThread)
{
    /*
     * Assert sanity.
     */
    AssertPtrReturn(pThread, VERR_INVALID_POINTER);
    AssertReturn(pThread->u32Version == PDMTHREAD_VERSION, VERR_INVALID_MAGIC);
    Assert(pThread->Thread != RTThreadSelf());
    AssertPtrNullReturn(pRcThread, VERR_INVALID_POINTER);
    PVM pVM = pThread->Internal.s.pVM;
    VM_ASSERT_EMT(pVM);
    PUVM pUVM = pVM->pUVM;

    /*
     * Advance the thread to the terminating state.
     */
    int rc = VINF_SUCCESS;
    if (pThread->enmState <= PDMTHREADSTATE_TERMINATING)
    {
        for (;;)
        {
            PDMTHREADSTATE enmState = pThread->enmState;
            switch (enmState)
            {
                case PDMTHREADSTATE_RUNNING:
                    if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
                        continue;
                    rc = pdmR3ThreadWakeUp(pThread);
                    break;

                case PDMTHREADSTATE_SUSPENDED:
                case PDMTHREADSTATE_SUSPENDING:
                case PDMTHREADSTATE_RESUMING:
                case PDMTHREADSTATE_INITIALIZING:
                    if (!pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
                        continue;
                    break;

                case PDMTHREADSTATE_TERMINATING:
                case PDMTHREADSTATE_TERMINATED:
                    break;

                default:
                    AssertMsgFailed(("enmState=%d\n", enmState));
                    rc = VERR_PDM_THREAD_IPE_2;
                    break;
            }
            break;
        }
    }
    int rc2 = RTSemEventMultiSignal(pThread->Internal.s.BlockEvent);
    AssertRC(rc2);

    /*
     * Wait for it to terminate and the do cleanups.
     */
    rc2 = RTThreadWait(pThread->Thread, RT_SUCCESS(rc) ? 60*1000 : 150, pRcThread);
    if (RT_SUCCESS(rc2))
    {
        /* make it invalid. */
        pThread->u32Version = 0xffffffff;
        pThread->enmState = PDMTHREADSTATE_INVALID;
        pThread->Thread = NIL_RTTHREAD;

        /* unlink */
        RTCritSectEnter(&pUVM->pdm.s.ListCritSect);
        if (pUVM->pdm.s.pThreads == pThread)
        {
            pUVM->pdm.s.pThreads = pThread->Internal.s.pNext;
            if (!pThread->Internal.s.pNext)
                pUVM->pdm.s.pThreadsTail = NULL;
        }
        else
        {
            PPDMTHREAD pPrev = pUVM->pdm.s.pThreads;
            while (pPrev && pPrev->Internal.s.pNext != pThread)
                pPrev = pPrev->Internal.s.pNext;
            Assert(pPrev);
            if (pPrev)
                pPrev->Internal.s.pNext = pThread->Internal.s.pNext;
            if (!pThread->Internal.s.pNext)
                pUVM->pdm.s.pThreadsTail = pPrev;
        }
        pThread->Internal.s.pNext = NULL;
        RTCritSectLeave(&pUVM->pdm.s.ListCritSect);

        /* free the resources */
        RTSemEventMultiDestroy(pThread->Internal.s.BlockEvent);
        pThread->Internal.s.BlockEvent = NIL_RTSEMEVENTMULTI;

        RTSemEventMultiDestroy(pThread->Internal.s.SleepEvent);
        pThread->Internal.s.SleepEvent = NIL_RTSEMEVENTMULTI;

        MMR3HeapFree(pThread);
    }
    else if (RT_SUCCESS(rc))
        rc = rc2;

    return rc;
}
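/*
 * Hedged usage sketch: tearing down a worker created earlier with
 * PDMR3ThreadCreate and logging its exit code (the logging is illustrative).
 */
static int exampleDestroyWorker(PPDMTHREAD pThread)
{
    int rcThread = VINF_SUCCESS;
    int rc = PDMR3ThreadDestroy(pThread, &rcThread);
    if (RT_SUCCESS(rc))
        LogFlow(("worker exited with rcThread=%Rrc\n", rcThread));
    return rc;
}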
Example #16
LPPL_YYSTYPE PrimFnDydCircleSlope_EM_YY
    (LPTOKEN lptkLftArg,                // Ptr to left arg token
     LPTOKEN lptkFunc,                  // Ptr to function token
     LPTOKEN lptkRhtArg,                // Ptr to right arg token
     LPTOKEN lptkAxis)                  // Ptr to axis token (may be NULL)

{
    APLSTYPE      aplTypeLft,           // Left arg storage type
                  aplTypeRht,           // Right ...
                  aplTypeRes;           // Result   ...
    APLNELM       aplNELMLft,           // Left arg NELM
                  aplNELMRht,           // Right ...
                  aplNELMRes;           // Result   ...
    APLRANK       aplRankLft,           // Left arg rank
                  aplRankRht,           // Right ...
                  aplRankRes;           // Result   ...
    HGLOBAL       hGlbLft = NULL,       // Left arg global memory handle
                  hGlbRht = NULL,       // Right ...
                  hGlbRes = NULL,       // Result   ...
                  hGlbAxis = NULL,      // Axis     ...
                  hGlbWVec = NULL,      // Weighting vector ...
                  hGlbOdo = NULL;       // Odometer ...
    LPAPLDIM      lpMemDimRht,          // Ptr to right arg dimensions
                  lpMemDimRes;          // Ptr to result    ...
    APLDIM        uMinDim;              // Minimum dimension (used when axes repeat)
    LPVOID        lpMemLft = NULL,      // Ptr to left arg global memory
                  lpMemRht = NULL,      // Ptr to right ...
                  lpMemRes = NULL;      // Ptr to result   ...
    LPAPLUINT     lpMemAxisHead = NULL, // Ptr to axis values, fleshed out by CheckAxis_EM
                  lpMemAxisTail,        // Ptr to grade up of AxisHead
                  lpMemWVec = NULL,     // Ptr to weighting vector ...
                  lpMemOdo = NULL;      // Ptr to odometer ...
    APLUINT       ByteRes,              // # bytes in the result
                  uRht,                 // Right arg loop counter
                  uRes,                 // Result    ...
                  uOdo;                 // Odometer  ...
    LPPL_YYSTYPE  lpYYRes = NULL;       // Ptr to the result
    UINT          uBitIndex,            // Bit index for marching through Booleans
                  uBitMask;             // Bit mask  ...
    APLINT        iDim,                 // Dimension loop counter
                  apaOffRht,            // Right arg APA offset
                  apaMulRht;            // ...           multiplier
    LPPLLOCALVARS lpplLocalVars;        // Ptr to re-entrant vars
    LPUBOOL       lpbCtrlBreak;         // Ptr to Ctrl-Break flag
    LPVARARRAY_HEADER lpMemHdrRht;      // Ptr to right arg header

    // Get the thread's ptr to local vars
    lpplLocalVars = TlsGetValue (dwTlsPlLocalVars);

    // Get the ptr to the Ctrl-Break flag
    lpbCtrlBreak = &lpplLocalVars->bCtrlBreak;

    //***************************************************************
    // This function is not sensitive to the axis operator,
    //   so signal a syntax error if present
    //***************************************************************

    if (lptkAxis NE NULL)
        goto AXIS_SYNTAX_EXIT;

    // Get the attributes (Type, NELM, and Rank) of the left & right args
    AttrsOfToken (lptkLftArg, &aplTypeLft, &aplNELMLft, &aplRankLft, NULL);
    AttrsOfToken (lptkRhtArg, &aplTypeRht, &aplNELMRht, &aplRankRht, NULL);

    // Get left and right arg's global ptrs
    GetGlbPtrs_LOCK (lptkLftArg, &hGlbLft, &lpMemLft);
    GetGlbPtrs_LOCK (lptkRhtArg, &hGlbRht, &lpMemRht);

    // Check for RANK ERROR
    if (IsMultiRank (aplRankLft))
        goto RANK_EXIT;

    // Check for LENGTH ERROR
    if (aplNELMLft NE aplRankRht)
        goto LENGTH_EXIT;

    // Treat the left arg as an axis
    if (!CheckAxis_EM (lptkLftArg,      // The "axis" token
                       aplRankRht,      // All values less than this
                       FALSE,           // TRUE iff scalar or one-element vector only
                       FALSE,           // TRUE iff want sorted axes
                       TRUE,            // TRUE iff axes must be contiguous
                       TRUE,            // TRUE iff duplicate axes are allowed
                       NULL,            // TRUE iff fractional values allowed
                      &aplRankRes,      // Return last axis value
                       NULL,            // Return # elements in axis vector
                      &hGlbAxis))       // Return HGLOBAL with APLUINT axis values
        goto DOMAIN_EXIT;

    // Map APA right arg to INT result
    if (IsSimpleAPA (aplTypeRht))
        aplTypeRes = ARRAY_INT;
    else
        aplTypeRes = aplTypeRht;

    // Strip out the simple scalar right argument case
    if (IsScalar (aplRankRht) && IsSimpleNH (aplTypeRes))
    {
        // Allocate a new YYRes
        lpYYRes = YYAlloc ();

        // Split cases based upon the right arg's token type
        switch (lptkRhtArg->tkFlags.TknType)
        {
            case TKT_VARNAMED:
                // tkData is an LPSYMENTRY
                Assert (GetPtrTypeDir (lptkRhtArg->tkData.tkVoid) EQ PTRTYPE_STCONST);

                // If it's not immediate, we must look inside the array
                if (!lptkRhtArg->tkData.tkSym->stFlags.Imm)
                {
                    // stData is a valid HGLOBAL variable array
                    Assert (IsGlbTypeVarDir_PTB (lptkRhtArg->tkData.tkSym->stData.stGlbData));
#ifdef DEBUG
                    // If we ever get here, we must have missed a type demotion
                    DbgStop ();             // #ifdef DEBUG
#endif
                } // End IF

                // Handle the immediate case

                // Fill in the result token
                lpYYRes->tkToken.tkFlags.TknType   = TKT_VARIMMED;
                lpYYRes->tkToken.tkFlags.ImmType   = lptkRhtArg->tkData.tkSym->stFlags.ImmType;
////////////////lpYYRes->tkToken.tkFlags.NoDisplay = FALSE; // Already zero from YYAlloc
                lpYYRes->tkToken.tkData.tkLongest  = lptkRhtArg->tkData.tkSym->stData.stLongest;
                lpYYRes->tkToken.tkCharIndex       = lptkFunc->tkCharIndex;

                break;

            case TKT_VARIMMED:
                // Fill in the result token
                lpYYRes->tkToken = *lptkRhtArg;

                break;

            defstop
                break;
        } // End SWITCH

        goto NORMAL_EXIT;
    } // End IF
Example #17
/**
 * The PDM thread function.
 *
 * @returns return from pfnThread.
 *
 * @param   Thread  The thread handle.
 * @param   pvUser  Pointer to the PDMTHREAD structure.
 */
static DECLCALLBACK(int) pdmR3ThreadMain(RTTHREAD Thread, void *pvUser)
{
    PPDMTHREAD pThread = (PPDMTHREAD)pvUser;
    Log(("PDMThread: Initializing thread %RTthrd / %p / '%s'...\n", Thread, pThread, RTThreadGetName(Thread)));
    pThread->Thread = Thread;

    PUVM pUVM = pThread->Internal.s.pVM->pUVM;
    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyPdmtInit)
        pUVM->pVmm2UserMethods->pfnNotifyPdmtInit(pUVM->pVmm2UserMethods, pUVM);

    /*
     * The run loop.
     *
     * It handles simple thread functions which return when they see a suspending
     * request, leaving the PDMR3ThreadIAmSuspending and PDMR3ThreadIAmRunning
     * parts to us.
     */
    int rc;
    for (;;)
    {
        switch (pThread->Internal.s.enmType)
        {
            case PDMTHREADTYPE_DEVICE:
                rc = pThread->u.Dev.pfnThread(pThread->u.Dev.pDevIns, pThread);
                break;

            case PDMTHREADTYPE_USB:
                rc = pThread->u.Usb.pfnThread(pThread->u.Usb.pUsbIns, pThread);
                break;

            case PDMTHREADTYPE_DRIVER:
                rc = pThread->u.Drv.pfnThread(pThread->u.Drv.pDrvIns, pThread);
                break;

            case PDMTHREADTYPE_INTERNAL:
                rc = pThread->u.Int.pfnThread(pThread->Internal.s.pVM, pThread);
                break;

            case PDMTHREADTYPE_EXTERNAL:
                rc = pThread->u.Ext.pfnThread(pThread);
                break;

            default:
                AssertMsgFailed(("%d\n", pThread->Internal.s.enmType));
                rc = VERR_PDM_THREAD_IPE_1;
                break;
        }
        if (RT_FAILURE(rc))
            break;

        /*
         * If this is a simple thread function, the state will be suspending
         * or initializing now. If it isn't, we're supposed to terminate.
         */
        if (    pThread->enmState != PDMTHREADSTATE_SUSPENDING
            &&  pThread->enmState != PDMTHREADSTATE_INITIALIZING)
        {
            Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
            break;
        }
        rc = PDMR3ThreadIAmSuspending(pThread);
        if (RT_FAILURE(rc))
            break;
        if (pThread->enmState != PDMTHREADSTATE_RESUMING)
        {
            Assert(pThread->enmState == PDMTHREADSTATE_TERMINATING);
            break;
        }

        rc = PDMR3ThreadIAmRunning(pThread);
        if (RT_FAILURE(rc))
            break;
    }

    if (RT_FAILURE(rc))
        LogRel(("PDMThread: Thread '%s' (%RTthrd) quit unexpectedly with rc=%Rrc.\n", RTThreadGetName(Thread), Thread, rc));

    /*
     * Advance the state to terminating and then on to terminated.
     */
    for (;;)
    {
        PDMTHREADSTATE enmState = pThread->enmState;
        if (    enmState == PDMTHREADSTATE_TERMINATING
            ||  pdmR3AtomicCmpXchgState(pThread, PDMTHREADSTATE_TERMINATING, enmState))
            break;
    }

    ASMAtomicXchgSize(&pThread->enmState, PDMTHREADSTATE_TERMINATED);
    int rc2 = RTThreadUserSignal(Thread); AssertRC(rc2);

    if (   pUVM->pVmm2UserMethods
        && pUVM->pVmm2UserMethods->pfnNotifyPdmtTerm)
        pUVM->pVmm2UserMethods->pfnNotifyPdmtTerm(pUVM->pVmm2UserMethods, pUVM);
    Log(("PDMThread: Terminating thread %RTthrd / %p / '%s': %Rrc\n", Thread, pThread, RTThreadGetName(Thread), rc));
    return rc;
}
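
For reference, here is a minimal sketch of the kind of "simple thread function" this run loop expects. The callback name and the device work are hypothetical; only the state handshake is taken from the loop above: return as soon as the state leaves RUNNING (or during INITIALIZING), and let pdmR3ThreadMain drive the suspend/resume protocol.

/* Hypothetical device thread callback (a sketch, not VirtualBox source). */
static DECLCALLBACK(int) devDemoThread(PPDMDEVINS pDevIns, PPDMTHREAD pThread)
{
    /* Creation handshake: return immediately while initializing. */
    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
        return VINF_SUCCESS;

    /* Do work until asked to suspend or terminate. */
    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    {
        /* ... one unit of device work goes here ... */
    }

    /* Returning success hands control back to pdmR3ThreadMain, which
       performs the IAmSuspending/IAmRunning handshake or terminates. */
    return VINF_SUCCESS;
}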
Example #18
init_points(polymodel *pm,vms_vector *box_size,int submodel_num,morph_data *md)
{
	ushort nverts;
	vms_vector *vp;
	ushort *data,type;
	int i;

	//printf("initing %d ",submodel_num);

	data = (ushort *) &pm->model_data[pm->submodel_ptrs[submodel_num]];

	type = *data++;

	Assert(type == 7 || type == 1);

	nverts = *data++;

	md->n_morphing_points[submodel_num] = 0;

	if (type==7) {
		i = *data++;		//get start point number
		data++;				//skip pad
	}
	else
		i = 0;				//start at zero

	Assert(i+nverts < MAX_VECS);

	md->submodel_startpoints[submodel_num] = i;

	vp = (vms_vector *) data;

	while (nverts--) {
		fix k,dist;

		if (box_size) {
			fix t;

			k = 0x7fffffff;

			if (vp->x && f2i(box_size->x)<abs(vp->x)/2 && (t = fixdiv(box_size->x,abs(vp->x))) < k) k=t;
			if (vp->y && f2i(box_size->y)<abs(vp->y)/2 && (t = fixdiv(box_size->y,abs(vp->y))) < k) k=t;
			if (vp->z && f2i(box_size->z)<abs(vp->z)/2 && (t = fixdiv(box_size->z,abs(vp->z))) < k) k=t;

			if (k==0x7fffffff) k=0;

		}
		else
			k=0;

		vm_vec_copy_scale(&md->morph_vecs[i],vp,k);

		dist = vm_vec_normalized_dir_quick(&md->morph_deltas[i],vp,&md->morph_vecs[i]);

		md->morph_times[i] = fixdiv(dist,morph_rate);

		if (md->morph_times[i] != 0)
			md->n_morphing_points[submodel_num]++;

		vm_vec_scale(&md->morph_deltas[i],morph_rate);

		vp++; i++;

	}

	//printf("npoints = %d\n",n_morphing_points[submodel_num]);

}
Example #19
void CRecipientFilter::RemoveRecipientByPlayerIndex( int playerindex )
{
	Assert( playerindex >= 1 && playerindex <= ABSOLUTE_PLAYER_LIMIT );

	m_Recipients.FindAndRemove( playerindex );
}
Example #20
draw_model(polymodel *pm,int submodel_num,vms_angvec *anim_angles,fix light,morph_data *md)
{
	int i,mn;
	int facing;
	int sort_list[MAX_SUBMODELS],sort_n;


	//first, sort the submodels

	sort_list[0] = submodel_num;
	sort_n = 1;

	for (i=0;i<pm->n_models;i++)

		if (md->submodel_active[i] && pm->submodel_parents[i]==submodel_num) {

			facing = g3_check_normal_facing(&pm->submodel_pnts[i],&pm->submodel_norms[i]);

			if (!facing)

				sort_list[sort_n++] = i;

			else {		//put at start
				int t;

				for (t=sort_n;t>0;t--)
					sort_list[t] = sort_list[t-1];

				sort_list[0] = i;

				sort_n++;


			}

		}
	

	//now draw everything

	for (i=0;i<sort_n;i++) {

		mn = sort_list[i];

		if (mn == submodel_num) {
 			int i;

			for (i=0;i<pm->n_textures;i++)		{
				texture_list_index[i] = ObjBitmaps[ObjBitmapPtrs[pm->first_texture+i]];
				texture_list[i] = &GameBitmaps[ObjBitmaps[ObjBitmapPtrs[pm->first_texture+i]].index];
			}

#ifdef PIGGY_USE_PAGING			
			// Make sure the textures for this object are paged in...
			piggy_page_flushed = 0;
			for (i=0;i<pm->n_textures;i++)	
				PIGGY_PAGE_IN( texture_list_index[i] );
			// Hmmm... cache got flushed in the middle of paging all these in,
			// so we need to reread them all in.
			if (piggy_page_flushed)	{
				piggy_page_flushed = 0;
				for (i=0;i<pm->n_textures;i++)	
					PIGGY_PAGE_IN( texture_list_index[i] );
			}
			// Make sure that they can all fit in memory.
			Assert( piggy_page_flushed == 0 );
#endif


			g3_draw_morphing_model(&pm->model_data[pm->submodel_ptrs[submodel_num]],texture_list,anim_angles,light,&md->morph_vecs[md->submodel_startpoints[submodel_num]]);

		}
		else {

			vms_matrix orient;

			vm_angles_2_matrix(&orient,&anim_angles[mn]);

			g3_start_instance_matrix(&pm->submodel_offsets[mn],&orient);

			draw_model(pm,mn,anim_angles,light,md);

			g3_done_instance();

		}
	}

}
Example #21
/*
 * Test whether an object exists.
 */
static bool
object_exists(ObjectAddress address)
{
	int			cache = -1;
	Oid			indexoid = InvalidOid;
	Relation	rel;
	ScanKeyData skey[1];
	SysScanDesc sd;
	bool		found;

	/* Sub-objects require special treatment. */
	if (address.objectSubId != 0)
	{
		HeapTuple	atttup;

		/* Currently, attributes are the only sub-objects. */
		Assert(address.classId == RelationRelationId);
		atttup = SearchSysCache2(ATTNUM, ObjectIdGetDatum(address.objectId),
								 Int16GetDatum(address.objectSubId));
		if (!HeapTupleIsValid(atttup))
			found = false;
		else
		{
			found = !((Form_pg_attribute) GETSTRUCT(atttup))->attisdropped;
			ReleaseSysCache(atttup);
		}
		return found;
	}

	/*
	 * For object types that have a relevant syscache, we use it; for
	 * everything else, we'll have to do an index-scan.  This switch sets
	 * either the cache to be used for the syscache lookup, or the index to be
	 * used for the index scan.
	 */
	switch (address.classId)
	{
		case RelationRelationId:
			cache = RELOID;
			break;
		case RewriteRelationId:
			indexoid = RewriteOidIndexId;
			break;
		case TriggerRelationId:
			indexoid = TriggerOidIndexId;
			break;
		case ConstraintRelationId:
			cache = CONSTROID;
			break;
		case DatabaseRelationId:
			cache = DATABASEOID;
			break;
		case AuthIdRelationId:
			cache = AUTHOID;
			break;
		case NamespaceRelationId:
			cache = NAMESPACEOID;
			break;
		case LanguageRelationId:
			cache = LANGOID;
			break;
		case TypeRelationId:
			cache = TYPEOID;
			break;
		case ProcedureRelationId:
			cache = PROCOID;
			break;
		case OperatorRelationId:
			cache = OPEROID;
			break;
		case ConversionRelationId:
			cache = CONVOID;
			break;
		case OperatorClassRelationId:
			cache = CLAOID;
			break;
		case OperatorFamilyRelationId:
			cache = OPFAMILYOID;
			break;
		case CastRelationId:
			indexoid = CastOidIndexId;
			break;
		case TSParserRelationId:
			cache = TSPARSEROID;
			break;
		case TSDictionaryRelationId:
			cache = TSDICTOID;
			break;
		case TSTemplateRelationId:
			cache = TSTEMPLATEOID;
			break;
		case TSConfigRelationId:
			cache = TSCONFIGOID;
			break;
		case ExtensionRelationId:
			indexoid = ExtensionOidIndexId;
			break;
		default:
			elog(ERROR, "unrecognized classid: %u", address.classId);
	}

	/* Found a syscache? */
	if (cache != -1)
		return SearchSysCacheExists1(cache, ObjectIdGetDatum(address.objectId));

	/* No syscache, so examine the table directly. */
	Assert(OidIsValid(indexoid));
	ScanKeyInit(&skey[0],
				ObjectIdAttributeNumber,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(address.objectId));
	rel = heap_open(address.classId, AccessShareLock);
	sd = systable_beginscan(rel, indexoid, true, SnapshotNow, 1, skey);
	found = HeapTupleIsValid(systable_getnext(sd));
	systable_endscan(sd);
	heap_close(rel, AccessShareLock);
	return found;
}
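
A brief usage sketch (the relation OID is hypothetical): a sub-object lookup fills in all three ObjectAddress fields, and with the attisdropped test above the function reports true only for a live column.

/* Hypothetical caller: does column 2 of relOid still exist? */
ObjectAddress addr;

addr.classId = RelationRelationId;   /* attributes hang off pg_class */
addr.objectId = relOid;              /* assumed valid relation OID */
addr.objectSubId = 2;                /* attribute number */

if (!object_exists(addr))
    elog(ERROR, "column has been dropped");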
Example #22
const std::vector<SExpr>& SExpr::getChildren() const {
  PrettyCheckArgument(!isAtom(), this);
  Assert(d_children != NULL);
  return *d_children;
}
Example #23
/*
 * Write a message into a shared message queue, gathered from multiple
 * addresses.
 *
 * When nowait = false, we'll wait on our process latch when the ring buffer
 * fills up, and then continue writing once the receiver has drained some data.
 * The process latch is reset after each wait.
 *
 * When nowait = true, we do not manipulate the state of the process latch;
 * instead, if the buffer becomes full, we return SHM_MQ_WOULD_BLOCK.  In
 * this case, the caller should call this function again, with the same
 * arguments, each time the process latch is set.  (Once begun, the sending
 * of a message cannot be aborted except by detaching from the queue; changing
 * the length or payload will corrupt the queue.)
 */
shm_mq_result
shm_mq_sendv(shm_mq_handle *mqh, shm_mq_iovec *iov, int iovcnt, bool nowait)
{
	shm_mq_result res;
	shm_mq	   *mq = mqh->mqh_queue;
	Size		nbytes = 0;
	Size		bytes_written;
	int			i;
	int			which_iov = 0;
	Size		offset;

	Assert(mq->mq_sender == MyProc);

	/* Compute total size of write. */
	for (i = 0; i < iovcnt; ++i)
		nbytes += iov[i].len;

	/* Try to write, or finish writing, the length word into the buffer. */
	while (!mqh->mqh_length_word_complete)
	{
		Assert(mqh->mqh_partial_bytes < sizeof(Size));
		res = shm_mq_send_bytes(mqh, sizeof(Size) - mqh->mqh_partial_bytes,
								((char *) &nbytes) +mqh->mqh_partial_bytes,
								nowait, &bytes_written);
		mqh->mqh_partial_bytes += bytes_written;
		if (res != SHM_MQ_SUCCESS)
			return res;

		if (mqh->mqh_partial_bytes >= sizeof(Size))
		{
			Assert(mqh->mqh_partial_bytes == sizeof(Size));

			mqh->mqh_partial_bytes = 0;
			mqh->mqh_length_word_complete = true;
		}

		/* Length word can't be split unless bigger than required alignment. */
		Assert(mqh->mqh_length_word_complete || sizeof(Size) > MAXIMUM_ALIGNOF);
	}

	/* Write the actual data bytes into the buffer. */
	Assert(mqh->mqh_partial_bytes <= nbytes);
	offset = mqh->mqh_partial_bytes;
	do
	{
		Size		chunksize;

		/* Figure out which bytes need to be sent next. */
		if (offset >= iov[which_iov].len)
		{
			offset -= iov[which_iov].len;
			++which_iov;
			if (which_iov >= iovcnt)
				break;
			continue;
		}

		/*
		 * We want to avoid copying the data if at all possible, but every
		 * chunk of bytes we write into the queue has to be MAXALIGN'd, except
		 * the last.  Thus, if a chunk other than the last one ends on a
		 * non-MAXALIGN'd boundary, we have to combine the tail end of its
		 * data with data from one or more following chunks until we either
		 * reach the last chunk or accumulate a number of bytes which is
		 * MAXALIGN'd.
		 */
		if (which_iov + 1 < iovcnt &&
			offset + MAXIMUM_ALIGNOF > iov[which_iov].len)
		{
			char		tmpbuf[MAXIMUM_ALIGNOF];
			int			j = 0;

			for (;;)
			{
				if (offset < iov[which_iov].len)
				{
					tmpbuf[j] = iov[which_iov].data[offset];
					j++;
					offset++;
					if (j == MAXIMUM_ALIGNOF)
						break;
				}
				else
				{
					offset -= iov[which_iov].len;
					which_iov++;
					if (which_iov >= iovcnt)
						break;
				}
			}
			res = shm_mq_send_bytes(mqh, j, tmpbuf, nowait, &bytes_written);
			mqh->mqh_partial_bytes += bytes_written;
			if (res != SHM_MQ_SUCCESS)
				return res;
			continue;
		}

		/*
		 * If this is the last chunk, we can write all the data, even if it
		 * isn't a multiple of MAXIMUM_ALIGNOF.  Otherwise, we need to
		 * MAXALIGN_DOWN the write size.
		 */
		chunksize = iov[which_iov].len - offset;
		if (which_iov + 1 < iovcnt)
			chunksize = MAXALIGN_DOWN(chunksize);
		res = shm_mq_send_bytes(mqh, chunksize, &iov[which_iov].data[offset],
								nowait, &bytes_written);
		mqh->mqh_partial_bytes += bytes_written;
		offset += bytes_written;
		if (res != SHM_MQ_SUCCESS)
			return res;
	} while (mqh->mqh_partial_bytes < nbytes);

	/* Reset for next message. */
	mqh->mqh_partial_bytes = 0;
	mqh->mqh_length_word_complete = false;

	/* Notify receiver of the newly-written data, and return. */
	return shm_mq_notify_receiver(mq);
}
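
A caller-side sketch of the nowait contract described above (the queue handle and buffers are hypothetical). Per the comment, once a send has begun, the same iovec contents must be passed on every retry:

/* Hypothetical caller: gather a two-part message, retrying on
 * SHM_MQ_WOULD_BLOCK each time our process latch is set. */
shm_mq_iovec iov[2];
shm_mq_result res;

iov[0].data = hdr;   iov[0].len = hdr_len;    /* assumed buffers */
iov[1].data = body;  iov[1].len = body_len;

for (;;)
{
    res = shm_mq_sendv(mqh, iov, 2, true);    /* nowait = true */
    if (res != SHM_MQ_WOULD_BLOCK)
        break;                                /* SUCCESS or DETACHED */
    WaitLatch(MyLatch, WL_LATCH_SET, 0);      /* wait to be poked */
    CHECK_FOR_INTERRUPTS();
    ResetLatch(MyLatch);
}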
Example #24
/**
 * Internal worker for the RTMpOn* APIs.
 *
 * @returns IPRT status code.
 * @param   pfnWorker       The callback.
 * @param   pvUser1         User argument 1.
 * @param   pvUser2         User argument 2.
 * @param   enmCpuid        What to do / is idCpu valid.
 * @param   idCpu           Used if enmCpuid RT_NT_CPUID_SPECIFIC, otherwise ignored.
 */
static int rtMpCallUsingDpcs(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2, RT_NT_CPUID enmCpuid, RTCPUID idCpu)
{
    PRTMPARGS pArgs;
    KDPC     *paExecCpuDpcs;

#if 0
    /* KeFlushQueuedDpcs must be run at IRQL PASSIVE_LEVEL according to MSDN, but the
     * driver verifier doesn't complain...
     */
    AssertMsg(KeGetCurrentIrql() == PASSIVE_LEVEL, ("%d != %d (PASSIVE_LEVEL)\n", KeGetCurrentIrql(), PASSIVE_LEVEL));
#endif

#ifdef IPRT_TARGET_NT4
    KAFFINITY Mask;
    /* g_pfnrtNt* are not present on NT anyway. */
    return VERR_NOT_SUPPORTED;
#else
    KAFFINITY Mask = KeQueryActiveProcessors();
#endif

    /* KeFlushQueuedDpcs is not present in Windows 2000; import it dynamically so we can just fail this call. */
    if (!g_pfnrtNtKeFlushQueuedDpcs)
        return VERR_NOT_SUPPORTED;

    pArgs = (PRTMPARGS)ExAllocatePoolWithTag(NonPagedPool, MAXIMUM_PROCESSORS*sizeof(KDPC) + sizeof(RTMPARGS), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;

    pArgs->pfnWorker = pfnWorker;
    pArgs->pvUser1   = pvUser1;
    pArgs->pvUser2   = pvUser2;
    pArgs->idCpu     = NIL_RTCPUID;
    pArgs->cHits     = 0;
    pArgs->cRefs     = 1;

    paExecCpuDpcs = (KDPC *)(pArgs + 1);

    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        KeInitializeDpc(&paExecCpuDpcs[0], rtmpNtDPCWrapper, pArgs);
        KeSetImportanceDpc(&paExecCpuDpcs[0], HighImportance);
        KeSetTargetProcessorDpc(&paExecCpuDpcs[0], (int)idCpu);
    }
    else
    {
        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            KeInitializeDpc(&paExecCpuDpcs[i], rtmpNtDPCWrapper, pArgs);
            KeSetImportanceDpc(&paExecCpuDpcs[i], HighImportance);
            KeSetTargetProcessorDpc(&paExecCpuDpcs[i], i);
        }
    }

    /* Raise the IRQL to DISPATCH_LEVEL so we can't be rescheduled to another cpu.
     * KeInsertQueueDpc must also be executed at IRQL >= DISPATCH_LEVEL.
     */
    KIRQL oldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &oldIrql);

    /*
     * We cannot do other than assume a 1:1 relationship between the
     * affinity mask and the process despite the warnings in the docs.
     * If someone knows a better way to get this done, please let bird know.
     */
    ASMCompilerBarrier(); /* paranoia */
    if (enmCpuid == RT_NT_CPUID_SPECIFIC)
    {
        ASMAtomicIncS32(&pArgs->cRefs);
        BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[0], 0, 0);
        Assert(ret);
    }
    else
    {
        unsigned iSelf = KeGetCurrentProcessorNumber();

        for (unsigned i = 0; i < MAXIMUM_PROCESSORS; i++)
        {
            if (    (i != iSelf)
                &&  (Mask & RT_BIT_64(i)))
            {
                ASMAtomicIncS32(&pArgs->cRefs);
                BOOLEAN ret = KeInsertQueueDpc(&paExecCpuDpcs[i], 0, 0);
                Assert(ret);
            }
        }
        if (enmCpuid != RT_NT_CPUID_OTHERS)
            pfnWorker(iSelf, pvUser1, pvUser2);
    }

    KeLowerIrql(oldIrql);

    /* Flush all DPCs and wait for completion. (can take long!) */
    /** @todo Consider changing this to an active wait using some atomic inc/dec
     *  stuff (and check for the current cpu above in the specific case). */
    /** @todo Seems KeFlushQueuedDpcs doesn't wait for the DPCs to be completely
     *        executed. Seen pArgs being freed while some CPU was using it before
     *        cRefs was added. */
    g_pfnrtNtKeFlushQueuedDpcs();

    /* Dereference the argument structure. */
    int32_t cRefs = ASMAtomicDecS32(&pArgs->cRefs);
    Assert(cRefs >= 0);
    if (cRefs == 0)
        ExFreePool(pArgs);

    return VINF_SUCCESS;
}
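
For orientation, a sketch of how a broadcast-style API could route through this worker (RTMpOnAll is the public IPRT entry point; treat the routing shown here as an assumption, since only the RT_NT_CPUID_SPECIFIC path appears later in this file):

/* Sketch: run pfnWorker on every online CPU via the DPC worker above. */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_ALL, NIL_RTCPUID);
}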
Example #25
/*
 * Write bytes into a shared message queue.
 */
static shm_mq_result
shm_mq_send_bytes(shm_mq_handle *mqh, Size nbytes, const void *data,
				  bool nowait, Size *bytes_written)
{
	shm_mq	   *mq = mqh->mqh_queue;
	Size		sent = 0;
	uint64		used;
	Size		ringsize = mq->mq_ring_size;
	Size		available;

	while (sent < nbytes)
	{
		bool		detached;
		uint64		rb;

		/* Compute number of ring buffer bytes used and available. */
		rb = shm_mq_get_bytes_read(mq, &detached);
		Assert(mq->mq_bytes_written >= rb);
		used = mq->mq_bytes_written - rb;
		Assert(used <= ringsize);
		available = Min(ringsize - used, nbytes - sent);

		/* Bail out if the queue has been detached. */
		if (detached)
		{
			*bytes_written = sent;
			return SHM_MQ_DETACHED;
		}

		if (available == 0 && !mqh->mqh_counterparty_attached)
		{
			/*
			 * The queue is full, so if the receiver isn't yet known to be
			 * attached, we must wait for that to happen.
			 */
			if (nowait)
			{
				if (shm_mq_get_receiver(mq) == NULL)
				{
					*bytes_written = sent;
					return SHM_MQ_WOULD_BLOCK;
				}
			}
			else if (!shm_mq_wait_internal(mq, &mq->mq_receiver,
										   mqh->mqh_handle))
			{
				mq->mq_detached = true;
				*bytes_written = sent;
				return SHM_MQ_DETACHED;
			}
			mqh->mqh_counterparty_attached = true;

			/*
			 * The receiver may have read some data after attaching, so we
			 * must not wait without rechecking the queue state.
			 */
		}
		else if (available == 0)
		{
			shm_mq_result res;

			/* Let the receiver know that we need them to read some data. */
			res = shm_mq_notify_receiver(mq);
			if (res != SHM_MQ_SUCCESS)
			{
				*bytes_written = sent;
				return res;
			}

			/* Skip manipulation of our latch if nowait = true. */
			if (nowait)
			{
				*bytes_written = sent;
				return SHM_MQ_WOULD_BLOCK;
			}

			/*
			 * Wait for our latch to be set.  It might already be set for some
			 * unrelated reason, but that'll just result in one extra trip
			 * through the loop.  It's worth it to avoid resetting the latch
			 * at top of loop, because setting an already-set latch is much
			 * cheaper than setting one that has been reset.
			 */
			WaitLatch(MyLatch, WL_LATCH_SET, 0);

			/* An interrupt may have occurred while we were waiting. */
			CHECK_FOR_INTERRUPTS();

			/* Reset the latch so we don't spin. */
			ResetLatch(MyLatch);
		}
		else
		{
			Size		offset = mq->mq_bytes_written % (uint64) ringsize;
			Size		sendnow = Min(available, ringsize - offset);

			/* Write as much data as we can via a single memcpy(). */
			memcpy(&mq->mq_ring[mq->mq_ring_offset + offset],
				   (char *) data + sent, sendnow);
			sent += sendnow;

			/*
			 * Update count of bytes written, with alignment padding.  Note
			 * that this will never actually insert any padding except at the
			 * end of a run of bytes, because the buffer size is a multiple of
			 * MAXIMUM_ALIGNOF, and each read is as well.
			 */
			Assert(sent == nbytes || sendnow == MAXALIGN(sendnow));
			shm_mq_inc_bytes_written(mq, MAXALIGN(sendnow));

			/*
			 * For efficiency, we don't set the reader's latch here.  We'll do
			 * that only when the buffer fills up or after writing an entire
			 * message.
			 */
		}
	}

	*bytes_written = sent;
	return SHM_MQ_SUCCESS;
}
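
To make the alignment bookkeeping in these two functions concrete, assume MAXIMUM_ALIGNOF is 8 and a message of two iovecs of 13 and 7 bytes: shm_mq_sendv first writes MAXALIGN_DOWN(13) = 8 bytes directly, then combines the 5-byte tail of the first iovec with 3 bytes of the second in tmpbuf for another aligned 8-byte write, and finally sends the remaining 4 bytes as the last chunk, which shm_mq_send_bytes pads by advancing the write counter by MAXALIGN(4) = 8.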
Example #26
RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    /*
     * Don't try to mess with an offline CPU.
     */
    if (!RTMpIsCpuOnline(idCpu))
        return !RTMpIsCpuPossible(idCpu)
              ? VERR_CPU_NOT_FOUND
              : VERR_CPU_OFFLINE;

    /*
     * Use the broadcast IPI routine if there are no more than two CPUs online,
     * or if the current IRQL is unsuitable for KeWaitForSingleObject.
     */
    if (   g_pfnrtKeIpiGenericCall
        && (   RTMpGetOnlineCount() <= 2
            || KeGetCurrentIrql()   > APC_LEVEL) )
        return rtMpCallUsingBroadcastIpi(pfnWorker, pvUser1, pvUser2, rtmpNtOnSpecificBroadcastIpiWrapper, 0);

#if 0 /** @todo untested code. needs some tuning. */
    /*
     * Initialize the argument package and the objects within it.
     * The package is referenced counted to avoid unnecessary spinning to
     * synchronize cleanup and prevent stack corruption.
     */
    PRTMPNTONSPECIFICARGS pArgs = (PRTMPNTONSPECIFICARGS)ExAllocatePoolWithTag(NonPagedPool, sizeof(*pArgs), (ULONG)'RTMp');
    if (!pArgs)
        return VERR_NO_MEMORY;
    pArgs->cRefs                  = 2;
    pArgs->fExecuting             = false;
    pArgs->fDone                  = false;
    pArgs->CallbackArgs.pfnWorker = pfnWorker;
    pArgs->CallbackArgs.pvUser1   = pvUser1;
    pArgs->CallbackArgs.pvUser2   = pvUser2;
    pArgs->CallbackArgs.idCpu     = idCpu;
    pArgs->CallbackArgs.cHits     = 0;
    pArgs->CallbackArgs.cRefs     = 2;
    KeInitializeEvent(&pArgs->DoneEvt, SynchronizationEvent, FALSE /* not signalled */);
    KeInitializeDpc(&pArgs->Dpc, rtMpNtOnSpecificDpcWrapper, pArgs);
    KeSetImportanceDpc(&pArgs->Dpc, HighImportance);
    KeSetTargetProcessorDpc(&pArgs->Dpc, (int)idCpu);

    /*
     * Disable preemption while we check the current processor and insert the DPC.
     */
    KIRQL bOldIrql;
    KeRaiseIrql(DISPATCH_LEVEL, &bOldIrql);
    ASMCompilerBarrier(); /* paranoia */

    if (RTMpCpuId() == idCpu)
    {
        /* Just execute the callback on the current CPU. */
        pfnWorker(idCpu, pvUser1, pvUser2);
        KeLowerIrql(bOldIrql);

        ExFreePool(pArgs);
        return VINF_SUCCESS;
    }

    /* Different CPU, so queue it if the CPU is still online. */
    int rc;
    if (RTMpIsCpuOnline(idCpu))
    {
        BOOLEAN fRc = KeInsertQueueDpc(&pArgs->Dpc, 0, 0);
        Assert(fRc);
        KeLowerIrql(bOldIrql);

        uint64_t const nsRealWaitTS = RTTimeNanoTS();

        /*
         * Wait actively for a while in case the CPU/thread responds quickly.
         */
        uint32_t cLoopsLeft = 0x20000;
        while (cLoopsLeft-- > 0)
        {
            if (pArgs->fDone)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
            ASMNopPause();
        }

        /*
         * It didn't respond, so wait on the event object, poking the CPU if it's slow.
         */
        LARGE_INTEGER Timeout;
        Timeout.QuadPart = -10000; /* 1ms */
        NTSTATUS rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
        if (rcNt == STATUS_SUCCESS)
        {
            rtMpNtOnSpecificRelease(pArgs);
            return VINF_SUCCESS;
        }

        /* If it hasn't responded yet, maybe poke it and wait some more. */
        if (rcNt == STATUS_TIMEOUT)
        {
            if (   !pArgs->fExecuting
                && (   g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalSendSoftwareInterrupt
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiW7Plus
                    || g_pfnrtMpPokeCpuWorker == rtMpPokeCpuUsingHalReqestIpiPreW7))
                RTMpPokeCpu(idCpu);

            Timeout.QuadPart = -1280000; /* 128ms */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt == STATUS_SUCCESS)
            {
                rtMpNtOnSpecificRelease(pArgs);
                return VINF_SUCCESS;
            }
        }

        /*
         * Something weird is happening, try to bail out.
         */
        if (KeRemoveQueueDpc(&pArgs->Dpc))
        {
            ExFreePool(pArgs); /* DPC was still queued, so we can return without further ado. */
            LogRel(("RTMpOnSpecific(%#x): Not processed after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        else
        {
            /* DPC is running, wait a good while for it to complete. */
            LogRel(("RTMpOnSpecific(%#x): Still running after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));

            Timeout.QuadPart = -30*1000*1000*10; /* 30 seconds */
            rcNt = KeWaitForSingleObject(&pArgs->DoneEvt, Executive, KernelMode, FALSE /* Alertable */, &Timeout);
            if (rcNt != STATUS_SUCCESS)
                LogRel(("RTMpOnSpecific(%#x): Giving up on running worker after %llu ns: rcNt=%#x\n", idCpu, RTTimeNanoTS() - nsRealWaitTS, rcNt));
        }
        rc = RTErrConvertFromNtStatus(rcNt);
    }
    else
    {
        /* CPU is offline. */
        KeLowerIrql(bOldIrql);
        rc = !RTMpIsCpuPossible(idCpu) ? VERR_CPU_NOT_FOUND : VERR_CPU_OFFLINE;
    }

    rtMpNtOnSpecificRelease(pArgs);
    return rc;

#else
    return rtMpCallUsingDpcs(pfnWorker, pvUser1, pvUser2, RT_NT_CPUID_SPECIFIC, idCpu);
#endif
}
Example #27
	void StartGroupingSounds()
	{
		Assert( g_GroupedSounds.Count() == 0 );
		SetImpactSoundRoute( ShotgunImpactSoundGroup );
	}
Example #28
/*
 * find entry with lowest penalty
 */
OffsetNumber
gistchoose(Relation r, Page p, IndexTuple it,	/* it has compressed entry */
		   GISTSTATE *giststate)
{
	OffsetNumber maxoff;
	OffsetNumber i;
	OffsetNumber which;
	float		sum_grow,
				which_grow[INDEX_MAX_KEYS];
	GISTENTRY	entry,
				identry[INDEX_MAX_KEYS];
	bool		isnull[INDEX_MAX_KEYS];

	maxoff = PageGetMaxOffsetNumber(p);
	*which_grow = -1.0;
	which = InvalidOffsetNumber;
	sum_grow = 1;
	gistDeCompressAtt(giststate, r,
					  it, NULL, (OffsetNumber) 0,
					  identry, isnull);

	Assert(maxoff >= FirstOffsetNumber);
	Assert(!GistPageIsLeaf(p));

	for (i = FirstOffsetNumber; i <= maxoff && sum_grow; i = OffsetNumberNext(i))
	{
		int			j;
		IndexTuple	itup = (IndexTuple) PageGetItem(p, PageGetItemId(p, i));

		if (!GistPageIsLeaf(p) && GistTupleIsInvalid(itup))
		{
			ereport(LOG,
					(errmsg("index \"%s\" needs VACUUM or REINDEX to finish crash recovery",
							RelationGetRelationName(r))));
			continue;
		}

		sum_grow = 0;
		for (j = 0; j < r->rd_att->natts; j++)
		{
			Datum		datum;
			float		usize;
			bool		IsNull;

			datum = index_getattr(itup, j + 1, giststate->tupdesc, &IsNull);
			gistdentryinit(giststate, j, &entry, datum, r, p, i,
						   FALSE, IsNull);
			usize = gistpenalty(giststate, j, &entry, IsNull,
								&identry[j], isnull[j]);

			if (which_grow[j] < 0 || usize < which_grow[j])
			{
				which = i;
				which_grow[j] = usize;
				if (j < r->rd_att->natts - 1 && i == FirstOffsetNumber)
					which_grow[j + 1] = -1;
				sum_grow += which_grow[j];
			}
			else if (which_grow[j] == usize)
				sum_grow += usize;
			else
			{
				sum_grow = 1;
				break;
			}
		}
	}

	if (which == InvalidOffsetNumber)
		which = FirstOffsetNumber;

	return which;
}
Example #29
void AsmJsByteCodeWriter::AsmSimdTypedArr(OpCodeAsmJs op, RegSlot value, uint32 slotIndex, uint8 dataWidth, ArrayBufferView::ViewType viewType)
{
    Assert(dataWidth >= 4 && dataWidth <= 16);
    MULTISIZE_LAYOUT_WRITE(AsmSimdTypedArr, op, value, slotIndex, dataWidth, viewType);
}
Example #30
/*
 * Finish writing out the completed btree.
 */
static void
_bt_uppershutdown(BTWriteState *wstate, BTPageState *state)
{
	BTPageState *s;
	BlockNumber rootblkno = P_NONE;
	uint32		rootlevel = 0;
	Page		metapage;

	/*
	 * Each iteration of this loop completes one more level of the tree.
	 */
	for (s = state; s != NULL; s = s->btps_next)
	{
		BlockNumber blkno;
		BTPageOpaque opaque;

		blkno = s->btps_blkno;
		opaque = (BTPageOpaque) PageGetSpecialPointer(s->btps_page);

		/*
		 * We have to link the last page on this level to somewhere.
		 *
		 * If we're at the top, it's the root, so attach it to the metapage.
		 * Otherwise, add an entry for it to its parent using its minimum key.
		 * This may cause the last page of the parent level to split, but
		 * that's not a problem -- we haven't gotten to it yet.
		 */
		if (s->btps_next == NULL)
		{
			opaque->btpo_flags |= BTP_ROOT;
			rootblkno = blkno;
			rootlevel = s->btps_level;
		}
		else
		{
			Assert(s->btps_minkey != NULL);
			ItemPointerSet(&(s->btps_minkey->t_tid), blkno, P_HIKEY);
			_bt_buildadd(wstate, s->btps_next, s->btps_minkey);
			pfree(s->btps_minkey);
			s->btps_minkey = NULL;
		}

		/*
		 * This is the rightmost page, so the ItemId array needs to be slid
		 * back one slot.  Then we can dump out the page.
		 */
		_bt_slideleft(s->btps_page);
		_bt_blwritepage(wstate, s->btps_page, s->btps_blkno);
		s->btps_page = NULL;	/* writepage freed the workspace */
	}

	/*
	 * As the last step in the process, construct the metapage and make it
	 * point to the new root (unless we had no data at all, in which case it's
	 * set to point to "P_NONE").  This changes the index to the "valid" state
	 * by filling in a valid magic number in the metapage.
	 */
	metapage = (Page) palloc(BLCKSZ);
	_bt_initmetapage(metapage, rootblkno, rootlevel);
	_bt_blwritepage(wstate, metapage, BTREE_METAPAGE);
}