Example #1
void BufferInterface::bind(ContextType type)throw(BufferException)
{
	if(type == OPEN_GL_CONTEXT_TYPE)
	{
		if( (mBufferInfo->usageContexts & OPEN_GL_CONTEXT_TYPE_FLAG) == 0)
		{throw(BufferException("BufferInterface::bind: GL binding requested, but this Buffer has no GL context;"));}
		PARA_COMP_MANAGER->acquireSharedBuffersForGraphics();
		GUARD(bindGL());
		return;
	}
	if(type == OPEN_CL_CONTEXT_TYPE)
	{
		if(  (mBufferInfo->usageContexts & OPEN_CL_CONTEXT_TYPE_FLAG) == 0)
				{throw(BufferException("BufferInterface::bind: CL binding requested, but this Buffer has no CL context;"));}
		LOG<<WARNING_LOG_LEVEL<<"binding a buffer to an OpenCL context makes little sense at the moment; just ensure "<<
				"the Buffer is acquired for the CL context and that it is set as a kernel argument properly;\n";
		PARA_COMP_MANAGER->acquireSharedBuffersForCompute();
		//GUARD(bindCL()); // not needed: CL buffers aren't "bound" the way GL buffers are
		return;
	}

	if(type == HOST_CONTEXT_TYPE)
	{
		assert("binding a buffer to the host context makes no sense to me at the moment ;)"&&0);
		return;
	}

	assert("should never end dowglBlitFramebuffern here" && 0);

}
Example #2
void BufferInterface::setData(const void* data, ContextTypeFlags where)throw(BufferException)
{
	//CPU
	if( where & HOST_CONTEXT_TYPE_FLAG )
	{
		if( ! (mBufferInfo->usageContexts & HOST_CONTEXT_TYPE_FLAG))
		{throw(BufferException("data copy to cpu buffer requested, but this buffer has no CPU storage!"));}

		if(! mCPU_Handle)
		{throw(BufferException(" Buffer::setData: mCPU_Handle is NULL; some implementing of (calling)  allocMem() went terribly wrong"));}

		memcpy(mCPU_Handle,data, mBufferInfo->bufferSizeInByte);
	}

	//GL
	if( where & OPEN_GL_CONTEXT_TYPE_FLAG )
	{
		if( ! (mBufferInfo->usageContexts & OPEN_GL_CONTEXT_TYPE_FLAG))
		{throw(BufferException("data copy to GL buffer requested, but this buffer has no GL storage!"));}


		//omit possible cl-release call where it isn't necessary;
		//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
		//TODO uncomment when stable work is assured
		//if(isCLGLShared())
		{

			PARA_COMP_MANAGER->acquireSharedBuffersForGraphics();
		}

		bind(OPEN_GL_CONTEXT_TYPE);

		GUARD(writeGL(data));
	}
	else
	//CL; handle this only if no GL copy was requested, as a shared buffer only needs to be set up
	//by one API ;)
	{
		if( where & OPEN_CL_CONTEXT_TYPE_FLAG )
		{
			if( ! (mBufferInfo->usageContexts & OPEN_CL_CONTEXT_TYPE_FLAG))
			{throw(BufferException("data copy to CL buffer requested, but this buffer has no CL storage!"));}

			//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
			//TODO uncomment when stable work is assured
			//if(isCLGLShared())
			{
				PARA_COMP_MANAGER->acquireSharedBuffersForCompute();
			}

			//it is not necessary to distinguish between a cl::Buffer and a cl::BufferGL here :).
			GUARD(writeCL(data));

		}
	}
}
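
A minimal usage sketch for Example #2 (hedged: the buffer object, its creation flags, and the caller-side names are assumptions; only the setData() signature and the context flags are taken from the code above):

void uploadParticlePositions(BufferInterface* buffer)
{
	// Assumes "buffer" was created with HOST_CONTEXT_TYPE_FLAG | OPEN_GL_CONTEXT_TYPE_FLAG.
	std::vector<float> positions(1024 * 4, 0.0f);
	try
	{
		// The cast may be unnecessary, depending on how ContextTypeFlags is defined.
		buffer->setData(positions.data(),
			ContextTypeFlags(HOST_CONTEXT_TYPE_FLAG | OPEN_GL_CONTEXT_TYPE_FLAG));
	}
	catch(const BufferException&)
	{
		//the buffer lacks one of the requested storages, or allocMem() was never called
	}
}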
Example #3
BYTE* ByteBuffer::allocContent(bufsize_t v_size, bufsize_t v_padding)
{
    if (v_size == 0) throw BufferException("Zero size requested");
    bufsize_t allocSize = v_size + v_padding;
    BYTE* content = new (std::nothrow) BYTE[allocSize];
    if (content == NULL) {
        throw BufferException("Cannot allocate buffer of size: 0x" + QString::number(allocSize, 16));
    }
    memset(content, 0, allocSize);
    return content;
}
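
Since allocContent() allocates with array new and zero-fills the whole block (size plus padding), the matching release is delete[]. A hedged sketch of how a destructor pairing with it could look (the member names mirror the ByteBuffer fields used in Examples #8 and #9; the actual destructor is not part of this listing):

ByteBuffer::~ByteBuffer()
{
    delete[] this->content;   // matches new (std::nothrow) BYTE[allocSize] in allocContent()
    this->content = NULL;
    this->contentSize = 0;
}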
Example #4
//called by constructor to early detect invalid values and permutations, like 8-bit float or 32bit normalized (u)int
void BufferElementInfo::validate()const throw (BufferException)
{
	if(hasNoChanneledElements)
	{
		//nothing to validate, unused
		return;
	}

	//check channel number:
	if(! ( (numChannels == 1) || (numChannels == 2) || (numChannels ==4)  ))
	{
		LOG<<ERROR_LOG_LEVEL << "BufferElementInfo::validate: wrong amount of channels: "<<numChannels<<";\n";
		throw(BufferException("BufferElementInfo::validate: numChannels must be 1,2 or 4!"));
	}

	//check bits per channel:
	if(! ( (bitsPerChannel == 8) || (bitsPerChannel == 16) || (bitsPerChannel ==32)  ))
	{
		LOG<<ERROR_LOG_LEVEL << "BufferElementInfo::validate: bitsPerChannel is not 8,16 or 32, but "<<bitsPerChannel<<";\n";
		throw(BufferException("BufferElementInfo::validate: bitsPerChannel must be 8,16 or 32!"));
	}

	if(internalGPU_DataType == GPU_DATA_TYPE_FLOAT)
	{
		if(normalizeIntegralValuesFlag)
		{
			throw(BufferException("normalization on float values makes no sense!"));
		}

		if(! ( (bitsPerChannel == 16) || (bitsPerChannel ==32)  ))
		{
			LOG<<ERROR_LOG_LEVEL << "BufferElementInfo::validate: float types need 16 or 32 bits per channel and not "<<bitsPerChannel<<";\n";
			throw(BufferException("float types need 16 or 32 bits per channel;"));
		}

	}

	if(normalizeIntegralValuesFlag)
	{
		//no need to check for floating point here, that case is caught above;

		//but check that we don't have 32 bit channels
		if(! ( (bitsPerChannel == 8) || (bitsPerChannel == 16)  ))
		{
			throw(BufferException("BufferElementInfo::validate: normalized integer types need 8 or 16 bits per channel; maybe 32 bit integral values are technically possible, but they don't make sense to me ;("));
		}

	}
}
Example #5
CPUBufferHandle BufferInterface::getCPUBufferHandle()const throw(BufferException)
{
	if((mBufferInfo->usageContexts & HOST_CONTEXT_TYPE_FLAG) == 0)
		throw(BufferException("BufferInterface::getCPUBufferHandle: buffer has no CPU attachment"));

	return mCPU_Handle;
}
Example #6
const ComputeBufferHandle& BufferInterface::getComputeBufferHandle()const  throw(BufferException)
{
	if((mBufferInfo->usageContexts & OPEN_CL_CONTEXT_TYPE_FLAG) == 0)
			throw(BufferException("BufferInterface::getComputeBufferHandle: buffer has no CL attachment"));

	return mComputeBufferHandle;
}
Example #7
Texture3D& BufferInterface::toTexture3D() throw(BufferException)
{
	Texture3D* toCastPtr = dynamic_cast<Texture3D*> (this);
	if (toCastPtr)
		return *toCastPtr;
	else
		throw(BufferException("Bad cast to Texture3D"));
}
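
Usage of these checked downcasts (see also Examples #16 and #17) is straightforward; a hedged sketch, where the helper name and the source of the BufferInterface pointer are placeholders:

void useAsVolumeTexture(BufferInterface* someBuffer)
{
	try
	{
		Texture3D& volumeTex = someBuffer->toTexture3D();
		//... sample or attach volumeTex ...
		(void) volumeTex;
	}
	catch(const BufferException&)
	{
		//not a Texture3D; handle the failed cast gracefully
	}
}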
Example #8
ByteBuffer::ByteBuffer(bufsize_t v_size, bufsize_t v_padding)
    : content(NULL), contentSize(v_size), padding(v_padding)
{
    if (v_size == 0) throw BufferException("Zero size requested");

    this->content =  allocContent(v_size, v_padding);
    this->contentSize = v_size;
}
Example #9
ByteBuffer::ByteBuffer(AbstractByteBuffer *v_parent, offset_t v_offset, bufsize_t v_size, bufsize_t v_padding)
{
    if (v_parent == NULL) throw BufferException("Cannot make subBuffer for NULL buffer!");
    if (v_size == 0) throw BufferException("Cannot make 0 size buffer!");

    bufsize_t parentSize = v_parent->getContentSize();

    bufsize_t copySize = v_size < parentSize ? v_size : parentSize;
    bufsize_t allocSize = v_size > parentSize ? v_size : parentSize;

    BYTE *bContent = v_parent->getContentAt(v_offset, copySize);
    if (bContent == NULL) throw BufferException("Cannot make Buffer for NULL content!");

    this->content =  allocContent(allocSize, v_padding);
    this->contentSize = allocSize;

    memcpy(this->content, bContent, copySize);
    TRACE();
}
Example #10
void BufferInterface::transferData(bool fromSystemToDevice)throw(BufferException)
{
	assert( "CPU buffer must exist for transfer between host and device" && mCPU_Handle);

	if(
		( hasBufferInContext(OPEN_GL_CONTEXT_TYPE) && PARA_COMP_MANAGER->graphicsAreInControl() )
		||
		! (hasBufferInContext(OPEN_CL_CONTEXT_TYPE))
	)
	{
		//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
		//TODO uncomment when stable work is assured
		//if(isCLGLShared())
		{
			PARA_COMP_MANAGER->acquireSharedBuffersForGraphics();
		}

		GUARD(bindGL());
		if(fromSystemToDevice)
		{
			GUARD(writeGL(mCPU_Handle));
		}
		else
		{
			GUARD(readGL(mCPU_Handle));
		}
		return;
	}

	if(
		( hasBufferInContext(OPEN_CL_CONTEXT_TYPE) && PARA_COMP_MANAGER->computeIsInControl() )
		||
		! (hasBufferInContext(OPEN_GL_CONTEXT_TYPE))
	)
	{
		//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
		//TODO uncomment when stable work is assured
		//if(isCLGLShared())
		{
			PARA_COMP_MANAGER->acquireSharedBuffersForCompute();
		}

		if(fromSystemToDevice)
		{
			GUARD(writeCL(mCPU_Handle));
		}
		else
		{
			GUARD(readCL(mCPU_Handle));
		}
		return;
	}

	throw(BufferException("BufferInterface::readBack(): need at least one GL or GL usage context in Buffer"));
}
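
A hedged round-trip sketch for Example #10 (the buffer pointer and the work done on the device copy are placeholders; only the transferData() signature is taken from the code above):

void roundTrip(BufferInterface* buffer)
{
	buffer->transferData(true);   //fromSystemToDevice == true: upload mCPU_Handle to the GL or CL copy
	//... run GL rendering or CL kernels that modify the device copy ...
	buffer->transferData(false);  //fromSystemToDevice == false: read the device copy back into mCPU_Handle
}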
Example #11
FileView::FileView(QString &path, bufsize_t maxSize)
    : AbstractFileBuffer(path), fIn (path)
{
    if (fIn.open(QFile::ReadOnly) == false) {
        throw FileBufferException("Cannot open the file: " + path);
    }
    this->fileSize = fIn.size();
    //if (DBG_LVL) printf("File of size:\t%lld\n", fileSize);
    bufsize_t readableSize = getMappableSize(fIn);
    this->mappedSize = (readableSize > maxSize) ? maxSize : readableSize;
    //printf("Mapping size: %lx = %ld\n", this->mappedSize, this->mappedSize);
    uchar *pData = fIn.map(0, this->mappedSize);
    if (pData == NULL) {
        throw BufferException("Cannot map the file: " + path + " of size: 0x" + QString::number(this->mappedSize, 16));
    }
    this->mappedContent = (BYTE*) pData;
}
Example #12
File: Endpoint.cpp Project: yliu120/K3
    void InternalEndpoint::doWrite(const RemoteMessage& v) {
      if ( msgcodec_ ) {
        shared_ptr<string> bufv = make_shared<string>(std::move(msgcodec_->encode(v)));
        if ( buffer_ ) {
          bool success = buffer_->push_back(bufv);
          if ( !success ) {
            // Flush buffer, and then try to append again.
            flushBuffer();

            // Try to append again, and if this still fails, throw a buffering exception.
            success = buffer_->push_back(bufv);
          }

          if ( ! success )
            { throw BufferException("Failed to buffer value during endpoint write."); }
        } else {
          boost::lock_guard<boost::mutex> guard(mtx_);
          handle_->doWrite(bufv);
          notify_subscribers(bufv);
        }
      }
    }
Example #13
GraphicsBufferHandle BufferInterface::getGraphicsBufferHandle()const  throw(BufferException)
{
	if((mBufferInfo->usageContexts & OPEN_GL_CONTEXT_TYPE_FLAG) == 0)
			throw(BufferException("BufferInterface::getGraphicsBufferHandle: buffer has no GL attachment"));
	return mGraphicsBufferHandle;
}
Example #14
bool BufferInterface::allocMem()throw(BufferException)
{
	if( mBufferInfo->isPingPongBuffer ||
		//don't trust the bufferInfo
		dynamic_cast<PingPongBuffer*>(this)
	)
	{
		throw(BufferException("Buffer::allocMem(): this routine may never be calles for ping pong buffers, as they are only managers"
				"for other buffers but having some own associated memory!"));
	}


	//assert that this routine is called only once per object:
	if(mCPU_Handle || mGraphicsBufferHandle || mComputeBufferHandle())
	{
		throw(BufferException("Buffer::allocMem(): some buffers already allocated"));
	}

	if( mBufferInfo->usageContexts & HOST_CONTEXT_TYPE_FLAG )
	{
		mCPU_Handle = malloc(mBufferInfo->bufferSizeInByte);
	}

	if(mBufferInfo->usageContexts & OPEN_GL_CONTEXT_TYPE_FLAG)
	{
		//ok, there is a need for an openGL buffer; maybe it will be shared with openCL,
		//but that doesn't matter for the GL buffer creation :)
		if( isDefaultBuffer() && (mBufferInfo->glBufferType == NO_GL_BUFFER_TYPE)	)
		{
			throw(BufferException("no gl buffer type specified for a non-texture or non-renderbuffer Buffer, although a gl usage context was requested"));
		}
		//no special treatment for texture types, as we use native GL-#defines

		GUARD(generateGL());
		//"direct" call of "bindGL()" here isn't dangerous, as the buffer is not shared (yet),
		//as it has just been created;
		GUARD(bindGL());
		GUARD(allocGL());
	}

	//ok, the GL stuff is allocated if it was requested; Now let's check for the "compute" world;
	if(mBufferInfo->usageContexts & OPEN_CL_CONTEXT_TYPE_FLAG)
	{
		if(mBufferInfo->usageContexts & OPEN_GL_CONTEXT_TYPE_FLAG)
		{
			//both CL and GL are requested, that means interop:
			//neither bind nor alloc necessary, just generating:
			GUARD(generateCLGL());

		}
		else
		{
			//a CL-only buffer is requested:
			//in OpenCL, allocation happens at buffer creation, so no separate allocCL() routine needs to be called
			GUARD(generateCL());
			//GUARD(allocCL()); // not needed: generateCL() already allocates the CL buffer
		}
	}


#if (FLEWNIT_TRACK_MEMORY || FLEWNIT_DO_PROFILING)
	//only track memory for non-pingpongs, as ping pong buffers only manage, but don't own their own data store;
	if(! mBufferInfo->isPingPongBuffer)
	{
		registerBufferAllocation(mBufferInfo->usageContexts,mBufferInfo->bufferSizeInByte);
	}
#endif

	return true;

}
Example #15
void VertexBasedGeometry::setAttributeBuffer(BufferInterface* buffi, BufferSemantics bs) throw(BufferException)
{
	bindSafe();

	BufferBasedGeometry::setAttributeBuffer(buffi);

	//validateBufferIntegrity();

	if(buffi)
	{
		mAnyAttribBufferIsPingPong = (mAnyAttribBufferIsPingPong || buffi->isPingPongBuffer() );

		buffi->bind(OPEN_GL_CONTEXT_TYPE);


		//Convention for the BufferElementInfo of a Buffer designated to be an attribute buffer:
			//if the internal GPU data type is int or uint, it will always be handled as an integer attribute,
			//unless the normalization flag is set; note especially that non-normalized int-to-float
			//conversions aren't supported this way; this is on purpose, as
			//	1.: I don't see any advantage in reading unnormalized integer values and converting
			//		them to float;
			//	2.: the control flow and the usage flags to be tracked are less complex;
			//if one wants to save memory, the GL_HALF data type should be used
			//(though I didn't test it in the glm library)
		GLenum elementTypeGL= GL_FLOAT;
		if(buffi->getBufferInfo().elementInfo.internalGPU_DataType != GPU_DATA_TYPE_FLOAT)
		{
			if(buffi->getBufferInfo().elementInfo.internalGPU_DataType == GPU_DATA_TYPE_UINT)
			{
				switch(buffi->getBufferInfo().elementInfo.bitsPerChannel)
				{
				case 8:  elementTypeGL= GL_UNSIGNED_BYTE; 	break;
				case 16: elementTypeGL= GL_UNSIGNED_SHORT;	break;
				case 32: elementTypeGL= GL_UNSIGNED_INT;	break;
				default: throw(BufferException("bad bits per channel")); break;
				}
			}else{ //must be signed int
				switch(buffi->getBufferInfo().elementInfo.bitsPerChannel)
				{
				case 8:  elementTypeGL= GL_BYTE; 	break;
				case 16: elementTypeGL= GL_SHORT;	break;
				case 32: elementTypeGL= GL_INT;		break;
				default: throw(BufferException("bad bits per channel")); break;
				}
			}
		} else //end "not float"
		{
			switch(buffi->getBufferInfo().elementInfo.bitsPerChannel)
			{
			case 8:  throw(BufferException("there is no 8 bit floating point type"));  	break;
			case 16: elementTypeGL= GL_HALF_FLOAT;	break;
			case 32: elementTypeGL= GL_FLOAT;		break;
			case 64: throw(BufferException("double precision floating point not supported (yet);"));		break;
			default: throw(BufferException("bad bits per channel")); break;
			}
		}



		if(
			(buffi->getBufferInfo().elementInfo.internalGPU_DataType==GPU_DATA_TYPE_FLOAT)
			||
			(buffi->getBufferInfo().elementInfo.normalizeIntegralValuesFlag)
		)
		{
			GUARD(
				glVertexAttribPointer(
					static_cast<GLuint> (buffi->getBufferInfo().bufferSemantics),
					buffi->getBufferInfo().elementInfo.numChannels,
					elementTypeGL,
					buffi->getBufferInfo().elementInfo.normalizeIntegralValuesFlag,
					0, //no stride, tightly packed
					0 //no offset to currently bound GL_ARRAY_BUFFER
				)
			);
		}
		else //"real" integer stuff, both concerning storage and lookup
		{
			GUARD(
				glVertexAttribIPointer(
					static_cast<GLuint> (buffi->getBufferInfo().bufferSemantics),
					buffi->getBufferInfo().elementInfo.numChannels,
					elementTypeGL,
					0, //no stride, tightly packed
					0 //no offset to currently bound GL_ARRAY_BUFFER
				)
			);
		}


		GUARD (glEnableVertexAttribArray(static_cast<GLuint> (buffi->getBufferInfo().bufferSemantics)) );

	}
	else
	{
		//unset attribute semantics
		GUARD ( glDisableVertexAttribArray( bs ) );
	}

	unBindSave();
}
Example #16
Buffer& BufferInterface::toDefaultBuffer()throw(BufferException)
{
	Buffer* toCastPtr = dynamic_cast<Buffer*>(this);
	if(toCastPtr) return *toCastPtr;
	else throw(BufferException("Bad cast to default Buffer"));
}
Example #17
PingPongBuffer& BufferInterface::toPingPongBuffer() throw(BufferException)
{
	PingPongBuffer* toCastPtr = dynamic_cast<PingPongBuffer*>(this);
	if(toCastPtr) return *toCastPtr;
	else throw(BufferException("Bad cast to PingPongBuffer"));
}
Example #18
bool TextureInfo::calculateCLGLImageFormatValues()throw (BufferException)
{
	elementInfo.validate();

	glBufferType= NO_GL_BUFFER_TYPE;
	numElements = dimensionExtends.x * dimensionExtends.y * dimensionExtends.z
			* numArrayLayers * numMultiSamples;

	if(isDepthTexture)
	{
		if( (elementInfo.numChannels != 1 )
				||
			(elementInfo.normalizeIntegralValuesFlag)
				||
			(elementInfo.bitsPerChannel != 32 )
				||
			(elementInfo.internalGPU_DataType != GPU_DATA_TYPE_FLOAT )
		)
		{
			throw(BufferException("BufferElementInfo is does not indicate a supported depth renderable format!"
					"Currently, only 32bit float is accepted"));
		}
		else
		{
			if( 	( dimensionality ==1 )
					//check for 1d array texture
					|| ((dimensionality == 1) && (numArrayLayers > 1))
					|| isMipMapped
					|| (numMultiSamples > 1)
			)
			{
				throw(BufferException("For depth textures, 1D textures, mip mapping"
				 " and/or multisampling is not supported (by this framework (yet) ;( )"));
			}
			else
			{
				if(isRectangleTex)
				{
					LOG<<WARNING_LOG_LEVEL<<"sorry at the moment there is no official support for rectangle depth textures;"
							<<"It should work, but use on your own risk, especially for shadowmapping (other sampler (samplerRectShadow) and bias matrix!);\n";
				}

				elementType = TYPE_FLOAT;

				glImageFormat.desiredInternalFormat = GL_DEPTH_COMPONENT32;
				glImageFormat.channelOrder= GL_DEPTH_COMPONENT;

				glImageFormat.channelDataType = GL_FLOAT;

				clImageFormat.image_channel_data_type = CL_FLOAT;
				clImageFormat.image_channel_order = CL_R; //will it work?

				switch(textureTarget)
				{
				case GL_TEXTURE_2D:
					textureType = TEXTURE_TYPE_2D_DEPTH;
					break;
				case GL_TEXTURE_RECTANGLE:
					textureType = TEXTURE_TYPE_2D_RECT_DEPTH;
					break;
				case GL_TEXTURE_CUBE_MAP:
					textureType = TEXTURE_TYPE_2D_CUBE_DEPTH;
					break;
				case GL_TEXTURE_2D_ARRAY:
					textureType = TEXTURE_TYPE_2D_ARRAY_DEPTH;
					break;
				}
			}
		}
	}
	else //not a depth texture
	{
		//first, set the most trivial stuff: the number of channels
			switch(elementInfo.numChannels)
			{
			case 1:
				glImageFormat.channelOrder  = GL_RED;
				clImageFormat.image_channel_order = CL_R ;
				break;
			case 2:
				glImageFormat.channelOrder  = GL_RG;
				clImageFormat.image_channel_order = CL_RG ;
				break;
			case 4:
				glImageFormat.channelOrder  = GL_RGBA;
				clImageFormat.image_channel_order = CL_RGBA ;
				break;
			default:
				assert(0&&"should never end here");
			}

			switch(textureTarget)
			{
			case GL_TEXTURE_1D:
				textureType = TEXTURE_TYPE_1D;
				break;
			case GL_TEXTURE_1D_ARRAY:
				textureType = TEXTURE_TYPE_1D_ARRAY;
				break;


			case GL_TEXTURE_2D:
				textureType = TEXTURE_TYPE_2D;
				break;
			case GL_TEXTURE_RECTANGLE:
				textureType = TEXTURE_TYPE_2D_RECT;
				break;
			case GL_TEXTURE_CUBE_MAP:
				textureType = TEXTURE_TYPE_2D_CUBE;
				break;
			case GL_TEXTURE_2D_ARRAY:
				textureType = TEXTURE_TYPE_2D_ARRAY;
				break;
			case GL_TEXTURE_2D_MULTISAMPLE:
				textureType = TEXTURE_TYPE_2D_MULTISAMPLE;
				break;
			case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
				textureType = TEXTURE_TYPE_2D_ARRAY_MULTISAMPLE;
				break;


			case GL_TEXTURE_3D:
				textureType = TEXTURE_TYPE_3D;
				break;

			}

			//------------------------------------------------------

			bool normalize =  elementInfo.normalizeIntegralValuesFlag;

			switch(elementInfo.internalGPU_DataType)
			{
			case GPU_DATA_TYPE_UINT :
				switch(elementInfo.bitsPerChannel)
				{
				case 8:
					glImageFormat.channelDataType = GL_UNSIGNED_BYTE;

					if(normalize)	clImageFormat.image_channel_data_type = CL_UNORM_INT8;
					else			clImageFormat.image_channel_data_type = CL_UNSIGNED_INT8;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_UINT8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_R8;
						else			glImageFormat.desiredInternalFormat  = GL_R8UI;
						break;
					case 2:
						elementType = TYPE_VEC2UI8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RG8;
						else			glImageFormat.desiredInternalFormat  = GL_RG8UI;
						break;
					case 4:
						elementType = TYPE_VEC4UI8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RGBA8;
						else			glImageFormat.desiredInternalFormat  = GL_RGBA8UI;
						break;
					default:
						assert(0&&"should never end here");
					}
					break;
				case 16:
					glImageFormat.channelDataType = GL_UNSIGNED_SHORT;

					if(normalize)	clImageFormat.image_channel_data_type = CL_UNORM_INT16;
					else			clImageFormat.image_channel_data_type = CL_UNSIGNED_INT16;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_UINT16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_R16;
						else			glImageFormat.desiredInternalFormat  = GL_R16UI;
						break;
					case 2:
						elementType = TYPE_VEC2UI16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RG16;
						else			glImageFormat.desiredInternalFormat  = GL_RG16UI;
						break;
					case 4:
						elementType = TYPE_VEC4UI16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RGBA16;
						else			glImageFormat.desiredInternalFormat  = GL_RGBA16UI;
						break;
					default:
						assert(0&&"should never end here");
					}
					break;
				case 32:
					//no normalization valid here!
					glImageFormat.channelDataType = GL_UNSIGNED_INT;

					clImageFormat.image_channel_data_type = CL_UNSIGNED_INT32;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_UINT32;
						glImageFormat.desiredInternalFormat  = GL_R32UI;
						break;
					case 2:
						elementType = TYPE_VEC2UI32;
						glImageFormat.desiredInternalFormat  = GL_RG32UI;
						break;
					case 4:
						elementType = TYPE_VEC4UI32;
						glImageFormat.desiredInternalFormat  = GL_RGBA32UI;
						break;
					default:
						assert(0&&"should never end here");
					}
					break;
				default:
					assert(0&&"should never end here");
					break;
				}
				break;
			//-----------------------------------------------------------------------------------
			case GPU_DATA_TYPE_INT:
				switch(elementInfo.bitsPerChannel)
				{
				case 8:
					glImageFormat.channelDataType = GL_BYTE;
					if(normalize)	clImageFormat.image_channel_data_type = CL_SNORM_INT8;
					else			clImageFormat.image_channel_data_type = CL_SIGNED_INT8;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_INT8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_R8_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_R8I;
						break;
					case 2:
						elementType = TYPE_VEC2I8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RG8_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_RG8I;
						break;
					case 4:
						elementType = TYPE_VEC4I8;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RGBA8_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_RGBA8I;
						break;
					default:
						assert(0&&"should never end here");
					}
					break;
				case 16:
					glImageFormat.channelDataType = GL_SHORT;
					if(normalize)	clImageFormat.image_channel_data_type = CL_SNORM_INT16;
					else			clImageFormat.image_channel_data_type = CL_SIGNED_INT16;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_INT16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_R16_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_R16I;
						break;
					case 2:
						elementType = TYPE_VEC2I16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RG16_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_RG16I;
						break;
					case 4:
						elementType = TYPE_VEC4I16;
						if(normalize)	glImageFormat.desiredInternalFormat  = GL_RGBA16_SNORM;
						else			glImageFormat.desiredInternalFormat  = GL_RGBA16I;
						break;
					default:
						assert(0&&"should never end here");
					}
					break;
				case 32:
					//no normalization valid here!
					glImageFormat.channelDataType = GL_INT;
					clImageFormat.image_channel_data_type = CL_SIGNED_INT32;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_INT32;
						glImageFormat.desiredInternalFormat  = GL_R32I;		break;
					case 2:
						elementType = TYPE_VEC2I32;
						glImageFormat.desiredInternalFormat  = GL_RG32I;	break;
					case 4:
						elementType = TYPE_VEC4I32;
						glImageFormat.desiredInternalFormat  = GL_RGBA32I;	break;
					default:
						assert(0&&"should never end here");
					}
					break;
				default:
					assert(0&&"should never end here");
					break;
				}
				break;
			//----------------------------------------------------------------------------------
			case GPU_DATA_TYPE_FLOAT:
				switch(elementInfo.bitsPerChannel)
				{
				case 16:
					//no normalization valid here!
					glImageFormat.channelDataType = GL_HALF_FLOAT;
					clImageFormat.image_channel_data_type = CL_HALF_FLOAT;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_HALF_FLOAT;
						glImageFormat.desiredInternalFormat  = GL_R16F;		break;
					case 2:
						elementType = TYPE_VEC2F16;
						glImageFormat.desiredInternalFormat  = GL_RG16F;	break;
					case 4:
						elementType = TYPE_VEC4F16;
						glImageFormat.desiredInternalFormat  = GL_RGBA16F;	break;
					default:
						assert(0&&"should never end here");
					}
					break;
				case 32:
					//no normalization valid here!
					glImageFormat.channelDataType = GL_FLOAT;
					clImageFormat.image_channel_data_type = CL_FLOAT;

					switch(elementInfo.numChannels)
					{
					case 1:
						elementType = TYPE_FLOAT;
						glImageFormat.desiredInternalFormat  = GL_R32F;		break;
					case 2:
						elementType = TYPE_VEC2F;
						glImageFormat.desiredInternalFormat  = GL_RG32F;	break;
					case 4:
						elementType = TYPE_VEC4F;
						glImageFormat.desiredInternalFormat  = GL_RGBA32F;	break;
					default:
						assert(0&&"should never end here");
					}
					break;
				default:
					assert(0&&"should never end here");
					break;
				}
				break;
			default:
				assert(0&&"should never and here");
				break;
			}
	}

	GLint maxNumMultiSamples;
	glGetIntegerv(GL_MAX_COLOR_TEXTURE_SAMPLES, & maxNumMultiSamples);

	if(! BufferHelper::isPowerOfTwo(numMultiSamples) ||
			(numMultiSamples > maxNumMultiSamples))
	{
		throw(BufferException("numMultiSamples must be power of two "
				"and <= GL_MAX_COLOR_TEXTURE_SAMPLES"));
	}


	bufferSizeInByte = BufferHelper::elementSize(elementType) * numElements;
	if(isCubeTex)
	{
		bufferSizeInByte *= 6;
	}

	return true;
}
Example #19
HRESULT Buffer::initialize(ID3D11Device *p_Device, ID3D11DeviceContext *p_DeviceContext,
	Description &p_Description)
{
	HRESULT result = S_OK;
	D3D11_BUFFER_DESC bufferDescription;

	m_Device = p_Device;
	m_DeviceContext = p_DeviceContext;
	m_Type = p_Description.type;

	bufferDescription.StructureByteStride = 0;
	bufferDescription.MiscFlags = 0;

	switch (m_Type)
	{
	case Type::VERTEX_BUFFER:
	{
								bufferDescription.BindFlags = D3D11_BIND_VERTEX_BUFFER;
								if (p_Description.usage == Usage::STREAM_OUT_TARGET)
								{
									bufferDescription.BindFlags |= D3D11_BIND_STREAM_OUTPUT;
								}
								break;
	}
	case Type::INDEX_BUFFER:
	{
							   bufferDescription.BindFlags = D3D11_BIND_INDEX_BUFFER;
							   break;
	}
	case Type::CONSTANT_BUFFER_VS:
	case Type::CONSTANT_BUFFER_GS:
	case Type::CONSTANT_BUFFER_PS:
	case Type::BUFFER_TYPE_COUNT:
	case Type::CONSTANT_BUFFER_ALL:
	{
									  bufferDescription.BindFlags = D3D11_BIND_CONSTANT_BUFFER;
									  break;
	}
	case  Type::STAGING_BUFFER:
	{
								  bufferDescription.BindFlags = 0;
								  break;
	}

	case Type::STRUCTURED_BUFFER:
	{
									bufferDescription.MiscFlags = D3D11_RESOURCE_MISC_BUFFER_STRUCTURED;
									bufferDescription.StructureByteStride = p_Description.sizeOfElement;
									bufferDescription.BindFlags = 0;
									break;
	}

	default:
	{
			   return S_FALSE;
			   break;
	}
	}

	if (p_Description.bindSRV)	bufferDescription.BindFlags |= D3D11_BIND_SHADER_RESOURCE;
	if (p_Description.bindUAV)	bufferDescription.BindFlags |= D3D11_BIND_UNORDERED_ACCESS;

	m_Usage = p_Description.usage;
	m_SizeOfElement = p_Description.sizeOfElement;
	m_NumOfElements = p_Description.numOfElements;
	bufferDescription.CPUAccessFlags = 0;
	switch (m_Usage)
	{
	case Usage::DEFAULT:
	{
						   bufferDescription.Usage = D3D11_USAGE_DEFAULT;
						   break;
	}
	case Usage::STREAM_OUT_TARGET:
	{
									 bufferDescription.Usage = D3D11_USAGE_DEFAULT;
									 break;
	}
	case Usage::CPU_WRITE:
	{
							 bufferDescription.Usage = D3D11_USAGE_DYNAMIC;
							 bufferDescription.CPUAccessFlags |= D3D11_CPU_ACCESS_WRITE;
							 break;
	}
	case Usage::CPU_WRITE_DISCARD:
	{
									 bufferDescription.Usage = D3D11_USAGE_DYNAMIC;
									 bufferDescription.CPUAccessFlags |= D3D11_CPU_ACCESS_WRITE;
									 break;
	}
	case Usage::CPU_READ:
	{
							if (m_Type != Type::STAGING_BUFFER)
							{
								throw BufferException("Cannot set CPU read to other than staging buffer", __LINE__, __FILE__);
							}
							bufferDescription.Usage = D3D11_USAGE_STAGING;
							bufferDescription.CPUAccessFlags |= D3D11_CPU_ACCESS_READ;
							break;
	}
	case Usage::USAGE_COUNT:
	{
							   bufferDescription.Usage = D3D11_USAGE_DEFAULT;
							   break;
	}
	case Usage::USAGE_IMMUTABLE:
	{
								   bufferDescription.Usage = D3D11_USAGE_IMMUTABLE;
								   break;
	}
	default:
	{
			   break;
	}
	}

	bufferDescription.ByteWidth = p_Description.numOfElements * p_Description.sizeOfElement;

	bufferDescription.ByteWidth = ((bufferDescription.ByteWidth + 15) / 16) * 16;

	if (p_Description.initData)
	{
		D3D11_SUBRESOURCE_DATA data;
		data.pSysMem = p_Description.initData;
		data.SysMemPitch = 0;
		data.SysMemSlicePitch = 0;
		result = createBuffer(&bufferDescription, &data, &m_Buffer);
	}
	else
	{
		result = createBuffer(&bufferDescription, nullptr, &m_Buffer);
	}

	return result;
}
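
A hedged usage sketch for Example #19: it only uses the Description members read by initialize() above (type, usage, sizeOfElement, numOfElements, initData, bindSRV, bindUAV); the nesting of Description inside Buffer, the zero-initialization, and the caller-side names are assumptions:

HRESULT createDynamicVertexBuffer(ID3D11Device* device, ID3D11DeviceContext* deviceContext,
	Buffer& outBuffer, UINT vertexCount, UINT vertexStride)
{
	Buffer::Description desc = {};                    // assumed: Description is nested in Buffer
	desc.type = Buffer::Type::VERTEX_BUFFER;
	desc.usage = Buffer::Usage::CPU_WRITE_DISCARD;    // dynamic, rewritten by the CPU each frame
	desc.sizeOfElement = vertexStride;
	desc.numOfElements = vertexCount;
	desc.initData = nullptr;                          // no initial data; filled later via map/unmap
	desc.bindSRV = false;
	desc.bindUAV = false;
	return outBuffer.initialize(device, deviceContext, desc);
}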
Example #20
const BufferInterface& BufferInterface::operator=(const BufferInterface& rhs) throw(BufferException)
{
	//the buffers must match exactly in all their meta-info in order to be safely copied;
	//OpenGL and OpenCL are less restrictive than this, but here I trade flexibility
	//for simplicity and robustness;
	if( (*this) == rhs )
	{
		if((mBufferInfo->usageContexts & HOST_CONTEXT_TYPE_FLAG) !=0)
		{
			memcpy(mCPU_Handle,rhs.getCPUBufferHandle(),mBufferInfo->bufferSizeInByte);
		}

		//GL
		if(
			( hasBufferInContext(OPEN_GL_CONTEXT_TYPE) && PARA_COMP_MANAGER->graphicsAreInControl() )
			||
			! (hasBufferInContext(OPEN_CL_CONTEXT_TYPE))
		)
		{
			//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
			//TODO uncomment when stable work is assured
			//if(isCLGLShared())
			{
				PARA_COMP_MANAGER->acquireSharedBuffersForGraphics();
			}
			//do a barrier by all means to assure buffer integrity;
			PARA_COMP_MANAGER->barrierGraphics();

			GUARD(copyGLFrom(rhs.getGraphicsBufferHandle()));
			//return, as a shared buffer only needs to be copied via one context;
			return *this;
		}

		//CL
		if(
			( hasBufferInContext(OPEN_CL_CONTEXT_TYPE) && PARA_COMP_MANAGER->computeIsInControl() )
			||
			! (hasBufferInContext(OPEN_GL_CONTEXT_TYPE))
		)
		{
			//commented out the guard in case of driver bugs messing up when doing too much time-shared CL-GL stuff
			//TODO uncomment when stable work is assured
			//if(isCLGLShared())
			{
				PARA_COMP_MANAGER->acquireSharedBuffersForCompute();
			}

			//do a barrier by all means to assure buffer integrity;
			PARA_COMP_MANAGER->barrierCompute();
			GUARD(copyCLFrom(rhs.getComputeBufferHandle()));
			return *this;
		}

	}
	else
	{
		throw(BufferException("Buffer::operator= : Buffers not compatible"));
	}


	return *this;
}
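
A hedged sketch of Example #20 in use: the assignment only succeeds when the two buffers compare equal in their meta-info; otherwise the BufferException propagates. The helper name and the calling context are assumptions:

void copyBufferContents(BufferInterface& dst, const BufferInterface& rhs)
{
	try
	{
		dst = rhs;  //copies the CPU, GL and/or CL storage, depending on the usage contexts
	}
	catch(const BufferException&)
	{
		//meta-info mismatch: the buffers are not compatible for a direct copy
	}
}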