Example #1
void ofxColorStream::setup(ofPtr<openni::Device> device, bool isVerbose)
{
	this->device = device;

	openni::Status rc;

	if (device->getSensorInfo(openni::SENSOR_COLOR) != NULL)
	{
		stream = ofPtr<openni::VideoStream>(new openni::VideoStream);
		rc = stream->create(*device, openni::SENSOR_COLOR);
		if (rc != openni::STATUS_OK)
		{
			throw std::runtime_error(std::string("Couldn't create color stream\n") + openni::OpenNI::getExtendedError());
		}
	}

	allocateBuffers();

	rc = stream->start();
	if (rc != openni::STATUS_OK)
	{
		throw std::runtime_error(std::string("Couldn't start the color stream\n") + openni::OpenNI::getExtendedError());
	}
}
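allocateBuffers() itself is not shown in this example. A minimal sketch of what it might do, assuming the class keeps a hypothetical ofPixels member named pixels and sizes it from the stream's current video mode (OpenNI color streams are commonly RGB888, hence 3 channels):

void ofxColorStream::allocateBuffers()
{
	// Sketch only: "pixels" is an assumed member, not part of the listing above.
	openni::VideoMode mode = stream->getVideoMode();
	pixels.allocate(mode.getResolutionX(), mode.getResolutionY(), 3);
}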
Example #2
Mesh::Mesh(Point3 *vertices_, Vector3 *vertexNormals_,
           int nI_, int nJ_, bool wrapI_, bool wrapJ_)
    : nI(nI_), nJ(nJ_), wrapI(wrapI_), wrapJ(wrapJ_)
{
    nVertices = nI * nJ;
    vertices = new Point3[nVertices];
    vertexNormals = new Vector3[nVertices];
    for (int i = 0; i < nVertices; i++) {
        vertices[i] = vertices_[i];
        vertexNormals[i] = vertexNormals_[i];
    }

    // This enforces our requirement for distinct mesh points and thus
    // prevents later trouble.
    assert(pointsAreDistinct());

    createVertexIndices();
    createFaceNormals();

    allocateBuffers();
    //
    // Since we're handling transforms in the vertex shader, we only
    // need to download the buffers once, here in the constructor,
    // rather than in Mesh::render().
    //
    updateBuffers();
}
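The assert above depends on pointsAreDistinct(), which this page does not show. A straightforward O(n²) sketch, assuming Point3 provides a usable operator== (an epsilon comparison may be preferable for floating-point coordinates):

bool Mesh::pointsAreDistinct() const
{
    // Hypothetical helper: true when no two mesh vertices coincide.
    for (int i = 0; i < nVertices; i++)
        for (int j = i + 1; j < nVertices; j++)
            if (vertices[i] == vertices[j])
                return false;
    return true;
}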
Example #3
void ShaderParticleRenderer::_updateRenderQueue(RenderQueue* queue, Ogre::list<Particle*>::type& currentParticles, bool cullIndividually)
{
    // make sure we have enough room in the buffers
    if (!allocateBuffers(currentParticles.size())) {
        assert(0 && "Cannot allocate buffers");
        return;
    }

    // update vertex data
    mRadius = 0.0f;
    if (!currentParticles.empty()) {
        HardwareVertexBufferSharedPtr pVB = mVertexData->vertexBufferBinding->getBuffer(0);
        uchar* pDataVB  = reinterpret_cast<uchar*>(pVB->lock(HardwareBuffer::HBL_DISCARD));
        for (Ogre::list<Particle*>::type::iterator it=currentParticles.begin(); it!=currentParticles.end(); ++it) {
            Particle* pParticle = *it;
            addParticle(pDataVB, *pParticle);
            pDataVB += 4 * mVertexSize;

            float fDist = (mParentNode != NULL) ? mParentNode->getPosition().distance(pParticle->mPosition) : pParticle->mPosition.length();
            if (fDist > mRadius)
                mRadius = fDist;
        }
        pVB->unlock();
    }

    // setup counts
    mVertexData->vertexCount = currentParticles.size() * 4;
    mIndexData->indexCount   = currentParticles.size() * 6;

    // update render queue
    queue->addRenderable(this, mRenderQueueID);
}
Example #4
/*
 * Hook called by EGL to acquire a buffer. This call may block if no
 * buffers are available.
 *
 * The window holds a reference to the buffer between dequeueBuffer and
 * either queueBuffer or cancelBuffer, so clients only need their own
 * reference if they might use the buffer after queueing or canceling it.
 * Holding a reference to a buffer after queueing or canceling it is only
 * allowed if a specific buffer count has been set.
 *
 * The libsync fence file descriptor returned in the int pointed to by the
 * fenceFd argument will refer to the fence that must signal before the
 * dequeued buffer may be written to.  A value of -1 indicates that the
 * caller may access the buffer immediately without waiting on a fence.  If
 * a valid file descriptor is returned (i.e. any value except -1) then the
 * caller is responsible for closing the file descriptor.
 *
 * Returns 0 on success or -errno on error.
 */
int HWComposerNativeWindow::dequeueBuffer(BaseNativeWindowBuffer** buffer, int *fenceFd)
{
    HYBRIS_TRACE_BEGIN("hwcomposer-platform", "dequeueBuffer", "");

    pthread_mutex_lock(&m_mutex);

    // Allocate buffers if the list is empty, typically on the first call
    if (m_bufList.empty())
        allocateBuffers();
    assert(!m_bufList.empty());
    assert(m_nextBuffer < m_bufList.size());


    // Grab the next available buffer in the list and advance m_nextBuffer
    // to the one after it.
    HWComposerNativeWindowBuffer *b = m_bufList.at(m_nextBuffer);
    TRACE("idx=%d, buffer=%p, fence=%d", m_nextBuffer, b, b->fenceFd);
    *buffer = b;
    m_nextBuffer++;
    if (m_nextBuffer >= m_bufList.size())
        m_nextBuffer = 0;

    // Hand the caller a dup of the buffer's fence, then close and reset ours.
    int fence = b->fenceFd;
    if (fenceFd)
        *fenceFd = dup(fence);
    if (fence != -1) {
        close(b->fenceFd);
        b->fenceFd = -1;
    }

    pthread_mutex_unlock(&m_mutex);
    HYBRIS_TRACE_END("hwcomposer-platform", "dequeueBuffer", "");
    return 0;
}
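The fence contract described in the comment falls on the caller. A hedged sketch of a client honouring it, assuming libsync's sync_wait() is available:

#include <sync/sync.h>   // libsync: sync_wait()
#include <unistd.h>      // close()

// Hypothetical caller: wait on the dequeue fence before writing to the buffer.
void acquireAndFill(HWComposerNativeWindow *win)
{
    BaseNativeWindowBuffer *buf = NULL;
    int fenceFd = -1;
    if (win->dequeueBuffer(&buf, &fenceFd) != 0)
        return;
    if (fenceFd != -1) {
        sync_wait(fenceFd, 3000);  // block until the fence signals (3 s timeout)
        close(fenceFd);            // the caller owns the returned fd
    }
    // ... the buffer may be written to from here on ...
}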
Example #5
int PreviewStream::configure(int fps, bool /*videoSnapshot*/)
{
    FLOG_TRACE("PreviewStream %s running", __FUNCTION__);
    int ret = NO_ERROR;
    int errCode = 0;

    fAssert(mDeviceAdapter.get() != NULL);
    ret = mDeviceAdapter->setDeviceConfig(mWidth, mHeight, mFormat, fps);
    if (ret != NO_ERROR) {
        FLOGE("%s setDeviceConfig failed", __FUNCTION__);
        errCode = CAMERA2_MSG_ERROR_DEVICE;
        goto fail;
    }

    mDeviceAdapter->setCameraBufferProvide(this);
    ret = allocateBuffers(mWidth, mHeight, mFormat, mMaxProducerBuffers);
    if (ret != NO_ERROR) {
        FLOGE("%s allocateBuffers failed", __FUNCTION__);
        errCode = CAMERA2_MSG_ERROR_REQUEST;
        goto fail;
    }

    mPrepared = true;
    return NO_ERROR;

fail:
    freeBuffers();
    FLOGE("Error occurred, performing cleanup");

    if (NULL != mErrorListener) {
        mErrorListener->handleError(errCode);
    }

    return BAD_VALUE;
}
Example #6
status_t CaptureStream::configure(void)
{
    if (mCaptureDevice->isStarted()) {
        LOGE("%s: Device has been started, can't registerBuffers!", __FUNCTION__);
        return INVALID_OPERATION;
    }

    LOG2("@%s, name:%s", __FUNCTION__, getName());
    status_t status = NO_ERROR;

    Mutex::Autolock _l(mBufBookKeepingLock);

    LOG2("@%s, buffers number: real:%d, skip:%d", __FUNCTION__, REAL_BUF_NUM, mMaxNumOfSkipFrames);
    // create fake buffers for frame skipping
    mFakeBufferIndexStart = mFakeBufferIndex = REAL_BUF_NUM;
    mFakeBuffers.clear();
    uint32_t bufCount = REAL_BUF_NUM + mMaxNumOfSkipFrames;
    mRealBufferIndex = 0;

    // Allocate internal buffers
    status = allocateBuffers(bufCount);
    if (status != NO_ERROR) {
        return status;
    }

    status = mCaptureDevice->setBufferPool(mV4l2CaptureBuffers, false);
    status |= mPostviewDevice->setBufferPool(mV4l2PostviewBuffers, false);
    if (status != NO_ERROR) {
        LOGE("Error registering capture buffers ret=%d", status);
        return status;
    }

    PERFORMANCE_TRACES_BREAKDOWN_STEP("Capture");
    return status;
}
Example #7
void btParticlesDynamicsWorld::postInitDeviceData()
{
	m_hashSize = getMaxPowOf2(m_numParticles);
	createVBO();
	allocateBuffers();
	adjustGrid();
	grabSimulationData();
}
Example #8
PolyLine::PolyLine(Point3 *vertices_, int nI_, bool wrapI_)
    : nI(nI_), wrapI(wrapI_)
{
    vertices = new Point3[nI];
    for (int i = 0; i < nI; i++)
        vertices[i] = vertices_[i];
    allocateBuffers();
    updateBuffers();
}
Example #9
void AnatomyOMXClient::prepare() {
	init();

	changeState(OMX_StateIdle);
	allocateBuffers();
	sleep(1);

	changeState(OMX_StateExecuting);
	sleep(1);
}
Example #10
void VisionEngine::setupCamera() {

    camera_ = CameraTool::getCamera(camera_config_);
    if (camera_ == NULL) {
        allocateBuffers();
        return;
    }

    if (camera_->initCamera()) {
        width_ = camera_->getWidth();
        height_ = camera_->getHeight();
        fps_ = camera_->getFps();
        format_ = camera_->getFormat();
        camera_->printInfo();
    } else {
        printf("could not initialize selected camera\n");
        camera_->closeCamera();
        delete camera_;
        camera_ = CameraTool::getDefaultCamera();

        if (camera_ == NULL) {
            allocateBuffers();
            return;
        } else if (camera_->initCamera()) {
            width_ = camera_->getWidth();
            height_ = camera_->getHeight();
            fps_ = camera_->getFps();
            format_ = camera_->getFormat();
            camera_->printInfo();
        } else {
            printf("could not initialize default camera\n");
            camera_->closeCamera();
            delete camera_;
            camera_ = NULL;
        }
    }

    allocateBuffers();
}
Example #11
PixelStates::PixelStates(uint8_t mW, uint8_t mH, uint8_t tX, uint8_t tY, uint8_t matrixType) : 
	Adafruit_GFX(mW * tX, mH * tY), 
	type(matrixType),
	selectBuffer(BUFFER_0),
	otherBuffer(BUFFER_1),
	matrixWidth(mW),
	matrixHeight(mH),
	tilesX(tX),
	tilesY(tY),
	arrSize(ARRAY_SIZE(mW * tX, mH * tY)) { 

	allocateBuffers();
}
Example #12
PixelStates::PixelStates(uint16_t w, uint16_t h, uint8_t matrixType) : 
	Adafruit_GFX(w, h),
	type(matrixType),
	selectBuffer(BUFFER_0),
	otherBuffer(BUFFER_1),
	matrixWidth(w),
	matrixHeight(h),
	tilesX(0),
	tilesY(0),
	arrSize(ARRAY_SIZE(w, h)) { 

	allocateBuffers();
}
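Neither constructor shows allocateBuffers(); given the selectBuffer/otherBuffer pair initialised to BUFFER_0 and BUFFER_1, a plausible double-buffer sketch, assuming a hypothetical uint8_t* buffers[2] member:

void PixelStates::allocateBuffers()
{
	// Assumed member layout: one zeroed byte array of arrSize per buffer.
	buffers[BUFFER_0] = (uint8_t *)calloc(arrSize, sizeof(uint8_t));
	buffers[BUFFER_1] = (uint8_t *)calloc(arrSize, sizeof(uint8_t));
}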
Example #13
// the principal program sequence
void PortVideoSDL::run() {

	if( !setupCamera() ) {
		if( !setupWindow() ) return;
		showError("No camera found!");
		teardownWindow();
		return;
	}

	if( !setupWindow() ) return;

	allocateBuffers();
	initFrameProcessors();

	bool success = camera_->startCamera();

	if( success ){
		SDL_FillRect(window_,0,0);
		SDL_Flip(window_);
	
		// add the help messages from all FrameProcessors
		for (frame = processorList.begin(); frame != processorList.end(); frame++) {
			std::vector<std::string> processor_text = (*frame)->getOptions();
			if (processor_text.size() > 0) help_text.push_back("");
			for (std::vector<std::string>::iterator processor_line = processor_text.begin(); processor_line != processor_text.end(); processor_line++) {
				help_text.push_back(*processor_line);
			}
		}

		// print the help message
		for (std::vector<std::string>::iterator help_line = help_text.begin(); help_line != help_text.end(); help_line++) {
			std::cout << *help_line << std::endl;
		}
		std::cout << std::endl;

		running_=true;
		cameraThread = SDL_CreateThread(getFrameFromCamera,this);
		controlThread= SDL_CreateThread(getControlMessage,this);
		mainLoop();
		
		SDL_KillThread(cameraThread);
		SDL_KillThread(controlThread);
		teardownCamera();
	} else {
		showError("Could not start camera!");
	}	

	teardownWindow();
	freeBuffers();
	
}
Example #14
void SharedMaterialProperties::setUniformBlockForShader(GLuint shaderProgram)
{
	// Determine the size of the block and set the binding point for the block(s)
	determineBlockSizeSetBindingPoint(shaderProgram);

	// Have the buffers been created and the byte offsets been found?
	if (blockSizeAndOffetsSet == false) {

		// Set up the buffers and bind to binding points
		allocateBuffers(shaderProgram);

		// Find the byte offsets of the uniform block variables
		findOffsets(shaderProgram);
	}

	checkOpenGLErrors("setUniformBlockForShader");

} // end setUniformBlockForShader
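allocateBuffers(shaderProgram) is only named here. The usual pattern for backing a uniform block is a buffer object of the queried block size bound to the block's binding point; a sketch under the assumption that blockSize, bindingPoint and uboHandle are members set up by determineBlockSizeSetBindingPoint():

void SharedMaterialProperties::allocateBuffers(GLuint shaderProgram)
{
	// Sketch only: the member names are assumptions, not shown in this listing.
	glGenBuffers(1, &uboHandle);
	glBindBuffer(GL_UNIFORM_BUFFER, uboHandle);
	glBufferData(GL_UNIFORM_BUFFER, blockSize, nullptr, GL_DYNAMIC_DRAW);
	glBindBufferBase(GL_UNIFORM_BUFFER, bindingPoint, uboHandle);
	glBindBuffer(GL_UNIFORM_BUFFER, 0);
}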
Example #15
static void
doPage(FILE *             const ifP, 
       struct cmdlineInfo const cmdline) {

    bit * bitrow;
    int rows, cols, format, row;
    unsigned int blankRows;
    bool rowIsBlank;

    pbm_readpbminit(ifP, &cols, &rows, &format);

    bitrow = pbm_allocrow(cols);

    allocateBuffers(cols);

    putinit(cmdline);

    blankRows = 0;
    prevRowBufferIndex = 0;
    memset(prevRowBuffer, 0, rowBufferSize);

    for (row = 0; row < rows; ++row) {
        pbm_readpbmrow(ifP, bitrow, cols, format);

        convertRow(bitrow, cols, cmdline.pack, cmdline.delta,
                   &rowIsBlank);

        if (rowIsBlank)
            ++blankRows;
        else {
            printBlankRows(blankRows);
            blankRows = 0;
            
            printRow();
        }
    }    
    printBlankRows(blankRows);
    blankRows = 0;

    putrest(!cmdline.noreset);

    freeBuffers();
    pbm_freerow(bitrow);
}
Example #16
int mainStreaming(char* ip) {
    // Prepare the application context to get video stream
    AppOMXContext ctx = AppOMXContext_Construct(ip) ;
    initAppOMX(&ctx) ;
    configureCamera(&ctx) ;
    configureEncoder(&ctx) ;
    configureNullSink(&ctx) ;

    // Configure ports to capture and compress video stream
    tunneling(&ctx) ;
    enablePorts(&ctx) ;
    allocateBuffers(&ctx) ;
    portsReady(&ctx) ;

    // Capture and make video streaming over Wifi connection
    while (1) {
        ctx.capture(&ctx) ;
    }
    return 0 ;
}
Example #17
void AudioConverter::initEncoder()
{
	int err;
	
	if (m_encoderInitialized)
		throw std::logic_error("Encoder already initialized");
	
	m_encoder->codec_type = AVMEDIA_TYPE_AUDIO;
	m_encoder->bit_rate = m_outBitRate;
	m_encoder->channels = m_destinationFormat.mChannelsPerFrame;
	m_encoder->sample_rate = m_destinationFormat.mSampleRate;
	m_encoder->channel_layout = CAChannelCountToLayout(m_destinationFormat.mChannelsPerFrame);
	m_encoder->sample_fmt = CACodecSampleFormat(&m_destinationFormat);
	
	err = avcodec_open2(m_encoder, m_codecOut, 0);
	if (err < 0)
		throw std::runtime_error("avcodec_open2() failed for encoder");
	
	allocateBuffers();
	m_encoderInitialized = true;
}
Example #18
void threadedOpticalFlow::calculateFlow(const ofPixels& _pixels){
    lock();
    
    // reallocate if the size changed since the last update
    if(_pixels.getWidth() != width || _pixels.getHeight() != height){
        width = _pixels.getWidth();
        height = _pixels.getHeight();
        allocateBuffers(width, height);
    }
    
    // deep copy of incoming pixels
    // assumes same values in all channels
    //pixelsInBack.setChannel(0, _pixels.getChannel(0));
    pixelsInBack = _pixels.getChannel(0);
    
    bFlowDirty = true;
    
    condition.signal();
    
    unlock();
}
Example #19
/*
 * Generic initialisation
 */
void Simulation::initialize(int volumeSide)
{
	//Set parameters
	N = volumeSide;
	voxels = (N+2)*(N+2)*(N+2); 
	size = voxels * sizeof(float);

	solverSteps = 20;
	dt = 0.1; //timestep
	visc = 0.001; //viscosity (velocity)
	diff = 0.0005; //dampening (density)

	allocateBuffers();

	resampleKernel = new cl::Kernel(*opencl.program, "resample", &opencl.err);


	/* Future work:
	clCreateFromGLTexture(context(), CL_MEM_WRITE_ONLY, GL_TEXTURE_3D, 0, texture, &err);
	checkErr(err, "Creating OpenCL 3D texture");
	*/
}
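allocateBuffers() is not reproduced on this page, but resize() in Example #22 below recreates the same members by hand: zeroed host arrays wrapped in cl::Buffer objects with CL_MEM_USE_HOST_PTR. A plausible sketch for the density field (the pattern repeats for u, v and w; the bool mirrors the allocateBuffers(false) call in resize() and would default to true):

void Simulation::allocateBuffers(bool allocateMain)
{
	// Inferred shape; dens_prev/buf_dens_prev are assumed "_prev" temporaries.
	if (allocateMain) {
		dens = new float[voxels];
		memset(dens, 0, size);
		buf_dens = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, dens, &opencl.err);
	}
	dens_prev = new float[voxels];
	memset(dens_prev, 0, size);
	buf_dens_prev = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, dens_prev, &opencl.err);
}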
Example #20
void QCameraHAL3RawSnapshotTest::rawAllocateBuffers(int width, int height)
{
    mRawCaptureHandle = allocateBuffers(width, height, &mRawCaptureMemInfo);
}
Example #21
bool SimpleAudioDriver::start(IOService* inProvider)
{
	//	start the superclass
    bool theAnswer = IOService::start(inProvider);
    if(theAnswer)
	{
		//	create the work loop
		mWorkLoop = IOWorkLoop::workLoop();
		FailIfNULL(mWorkLoop, theAnswer = kIOReturnNoResources, Failure, "SimpleAudioDriver::start: couldn't allocate the work loop");
		
		//	create the command gate
		mCommandGate = IOCommandGate::commandGate(this);
		FailIfNULL(mCommandGate, theAnswer = kIOReturnNoResources, Failure, "SimpleAudioDriver::start: couldn't allocate the command gate");
		
		//	attach it to the work loop
		mWorkLoop->addEventSource(mCommandGate);
		
		//	initialize the stuff tracked by the IORegistry
		mSampleRate = 44100;
		setProperty(kSimpleAudioDriver_RegistryKey_SampleRate, mSampleRate, sizeof(mSampleRate) * 8);
		
		mIOBufferFrameSize = 16384;
		setProperty(kSimpleAudioDriver_RegistryKey_RingBufferFrameSize, mIOBufferFrameSize, sizeof(mIOBufferFrameSize) * 8);
		
		char theDeviceUID[128];
		snprintf(theDeviceUID, 128, "SimpleAudioDevice-%d", static_cast<int>(random() % 100000));
		setProperty(kSimpleAudioDriver_RegistryKey_DeviceUID, theDeviceUID);

		//	allocate the IO buffers
		IOReturn theError = allocateBuffers();
		FailIfError(theError, theAnswer = false, Failure, "SimpleAudioDriver::start: allocating the buffers failed");
		
		//	initialize the timer that stands in for a real interrupt
		theError = initTimer();
		FailIfError(theError, freeBuffers(); theAnswer = false, Failure, "SimpleAudioDriver::start: initializing the timer failed");
		
		//	initialize the controls
		theError = initControls();
		FailIfError(theError, theAnswer = false, Failure, "SimpleAudioDriver::start: initializing the controls failed");
		
		//	publish ourselves
		registerService();
	}

    return theAnswer;

Failure:
	if(mCommandGate != NULL)
	{
		if(mWorkLoop != NULL)
		{
			mWorkLoop->removeEventSource(mCommandGate);
			mCommandGate->release();
			mCommandGate = NULL;
		}
	}
	
	if(mWorkLoop != NULL)
	{
		mWorkLoop->release();
		mWorkLoop = NULL;
	}
	
	freeBuffers();
	destroyTimer();
	
	return theAnswer;
}
Example #22
//Resizes the simulation volume to newN
void Simulation::resize(int newN)
{
	int N0 = N;
	N = newN;
	voxels = (N+2)*(N+2)*(N+2);
	size = voxels * sizeof(float);

	//Only dens, u, v, w need to be reallocated
	float* u_new = new float[voxels];
	float* v_new = new float[voxels];
	float* w_new = new float[voxels];
	float* dens_new = new float[voxels];

	memset(u_new, 0, size);
	memset(v_new, 0, size);
	memset(w_new, 0, size);
	memset(dens_new, 0, size);

	//New buffer objects
	cl::Buffer* buf_u_new = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, u_new, &opencl.err);
	cl::Buffer* buf_v_new = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, v_new, &opencl.err);
	cl::Buffer* buf_w_new = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, w_new, &opencl.err);
	cl::Buffer* buf_dens_new = new cl::Buffer(*opencl.context, CL_MEM_USE_HOST_PTR, size, dens_new, &opencl.err);

	/* Image3D:
	cl_image_format imageFormat;
	imageFormat.image_channel_order = CL_R;
	imageFormat.image_channel_data_type = CL_FLOAT;
	cl_mem image_dens_new = clCreateImage3D((*opencl.context)(), CL_MEM_READ_ONLY|CL_MEM_USE_HOST_PTR, &imageFormat, N+2, N+2, N+2, 0, 0, dens_new, &opencl.err);
	*/

	//Resample whole volume (including bounds)
	resampleKernel->setArg(0, N + 2);
	resampleKernel->setArg(1, N0 + 2);
	resampleKernel->setArg(2, *buf_dens_new);
	resampleKernel->setArg(3, *buf_dens);
	resampleKernel->setArg(4, *buf_u_new);
	resampleKernel->setArg(5, *buf_u);
	resampleKernel->setArg(6, *buf_v_new);
	resampleKernel->setArg(7, *buf_v);
	resampleKernel->setArg(8, *buf_w_new);
	resampleKernel->setArg(9, *buf_w);
	opencl.enqueue(*resampleKernel, cl::NullRange, cl::NDRange(min(N,N0) + 2, min(N,N0) + 2, min(N,N0) + 2), cl::NullRange);
	opencl.wait();

	deallocateBuffers(); //delete all current buffers
	allocateBuffers(false); //allocate only temporary buffers (_prev)

	//Assign the non-_prev buffers to resized data
	u = u_new;
	v = v_new;
	w = w_new;
	dens = dens_new;
	buf_u = buf_u_new;
	buf_v = buf_v_new;
	buf_w = buf_w_new;
	buf_dens = buf_dens_new;
	//image_dens = image_dens_new; //Image3D
	
	setKernelArguments(); //N has changed, arguments need to be re-set
}
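The ten setArg() calls above fix the kernel's parameter list; a matching OpenCL C signature would plausibly be:

// Assumed kernel interface; the body (resampling each field from a grid of
// side nOld to one of side nNew) is not shown anywhere on this page.
__kernel void resample(int nNew, int nOld,
                       __global float* dens_new, __global const float* dens,
                       __global float* u_new,    __global const float* u,
                       __global float* v_new,    __global const float* v,
                       __global float* w_new,    __global const float* w);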
Example #23
bool DepthSource::setup(DeviceController& deviceController)
{
	doRawDepth = deviceController.settings.doRawDepth;
	
	Status status = STATUS_OK;
	status = videoStream.create(deviceController.device, SENSOR_DEPTH);
	if (status == STATUS_OK)
	{
		ofLogVerbose() << "Find DepthSource stream PASS";
		status = videoStream.start();
		if (status == STATUS_OK)
		{
			ofLogVerbose() << "Start DepthSource stream PASS";
		}else 
		{
			
			ofLogError() << "Start DepthSource stream FAIL: " << OpenNI::getExtendedError();
			videoStream.destroy();
		}
	}else
	{
		ofLogError() << "Find DepthSource stream FAIL: " <<  OpenNI::getExtendedError();
	}
	if (videoStream.isValid())
	{
		if(!deviceController.settings.useOniFile && !deviceController.isKinect)
		{
			const VideoMode* requestedMode = deviceController.findMode(SENSOR_DEPTH); 
			if (requestedMode != NULL) 
			{
				videoStream.setVideoMode(*requestedMode);
			}
		}
		allocateBuffers();
		
		
		if (!deviceController.isKinect)
		{
			deviceMaxDepth	= videoStream.getMaxPixelValue();
		}else 
		{
			deviceMaxDepth = 10000;
		}
		ofLogVerbose() << "deviceMaxDepth: " << deviceMaxDepth;
		status = videoStream.addNewFrameListener(this);
		if (status == STATUS_OK)
		{
			ofLogVerbose() << "DepthSource videoStream addNewFrameListener PASS";
		}else
		{
			ofLogError() << "DepthSource videoStream addNewFrameListener FAIL: " <<  OpenNI::getExtendedError();
		}

		
		
		isOn = true;
	}else 
	{
		ofLogError() << "DepthSource is INVALID";
	}
	
	
	return isOn;
}
Example #24
static int Open(vlc_object_t *p_this)
{
    filter_t *p_filter = (filter_t *)p_this;
    audio_format_t *infmt = &p_filter->fmt_in.audio;
    audio_format_t *outfmt = &p_filter->fmt_out.audio;

    assert(infmt->channel_type != outfmt->channel_type);

    if (infmt->channel_type != AUDIO_CHANNEL_TYPE_AMBISONICS)
        return VLC_EGENERIC;

    if (infmt->i_format != VLC_CODEC_FL32 || outfmt->i_format != VLC_CODEC_FL32)
        return VLC_EGENERIC;

    filter_sys_t *p_sys;
    p_sys = p_filter->p_sys = (filter_sys_t*)new(std::nothrow)filter_sys_t();
    if (p_sys == NULL)
        return VLC_ENOMEM;

    p_sys->f_teta = 0.f;
    p_sys->f_phi = 0.f;
    p_sys->f_roll = 0.f;
    p_sys->f_zoom = 0.f;
    p_sys->i_rate = p_filter->fmt_in.audio.i_rate;
    p_sys->i_inputNb = p_filter->fmt_in.audio.i_channels;
    p_sys->i_outputNb = p_filter->fmt_out.audio.i_channels;

    if (allocateBuffers(p_sys) != VLC_SUCCESS)
    {
        delete p_sys;
        return VLC_ENOMEM;
    }

    p_sys->i_order = sqrt(infmt->i_channels) - 1;

    if (p_sys->i_order < 1)
    {
        msg_Err(p_filter, "Invalid number of Ambisonics channels");
        delete p_sys;
        return VLC_EGENERIC;
    }

    msg_Dbg(p_filter, "Order: %d %d", p_sys->i_order, infmt->i_channels);

    static const char *const options[] = { "headphones", NULL };
    config_ChainParse(p_filter, CFG_PREFIX, options, p_filter->p_cfg);

    unsigned i_tailLength = 0;
    if (p_filter->fmt_out.audio.i_channels == 2
     && var_InheritBool(p_filter, CFG_PREFIX "headphones"))
    {
        p_sys->mode = filter_sys_t::AMBISONICS_BINAURAL_DECODER;

        std::string HRTFPath = getHRTFPath(p_filter);
        msg_Dbg(p_filter, "Using the HRTF file: %s", HRTFPath.c_str());

        if (!p_sys->binauralDecoder.Configure(p_sys->i_order, true,
                p_sys->i_rate, AMB_BLOCK_TIME_LEN, i_tailLength,
                HRTFPath))
        {
            msg_Err(p_filter, "Error creating the binaural decoder.");
            delete p_sys;
            return VLC_EGENERIC;
        }
        p_sys->binauralDecoder.Reset();
    }
    else
    {
        p_sys->mode = filter_sys_t::AMBISONICS_DECODER;

        unsigned i_nbChannels = aout_FormatNbChannels(&p_filter->fmt_out.audio);
        if (!p_sys->speakerDecoder.Configure(p_sys->i_order, true,
            kAmblib_CustomSpeakerSetUp, i_nbChannels))
        {
            msg_Err(p_filter, "Error creating the Ambisonics decoder.");
            delete p_sys;
            return VLC_EGENERIC;
        }

        /* Speaker setup, inspired from:
         * https://www.dolby.com/us/en/guide/surround-sound-speaker-setup/7-1-setup.html
         * The position must follow the order of pi_vlc_chan_order_wg4 */
        unsigned s = 0;

        p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(30), 0.f, 1.f});
        p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(-30), 0.f, 1.f});

        if ((outfmt->i_physical_channels & AOUT_CHANS_MIDDLE) == AOUT_CHANS_MIDDLE)
        {
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(110), 0.f, 1.f});
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(-110), 0.f, 1.f});
        }

        if ((outfmt->i_physical_channels & AOUT_CHANS_REAR) == AOUT_CHANS_REAR)
        {
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(145), 0.f, 1.f});
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(-145), 0.f, 1.f});
        }

        if ((outfmt->i_physical_channels & AOUT_CHAN_CENTER) == AOUT_CHAN_CENTER)
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(0), 0.f, 1.f});

        if ((outfmt->i_physical_channels & AOUT_CHAN_LFE) == AOUT_CHAN_LFE)
            p_sys->speakerDecoder.SetPosition(s++, {DegreesToRadians(0), 0.f, 0.5f});

        /* Check that we have set up the right number of speakers. */
        assert(s == i_nbChannels);

        p_sys->speakerDecoder.Refresh();
    }

    if (!p_sys->processor.Configure(p_sys->i_order, true, AMB_BLOCK_TIME_LEN, 0))
    {
        msg_Err(p_filter, "Error creating the ambisonic processor.");
        delete p_sys;
        return VLC_EGENERIC;
    }

    if (!p_sys->zoomer.Configure(p_sys->i_order, true, 0))
    {
        msg_Err(p_filter, "Error creating the ambisonic zoomer.");
        delete p_sys;
        return VLC_EGENERIC;
    }

    p_filter->pf_audio_filter = Mix;
    p_filter->pf_flush = Flush;
    p_filter->pf_change_viewpoint = ChangeViewpoint;

    return VLC_SUCCESS;
}
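Examples #24 and #25 both call allocateBuffers(p_sys) before configuring their decoders. A hypothetical sketch, assuming filter_sys_t keeps per-channel float scratch planes (inBuf/outBuf are invented names, and treating AMB_BLOCK_TIME_LEN as the per-channel block size is an assumption):

static int allocateBuffers(filter_sys_t *p_sys)
{
    // Sketch only: one block of samples per input and per output channel.
    p_sys->inBuf  = new (std::nothrow) float[p_sys->i_inputNb  * AMB_BLOCK_TIME_LEN];
    p_sys->outBuf = new (std::nothrow) float[p_sys->i_outputNb * AMB_BLOCK_TIME_LEN];
    if (p_sys->inBuf == NULL || p_sys->outBuf == NULL)
        return VLC_ENOMEM;
    return VLC_SUCCESS;
}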
Example #25
static int OpenBinauralizer(vlc_object_t *p_this)
{
    filter_t *p_filter = (filter_t *)p_this;
    audio_format_t *infmt = &p_filter->fmt_in.audio;
    audio_format_t *outfmt = &p_filter->fmt_out.audio;

    filter_sys_t *p_sys;
    p_sys = p_filter->p_sys = (filter_sys_t*)new(std::nothrow)filter_sys_t();
    if (p_sys == NULL)
        return VLC_ENOMEM;

    p_sys->mode = filter_sys_t::BINAURALIZER;
    p_sys->i_rate = p_filter->fmt_in.audio.i_rate;
    p_sys->i_inputNb = p_filter->fmt_in.audio.i_channels;
    p_sys->i_outputNb = 2;

    if (allocateBuffers(p_sys) != VLC_SUCCESS)
    {
        delete p_sys;
        return VLC_ENOMEM;
    }

    unsigned s = 0;
    p_sys->speakers = new CAmbisonicSpeaker[infmt->i_channels]();

    p_sys->speakers[s++].SetPosition({DegreesToRadians(30), 0.f, 1.f});
    p_sys->speakers[s++].SetPosition({DegreesToRadians(-30), 0.f, 1.f});

    if ((infmt->i_physical_channels & AOUT_CHANS_MIDDLE) == AOUT_CHANS_MIDDLE)
    {
        /* Middle */
        p_sys->speakers[s++].SetPosition({DegreesToRadians(110), 0.f, 1.f});
        p_sys->speakers[s++].SetPosition({DegreesToRadians(-110), 0.f, 1.f});
    }

    if ((infmt->i_physical_channels & AOUT_CHANS_REAR) == AOUT_CHANS_REAR)
    {
        /* Rear */
        p_sys->speakers[s++].SetPosition({DegreesToRadians(145), 0.f, 1.f});
        p_sys->speakers[s++].SetPosition({DegreesToRadians(-145), 0.f, 1.f});
    }

    if ((infmt->i_physical_channels & AOUT_CHAN_CENTER) == AOUT_CHAN_CENTER)
        p_sys->speakers[s++].SetPosition({DegreesToRadians(0), 0.f, 1.f});

    if ((infmt->i_physical_channels & AOUT_CHAN_LFE) == AOUT_CHAN_LFE)
        p_sys->speakers[s++].SetPosition({DegreesToRadians(0), 0.f, 0.5f});

    std::string HRTFPath = getHRTFPath(p_filter);
    msg_Dbg(p_filter, "Using the HRTF file: %s", HRTFPath.c_str());

    unsigned i_tailLength = 0;
    if (!p_sys->binauralizer.Configure(p_sys->i_rate, AMB_BLOCK_TIME_LEN,
                                       p_sys->speakers, infmt->i_channels, i_tailLength,
                                       HRTFPath))
    {
        msg_Err(p_filter, "Error creating the binauralizer.");
        delete p_sys;
        return VLC_EGENERIC;
    }
    p_sys->binauralizer.Reset();

    outfmt->i_format = infmt->i_format = VLC_CODEC_FL32;
    outfmt->i_physical_channels = AOUT_CHANS_STEREO;
    aout_FormatPrepare(infmt);
    aout_FormatPrepare(outfmt);

    p_filter->pf_audio_filter = Mix;
    p_filter->pf_flush = Flush;
    p_filter->pf_change_viewpoint = ChangeViewpoint;

    return VLC_SUCCESS;
}
Example #26
kit::Sphere::Sphere(uint32_t rings, uint32_t sectors)
{
  
  allocateBuffers();
  m_indexCount = 0;
  
  // Generate sphere geometry on the CPU
  float const R = 1.0f / float(rings - 1);
  float const S = 1.0f / float(sectors - 1);

  std::vector<float> vertices;
  std::vector<uint32_t> indices;

  // First generate per-vertex data
  for (uint32_t r = 0; r < rings; r++)
  {
    for (uint32_t s = 0; s < sectors; s++)
    {
      float const x = cos(2.0f * glm::pi<float>() * float(s) * S) * sin(glm::pi<float>() * float(r) * R);
      float const z = sin(-glm::half_pi<float>() + glm::pi<float>() * float(r) * R);
      float const y = sin(2.0f * glm::pi<float>() * float(s) * S) * sin(glm::pi<float>() * float(r) * R);
      vertices.push_back(x);
      vertices.push_back(y);
      vertices.push_back(z);

      vertices.push_back(s*S);
      vertices.push_back(r*R);
    }
  }

  // Then generate index data
  for (uint32_t r = 0; r < rings-1; r++)
  {
    for (uint32_t s = 0; s < sectors-1; s++)
    {
      indices.push_back(r * sectors + s);
      indices.push_back(r * sectors + (s + 1));
      indices.push_back((r + 1) * sectors + (s + 1));

      
      indices.push_back(r * sectors + s);
      indices.push_back((r + 1) * sectors + (s + 1));
      indices.push_back((r + 1) * sectors + s);
    }
  }

  m_indexCount = (uint32_t)indices.size();

  // Upload sphere geometry to the GPU
  glBindVertexArray(m_glVertexArray);

  // Upload indices
  glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m_glVertexIndices);
  glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.size() * sizeof(uint32_t), &indices[0], GL_STATIC_DRAW);

  // Upload vertices 
  glBindBuffer(GL_ARRAY_BUFFER, m_glVertexBuffer);
  glBufferData(GL_ARRAY_BUFFER, vertices.size() * sizeof(float), &vertices[0], GL_STATIC_DRAW);

  // Total size
  uint32_t attributeSize = (sizeof(float)* 5);

  // Positions
  glEnableVertexAttribArray(0);
  glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, attributeSize, (void*)0);

  // Texture coordinates
  glEnableVertexAttribArray(1);
  glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, attributeSize, (void*)(sizeof(float)* 3));

  // Normals: for a unit sphere the normal equals the position, so this
  // attribute intentionally aliases the position data at offset 0
  glEnableVertexAttribArray(2);
  glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, attributeSize, (void*)0);
}