Example #1
void Tonalize::
        brokenGpu(Tfr::Chunk& chunk )
{
    TIME_FILTER TaskTimer tt("TonalizeFilter");

    ::tonalizeFilter( chunk.transform_data,
                  chunk.minHz(), chunk.maxHz(), chunk.sample_rate );

    TIME_FILTER ComputationSynchronize();
}
Example #2
void MappedVboVoid::
        unmap(DataStorageVoid* datap)
{
    if (_is_mapped)
    {
        TIME_MAPPEDVBOVOID TaskInfo("Unmapping vbo %u of size %s", (unsigned)*_vbo, DataStorageVoid::getMemorySizeText(datap->numberOfBytes()).c_str());

#ifdef USE_CUDA
        // make sure data is located in cuda
        datap->AccessStorage<CudaGlobalStorage>( true, false );

    #ifdef CUDA_MEMCHECK_TEST
        // copy data back over the mapped memory
        *mapped_gl_mem = *datap;
    #endif

        // sync from cuda to vbo
        cudaGLUnmapBufferObject(*_vbo);

        // release resources
        mapped_gl_mem.reset();
        datap->DiscardAllData();
        _is_mapped = false;

        // The cuda error state left behind by the Cuda-OpenGL interop can't
        // always be relied on, so call cudaGetLastError to clear it just in
        // case. (I'm not sure why, but it might be related to cuda
        // out-of-memory errors elsewhere.)
        cudaGetLastError();
#else
        // make sure data is located in cpu
        datap->AccessStorage<CpuMemoryStorage>( true, false );

        // sync from mem to vbo
        glBindBuffer(_vbo->vbo_type(), *_vbo);
        glUnmapBuffer(_vbo->vbo_type());
        glBindBuffer(_vbo->vbo_type(), 0);

        // release resources
        mapped_gl_mem.reset();
        datap->DiscardAllData();
        _is_mapped = false;
#endif

        TIME_MAPPEDVBOVOID ComputationSynchronize();

        if (_tt)
        {
            TaskInfo("Unmapped vbo %u of size %s", (unsigned)*_vbo, DataStorageVoid::getMemorySizeText(datap->numberOfBytes()).c_str());
            delete _tt;
            _tt = 0;
        }
    }
}
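
A note on the #else branch above: it follows the standard OpenGL buffer-mapping pattern of bind, map/unmap, unbind. The sketch below is not from the project; the function name uploadThroughMappedBuffer and the GL_ARRAY_BUFFER target are illustrative assumptions, and it presumes a current OpenGL 1.5+ context with a loader such as GLEW.

#include <GL/glew.h>
#include <cstring>

// Illustrative only: map a buffer object, copy data through the mapped
// pointer, then unmap so the driver syncs the data to the GPU-side buffer.
void uploadThroughMappedBuffer(GLuint vbo, const void* src, std::size_t bytes)
{
    glBindBuffer(GL_ARRAY_BUFFER, vbo);

    void* dst = glMapBuffer(GL_ARRAY_BUFFER, GL_WRITE_ONLY);
    if (dst)
    {
        std::memcpy(dst, src, bytes);
        glUnmapBuffer(GL_ARRAY_BUFFER);
    }

    glBindBuffer(GL_ARRAY_BUFFER, 0);
}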
Example #3
void Reassign::
        brokenGpu(Tfr::Chunk& chunk )
{
    TIME_FILTER TaskTimer tt("ReassignFilter");

    for (unsigned reassignLoop=0;reassignLoop<1;reassignLoop++)
    {
        ::reassignFilter( chunk.transform_data,
                      chunk.minHz(), chunk.maxHz(), chunk.sample_rate );
    }

    TIME_FILTER ComputationSynchronize();
}
Example #4
void RectangleKernel::subchunk( Tfr::ChunkAndInverse& c ) {
    Chunk& chunk = *c.chunk;

    TIME_FILTER TaskTimer tt(boost::format("Rectangle %s") % chunk.getCoveredInterval ());

    // Convert the selection [_s1, _s2] (signal samples) x [_f1, _f2] (Hz)
    // into chunk-local coordinates before removing/keeping the rectangle.
    Area area = {
            (float)(_s1 * chunk.sample_rate / chunk.original_sample_rate - chunk.chunk_offset.asFloat()),
            chunk.freqAxis.getFrequencyScalarNotClamped( _f1 ),
            (float)(_s2 * chunk.sample_rate / chunk.original_sample_rate - chunk.chunk_offset.asFloat()),
            chunk.freqAxis.getFrequencyScalarNotClamped( _f2 ) };

    ::removeRect( chunk.transform_data,
                  area,
                  _save_inside);

    TIME_FILTER ComputationSynchronize();
}
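
A note on the Area conversion above: _s1/_s2 (in samples of the original signal) are rescaled to the chunk's sample rate and shifted by the chunk's offset, while _f1/_f2 (in Hz) are mapped onto the chunk's frequency axis. The sketch below is not from the project and all values are made up; it just works one endpoint through by hand.

// Illustrative arithmetic only, with hypothetical values.
float chunk_sample_rate    = 11025.f;   // chunk.sample_rate
float original_sample_rate = 44100.f;   // chunk.original_sample_rate
float chunk_offset         = 2048.f;    // chunk.chunk_offset.asFloat()
double s1                  = 88200.0;   // _s1, selection start in signal samples

float left = float(s1 * chunk_sample_rate / original_sample_rate - chunk_offset);
// left == 88200 * 0.25 - 2048 == 22050 - 2048 == 20002 chunk-local samples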
Example #5
void FftClFft::
        inverse(Tfr::ChunkData::Ptr input, DataStorage<float>::Ptr output, DataStorageSize n )
{
    unsigned denseWidth = n.width/2+1;
    unsigned redundantWidth = n.width;
    unsigned batchcount1 = output->numberOfElements()/redundantWidth,
             batchcount2 = input->numberOfElements()/denseWidth;

    EXCEPTION_ASSERT( batchcount1 == batchcount2 );
    EXCEPTION_ASSERT( (denseWidth-1)*2 == redundantWidth );
    EXCEPTION_ASSERT( redundantWidth*n.height == output->numberOfElements() );

    Tfr::ChunkData::Ptr redundantInput( new Tfr::ChunkData( n.height*redundantWidth ));

    {
        Tfr::ChunkElement* in = CpuMemoryStorage::ReadOnly<1>( input ).ptr();
        Tfr::ChunkElement* out = CpuMemoryStorage::WriteAll<1>( redundantInput ).ptr();
#pragma omp parallel for
        for (int i=0; i < (int)n.height; ++i)
        {
            unsigned x;
            // copy the stored, non-redundant half of the spectrum as-is
            for (x=0; x<denseWidth; ++x)
                out[i*redundantWidth + x] = in[i*denseWidth + x];
            // mirror the remaining bins as complex conjugates (Hermitian symmetry)
            for (; x<redundantWidth; ++x)
                out[i*redundantWidth + x] = conj(in[i*denseWidth + redundantWidth - x]);
        }
    }

    Tfr::ChunkData::Ptr complexoutput( new Tfr::ChunkData( output->size()));

    computeWithClFft(redundantInput, complexoutput, DataStorageSize( redundantWidth, n.height), FftDirection_Inverse);

    ::stftDiscardImag( complexoutput, output );

    TIME_STFT ComputationSynchronize();
}
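
The loop in the example above expands a dense spectrum (denseWidth = n.width/2+1 bins per row, as produced by a real-to-complex FFT) into the full redundant spectrum by mirroring the missing bins as complex conjugates. Below is a minimal standalone sketch of that expansion; it is not from the project, and expandHermitian is an illustrative name.

#include <complex>
#include <vector>

// Illustrative only: expand the stored half-spectrum of a real signal to the
// full redundant spectrum using Hermitian symmetry, X[N-k] == conj(X[k]).
std::vector< std::complex<float> > expandHermitian(
        const std::vector< std::complex<float> >& dense, unsigned redundantWidth)
{
    unsigned denseWidth = redundantWidth/2 + 1;
    std::vector< std::complex<float> > redundant(redundantWidth);

    // copy the stored, non-redundant bins
    for (unsigned x = 0; x < denseWidth; ++x)
        redundant[x] = dense[x];

    // mirror the remaining bins as complex conjugates
    for (unsigned x = denseWidth; x < redundantWidth; ++x)
        redundant[x] = std::conj(dense[redundantWidth - x]);

    return redundant;
}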
Example #6
void MappedVboVoid::
        map(DataStorageVoid* datap)
{
    TIME_MAPPEDVBO_LOG _tt = new TaskTimer("Mapping vbo %u of size %s", (unsigned)*_vbo, DataStorageVoid::getMemorySizeText(datap->numberOfBytes()).c_str());

    TIME_MAPPEDVBOVOID TaskTimer tt("Mapping vbo %u of size %s", (unsigned)*_vbo, DataStorageVoid::getMemorySizeText(datap->numberOfBytes()).c_str());

    EXCEPTION_ASSERT( !_is_mapped );

    DataStorageSize sizeInBytes = datap->sizeInBytes();
    EXCEPTION_ASSERT( datap->numberOfBytes() == _vbo->size() );

#ifdef USE_CUDA
    void* g_data=0;
    _vbo->registerWithCuda();
    _is_mapped = (cudaSuccess == cudaGLMapBufferObject((void**)&g_data, *_vbo));


    cudaPitchedPtr cpp;
    cpp.ptr = g_data;
    cpp.pitch = sizeInBytes.width;
    cpp.xsize = sizeInBytes.width;
    cpp.ysize = sizeInBytes.height*sizeInBytes.depth;


    if (!_is_mapped)
        mapped_gl_mem.reset( new DataStorage<char>( sizeInBytes ));
    else
        mapped_gl_mem = CudaGlobalStorage::BorrowPitchedPtr<char>( sizeInBytes, cpp );


    EXCEPTION_ASSERT( 0==datap->FindStorage<CudaGlobalStorage>() );

    #ifdef CUDA_MEMCHECK_TEST
        *datap = *mapped_gl_mem;
    #else
        if (_is_mapped)
            new CudaGlobalStorage( datap, cpp, false ); // Memory managed by DataStorage
    #endif
#else

    glBindBuffer(_vbo->vbo_type(), *_vbo);
    void* data = glMapBuffer(_vbo->vbo_type(), GL_WRITE_ONLY);
    _is_mapped = 0!=data;
    glBindBuffer(_vbo->vbo_type(), 0);


    if (!_is_mapped)
        mapped_gl_mem.reset( new DataStorage<char>( sizeInBytes ));
    else
        mapped_gl_mem = CpuMemoryStorage::BorrowPtr<char>( sizeInBytes, (char*)data );


    EXCEPTION_ASSERT( 0==datap->FindStorage<CpuMemoryStorage>() );

    if (_is_mapped)
        new CpuMemoryStorage( datap, data, false ); // Memory managed by DataStorage
#endif

    TIME_MAPPEDVBOVOID ComputationSynchronize();
}
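
A note on the USE_CUDA branch above: it relies on the legacy CUDA-OpenGL interop API (cudaGLMapBufferObject and friends), which maps a registered GL buffer object to a device pointer that CUDA code can use directly. The sketch below is not from the project; the function names are illustrative, and this legacy API is deprecated in newer CUDA releases in favour of cudaGraphicsGLRegisterBuffer. It assumes cuda_gl_interop.h and an already-created GL buffer.

#include <cuda_gl_interop.h>

// Illustrative only: register a GL buffer with CUDA and map it to obtain a
// device pointer; returns 0 if mapping fails.
void* mapVboForCuda(unsigned vbo)
{
    cudaGLRegisterBufferObject(vbo);

    void* d_ptr = 0;
    if (cudaSuccess != cudaGLMapBufferObject(&d_ptr, vbo))
        return 0;
    return d_ptr;
}

// Illustrative only: unmapping hands the buffer back to OpenGL so that data
// written by CUDA kernels becomes visible to GL.
void unmapVboForCuda(unsigned vbo)
{
    cudaGLUnmapBufferObject(vbo);
}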