Code example #1
File: FWCamera.cpp Project: lynxis/libavg
CameraInfo* FWCamera::getCameraInfos(int deviceNumber)
{
#ifdef AVG_ENABLE_1394_2
    dc1394_t* pDC1394 = dc1394_new();
    if (pDC1394 == 0) {
        AVG_ASSERT(false);
        return NULL;
    }
    dc1394camera_list_t * pCameraList;
    int err = dc1394_camera_enumerate(pDC1394, &pCameraList);
    if (err != DC1394_SUCCESS) {
        AVG_ASSERT(false);
        dc1394_free(pDC1394);  // don't leak the dc1394 context on the error path
        return NULL;
    }
    if (pCameraList->num != 0) {
        dc1394camera_id_t id = pCameraList->ids[deviceNumber];
        dc1394camera_t * pCamera = dc1394_camera_new_unit(pDC1394, id.guid,
                id.unit);
        if (pCamera) {
            stringstream deviceID;
            deviceID << hex << id.guid;
            CameraInfo* camInfo = new CameraInfo("Firewire", deviceID.str());

            getCameraControls(pCamera, camInfo);
            getCameraImageFormats(pCamera, camInfo);

            dc1394_camera_free(pCamera);
            dc1394_camera_free_list(pCameraList);
            dc1394_free(pDC1394);
            return camInfo;
        }
    }
    // No camera matched: release the enumeration list and context before
    // returning so they don't leak.
    dc1394_camera_free_list(pCameraList);
    dc1394_free(pDC1394);
#endif
    return NULL;
}
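
Every example on this page exercises libavg's AVG_ASSERT macro. As a hedged sketch of the general idiom only (libavg's real definition may differ and may integrate with its own logger), an assert macro of this shape stringifies the condition, reports the source location, and aborts:

// Hedged sketch of an AVG_ASSERT-style macro, not libavg's actual definition.
#include <cstdio>
#include <cstdlib>

#define AVG_ASSERT_SKETCH(cond) \
    do { \
        if (!(cond)) { \
            std::fprintf(stderr, "Assertion failed: %s at %s:%d\n", \
                    #cond, __FILE__, __LINE__); \
            std::abort(); \
        } \
    } while (0)
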
Code example #2
File: TexInfo.cpp Project: hoodie/libavg
int TexInfo::getGLInternalFormat() const
{
    switch (m_pf) {
        case I8:
            return GL_LUMINANCE;
        case A8:
            return GL_ALPHA;
        case R8G8B8A8:
        case R8G8B8X8:
            return GL_RGBA;
        case B8G8R8A8:
        case B8G8R8X8:
            AVG_ASSERT(!GLContext::getCurrent()->isGLES());
            return GL_RGBA;
#ifndef AVG_ENABLE_EGL            
        case R8:
            return GL_R8;
        case R32G32B32A32F:
            return GL_RGBA32F_ARB;
        case I32F:
            return GL_LUMINANCE32F_ARB;
#endif
        case R8G8B8:
        case B5G6R5:
            return GL_RGB;
        default:
            AVG_ASSERT(false);
            return 0;
    }
}
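
getGLInternalFormat() is one of three lookups (getGLFormat() and getGLType() appear as code examples #6 and #24 below) that together parameterize a GL texture allocation. A hedged sketch of a call site that combines them; texInfo, pf and size are assumed to be in scope:

// Hypothetical caller: allocate texture storage described by a TexInfo.
glTexImage2D(GL_TEXTURE_2D, 0, texInfo.getGLInternalFormat(),
        size.x, size.y, 0, texInfo.getGLFormat(pf), texInfo.getGLType(pf),
        0);  // no pixel data yet; filled later, e.g. via glTexSubImage2D()
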
Code example #3
File: TestHelper.cpp Project: hoodie/libavg
void TestHelper::processTouchStatus(CursorEventPtr pEvent)
{
    map<int, TouchStatusPtr>::iterator it = m_Touches.find(pEvent->getCursorID());
    switch (pEvent->getType()) {
        case Event::CURSOR_DOWN: {
                AVG_ASSERT(it == m_Touches.end());
                TouchStatusPtr pTouchStatus(new TouchStatus(pEvent));
                m_Touches[pEvent->getCursorID()] = pTouchStatus;
            }
            break;
        case Event::CURSOR_MOTION:
        case Event::CURSOR_UP: {
                if (it == m_Touches.end()) {
                    cerr << "borked: " << pEvent->getCursorID() << ", " << 
                            pEvent->typeStr() << endl;
                }
                AVG_ASSERT(it != m_Touches.end());
                TouchStatusPtr pTouchStatus = (*it).second;
                pTouchStatus->pushEvent(pEvent);
            }
            break;
        default:
            AVG_ASSERT(false);
            break;
    }
}
Code example #4
File: FFMpegDemuxer.cpp Project: bluscr33n/libavg
AVPacket * FFMpegDemuxer::getPacket(int streamIndex)
{
    // Make sure enableStream was called on streamIndex.
    AVG_ASSERT(m_PacketLists.size() > 0);
    AVG_ASSERT(streamIndex > -1 && streamIndex < 10);

    if (m_PacketLists.find(streamIndex) == m_PacketLists.end()) {
        cerr << this << ": getPacket: Stream " << streamIndex << " not found." << endl;
        dump();
        AVG_ASSERT(false);
    }

    PacketList& curPacketList = m_PacketLists.find(streamIndex)->second;
    AVPacket* pPacket;
    if (!curPacketList.empty()) {
        // The stream has packets queued already.
        pPacket = curPacketList.front();
        curPacketList.pop_front();
    } else {
        // No packets queued for this stream -> read and queue packets until we get one
        // that is meant for this stream.
        do {
            pPacket = new AVPacket;
            memset(pPacket, 0, sizeof(AVPacket));
            int err = av_read_frame(m_pFormatContext, pPacket);
            if (err < 0) {
                // EOF or error
                if (err != int(AVERROR_EOF)) {
                    char sz[256];
                    av_strerror(err, sz, 256);
                    AVG_TRACE(Logger::category::PLAYER, Logger::severity::ERROR,
                            "Error decoding video: " << sz);
                }
                av_free_packet(pPacket);
                delete pPacket;
                pPacket = 0;
                return 0;
            }
            if (pPacket->stream_index != streamIndex) {
                if (m_PacketLists.find(pPacket->stream_index) != m_PacketLists.end()) {
                    // Relevant stream, but not ours
                    av_dup_packet(pPacket);
                    PacketList& otherPacketList = 
                            m_PacketLists.find(pPacket->stream_index)->second;
                    otherPacketList.push_back(pPacket);
                } else {
                    // Disabled stream
                    av_free_packet(pPacket);
                    delete pPacket;
                    pPacket = 0;
                } 
            } else {
                // Our stream
                av_dup_packet(pPacket);
            }
        } while (!pPacket || pPacket->stream_index != streamIndex);
    }

    return pPacket;
}
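
For context, a hedged sketch of a consumer loop over getPacket(); pDemuxer, streamIndex and handlePacket() are hypothetical names (code example #19 below shows the real libavg call site):

// Hypothetical consumer: drain packets for one stream until EOF or error.
AVPacket* pPacket;
while ((pPacket = pDemuxer->getPacket(streamIndex)) != 0) {
    handlePacket(pPacket);    // hypothetical: decode or buffer the packet
    av_free_packet(pPacket);  // same FFmpeg era as the code above
    delete pPacket;
}
// A 0 return means EOF or a read error (see the err < 0 branch above).
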
Code example #5
File: CMUCamera.cpp Project: cnxsoft/xibo4arm
BitmapPtr CMUCamera::getImage(bool bWait)
{
    if (bWait) {
        unsigned rc = WaitForSingleObject(m_pCamera->GetFrameEvent(), INFINITE);
        AVG_ASSERT(rc == WAIT_OBJECT_0);
    } else {
        unsigned rc = WaitForSingleObject(m_pCamera->GetFrameEvent(), 0);
        if (rc == WAIT_TIMEOUT) {
            // No frame yet
            return BitmapPtr();
        }
        AVG_ASSERT(rc == WAIT_OBJECT_0);
    }
    int rc2 = m_pCamera->AcquireImageEx(FALSE, NULL);
    if (rc2 != CAM_SUCCESS) {
        throw Exception(AVG_ERR_CAMERA_NONFATAL,
                "CMUCamera: Could not acquire image from camera. " +
                CMUErrorToString(rc2));
    }
    unsigned long captureBufferLength;
    unsigned char* pCaptureBuffer = m_pCamera->GetRawData(&captureBufferLength);

    BitmapPtr pCamBmp(new Bitmap(getImgSize(), getCamPF(), pCaptureBuffer, 
            captureBufferLength / getImgSize().y, false, "TempCameraBmp"));
    return convertCamFrameToDestPF(pCamBmp);
}
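
The bWait flag selects between blocking and polling acquisition. A minimal usage sketch, assuming a configured CMUCamera pointer named pCamera:

// Hypothetical polling pattern: in non-blocking mode an empty BitmapPtr
// means "no frame yet"; blocking mode waits on the camera's frame event.
BitmapPtr pBmp = pCamera->getImage(false);  // poll
if (!pBmp) {
    pBmp = pCamera->getImage(true);         // block until the next frame
}
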
Code example #6
File: TexInfo.cpp Project: hoodie/libavg
int TexInfo::getGLFormat(PixelFormat pf)
{
    switch (pf) {
        case I8:
        case I32F:
            return GL_LUMINANCE;
        case A8:
            return GL_ALPHA;
        case R8G8B8A8:
        case R8G8B8X8:
            return GL_RGBA;
        case B8G8R8A8:
        case B8G8R8X8:
            AVG_ASSERT(!GLContext::getCurrent()->isGLES());
            return GL_BGRA;
#ifndef AVG_ENABLE_EGL
        case R8:
            return GL_RED;
        case R32G32B32A32F:
            return GL_BGRA;
#endif
        case R8G8B8:
        case B5G6R5:
            return GL_RGB;
        default:
            AVG_ASSERT(false);
            return 0;
    }
}
Code example #7
void TestHelper::fakeTouchEvent(int id, Event::Type eventType,
        Event::Source source, const glm::vec2& pos, const glm::vec2& speed)
{
    checkEventType(eventType);
    // The id is modified to avoid collisions with real touch events.
    TouchEventPtr pEvent(new TouchEvent(id+std::numeric_limits<int>::max()/2, eventType, 
            IntPoint(pos), source, speed));
    map<int, TouchStatusPtr>::iterator it = m_Touches.find(pEvent->getCursorID());
    switch (pEvent->getType()) {
        case Event::CURSORDOWN: {
                AVG_ASSERT(it == m_Touches.end());
                TouchStatusPtr pTouchStatus(new TouchStatus(pEvent));
                m_Touches[pEvent->getCursorID()] = pTouchStatus;
            }
            break;
        case Event::CURSORMOTION:
        case Event::CURSORUP: {
                if (it == m_Touches.end()) {
                    cerr << "borked: " << pEvent->getCursorID() << ", " << 
                            pEvent->typeStr() << endl;
                }
                AVG_ASSERT(it != m_Touches.end());
                TouchStatusPtr pTouchStatus = (*it).second;
                pTouchStatus->pushEvent(pEvent);
            }
            break;
        default:
            AVG_ASSERT(false);
            break;
    }
}
Code example #8
void VideoMsg::setAudio(AudioBufferPtr pAudioBuffer, float audioTime)
{
    AVG_ASSERT(m_MsgType == NONE);
    AVG_ASSERT(pAudioBuffer);
    m_MsgType = AUDIO;
    m_pAudioBuffer = pAudioBuffer;
    m_AudioTime = audioTime;
}
Code example #9
void VideoMsg::setFrame(const std::vector<BitmapPtr>& pBmps, float frameTime)
{
    AVG_ASSERT(m_MsgType == NONE);
    AVG_ASSERT(pBmps.size() == 1 || pBmps.size() == 3 || pBmps.size() == 4);
    m_MsgType = FRAME;
    m_pBmps = pBmps;
    m_FrameTime = frameTime;
}
Code example #10
File: VideoWriterThread.cpp Project: libavg/libavg
void VideoWriterThread::openVideoCodec()
{
    AVCodec* videoCodec = avcodec_find_encoder(m_pVideoStream->codec->codec_id);
    AVG_ASSERT(videoCodec);

    int rc = avcodec_open2(m_pVideoStream->codec, videoCodec, 0);
    AVG_ASSERT(rc == 0);
}
Code example #11
void VideoWriterThread::openVideoCodec()
{
    AVCodec* videoCodec = avcodec_find_encoder(m_pVideoStream->codec->codec_id);
    AVG_ASSERT(videoCodec);

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(53, 8, 0)
    int rc = avcodec_open2(m_pVideoStream->codec, videoCodec, 0);

#else
    int rc = avcodec_open(m_pVideoStream->codec, videoCodec);
#endif
    AVG_ASSERT(rc == 0);
}
Code example #12
File: VideoWriterThread.cpp Project: libavg/libavg
void VideoWriterThread::writeFrame(AVFrame* pFrame)
{
    ScopeTimer timer(ProfilingZoneWriteFrame);
    m_FramesWritten++;
    AVCodecContext* pCodecContext = m_pVideoStream->codec;
    AVPacket packet = { 0 };
    int ret;
    bool bGotOutput;

#if LIBAVCODEC_VERSION_INT > AV_VERSION_INT(54, 0, 0)
    av_init_packet(&packet);
    int got_output = 0;
    ret = avcodec_encode_video2(pCodecContext, &packet, pFrame, &got_output);
    AVG_ASSERT(ret >= 0);
    if ((pCodecContext->coded_frame->pts) != (long long)AV_NOPTS_VALUE) {
        packet.pts = av_rescale_q(pCodecContext->coded_frame->pts,
                pCodecContext->time_base, m_pVideoStream->time_base);
    }
    bGotOutput = (got_output != 0);
#else
    int out_size = avcodec_encode_video(pCodecContext, m_pVideoBuffer,
            VIDEO_BUFFER_SIZE, pFrame);
    if (out_size > 0) {
        av_init_packet(&packet);

        if ((pCodecContext->coded_frame->pts) != (long long)AV_NOPTS_VALUE) {
            packet.pts = av_rescale_q(pCodecContext->coded_frame->pts,
                    pCodecContext->time_base, m_pVideoStream->time_base);
        }

        if (pCodecContext->coded_frame->key_frame) {
            packet.flags |= AV_PKT_FLAG_KEY;
        }
        packet.stream_index = m_pVideoStream->index;
        packet.data = m_pVideoBuffer;
        packet.size = out_size;
    }
    bGotOutput = (out_size > 0);
#endif
    if (bGotOutput) {
        /* write the compressed frame in the media file */
        ret = av_interleaved_write_frame(m_pOutputFormatContext, &packet);
        av_free_packet(&packet);
        if (ret != 0) {
            AVG_TRACE(Logger::category::VIDEO, Logger::severity::ERROR,
                    getAVErrorString(ret));
        }
        AVG_ASSERT(ret == 0);
    }

}
Code example #13
File: VDPAUDecoder.cpp Project: JohnChu/libavg
void VDPAUDecoder::setupDecoder(AVCodecContext* pContext)
{
    VdpStatus status;

    // Create new decoder and mixer.
    VdpDecoderProfile profile = 0;
    switch (pContext->pix_fmt) {
        case PIX_FMT_VDPAU_MPEG1:
            profile = VDP_DECODER_PROFILE_MPEG1;
            break;
        case PIX_FMT_VDPAU_MPEG2:
            profile = VDP_DECODER_PROFILE_MPEG2_MAIN;
            break;
        case PIX_FMT_VDPAU_H264:
            profile = VDP_DECODER_PROFILE_H264_HIGH;
            break;
        case PIX_FMT_VDPAU_WMV3:
            profile = VDP_DECODER_PROFILE_VC1_SIMPLE;
            break;
        case PIX_FMT_VDPAU_VC1:
            profile = VDP_DECODER_PROFILE_VC1_SIMPLE;
            break;
        default:
            AVG_ASSERT(false);
    }
    status = vdp_decoder_create(getVDPAUDevice(), profile, m_Size.x, m_Size.y, 16,
            &m_VDPDecoder);
    AVG_ASSERT(status == VDP_STATUS_OK);

    m_PixFmt = pContext->pix_fmt;

    VdpVideoMixerFeature features[] = {
        VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL,
        VDP_VIDEO_MIXER_FEATURE_DEINTERLACE_TEMPORAL_SPATIAL,
    };
    VdpVideoMixerParameter params[] = { 
        VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_WIDTH,
        VDP_VIDEO_MIXER_PARAMETER_VIDEO_SURFACE_HEIGHT,
        VDP_VIDEO_MIXER_PARAMETER_CHROMA_TYPE,
        VDP_VIDEO_MIXER_PARAMETER_LAYERS
    };
    VdpChromaType chroma = VDP_CHROMA_TYPE_420;
    int  numLayers = 0;
    void const* paramValues [] = { &m_Size.x, &m_Size.y, &chroma, &numLayers };

    status = vdp_video_mixer_create(getVDPAUDevice(), 2, features, 4, params, 
            paramValues, &m_VDPMixer);
    AVG_ASSERT(status == VDP_STATUS_OK);

}
Code example #14
File: BmpTextureMover.cpp Project: gunawanw9/libavg
void BmpTextureMover::moveBmpToTexture(BitmapPtr pBmp, GLTexture& tex)
{
    AVG_ASSERT(pBmp->getSize() == tex.getSize());
    AVG_ASSERT(getSize() == pBmp->getSize());
    AVG_ASSERT(pBmp->getPixelFormat() == getPF());
    tex.activate();
    unsigned char * pStartPos = pBmp->getPixels();
    IntPoint size = tex.getSize();
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, size.x, size.y,
                    tex.getGLFormat(getPF()), tex.getGLType(getPF()),
                    pStartPos);
    tex.generateMipmaps();
    GLContext::checkError("BmpTextureMover::moveBmpToTexture: glTexSubImage2D()");
}
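
The three asserts require the bitmap, the texture and the mover to agree on size and pixel format. A hedged construction sketch; the GLTexture and BmpTextureMover constructor signatures are assumptions (the Bitmap constructor matches the one used in code example #27 below):

// Hypothetical setup satisfying the asserts in moveBmpToTexture().
IntPoint size(256, 256);
BitmapPtr pBmp(new Bitmap(size, R8G8B8A8, "SrcBmp"));
GLTexture tex(size, R8G8B8A8);            // assumed constructor
BmpTextureMover mover(size, R8G8B8A8);    // assumed constructor
mover.moveBmpToTexture(pBmp, tex);
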
Code example #15
File: Contact.cpp Project: pararthshah/libavg-vaapi
void Contact::sendEventToListeners(CursorEventPtr pCursorEvent)
{
    switch (pCursorEvent->getType()) {
        case Event::CURSOR_DOWN:
            break;
        case Event::CURSOR_MOTION:
            notifySubscribers("CURSOR_MOTION", pCursorEvent);
            break;
        case Event::CURSOR_UP:
            notifySubscribers("CURSOR_UP", pCursorEvent);
            removeSubscribers();
            break;
        default:
            AVG_ASSERT_MSG(false, pCursorEvent->typeStr().c_str());
    }
    m_bSendingEvents = true;
    AVG_ASSERT(pCursorEvent->getContact() == shared_from_this());
    EventPtr pEvent = boost::dynamic_pointer_cast<Event>(pCursorEvent);
    m_bCurListenerIsDead = false;
    for (map<int, Listener>::iterator it = m_ListenerMap.begin(); 
            it != m_ListenerMap.end();)
    {
        Listener listener = it->second;
        m_CurListenerID = it->first;
        m_bCurListenerIsDead = false;
        switch (pCursorEvent->getType()) {
            case Event::CURSOR_MOTION:
                if (listener.m_pMotionCallback != Py_None) {
                    py::call<void>(listener.m_pMotionCallback, pEvent);
                }
                break;
            case Event::CURSOR_UP:
                if (listener.m_pUpCallback != Py_None) {
                    py::call<void>(listener.m_pUpCallback, pEvent);
                }
                break;
            default:
                AVG_ASSERT(false);
        }
        map<int, Listener>::iterator lastIt = it;
        ++it;
        if (m_bCurListenerIsDead) {
            m_ListenerMap.erase(lastIt);
            m_bCurListenerIsDead = false;
        }
    }
    m_bSendingEvents = false;
}
Code example #16
File: AudioMsg.cpp Project: JohnChu/libavg
void AudioMsg::setAudio(AudioBufferPtr pAudioBuffer, float audioTime)
{
    AVG_ASSERT(pAudioBuffer);
    setType(AUDIO);
    m_pAudioBuffer = pAudioBuffer;
    m_AudioTime = audioTime;
}
Code example #17
void OffscreenCanvas::addDependentCanvas(CanvasPtr pCanvas)
{
    AVG_ASSERT(!(pCanvas == shared_from_this()));
    m_pDependentCanvases.push_back(pCanvas);
    Player::get()->newCanvasDependency(
            dynamic_pointer_cast<OffscreenCanvas>(shared_from_this()));
}
Code example #18
File: OGLHelper.cpp Project: JohnChu/libavg
GLfunction getProcAddress(const string& sName)
{
    AVG_ASSERT(glproc::s_hGLLib);
#ifdef _WIN32
    GLfunction pProc = (GLfunction)wglGetProcAddress(sName.c_str());
/*
    if (!pProc) {
        char szErr[512];
        FormatMessage((FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_FROM_SYSTEM),
                0, GetLastError(), MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                szErr, 512, 0);
        throw Exception(AVG_ERR_VIDEO_GENERAL, 
                string("wglGetProcAddress("+sName+") failed: ") + szErr);
    }
*/
#else
    GLfunction pProc = (GLfunction)dlsym(glproc::s_hGLLib, sName.c_str());
    if (!pProc) {
        // If the name didn't work, try using an underscore :-).
        string sName_ = string("_")+sName;
        pProc = (GLfunction)dlsym(glproc::s_hGLLib, sName_.c_str());
    }
#endif
    return pProc;
}
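
A hedged usage sketch: resolving a GL entry point at runtime through this helper. The function-pointer typedef is illustrative and assumes the GL headers are included:

// Hypothetical lookup of glGenBuffers via the loader above.
typedef void (*PFNGENBUFFERS)(GLsizei n, GLuint* buffers);
PFNGENBUFFERS pfnGenBuffers = (PFNGENBUFFERS)getProcAddress("glGenBuffers");
if (pfnGenBuffers) {
    GLuint bufID;
    pfnGenBuffers(1, &bufID);
}
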
Code example #19
File: SyncVideoDecoder.cpp Project: hoodie/libavg
void SyncVideoDecoder::readFrame(AVFrame* pFrame)
{
    AVG_ASSERT(getState() == DECODING);
    ScopeTimer timer(DecodeProfilingZone); 

    if (m_bProcessingLastFrames) {
        // EOF received, but last frames still need to be decoded.
        bool bGotPicture = m_pFrameDecoder->decodeLastFrame(pFrame);
        if (!bGotPicture) {
            m_bProcessingLastFrames = false;
        }
    } else {        
        bool bDone = false;
        while (!bDone) {
            AVPacket* pPacket = m_pDemuxer->getPacket(getVStreamIndex());
            m_bFirstPacket = false;
            bool bGotPicture;
            if (pPacket) {
                bGotPicture = m_pFrameDecoder->decodePacket(pPacket, pFrame, 
                        m_bVideoSeekDone);
            } else {
                bGotPicture = m_pFrameDecoder->decodeLastFrame(pFrame);
            }
            if (bGotPicture && m_pFrameDecoder->isEOF()) {
                m_bProcessingLastFrames = true;
            }
            if (bGotPicture || m_pFrameDecoder->isEOF()) {
                bDone = true;
            }
        }
    }
}
Code example #20
File: SyncVideoDecoder.cpp Project: hoodie/libavg
FrameAvailableCode SyncVideoDecoder::getRenderedBmps(vector<BitmapPtr>& pBmps, 
        float timeWanted)
{
    AVG_ASSERT(getState() == DECODING);
    ScopeTimer timer(RenderToBmpProfilingZone);
    FrameAvailableCode frameAvailable;
    if (timeWanted == -1) {
        readFrame(m_pFrame);
        frameAvailable = FA_NEW_FRAME;
    } else {
        frameAvailable = readFrameForTime(m_pFrame, timeWanted);
    }
    if (frameAvailable == FA_USE_LAST_FRAME || isEOF()) {
        return FA_USE_LAST_FRAME;
    } else {
        allocFrameBmps(pBmps);
        if (pixelFormatIsPlanar(getPixelFormat())) {
            ScopeTimer timer(CopyImageProfilingZone);
            for (unsigned i = 0; i < pBmps.size(); ++i) {
                m_pFrameDecoder->copyPlaneToBmp(pBmps[i], m_pFrame->data[i],
                        m_pFrame->linesize[i]);
            }
        } else {
            m_pFrameDecoder->convertFrameToBmp(m_pFrame, pBmps[0]);
        }
        return FA_NEW_FRAME;
    }
}
Code example #21
File: CMUCamera.cpp Project: cnxsoft/xibo4arm
CameraInfo* CMUCamera::getCameraInfos(int deviceNumber)
{
#ifdef AVG_ENABLE_CMU1394
    C1394Camera* pCamera = new C1394Camera();
    int err = pCamera->RefreshCameraList();
    if (err <= 0) {
        delete pCamera;  // avoid leaking the camera object on failure
        return 0;
    }

    err = pCamera->SelectCamera(deviceNumber);
    if (err != CAM_SUCCESS) {
        AVG_ASSERT(false);
    }
    pCamera->InitCamera(true);

    long long uniqueID;
    pCamera->GetCameraUniqueID((PLARGE_INTEGER)&uniqueID);
    stringstream deviceID;
    deviceID << uniqueID;

    CameraInfo* pCamInfo = new CameraInfo("Firewire", deviceID.str());
    getCameraImageFormats(pCamera, pCamInfo);
    getCameraControls(pCamera, pCamInfo);

    delete pCamera;
    return pCamInfo;
#endif
    return NULL;
}
Code example #22
File: AudioMsg.cpp Project: JohnChu/libavg
void AudioMsg::setSeekDone(int seqNum, float seekTime)
{
    setType(SEEK_DONE);
    AVG_ASSERT(seqNum != -1);
    m_SeekSeqNum = seqNum;
    m_SeekTime = seekTime;
}
Code example #23
void VideoMsg::setSeekDone(float seekVideoFrameTime, float seekAudioFrameTime)
{
    AVG_ASSERT(m_MsgType == NONE);
    m_MsgType = SEEK_DONE;
    m_SeekVideoFrameTime = seekVideoFrameTime;
    m_SeekAudioFrameTime = seekAudioFrameTime;
}
Code example #24
File: TexInfo.cpp Project: hoodie/libavg
int TexInfo::getGLType(PixelFormat pf)
{
    switch (pf) {
        case I8:
        case A8:
        case R8:
            return GL_UNSIGNED_BYTE;
        case R8G8B8A8:
        case R8G8B8X8:
        case B8G8R8A8:
        case B8G8R8X8:
#ifdef __APPLE__
            return GL_UNSIGNED_INT_8_8_8_8_REV;
#else
            return GL_UNSIGNED_BYTE;
#endif
        case R32G32B32A32F:
        case I32F:
            return GL_FLOAT;
        case R8G8B8:
            return GL_UNSIGNED_BYTE;
        case B5G6R5:
            return GL_UNSIGNED_SHORT_5_6_5;
        default:
            AVG_ASSERT(false);
            return 0;
    }
}
Code example #25
File: FilterWipeBorder.cpp Project: JohnChu/libavg
void FilterWipeBorder::applyInPlace(BitmapPtr pBmp)
{
    AVG_ASSERT(pBmp->getPixelFormat() == I8);
    if (m_NumPixels != 0) {
        int stride = pBmp->getStride();
        unsigned char * pPixels = pBmp->getPixels();
        IntPoint size = pBmp->getSize();
        IntPoint activeSize = pBmp->getSize()-IntPoint(2*m_NumPixels, 2*m_NumPixels);

        unsigned char * pSrcLine = pPixels+stride*m_NumPixels+m_NumPixels;
        for (int y = m_NumPixels-1; y >= 0; --y) {
            memcpy(pPixels+stride*y+m_NumPixels, pSrcLine, activeSize.x);
        }
        pSrcLine = pPixels+stride*(size.y-m_NumPixels-1)+m_NumPixels;
        for (int y = size.y-m_NumPixels; y < size.y; ++y) {
            memcpy(pPixels+stride*y+m_NumPixels, pSrcLine, activeSize.x);
        }

        for (int y = 0; y < size.y; ++y) {
            unsigned char src = *(pPixels+stride*y+m_NumPixels);
            memset(pPixels+stride*y, src, m_NumPixels);
            src = *(pPixels+stride*y+size.x-m_NumPixels-1);
            memset(pPixels+stride*y+size.x-m_NumPixels, src, m_NumPixels);
        }
    }
}
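
In effect the filter overwrites an m_NumPixels-wide frame with replicated copies of the outermost rows and columns of the inner (active) region. A worked illustration with hypothetical pixel values and m_NumPixels = 1:

// Before (I8, 4x4; '.' = border):    After applyInPlace():
//   .  .  .  .                         5  5  6  6
//   .  5  6  .                         5  5  6  6
//   .  7  8  .                         7  7  8  8
//   .  .  .  .                         7  7  8  8
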
Code example #26
File: BitmapManagerMsg.cpp Project: cnxsoft/xibo4arm
void BitmapManagerMsg::executeCallback()
{
    AVG_ASSERT(m_MsgType != NONE);
    switch (m_MsgType) {
        case BITMAP:
            boost::python::call<void>(m_OnLoadedCb.ptr(), m_pBmp);
            break;

        case ERROR:
            boost::python::call<void>(m_OnLoadedCb.ptr(), m_pEx);
            break;
        
        default:
            AVG_ASSERT(false);
    }
}
Code example #27
File: FilterDilation.cpp Project: cnxsoft/xibo4arm
BitmapPtr FilterDilation::apply(BitmapPtr pSrcBmp) 
{
    AVG_ASSERT(pSrcBmp->getPixelFormat() == I8);
    IntPoint size = pSrcBmp->getSize();
    BitmapPtr pDestBmp = BitmapPtr(new Bitmap(size, I8, pSrcBmp->getName()));
    unsigned char * pSrcLine = pSrcBmp->getPixels();
    unsigned char * pNextSrcLine;
    unsigned char * pDestLine = pDestBmp->getPixels();
    for (int y = 0; y < size.y; y++) {
        pDestLine = pDestBmp->getPixels()+y*pDestBmp->getStride();
        unsigned char * pLastSrcLine = pSrcLine;
        pSrcLine = pSrcBmp->getPixels()+y*pSrcBmp->getStride();
        if (y < size.y-1) {
            pNextSrcLine = pSrcBmp->getPixels()+(y+1)*pSrcBmp->getStride();
        } else {
            pNextSrcLine = pSrcBmp->getPixels()+y*pSrcBmp->getStride();
        }
        pDestLine[0] = max(pSrcLine[0], max(pSrcLine[1], 
                max(pLastSrcLine[0], pNextSrcLine[0])));
        for (int x = 1; x < size.x-1; x++) { 
            pDestLine[x] = max(pSrcLine[x], max(pSrcLine[x-1], max(pSrcLine[x+1], 
                    max(pLastSrcLine[x], pNextSrcLine[x]))));
        }
        pDestLine[size.x-1] = max(pSrcLine[size.x-2], max(pSrcLine[size.x-1], 
                max(pLastSrcLine[size.x-1], pNextSrcLine[size.x-1])));
    }
    return pDestBmp;
}
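
The loop computes a 4-neighbourhood (plus-shaped) maximum, with edge rows and columns clamped to themselves. A worked illustration with hypothetical values:

// Source (I8, 3x3):    apply() result:
//   0  0  0              0  9  0
//   0  9  0              9  9  9
//   0  0  0              0  9  0
// dest(x,y) = max(src(x,y), src(x-1,y), src(x+1,y), src(x,y-1), src(x,y+1)),
// so a single bright pixel grows into a plus shape.
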
Code example #28
int AppleTrackpadInputDevice::callback(int device, Finger *data, int nFingers,
                                       double timestamp, int frame)
{
    AVG_ASSERT(s_pInstance != 0);
    s_pInstance->onData(device, data, nFingers, timestamp, frame);
    return 0;
}
Code example #29
File: FWCamera.cpp Project: lynxis/libavg
void FWCamera::getCameraImageFormats(dc1394camera_t* pCamera, CameraInfo* camInfo)
{
    dc1394video_modes_t videoModes;
    dc1394framerates_t framerates;
    dc1394error_t err = dc1394_video_get_supported_modes(pCamera, &videoModes);
    if (err != DC1394_SUCCESS) {
        AVG_ASSERT(false);
        return;
    }
    for (unsigned i = 0; i < videoModes.num; i++) {
        //Covers only libavg supported formats, other capabilities are ignored
        if (videoModes.modes[i] >= DC1394_VIDEO_MODE_320x240_YUV422
                && videoModes.modes[i] <= DC1394_VIDEO_MODE_1600x1200_MONO16){
            PixelFormat pixFormat = getPFFromVideoMode(videoModes.modes[i]);
            IntPoint size = getFrameSizeFromVideoMode(videoModes.modes[i]);
            FrameratesVector framerateList;
            err = dc1394_video_get_supported_framerates(pCamera, videoModes.modes[i],
                    &framerates);
            if (err != DC1394_SUCCESS) {
                AVG_LOG_WARNING("Camera: No framerates. Error was: " << err);
            } else {
                for (unsigned j = 0; j < framerates.num; j++) {
                    float rate = framerateToFloat(framerates.framerates[j]);
                    framerateList.push_back(rate);
                }
            }
            CameraImageFormat format(size, pixFormat, framerateList);
            camInfo->addImageFormat(format);
        }
    }
}
Code example #30
void VideoMsg::setVDPAUFrame(vdpau_render_state* pRenderState, float frameTime)
{
    AVG_ASSERT(m_MsgType == NONE);
    m_MsgType = VDPAU_FRAME;
    m_pRenderState = pRenderState;
    m_FrameTime = frameTime;
}