Code example #1
File: FFDecVTB.cpp Project: arthurzam/QMPlay2
	~VTBHwaccel()
	{
		QMutexLocker locker(&m_buffersMutex);
		for (quintptr buffer : asConst(m_buffers))
			CVPixelBufferRelease((CVPixelBufferRef)buffer);
		CVPixelBufferRelease(m_pixelBufferToRelease);
	}
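The destructor above releases m_pixelBufferToRelease without a null check; unlike CFRelease, CVPixelBufferRelease is NULL-safe, so the unconditional call is fine. For reference, a minimal sketch of the +1/-1 ownership cycle all of these examples are balancing (illustrative code, not from QMPlay2):

    #include <CoreVideo/CoreVideo.h>

    static void pixel_buffer_lifecycle_demo(void)
    {
        CVPixelBufferRef buf = NULL;

        /* CVPixelBufferCreate returns a +1 reference on success */
        CVReturn ret = CVPixelBufferCreate(kCFAllocatorDefault, 640, 480,
                                           kCVPixelFormatType_32BGRA,
                                           NULL /* default attributes */, &buf);
        if (ret != kCVReturnSuccess)
            return;

        CVPixelBufferRetain(buf);   /* +1: two references held */
        CVPixelBufferRelease(buf);  /* -1 */
        CVPixelBufferRelease(buf);  /* -1: the buffer is freed here */
        CVPixelBufferRelease(NULL); /* a documented no-op, unlike CFRelease(NULL) */
    }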
Code example #2
File: qVideoEncoder.c Project: mainakbiswas/openqwaq
int qEncodeAPI(QEncoder* encoder, char* bytes, int byteSize)
{
    OSErr err;
    CVPixelBufferPoolRef pixelBufferPool;
    CVPixelBufferRef pixelBuffer;
    unsigned char* baseAddress;
    size_t bufferSize;

    // Grab a pixel buffer from the pool (ICMCompressionSessionEncodeFrame() needs the input
    // data to be passed in as a CVPixelBufferRef).
    pixelBufferPool = ICMCompressionSessionGetPixelBufferPool(encoder->session);
    err = CVPixelBufferPoolCreatePixelBuffer(kCFAllocatorDefault, pixelBufferPool, &pixelBuffer);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not obtain a pixel buffer from pool");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        return -5;
    }

    // Lock the pixel-buffer so that we can copy our data into it for encoding
    // XXXX: would be nice to avoid this copy.
    err = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not lock the pixel buffer");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }
    baseAddress = CVPixelBufferGetBaseAddress(pixelBuffer);
//	bufferSize = CVPixelBufferGetWidth(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer) * 4;
    bufferSize = CVPixelBufferGetBytesPerRow(pixelBuffer) * CVPixelBufferGetHeight(pixelBuffer);

    // XXXX: for now, just for debugging.  For production, we should notice if this happens and deal with it "appropriately".
    if ((size_t)byteSize != bufferSize) {
        fprintf(QSTDERR, "\nqEncodeQT(): input data size (%d) does not match pixel-buffer data size (%zu)", byteSize, bufferSize);
    }

    // Copy the data and unlock the buffer (clamp to the smaller of the two
    // sizes so a short input can't cause an out-of-bounds read)
    memcpy(baseAddress, bytes,
           (size_t)byteSize < bufferSize ? (size_t)byteSize : bufferSize);
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);

    // Encode the frame (now in pixel-buffer form).
    err = ICMCompressionSessionEncodeFrame(	encoder->session,
                                            pixelBuffer,
                                            0, 0, 0, // we're not specifying a frame time
                                            NULL,
                                            NULL,
                                            NULL);
    if (err != noErr) {
        fprintf(QSTDERR, "\nqEncodeQT(): could not encode the frame");
        fprintf(QSTDERR, "\n\tQUICKTIME ERROR CODE: %d", err);
        CVPixelBufferRelease(pixelBuffer);
        return -5;
    }

    CVPixelBufferRelease(pixelBuffer);
    return 0;
}
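The commented-out width*4 line in the snippet hints at the pitfall: CVPixelBufferGetBytesPerRow() may exceed width * 4 because rows can be padded for alignment, so the flat memcpy above is only correct when the caller's data shares the buffer's stride. A hedged sketch of the row-by-row alternative (the function name and the packed-BGRA input are assumptions, not openqwaq code):

    #include <string.h>
    #include <CoreVideo/CoreVideo.h>

    static void copy_packed_bgra_rows(CVPixelBufferRef pb, const unsigned char *src)
    {
        size_t height    = CVPixelBufferGetHeight(pb);
        size_t dstStride = CVPixelBufferGetBytesPerRow(pb);  /* may include padding */
        size_t srcStride = CVPixelBufferGetWidth(pb) * 4;    /* tightly packed input */

        CVPixelBufferLockBaseAddress(pb, 0);
        unsigned char *dst = CVPixelBufferGetBaseAddress(pb);
        for (size_t y = 0; y < height; y++)
            memcpy(dst + y * dstStride, src + y * srcStride, srcStride);
        CVPixelBufferUnlockBaseAddress(pb, 0);
    }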
Code example #3
File: vda_h264_dec.c Project: 15806905685/FFmpeg
static void release_buffer(void *opaque, uint8_t *data)
{
    VDABufferContext *context = opaque;
    CVPixelBufferUnlockBaseAddress(context->cv_buffer, 0);
    CVPixelBufferRelease(context->cv_buffer);
    av_free(context);
}
Code example #4
PixelBufferSource::~PixelBufferSource()
{
    if(m_pixelBuffer) {
        CVPixelBufferRelease((CVPixelBufferRef)m_pixelBuffer);
        m_pixelBuffer = nullptr;
    }
}
Code example #5
File: vda.c Project: AsamQi/vlc
static int Extract( vlc_va_t *external, picture_t *p_picture, void *opaque,
                    uint8_t *data )
{
    vlc_va_vda_t *p_va = vlc_va_vda_Get( external );
    CVPixelBufferRef cv_buffer = ( CVPixelBufferRef )data;

    if( !cv_buffer )
    {
        msg_Dbg( p_va->p_log, "Frame buffer is empty.");
        return VLC_EGENERIC;
    }
    if (CVPixelBufferGetDataSize(cv_buffer) == 0)
    {
        msg_Dbg( p_va->p_log, "Empty frame buffer");
        return VLC_EGENERIC;
    }

    if( p_va->hw_ctx.cv_pix_fmt_type == kCVPixelFormatType_420YpCbCr8Planar )
    {
        if( !p_va->image_cache.buffer ) {
            CVPixelBufferRelease( cv_buffer );
            return VLC_EGENERIC;
        }

        vda_Copy420YpCbCr8Planar( p_picture,
                                  cv_buffer,
                                  p_va->hw_ctx.width,
                                  p_va->hw_ctx.height,
                                  &p_va->image_cache );
    }
    else
        vda_Copy422YpCbCr8( p_picture, cv_buffer );
    (void) opaque;
    return VLC_SUCCESS;
}
Code example #6
File: vda.c Project: DakaiTV/DakaiVLC
/*****************************************************************************
 * vda_Copy420YpCbCr8Planar: copy y420 CVPixelBuffer to picture_t
 *****************************************************************************/
static void vda_Copy420YpCbCr8Planar( picture_t *p_pic,
                                      CVPixelBufferRef buffer,
                                      unsigned i_width,
                                      unsigned i_height,
                                      copy_cache_t *cache )
{
    uint8_t *pp_plane[3];
    size_t  pi_pitch[3];

    if (!buffer)
        return;

    CVPixelBufferLockBaseAddress( buffer, 0 );

    for( int i = 0; i < 3; i++ )
    {
        pp_plane[i] = CVPixelBufferGetBaseAddressOfPlane( buffer, i );
        pi_pitch[i] = CVPixelBufferGetBytesPerRowOfPlane( buffer, i );
    }

    CopyFromYv12( p_pic, pp_plane, pi_pitch,
                  i_width, i_height, cache );

    CVPixelBufferUnlockBaseAddress( buffer, 0 );
    CVPixelBufferRelease( buffer );
}
Code example #7
File: vda.c Project: DakaiTV/DakaiVLC
/*****************************************************************************
 * vda_Copy422YpCbCr8: copy 2vuy CVPixelBuffer to picture_t
 *****************************************************************************/
static void vda_Copy422YpCbCr8( picture_t *p_pic,
                                CVPixelBufferRef buffer )
{
    int i_plane, i_line, i_dst_stride, i_src_stride;
    uint8_t *p_dst, *p_src;

    CVPixelBufferLockBaseAddress( buffer, 0 );

    for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
    {
        p_dst = p_pic->p[i_plane].p_pixels;
        p_src = CVPixelBufferGetBaseAddressOfPlane( buffer, i_plane );
        i_dst_stride  = p_pic->p[i_plane].i_pitch;
        i_src_stride  = CVPixelBufferGetBytesPerRowOfPlane( buffer, i_plane );

        for( i_line = 0; i_line < p_pic->p[i_plane].i_visible_lines ; i_line++ )
        {
            /* copy the smaller pitch so a narrower destination cannot overflow */
            memcpy( p_dst, p_src,
                    i_src_stride < i_dst_stride ? i_src_stride : i_dst_stride );

            p_src += i_src_stride;
            p_dst += i_dst_stride;
        }
    }

    CVPixelBufferUnlockBaseAddress( buffer, 0 );
    CVPixelBufferRelease( buffer );
}
Code example #8
File: QTPixelBuffer.cpp Project: 1833183060/wke
void QTPixelBuffer::adopt(CVPixelBufferRef ref)
{
    if (ref == m_pixelBuffer)
        return;
    CVPixelBufferRelease(m_pixelBuffer);
    m_pixelBuffer = ref;
}
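adopt() deliberately skips the retain: it takes ownership of a +1 reference the caller already holds. The retaining counterpart usually looks like the sketch below (set_pixel_buffer is an illustrative name, not WebKit API); retaining before releasing keeps a self-assignment from destroying the buffer:

    static void set_pixel_buffer(CVPixelBufferRef *slot, CVPixelBufferRef ref)
    {
        CVPixelBufferRetain(ref);     /* retain first, so ref == *slot stays alive */
        CVPixelBufferRelease(*slot);  /* NULL-safe */
        *slot = ref;
    }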
Code example #9
File: vda_h264.c Project: 63n/FFmpeg
static int vda_old_h264_end_frame(AVCodecContext *avctx)
{
    H264Context *h                      = avctx->priv_data;
    VDAContext *vda                     = avctx->internal->hwaccel_priv_data;
    struct vda_context *vda_ctx         = avctx->hwaccel_context;
    AVFrame *frame                      = &h->cur_pic_ptr->f;
    struct vda_buffer *context;
    AVBufferRef *buffer;
    int status;

    if (!vda_ctx->decoder || !vda->bitstream)
        return -1;

    status = vda_sync_decode(vda, vda_ctx);
    frame->data[3] = (void*)vda_ctx->cv_buffer;

    if (status)
        av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);

    if (!vda_ctx->use_ref_buffer || status)
        return status;

    context = av_mallocz(sizeof(*context));
    buffer = av_buffer_create(NULL, 0, vda_h264_release_buffer, context, 0);
    if (!context || !buffer) {
        CVPixelBufferRelease(vda_ctx->cv_buffer);
        av_free(context);
        return -1;
    }

    context->cv_buffer = vda_ctx->cv_buffer;
    frame->buf[3] = buffer;

    return status;
}
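The AVBufferRef here exists only to keep the CVPixelBuffer alive for the lifetime of the AVFrame. vda_h264_release_buffer is referenced but not shown; following the pattern of examples #3 and #16, it presumably looks like this (a sketch, not copied from FFmpeg):

    static void vda_h264_release_buffer(void *opaque, uint8_t *data)
    {
        struct vda_buffer *context = opaque;
        CVPixelBufferRelease(context->cv_buffer);  /* drop the frame's reference */
        av_free(context);
    }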
Code example #10
File: FFDecVTB.cpp Project: arthurzam/QMPlay2
	void clear(bool contextChange) override
	{
		Q_UNUSED(contextChange)
		CVPixelBufferRelease(m_pixelBufferToRelease);
		m_pixelBufferToRelease = nullptr;
		m_glTextures = nullptr;
	}
Code example #11
File: vda.c Project: KindDragon/FFmpeg
void ff_vda_release_vda_frame(vda_frame *frame)
{
    if (frame) {
        CVPixelBufferRelease(frame->cv_buffer);
        av_freep(&frame);
    }
}
Code example #12
File: quicktimedrv.c Project: BigBoss21X/vice-emu
static OSStatus finish_video(void)
{
    video_ready = 0;
    
    // ----- PixelBuffer -----
    CVPixelBufferRelease(pixelBuffer);

    // ----- Codec -----
    
    OSErr theError = ICMCompressionSessionCompleteFrames(videoCompressionSession, true, 0, 0);
    if (theError)
        log_debug("quicktime_video: error completing frames!");
        
    ICMCompressionSessionRelease(videoCompressionSession);
	
    // ----- Movie -----

    //End media editing
    theError = EndMediaEdits(videoMedia);
    if (theError)
        log_debug("quicktime_video: error ending media edits");

    theError = ExtendMediaDecodeDurationToDisplayEndTime(videoMedia, NULL);
    if (theError)
        log_debug("quicktime_video: error setting decode duration!");

    //Add media to track
    theError = InsertMediaIntoTrack(videoTrack, 0, 0, GetMediaDisplayDuration(videoMedia), fixed1);
    if (theError)
        log_debug("quicktime_video: error inserting media into track!");

    videoTrack=NULL;
    videoMedia=NULL;
    return theError;
}
Code example #13
File: vda_h264.c Project: TheRyuu/libav
static int vda_h264_uninit(AVCodecContext *avctx)
{
    VDAContext *vda = avctx->internal->hwaccel_priv_data;
    av_freep(&vda->bitstream);
    if (vda->frame)
        CVPixelBufferRelease(vda->frame);
    return 0;
}
Code example #14
File: privatedecoder_vda.cpp Project: stunami/mythtv
void PrivateDecoderVDA::PopDecodedFrame(void)
{
    QMutexLocker lock(&m_frame_lock);
    if (m_decoded_frames.isEmpty())
        return;
    CVPixelBufferRelease(m_decoded_frames.last().buffer);
    m_decoded_frames.removeLast();
}
Code example #15
File: FFDecVTB.cpp Project: arthurzam/QMPlay2
	CopyResult copyFrame(const VideoFrame &videoFrame, Field field) override
	{
		Q_UNUSED(field)

		{
			QMutexLocker locker(&m_buffersMutex);
			const int idx = m_buffers.indexOf(videoFrame.surfaceId);
			if (idx < 0)
				return CopyNotReady;
			m_buffers.removeAt(idx);
			while (m_buffers.size() > 5)
				CVPixelBufferRelease((CVPixelBufferRef)m_buffers.takeFirst());
		}

		CVPixelBufferRef pixelBuffer = (CVPixelBufferRef)videoFrame.surfaceId;
		CGLContextObj glCtx = CGLGetCurrentContext();

		IOSurfaceRef surface = CVPixelBufferGetIOSurface(pixelBuffer);

		const OSType pixelFormat = IOSurfaceGetPixelFormat(surface);
		if (pixelFormat != kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)
		{
			CVPixelBufferRelease(pixelBuffer);
			return CopyError;
		}

		glBindTexture(GL_TEXTURE_RECTANGLE_ARB, m_glTextures[0]);
		if (CGLTexImageIOSurface2D(glCtx, GL_TEXTURE_RECTANGLE_ARB, GL_R8, videoFrame.size.getWidth(0), videoFrame.size.getHeight(0), GL_RED, GL_UNSIGNED_BYTE, surface, 0) != kCGLNoError)
		{
			CVPixelBufferRelease(pixelBuffer);
			return CopyError;
		}

		glBindTexture(GL_TEXTURE_RECTANGLE_ARB, m_glTextures[1]);
		if (CGLTexImageIOSurface2D(glCtx, GL_TEXTURE_RECTANGLE_ARB, GL_RG8, videoFrame.size.getWidth(1), videoFrame.size.getHeight(1), GL_RG, GL_UNSIGNED_BYTE, surface, 1) != kCGLNoError)
		{
			CVPixelBufferRelease(pixelBuffer);
			return CopyError;
		}

		CVPixelBufferRelease(m_pixelBufferToRelease);
		m_pixelBufferToRelease = pixelBuffer;

		return CopyOk;
	}
Code example #16
File: VideoDecoderVDA.cpp Project: csuncs89/QtAV
void VideoDecoderVDAPrivate::releaseBuffer(void *opaque, uint8_t *data)
{
    Q_UNUSED(opaque);
    // released in getBuffer?
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)data;
    if (!cv_buffer)
        return;
    CVPixelBufferRelease(cv_buffer);
}
Code example #17
File: VideoDecoderVDA.cpp Project: nbuxrr/QtAV
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // we can map the cv buffer addresses to a video frame in SurfaceInteropCVBuffer (may need VideoSurfaceInterop::mapToTexture())
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // get address results in internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Code example #18
File: vda.c Project: AsamQi/vlc
static void Release( void *opaque, uint8_t *data )
{
#if 0
    CVPixelBufferRef cv_buffer = ( CVPixelBufferRef )p_ff->data[3];

    if ( cv_buffer )
        CVPixelBufferRelease( cv_buffer );
#endif
    (void) opaque; (void) data;
}
Code example #19
File: hwacc_vda.cpp Project: akhilo/cmplayer
mp_image *HwAccVda::getImage(mp_image *mpi) {
	auto buffer = (CVPixelBufferRef)mpi->planes[3];
	auto release = [] (void *arg) {
		CVPixelBufferRef buffer = (CVPixelBufferRef)arg;
		CVPixelBufferRelease(buffer);
	};
	CVPixelBufferRetain(buffer);
	auto img = null_mp_image(IMGFMT_VDA, size().width(), size().height(), buffer, release);
	mp_image_copy_attributes(img, mpi);
	img->planes[3] = mpi->planes[3];
	return img;
}
Code example #20
	virtual void releaseData()
	{
		if (m_ref)
		{
			if (m_locked)
			{
				CVPixelBufferUnlockBaseAddress(m_ref, 0);
				m_locked = false;
			}
			
			CVPixelBufferRelease(m_ref);
			m_ref = NULL;
		}
	}
Code example #21
void
gst_apple_core_video_pixel_buffer_unref (GstAppleCoreVideoPixelBuffer * gpixbuf)
{
  if (g_atomic_int_dec_and_test (&gpixbuf->refcount)) {
    if (gpixbuf->lock_state != GST_APPLE_CORE_VIDEO_MEMORY_UNLOCKED) {
      GST_ERROR
          ("%p: CVPixelBuffer memory still locked (lock_count = %d), likely forgot to unmap GstAppleCoreVideoMemory",
          gpixbuf, gpixbuf->lock_count);
    }
    CVPixelBufferRelease (gpixbuf->buf);
    g_mutex_clear (&gpixbuf->mutex);
    g_slice_free (GstAppleCoreVideoPixelBuffer, gpixbuf);
  }
}
Code example #22
File: vtb_decode.c Project: emmanouil/gpac
static void VTBDec_on_frame(void *opaque, void *sourceFrameRefCon, OSStatus status, VTDecodeInfoFlags flags, CVImageBufferRef image, CMTime pts, CMTime duration)
{
    VTBDec *ctx = (VTBDec *)opaque;
    if (ctx->frame) {
        CVPixelBufferRelease(ctx->frame);
        ctx->frame = NULL;
    }
    if (status) ctx->last_error = GF_NON_COMPLIANT_BITSTREAM;

    if (!image) {
        GF_LOG(GF_LOG_DEBUG, GF_LOG_CODEC, ("[VTB] No output buffer - status %d\n", status));
        return;
    }
    ctx->frame = CVPixelBufferRetain(image);
}
Code example #23
File: DVDVideoCodecVDA.cpp Project: RoboSK/xbmc
void CDVDVideoCodecVDA::DisplayQueuePop(void)
{
  CCocoaAutoPool pool;
  if (!m_display_queue || m_queue_depth == 0)
    return;

  // pop the top frame off the queue
  pthread_mutex_lock(&m_queue_mutex);
  frame_queue *top_frame = m_display_queue;
  m_display_queue = m_display_queue->nextframe;
  m_queue_depth--;
  pthread_mutex_unlock(&m_queue_mutex);

  // and release it
  CVPixelBufferRelease(top_frame->pixel_buffer_ref);
  free(top_frame);
}
Code example #24
File: vda_h264.c Project: TheRyuu/libav
void ff_vda_output_callback(void *opaque,
                            CFDictionaryRef user_info,
                            OSStatus status,
                            uint32_t infoFlags,
                            CVImageBufferRef image_buffer)
{
    AVCodecContext *ctx = opaque;
    VDAContext *vda = ctx->internal->hwaccel_priv_data;


    if (vda->frame) {
        CVPixelBufferRelease(vda->frame);
        vda->frame = NULL;
    }

    if (!image_buffer)
        return;

    vda->frame = CVPixelBufferRetain(image_buffer);
}
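Examples #22 and #24 implement the same swap-the-held-frame idiom: release whatever was held, then retain the incoming image so it outlives the callback. Factored into a helper, the pattern looks like this (reset_held_frame is an illustrative name):

    static void reset_held_frame(CVPixelBufferRef *held, CVImageBufferRef incoming)
    {
        CVPixelBufferRelease(*held);  /* NULL-safe: fine on the first call */
        *held = incoming ? CVPixelBufferRetain(incoming) : NULL;
    }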
Code example #25
File: vda_h264.c Project: TheRyuu/libav
static void release_buffer(void *opaque, uint8_t *data)
{
    CVImageBufferRef frame = (CVImageBufferRef)data;
    CVPixelBufferRelease(frame);
}
Code example #26
File: privatedecoder_vda.cpp Project: stunami/mythtv
int  PrivateDecoderVDA::GetFrame(AVStream *stream,
                                 AVFrame *picture,
                                 int *got_picture_ptr,
                                 AVPacket *pkt)
{
    CocoaAutoReleasePool pool;
    int result = -1;
    if (!m_lib || !stream)
        return result;

    AVCodecContext *avctx = stream->codec;
    if (!avctx)
        return result;

    if (pkt)
    {
        CFDataRef avc_demux;
        CFDictionaryRef params;
        if (m_annexb)
        {
            // convert demuxer packet from bytestream (AnnexB) to bitstream
            AVIOContext *pb;
            int demuxer_bytes;
            uint8_t *demuxer_content;

            if(avio_open_dyn_buf(&pb) < 0)
            {
                return result;
            }
            demuxer_bytes = avc_parse_nal_units(pb, pkt->data, pkt->size);
            demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content);
            avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes);
            av_free(demuxer_content);
        }
        else if (m_convert_3byteTo4byteNALSize)
        {
            // convert demuxer packet from 3 byte NAL sizes to 4 byte
            AVIOContext *pb;
            if (avio_open_dyn_buf(&pb) < 0)
            {
                return result;
            }

            uint32_t nal_size;
            uint8_t *end = pkt->data + pkt->size;
            uint8_t *nal_start = pkt->data;
            while (nal_start < end)
            {
                nal_size = VDA_RB24(nal_start);
                avio_wb32(pb, nal_size);
                nal_start += 3;
                avio_write(pb, nal_start, nal_size);
                nal_start += nal_size;
            }

            uint8_t *demuxer_content;
            int demuxer_bytes = avio_close_dyn_buf(pb, &demuxer_content);
            avc_demux = CFDataCreate(kCFAllocatorDefault, demuxer_content, demuxer_bytes);
            av_free(demuxer_content);
        }
        else
        {
            avc_demux = CFDataCreate(kCFAllocatorDefault, pkt->data, pkt->size);
        }

        CFStringRef keys[4] = { CFSTR("FRAME_PTS"),
                                CFSTR("FRAME_INTERLACED"), CFSTR("FRAME_TFF"),
                                CFSTR("FRAME_REPEAT") };
        CFNumberRef values[4];
        values[0] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type,
                                   &pkt->pts);
        values[1] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->interlaced_frame);
        values[2] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->top_field_first);
        values[3] = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt8Type,
                                   &picture->repeat_pict);
        params = CFDictionaryCreate(kCFAllocatorDefault, (const void **)&keys,
                                    (const void **)&values, 4,
                                    &kCFTypeDictionaryKeyCallBacks,
                                    &kCFTypeDictionaryValueCallBacks);

        INIT_ST;
        vda_st = m_lib->decoderDecode((VDADecoder)m_decoder, 0, avc_demux, params);
        CHECK_ST;
        if (vda_st == kVDADecoderNoErr)
            result = pkt->size;
        CFRelease(avc_demux);
        CFRelease(params);
    }

    if (m_decoded_frames.size() < m_max_ref_frames)
        return result;

    *got_picture_ptr = 1;
    m_frame_lock.lock();
    VDAFrame vdaframe = m_decoded_frames.takeLast();
    m_frame_lock.unlock();

    if (avctx->get_buffer(avctx, picture) < 0)
        return -1;

    picture->reordered_opaque = vdaframe.pts;
    picture->interlaced_frame = vdaframe.interlaced_frame;
    picture->top_field_first  = vdaframe.top_field_first;
    picture->repeat_pict      = vdaframe.repeat_pict;
    VideoFrame *frame         = (VideoFrame*)picture->opaque;

    PixelFormat in_fmt  = PIX_FMT_NONE;
    PixelFormat out_fmt = PIX_FMT_NONE;
    if (vdaframe.format == 'BGRA')
        in_fmt = PIX_FMT_BGRA;
    else if (vdaframe.format == '2vuy')
        in_fmt = PIX_FMT_UYVY422;

    if (frame->codec == FMT_YV12)
        out_fmt = PIX_FMT_YUV420P;

    if (out_fmt != PIX_FMT_NONE && in_fmt != PIX_FMT_NONE && frame->buf)
    {
        CVPixelBufferLockBaseAddress(vdaframe.buffer, 0);
        uint8_t* base = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(vdaframe.buffer, 0);
        AVPicture img_in, img_out;
        avpicture_fill(&img_out, (uint8_t *)frame->buf, out_fmt,
                       frame->width, frame->height);
        avpicture_fill(&img_in, base, in_fmt,
                       frame->width, frame->height);
        myth_sws_img_convert(&img_out, out_fmt, &img_in, in_fmt,
                       frame->width, frame->height);
        CVPixelBufferUnlockBaseAddress(vdaframe.buffer, 0);
    }
    else
    {
        LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to convert decoded frame.");
    }

    CVPixelBufferRelease(vdaframe.buffer);
    return result;
}
Code example #27
File: vda.c Project: Aseeker/mpv
static void cv_release(void *pbuf)
{
    CVPixelBufferRelease((CVPixelBufferRef)pbuf);
}
Code example #28
File: pipeline-vda.cpp Project: 499940913/moon
void
MoonVDADecoder::Cleanup (MediaFrame *frame)
{
	CVPixelBufferUnlockBaseAddress ((CVPixelBufferRef) frame->decoder_specific_data, 0);
	CVPixelBufferRelease ((CVPixelBufferRef) frame->decoder_specific_data);
}
Code example #29
File: qtsplitter.c Project: Kelimion/wine
static DWORD WINAPI QTSplitter_thread(LPVOID data)
{
    QTSplitter *This = (QTSplitter *)data;
    HRESULT hr = S_OK;
    TimeValue next_time;
    CVPixelBufferRef pixelBuffer = NULL;
    OSStatus err;
    TimeRecord tr;

    WaitForSingleObject(This->runEvent, -1);

    EnterCriticalSection(&This->csReceive);
    if (!This->pQTMovie)
    {
        LeaveCriticalSection(&This->csReceive);
        return 0;
    }

    This->state = State_Running;
    /* Prime the pump:  Needed for MPEG streams */
    GetMovieNextInterestingTime(This->pQTMovie, nextTimeEdgeOK | nextTimeStep, 0, NULL, This->movie_time, 1, &next_time, NULL);

    GetMovieTime(This->pQTMovie, &tr);

    if (This->pAudio_Pin)
        QT_Create_Extract_Session(This);

    LeaveCriticalSection(&This->csReceive);

    do
    {
        LONGLONG tStart=0, tStop=0;
        LONGLONG mStart=0, mStop=0;
        float time;

        EnterCriticalSection(&This->csReceive);
        if (!This->pQTMovie)
        {
            LeaveCriticalSection(&This->csReceive);
            return 0;
        }

        GetMovieNextInterestingTime(This->pQTMovie, nextTimeStep, 0, NULL, This->movie_time, 1, &next_time, NULL);

        if (next_time == -1)
        {
            TRACE("No next time\n");
            LeaveCriticalSection(&This->csReceive);
            break;
        }

        tr.value = SInt64ToWide(next_time);
        SetMovieTime(This->pQTMovie, &tr);
        MoviesTask(This->pQTMovie,0);
        QTVisualContextTask(This->vContext);

        TRACE("In loop at time %ld\n",This->movie_time);
        TRACE("In Next time %ld\n",next_time);

        mStart = This->movie_time;
        mStop = next_time;

        time = (float)(This->movie_time - This->movie_start) / This->movie_scale;
        tStart = time * 10000000;
        time = (float)(next_time - This->movie_start) / This->movie_scale;
        tStop = time * 10000000;

        /* Deliver Audio */
        if (This->pAudio_Pin && This->pAudio_Pin->pin.pin.pConnectedTo && This->aSession)
        {
            int data_size=0;
            BYTE* ptr;
            IMediaSample *sample = NULL;
            AudioBufferList aData;
            UInt32 flags;
            UInt32 frames;
            WAVEFORMATEX* pvi;
            float duration;

            pvi = (WAVEFORMATEX*)This->pAudio_Pin->pmt->pbFormat;

            hr = BaseOutputPinImpl_GetDeliveryBuffer(&This->pAudio_Pin->pin, &sample, NULL, NULL, 0);

            if (FAILED(hr))
            {
                ERR("Audio: Unable to get delivery buffer (%x)\n", hr);
                goto audio_error;
            }

            hr = IMediaSample_GetPointer(sample, &ptr);
            if (FAILED(hr))
            {
                ERR("Audio: Unable to get pointer to buffer (%x)\n", hr);
                goto audio_error;
            }

            duration = (float)next_time / This->movie_scale;
            time = (float)This->movie_time / This->movie_scale;
            duration -= time;
            frames = pvi->nSamplesPerSec * duration;
            TRACE("Need audio for %f seconds (%li frames)\n",duration,frames);

            data_size = IMediaSample_GetSize(sample);
            if (data_size < frames * pvi->nBlockAlign)
                FIXME("Audio buffer is too small\n");

            aData.mNumberBuffers = 1;
            aData.mBuffers[0].mNumberChannels = pvi->nChannels;
            aData.mBuffers[0].mDataByteSize = data_size;
            aData.mBuffers[0].mData = ptr;

            err = MovieAudioExtractionFillBuffer(This->aSession, &frames, &aData, &flags);
            if (frames == 0)
            {
                TimeRecord etr;

                /* Ran out of frames, Restart the extraction session */
                TRACE("Restarting extraction session\n");
                MovieAudioExtractionEnd(This->aSession);
                This->aSession = NULL;
                QT_Create_Extract_Session(This);

                etr = tr;
                etr.value = SInt64ToWide(This->movie_time);
                MovieAudioExtractionSetProperty(This->aSession,
                    kQTPropertyClass_MovieAudioExtraction_Movie,
                    kQTMovieAudioExtractionMoviePropertyID_CurrentTime,
                    sizeof(TimeRecord), &etr );

                frames = pvi->nSamplesPerSec * duration;
                aData.mNumberBuffers = 1;
                aData.mBuffers[0].mNumberChannels = pvi->nChannels;
                aData.mBuffers[0].mDataByteSize = data_size;
                aData.mBuffers[0].mData = ptr;

                MovieAudioExtractionFillBuffer(This->aSession, &frames, &aData, &flags);
            }

            TRACE("Got %i frames\n",(int)frames);

            IMediaSample_SetActualDataLength(sample, frames * pvi->nBlockAlign);

            IMediaSample_SetMediaTime(sample, &mStart, &mStop);
            IMediaSample_SetTime(sample, &tStart, &tStop);

            hr = OutputQueue_Receive(This->pAudio_Pin->queue, sample);
            TRACE("Audio Delivered (%x)\n",hr);

audio_error:
            if (sample)
                IMediaSample_Release(sample);
        }
        else
            TRACE("Audio Pin not connected or no Audio\n");

        /* Deliver Video */
        if (This->pVideo_Pin && QTVisualContextIsNewImageAvailable(This->vContext,0))
        {
            err = QTVisualContextCopyImageForTime(This->vContext, NULL, NULL, &pixelBuffer);
            if (err == noErr)
            {
                int data_size=0;
                BYTE* ptr;
                IMediaSample *sample = NULL;

                hr = BaseOutputPinImpl_GetDeliveryBuffer(&This->pVideo_Pin->pin, &sample, NULL, NULL, 0);
                if (FAILED(hr))
                {
                    ERR("Video: Unable to get delivery buffer (%x)\n", hr);
                    goto video_error;
                }

                data_size = IMediaSample_GetSize(sample);
                if (data_size < This->outputSize)
                {
                    ERR("Sample size is too small %d < %d\n", data_size, This->outputSize)
    ;
                    hr = E_FAIL;
                    goto video_error;
                }

                hr = IMediaSample_GetPointer(sample, &ptr);
                if (FAILED(hr))
                {
                    ERR("Video: Unable to get pointer to buffer (%x)\n", hr);
                    goto video_error;
                }

                hr = AccessPixelBufferPixels( pixelBuffer, ptr);
                if (FAILED(hr))
                {
                    ERR("Failed to access Pixels\n");
                    goto video_error;
                }

                IMediaSample_SetActualDataLength(sample, This->outputSize);

                IMediaSample_SetMediaTime(sample, &mStart, &mStop);
                IMediaSample_SetTime(sample, &tStart, &tStop);

                hr = OutputQueue_Receive(This->pVideo_Pin->queue, sample);
                TRACE("Video Delivered (%x)\n",hr);

    video_error:
                if (sample)
                    IMediaSample_Release(sample);
                if (pixelBuffer)
                    CVPixelBufferRelease(pixelBuffer);
            }
        }
        else
            TRACE("No video to deliver\n");

        This->movie_time = next_time;
        LeaveCriticalSection(&This->csReceive);
    } while (hr == S_OK);

    This->state = State_Stopped;
    if (This->pAudio_Pin)
        OutputQueue_EOS(This->pAudio_Pin->queue);
    if (This->pVideo_Pin)
        OutputQueue_EOS(This->pVideo_Pin->queue);

    return hr;
}
Code example #30
File: vtenc.c Project: asdlei00/gst-plugins-bad
static GstFlowReturn
gst_vtenc_encode_frame (GstVTEnc * self, GstBuffer * buf)
{
  GstVTApi *vt = self->ctx->vt;
  CMTime ts, duration;
  GstCoreMediaMeta *meta;
  CVPixelBufferRef pbuf = NULL;
  VTStatus vt_status;
  GstFlowReturn ret = GST_FLOW_OK;
  guint i;

  self->cur_inbuf = buf;

  ts = CMTimeMake (GST_TIME_AS_MSECONDS (GST_BUFFER_TIMESTAMP (buf)), 1000);
  duration = CMTimeMake
      (GST_TIME_AS_MSECONDS (GST_BUFFER_DURATION (buf)), 1000);

  meta = gst_buffer_get_core_media_meta (buf);
  if (meta != NULL) {
    pbuf = gst_core_media_buffer_get_pixel_buffer (buf);
  }

  if (pbuf == NULL) {
    GstVTEncFrame *frame;
    CVReturn cv_ret;

    frame = gst_vtenc_frame_new (buf, &self->video_info);
    if (!frame)
      goto cv_error;

    {
      const size_t num_planes = GST_VIDEO_FRAME_N_PLANES (&frame->videoframe);
      void *plane_base_addresses[GST_VIDEO_MAX_PLANES];
      size_t plane_widths[GST_VIDEO_MAX_PLANES];
      size_t plane_heights[GST_VIDEO_MAX_PLANES];
      size_t plane_bytes_per_row[GST_VIDEO_MAX_PLANES];
      OSType pixel_format_type;
      size_t i;

      for (i = 0; i < num_planes; i++) {
        plane_base_addresses[i] =
            GST_VIDEO_FRAME_PLANE_DATA (&frame->videoframe, i);
        plane_widths[i] = GST_VIDEO_FRAME_COMP_WIDTH (&frame->videoframe, i);
        plane_heights[i] = GST_VIDEO_FRAME_COMP_HEIGHT (&frame->videoframe, i);
        plane_bytes_per_row[i] =
            GST_VIDEO_FRAME_COMP_STRIDE (&frame->videoframe, i);
      }

      switch (GST_VIDEO_INFO_FORMAT (&self->video_info)) {
        case GST_VIDEO_FORMAT_I420:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8Planar;
          break;
        case GST_VIDEO_FORMAT_NV12:
          pixel_format_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
          break;
        default:
          gst_vtenc_frame_free (frame);
          goto cv_error;
      }

      cv_ret = CVPixelBufferCreateWithPlanarBytes (NULL,
          self->negotiated_width, self->negotiated_height,
          pixel_format_type,
          frame,
          GST_VIDEO_FRAME_SIZE (&frame->videoframe),
          num_planes,
          plane_base_addresses,
          plane_widths,
          plane_heights,
          plane_bytes_per_row, gst_pixel_buffer_release_cb, frame, NULL, &pbuf);
      if (cv_ret != kCVReturnSuccess) {
        gst_vtenc_frame_free (frame);
        goto cv_error;
      }
    }
  }

  GST_OBJECT_LOCK (self);

  self->expect_keyframe = CFDictionaryContainsKey (self->options,
      *(vt->kVTEncodeFrameOptionKey_ForceKeyFrame));
  if (self->expect_keyframe)
    gst_vtenc_clear_cached_caps_downstream (self);

  vt_status = self->ctx->vt->VTCompressionSessionEncodeFrame (self->session,
      pbuf, ts, duration, self->options, NULL, NULL);

  if (vt_status != 0) {
    GST_WARNING_OBJECT (self, "VTCompressionSessionEncodeFrame returned %d",
        vt_status);
  }

  self->ctx->vt->VTCompressionSessionCompleteFrames (self->session,
      kCMTimeInvalid);

  GST_OBJECT_UNLOCK (self);

  CVPixelBufferRelease (pbuf);
  self->cur_inbuf = NULL;
  gst_buffer_unref (buf);

  if (self->cur_outbufs->len > 0) {
    meta =
        gst_buffer_get_core_media_meta (g_ptr_array_index (self->cur_outbufs,
            0));
    if (!gst_vtenc_negotiate_downstream (self, meta->sample_buf))
      ret = GST_FLOW_NOT_NEGOTIATED;
  }

  for (i = 0; i != self->cur_outbufs->len; i++) {
    GstBuffer *buf = g_ptr_array_index (self->cur_outbufs, i);

    if (ret == GST_FLOW_OK) {
      ret = gst_pad_push (self->srcpad, buf);
    } else {
      gst_buffer_unref (buf);
    }
  }
  g_ptr_array_set_size (self->cur_outbufs, 0);

  return ret;

cv_error:
  {
    self->cur_inbuf = NULL;
    gst_buffer_unref (buf);

    return GST_FLOW_ERROR;
  }
}
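CVPixelBufferCreateWithPlanarBytes wraps the caller's planes without copying; Core Video invokes the release callback once the final CVPixelBufferRelease drops the last reference. A sketch of what gst_pixel_buffer_release_cb plausibly does (only the callback signature is fixed by Core Video; the body is an assumption):

    static void
    gst_pixel_buffer_release_cb (void *refcon, const void *data_ptr,
        size_t data_size, size_t num_planes, const void *plane_addresses[])
    {
      GstVTEncFrame *frame = refcon;

      /* unmap and unref the wrapped GstBuffer; Core Video is done with the planes */
      gst_vtenc_frame_free (frame);
    }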